Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
G
Geth-Modification
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
张蕾
Geth-Modification
Commits
4ea4d2dc
Unverified
Commit
4ea4d2dc
authored
Aug 18, 2017
by
Zsolt Felfoldi
Committed by
Péter Szilágyi
Sep 06, 2017
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
core, eth: add bloombit indexer, filter based on it
parent
1e67378d
Changes
22
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
22 changed files
with
1590 additions
and
409 deletions
+1590
-409
blockchain.go
core/blockchain.go
+0
-15
fetcher_test.go
core/bloombits/fetcher_test.go
+101
-0
matcher.go
core/bloombits/matcher.go
+579
-0
matcher_test.go
core/bloombits/matcher_test.go
+196
-0
utils.go
core/bloombits/utils.go
+63
-0
chain_indexer.go
core/chain_indexer.go
+59
-17
chain_indexer_test.go
core/chain_indexer_test.go
+4
-3
database_util.go
core/database_util.go
+21
-48
database_util_test.go
core/database_util_test.go
+0
-108
bloom9.go
core/types/bloom9.go
+14
-0
api_backend.go
eth/api_backend.go
+27
-0
backend.go
eth/backend.go
+6
-4
db_upgrade.go
eth/db_upgrade.go
+32
-38
api.go
eth/filters/api.go
+22
-26
bench_test.go
eth/filters/bench_test.go
+237
-0
filter.go
eth/filters/filter.go
+152
-80
filter_system_test.go
eth/filters/filter_system_test.go
+38
-12
filter_test.go
eth/filters/filter_test.go
+15
-55
handler.go
eth/handler.go
+5
-0
api_backend.go
les/api_backend.go
+18
-0
backend.go
les/backend.go
+1
-1
worker.go
miner/worker.go
+0
-2
No files found.
core/blockchain.go
View file @
4ea4d2dc
...
@@ -759,12 +759,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
...
@@ -759,12 +759,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
log
.
Crit
(
"Failed to write block receipts"
,
"err"
,
err
)
log
.
Crit
(
"Failed to write block receipts"
,
"err"
,
err
)
return
return
}
}
if
err
:=
WriteMipmapBloom
(
bc
.
chainDb
,
block
.
NumberU64
(),
receipts
);
err
!=
nil
{
errs
[
index
]
=
fmt
.
Errorf
(
"failed to write log blooms: %v"
,
err
)
atomic
.
AddInt32
(
&
failed
,
1
)
log
.
Crit
(
"Failed to write log blooms"
,
"err"
,
err
)
return
}
if
err
:=
WriteTxLookupEntries
(
bc
.
chainDb
,
block
);
err
!=
nil
{
if
err
:=
WriteTxLookupEntries
(
bc
.
chainDb
,
block
);
err
!=
nil
{
errs
[
index
]
=
fmt
.
Errorf
(
"failed to write lookup metadata: %v"
,
err
)
errs
[
index
]
=
fmt
.
Errorf
(
"failed to write lookup metadata: %v"
,
err
)
atomic
.
AddInt32
(
&
failed
,
1
)
atomic
.
AddInt32
(
&
failed
,
1
)
...
@@ -1017,10 +1011,6 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
...
@@ -1017,10 +1011,6 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
if
err
:=
WriteTxLookupEntries
(
bc
.
chainDb
,
block
);
err
!=
nil
{
if
err
:=
WriteTxLookupEntries
(
bc
.
chainDb
,
block
);
err
!=
nil
{
return
i
,
err
return
i
,
err
}
}
// Write map map bloom filters
if
err
:=
WriteMipmapBloom
(
bc
.
chainDb
,
block
.
NumberU64
(),
receipts
);
err
!=
nil
{
return
i
,
err
}
// Write hash preimages
// Write hash preimages
if
err
:=
WritePreimages
(
bc
.
chainDb
,
block
.
NumberU64
(),
state
.
Preimages
());
err
!=
nil
{
if
err
:=
WritePreimages
(
bc
.
chainDb
,
block
.
NumberU64
(),
state
.
Preimages
());
err
!=
nil
{
return
i
,
err
return
i
,
err
...
@@ -1178,11 +1168,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
...
@@ -1178,11 +1168,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
if
err
:=
WriteTxLookupEntries
(
bc
.
chainDb
,
block
);
err
!=
nil
{
if
err
:=
WriteTxLookupEntries
(
bc
.
chainDb
,
block
);
err
!=
nil
{
return
err
return
err
}
}
// Write map map bloom filters
receipts
:=
GetBlockReceipts
(
bc
.
chainDb
,
block
.
Hash
(),
block
.
NumberU64
())
if
err
:=
WriteMipmapBloom
(
bc
.
chainDb
,
block
.
NumberU64
(),
receipts
);
err
!=
nil
{
return
err
}
addedTxs
=
append
(
addedTxs
,
block
.
Transactions
()
...
)
addedTxs
=
append
(
addedTxs
,
block
.
Transactions
()
...
)
}
}
...
...
core/bloombits/fetcher_test.go
0 → 100644
View file @
4ea4d2dc
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package
bloombits
import
(
"bytes"
"encoding/binary"
"math/rand"
"sync"
"sync/atomic"
"testing"
"time"
)
const
testFetcherReqCount
=
5000
func
fetcherTestVector
(
b
uint
,
s
uint64
)
[]
byte
{
r
:=
make
([]
byte
,
10
)
binary
.
BigEndian
.
PutUint16
(
r
[
0
:
2
],
uint16
(
b
))
binary
.
BigEndian
.
PutUint64
(
r
[
2
:
10
],
s
)
return
r
}
func
TestFetcher
(
t
*
testing
.
T
)
{
testFetcher
(
t
,
1
)
}
func
TestFetcherMultipleReaders
(
t
*
testing
.
T
)
{
testFetcher
(
t
,
10
)
}
func
testFetcher
(
t
*
testing
.
T
,
cnt
int
)
{
f
:=
&
fetcher
{
requestMap
:
make
(
map
[
uint64
]
fetchRequest
),
}
distCh
:=
make
(
chan
distRequest
,
channelCap
)
stop
:=
make
(
chan
struct
{})
var
reqCount
uint32
for
i
:=
0
;
i
<
10
;
i
++
{
go
func
()
{
for
{
req
,
ok
:=
<-
distCh
if
!
ok
{
return
}
time
.
Sleep
(
time
.
Duration
(
rand
.
Intn
(
100000
)))
atomic
.
AddUint32
(
&
reqCount
,
1
)
f
.
deliver
([]
uint64
{
req
.
sectionIndex
},
[][]
byte
{
fetcherTestVector
(
req
.
bloomIndex
,
req
.
sectionIndex
)})
}
}()
}
var
wg
,
wg2
sync
.
WaitGroup
for
cc
:=
0
;
cc
<
cnt
;
cc
++
{
wg
.
Add
(
1
)
in
:=
make
(
chan
uint64
,
channelCap
)
out
:=
f
.
fetch
(
in
,
distCh
,
stop
,
&
wg2
)
time
.
Sleep
(
time
.
Millisecond
*
10
*
time
.
Duration
(
cc
))
go
func
()
{
for
i
:=
uint64
(
0
);
i
<
testFetcherReqCount
;
i
++
{
in
<-
i
}
}()
go
func
()
{
for
i
:=
uint64
(
0
);
i
<
testFetcherReqCount
;
i
++
{
bv
:=
<-
out
if
!
bytes
.
Equal
(
bv
,
fetcherTestVector
(
0
,
i
))
{
if
len
(
bv
)
!=
10
{
t
.
Errorf
(
"Vector #%d length is %d, expected 10"
,
i
,
len
(
bv
))
}
else
{
j
:=
binary
.
BigEndian
.
Uint64
(
bv
[
2
:
10
])
t
.
Errorf
(
"Expected vector #%d, fetched #%d"
,
i
,
j
)
}
}
}
wg
.
Done
()
}()
}
wg
.
Wait
()
close
(
stop
)
if
reqCount
!=
testFetcherReqCount
{
t
.
Errorf
(
"Request count mismatch: expected %v, got %v"
,
testFetcherReqCount
,
reqCount
)
}
}
core/bloombits/matcher.go
0 → 100644
View file @
4ea4d2dc
This diff is collapsed.
Click to expand it.
core/bloombits/matcher_test.go
0 → 100644
View file @
4ea4d2dc
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package
bloombits
import
(
"math/rand"
"sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/core/types"
)
const
testSectionSize
=
4096
func
matcherTestVector
(
b
uint
,
s
uint64
)
[]
byte
{
r
:=
make
([]
byte
,
testSectionSize
/
8
)
for
i
,
_
:=
range
r
{
var
bb
byte
for
bit
:=
0
;
bit
<
8
;
bit
++
{
blockIdx
:=
s
*
testSectionSize
+
uint64
(
i
*
8
+
bit
)
bb
+=
bb
if
(
blockIdx
%
uint64
(
b
))
==
0
{
bb
++
}
}
r
[
i
]
=
bb
}
return
r
}
func
expMatch1
(
idxs
types
.
BloomIndexList
,
i
uint64
)
bool
{
for
_
,
ii
:=
range
idxs
{
if
(
i
%
uint64
(
ii
))
!=
0
{
return
false
}
}
return
true
}
func
expMatch2
(
idxs
[]
types
.
BloomIndexList
,
i
uint64
)
bool
{
for
_
,
ii
:=
range
idxs
{
if
expMatch1
(
ii
,
i
)
{
return
true
}
}
return
false
}
func
expMatch3
(
idxs
[][]
types
.
BloomIndexList
,
i
uint64
)
bool
{
for
_
,
ii
:=
range
idxs
{
if
!
expMatch2
(
ii
,
i
)
{
return
false
}
}
return
true
}
func
testServeMatcher
(
m
*
Matcher
,
stop
chan
struct
{},
cnt
*
uint32
,
maxRequestLen
int
)
{
// serve matcher with test vectors
for
i
:=
0
;
i
<
10
;
i
++
{
go
func
()
{
for
{
select
{
case
<-
stop
:
return
default
:
}
b
,
ok
:=
m
.
AllocSectionQueue
()
if
!
ok
{
return
}
if
m
.
SectionCount
(
b
)
<
maxRequestLen
{
time
.
Sleep
(
time
.
Microsecond
*
100
)
}
s
:=
m
.
FetchSections
(
b
,
maxRequestLen
)
res
:=
make
([][]
byte
,
len
(
s
))
for
i
,
ss
:=
range
s
{
res
[
i
]
=
matcherTestVector
(
b
,
ss
)
atomic
.
AddUint32
(
cnt
,
1
)
}
m
.
Deliver
(
b
,
s
,
res
)
}
}()
}
}
func
testMatcher
(
t
*
testing
.
T
,
idxs
[][]
types
.
BloomIndexList
,
cnt
uint64
,
stopOnMatches
bool
,
expCount
uint32
)
uint32
{
count1
:=
testMatcherWithReqCount
(
t
,
idxs
,
cnt
,
stopOnMatches
,
expCount
,
1
)
count16
:=
testMatcherWithReqCount
(
t
,
idxs
,
cnt
,
stopOnMatches
,
expCount
,
16
)
if
count1
!=
count16
{
t
.
Errorf
(
"Error matching idxs = %v count = %v stopOnMatches = %v: request count mismatch, %v with maxReqCount = 1 vs. %v with maxReqCount = 16"
,
idxs
,
cnt
,
stopOnMatches
,
count1
,
count16
)
}
return
count1
}
func
testMatcherWithReqCount
(
t
*
testing
.
T
,
idxs
[][]
types
.
BloomIndexList
,
cnt
uint64
,
stopOnMatches
bool
,
expCount
uint32
,
maxReqCount
int
)
uint32
{
m
:=
NewMatcher
(
testSectionSize
,
nil
,
nil
)
for
_
,
idxss
:=
range
idxs
{
for
_
,
idxs
:=
range
idxss
{
for
_
,
idx
:=
range
idxs
{
m
.
newFetcher
(
idx
)
}
}
}
m
.
addresses
=
idxs
[
0
]
m
.
topics
=
idxs
[
1
:
]
var
reqCount
uint32
stop
:=
make
(
chan
struct
{})
chn
:=
m
.
Start
(
0
,
cnt
-
1
)
testServeMatcher
(
m
,
stop
,
&
reqCount
,
maxReqCount
)
for
i
:=
uint64
(
0
);
i
<
cnt
;
i
++
{
if
expMatch3
(
idxs
,
i
)
{
match
,
ok
:=
<-
chn
if
!
ok
{
t
.
Errorf
(
"Error matching idxs = %v count = %v stopOnMatches = %v: expected #%v, results channel closed"
,
idxs
,
cnt
,
stopOnMatches
,
i
)
return
0
}
if
match
!=
i
{
t
.
Errorf
(
"Error matching idxs = %v count = %v stopOnMatches = %v: expected #%v, got #%v"
,
idxs
,
cnt
,
stopOnMatches
,
i
,
match
)
}
if
stopOnMatches
{
m
.
Stop
()
close
(
stop
)
stop
=
make
(
chan
struct
{})
chn
=
m
.
Start
(
i
+
1
,
cnt
-
1
)
testServeMatcher
(
m
,
stop
,
&
reqCount
,
maxReqCount
)
}
}
}
match
,
ok
:=
<-
chn
if
ok
{
t
.
Errorf
(
"Error matching idxs = %v count = %v stopOnMatches = %v: expected closed channel, got #%v"
,
idxs
,
cnt
,
stopOnMatches
,
match
)
}
m
.
Stop
()
close
(
stop
)
if
expCount
!=
0
&&
expCount
!=
reqCount
{
t
.
Errorf
(
"Error matching idxs = %v count = %v stopOnMatches = %v: request count mismatch, expected #%v, got #%v"
,
idxs
,
cnt
,
stopOnMatches
,
expCount
,
reqCount
)
}
return
reqCount
}
func
testRandomIdxs
(
l
[]
int
,
max
int
)
[][]
types
.
BloomIndexList
{
res
:=
make
([][]
types
.
BloomIndexList
,
len
(
l
))
for
i
,
ll
:=
range
l
{
res
[
i
]
=
make
([]
types
.
BloomIndexList
,
ll
)
for
j
,
_
:=
range
res
[
i
]
{
for
k
,
_
:=
range
res
[
i
][
j
]
{
res
[
i
][
j
][
k
]
=
uint
(
rand
.
Intn
(
max
-
1
)
+
2
)
}
}
}
return
res
}
func
TestMatcher
(
t
*
testing
.
T
)
{
testMatcher
(
t
,
[][]
types
.
BloomIndexList
{{{
10
,
20
,
30
}}},
100000
,
false
,
75
)
testMatcher
(
t
,
[][]
types
.
BloomIndexList
{{{
32
,
3125
,
100
}},
{{
40
,
50
,
10
}}},
100000
,
false
,
81
)
testMatcher
(
t
,
[][]
types
.
BloomIndexList
{{{
4
,
8
,
11
},
{
7
,
8
,
17
}},
{{
9
,
9
,
12
},
{
15
,
20
,
13
}},
{{
18
,
15
,
15
},
{
12
,
10
,
4
}}},
10000
,
false
,
36
)
}
func
TestMatcherStopOnMatches
(
t
*
testing
.
T
)
{
testMatcher
(
t
,
[][]
types
.
BloomIndexList
{{{
10
,
20
,
30
}}},
100000
,
true
,
75
)
testMatcher
(
t
,
[][]
types
.
BloomIndexList
{{{
4
,
8
,
11
},
{
7
,
8
,
17
}},
{{
9
,
9
,
12
},
{
15
,
20
,
13
}},
{{
18
,
15
,
15
},
{
12
,
10
,
4
}}},
10000
,
true
,
36
)
}
func
TestMatcherRandom
(
t
*
testing
.
T
)
{
for
i
:=
0
;
i
<
20
;
i
++
{
testMatcher
(
t
,
testRandomIdxs
([]
int
{
1
},
50
),
100000
,
false
,
0
)
testMatcher
(
t
,
testRandomIdxs
([]
int
{
3
},
50
),
100000
,
false
,
0
)
testMatcher
(
t
,
testRandomIdxs
([]
int
{
2
,
2
,
2
},
20
),
100000
,
false
,
0
)
testMatcher
(
t
,
testRandomIdxs
([]
int
{
5
,
5
,
5
},
50
),
100000
,
false
,
0
)
idxs
:=
testRandomIdxs
([]
int
{
2
,
2
,
2
},
20
)
reqCount
:=
testMatcher
(
t
,
idxs
,
10000
,
false
,
0
)
testMatcher
(
t
,
idxs
,
10000
,
true
,
reqCount
)
}
}
eth/backend_test
.go
→
core/bloombits/utils
.go
View file @
4ea4d2dc
// Copyright 201
5
The go-ethereum Authors
// Copyright 201
7
The go-ethereum Authors
// This file is part of the go-ethereum library.
// This file is part of the go-ethereum library.
//
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// The go-ethereum library is free software: you can redistribute it and/or modify
...
@@ -13,62 +13,51 @@
...
@@ -13,62 +13,51 @@
//
//
// You should have received a copy of the GNU Lesser General Public License
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package
bloombits
package
eth
import
(
import
(
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)
)
func
TestMipmapUpgrade
(
t
*
testing
.
T
)
{
const
BloomLength
=
2048
db
,
_
:=
ethdb
.
NewMemDatabase
()
addr
:=
common
.
BytesToAddress
([]
byte
(
"jeff"
))
genesis
:=
new
(
core
.
Genesis
)
.
MustCommit
(
db
)
chain
,
receipts
:=
core
.
GenerateChain
(
params
.
TestChainConfig
,
genesis
,
db
,
10
,
func
(
i
int
,
gen
*
core
.
BlockGen
)
{
// BloomBitsCreator takes SectionSize number of header bloom filters and calculates the bloomBits vectors of the section
switch
i
{
type
BloomBitsCreator
struct
{
case
1
:
blooms
[
BloomLength
][]
byte
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
sectionSize
,
bitIndex
uint64
receipt
.
Logs
=
[]
*
types
.
Log
{{
Address
:
addr
}}
}
gen
.
AddUncheckedReceipt
(
receipt
)
case
2
:
func
NewBloomBitsCreator
(
sectionSize
uint64
)
*
BloomBitsCreator
{
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
b
:=
&
BloomBitsCreator
{
sectionSize
:
sectionSize
}
receipt
.
Logs
=
[]
*
types
.
Log
{{
Address
:
addr
}}
for
i
,
_
:=
range
b
.
blooms
{
gen
.
AddUncheckedReceipt
(
receipt
)
b
.
blooms
[
i
]
=
make
([]
byte
,
sectionSize
/
8
)
}
})
for
i
,
block
:=
range
chain
{
core
.
WriteBlock
(
db
,
block
)
if
err
:=
core
.
WriteCanonicalHash
(
db
,
block
.
Hash
(),
block
.
NumberU64
());
err
!=
nil
{
t
.
Fatalf
(
"failed to insert block number: %v"
,
err
)
}
if
err
:=
core
.
WriteHeadBlockHash
(
db
,
block
.
Hash
());
err
!=
nil
{
t
.
Fatalf
(
"failed to insert block number: %v"
,
err
)
}
if
err
:=
core
.
WriteBlockReceipts
(
db
,
block
.
Hash
(),
block
.
NumberU64
(),
receipts
[
i
]);
err
!=
nil
{
t
.
Fatal
(
"error writing block receipts:"
,
err
)
}
}
}
return
b
}
err
:=
addMipmapBloomBins
(
db
)
// AddHeaderBloom takes a single bloom filter and sets the corresponding bit column in memory accordingly
if
err
!=
nil
{
func
(
b
*
BloomBitsCreator
)
AddHeaderBloom
(
bloom
types
.
Bloom
)
{
t
.
Fatal
(
err
)
if
b
.
bitIndex
>=
b
.
sectionSize
{
panic
(
"too many header blooms added"
)
}
}
bloom
:=
core
.
GetMipmapBloom
(
db
,
1
,
core
.
MIPMapLevels
[
0
])
byteIdx
:=
b
.
bitIndex
/
8
if
(
bloom
==
types
.
Bloom
{})
{
bitMask
:=
byte
(
1
)
<<
byte
(
7
-
b
.
bitIndex
%
8
)
t
.
Error
(
"got empty bloom filter"
)
for
bloomBitIdx
,
_
:=
range
b
.
blooms
{
bloomByteIdx
:=
BloomLength
/
8
-
1
-
bloomBitIdx
/
8
bloomBitMask
:=
byte
(
1
)
<<
byte
(
bloomBitIdx
%
8
)
if
(
bloom
[
bloomByteIdx
]
&
bloomBitMask
)
!=
0
{
b
.
blooms
[
bloomBitIdx
][
byteIdx
]
|=
bitMask
}
}
}
b
.
bitIndex
++
}
data
,
_
:=
db
.
Get
([]
byte
(
"setting-mipmap-version"
))
// GetBitVector returns the bit vector belonging to the given bit index after header blooms have been added
if
len
(
data
)
==
0
{
func
(
b
*
BloomBitsCreator
)
GetBitVector
(
idx
uint
)
[]
byte
{
t
.
Error
(
"setting-mipmap-version not written to database"
)
if
b
.
bitIndex
!=
b
.
sectionSize
{
panic
(
"not enough header blooms added"
)
}
}
return
b
.
blooms
[
idx
][
:
]
}
}
core/chain_indexer.go
View file @
4ea4d2dc
...
@@ -36,7 +36,7 @@ import (
...
@@ -36,7 +36,7 @@ import (
type
ChainIndexerBackend
interface
{
type
ChainIndexerBackend
interface
{
// Reset initiates the processing of a new chain segment, potentially terminating
// Reset initiates the processing of a new chain segment, potentially terminating
// any partially completed operations (in case of a reorg).
// any partially completed operations (in case of a reorg).
Reset
(
section
uint64
)
Reset
(
section
uint64
,
lastSectionHead
common
.
Hash
)
// Process crunches through the next header in the chain segment. The caller
// Process crunches through the next header in the chain segment. The caller
// will ensure a sequential order of headers.
// will ensure a sequential order of headers.
...
@@ -44,7 +44,7 @@ type ChainIndexerBackend interface {
...
@@ -44,7 +44,7 @@ type ChainIndexerBackend interface {
// Commit finalizes the section metadata and stores it into the database. This
// Commit finalizes the section metadata and stores it into the database. This
// interface will usually be a batch writer.
// interface will usually be a batch writer.
Commit
(
db
ethdb
.
Database
)
error
Commit
()
error
}
}
// ChainIndexer does a post-processing job for equally sized sections of the
// ChainIndexer does a post-processing job for equally sized sections of the
...
@@ -101,10 +101,34 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken
...
@@ -101,10 +101,34 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken
return
c
return
c
}
}
// AddKnownSectionHead marks a new section head as known/processed if it is newer
// than the already known best section head
func
(
c
*
ChainIndexer
)
AddKnownSectionHead
(
section
uint64
,
shead
common
.
Hash
)
{
c
.
lock
.
Lock
()
defer
c
.
lock
.
Unlock
()
if
section
<
c
.
storedSections
{
return
}
c
.
setSectionHead
(
section
,
shead
)
c
.
setValidSections
(
section
+
1
)
}
// IndexerChain interface is used for connecting the indexer to a blockchain
type
IndexerChain
interface
{
CurrentHeader
()
*
types
.
Header
SubscribeChainEvent
(
ch
chan
<-
ChainEvent
)
event
.
Subscription
}
// Start creates a goroutine to feed chain head events into the indexer for
// Start creates a goroutine to feed chain head events into the indexer for
// cascading background processing.
// cascading background processing. Children do not need to be started, they
func
(
c
*
ChainIndexer
)
Start
(
currentHeader
*
types
.
Header
,
eventMux
*
event
.
TypeMux
)
{
// are notified about new events by their parents.
go
c
.
eventLoop
(
currentHeader
,
eventMux
)
func
(
c
*
ChainIndexer
)
Start
(
chain
IndexerChain
)
{
ch
:=
make
(
chan
ChainEvent
,
10
)
sub
:=
chain
.
SubscribeChainEvent
(
ch
)
currentHeader
:=
chain
.
CurrentHeader
()
go
c
.
eventLoop
(
currentHeader
,
ch
,
sub
)
}
}
// Close tears down all goroutines belonging to the indexer and returns any error
// Close tears down all goroutines belonging to the indexer and returns any error
...
@@ -125,6 +149,14 @@ func (c *ChainIndexer) Close() error {
...
@@ -125,6 +149,14 @@ func (c *ChainIndexer) Close() error {
errs
=
append
(
errs
,
err
)
errs
=
append
(
errs
,
err
)
}
}
}
}
// Close all children
for
_
,
child
:=
range
c
.
children
{
if
err
:=
child
.
Close
();
err
!=
nil
{
errs
=
append
(
errs
,
err
)
}
}
// Return any failures
// Return any failures
switch
{
switch
{
case
len
(
errs
)
==
0
:
case
len
(
errs
)
==
0
:
...
@@ -141,12 +173,10 @@ func (c *ChainIndexer) Close() error {
...
@@ -141,12 +173,10 @@ func (c *ChainIndexer) Close() error {
// eventLoop is a secondary - optional - event loop of the indexer which is only
// eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing
// started for the outermost indexer to push chain head events into a processing
// queue.
// queue.
func
(
c
*
ChainIndexer
)
eventLoop
(
currentHeader
*
types
.
Header
,
eventMux
*
event
.
TypeMux
)
{
func
(
c
*
ChainIndexer
)
eventLoop
(
currentHeader
*
types
.
Header
,
ch
chan
ChainEvent
,
sub
event
.
Subscription
)
{
// Mark the chain indexer as active, requiring an additional teardown
// Mark the chain indexer as active, requiring an additional teardown
atomic
.
StoreUint32
(
&
c
.
active
,
1
)
atomic
.
StoreUint32
(
&
c
.
active
,
1
)
// Subscribe to chain head events
sub
:=
eventMux
.
Subscribe
(
ChainEvent
{})
defer
sub
.
Unsubscribe
()
defer
sub
.
Unsubscribe
()
// Fire the initial new head event to start any outstanding processing
// Fire the initial new head event to start any outstanding processing
...
@@ -163,14 +193,14 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, eventMux *event.Ty
...
@@ -163,14 +193,14 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, eventMux *event.Ty
errc
<-
nil
errc
<-
nil
return
return
case
ev
,
ok
:=
<-
sub
.
Chan
()
:
case
ev
,
ok
:=
<-
ch
:
// Received a new event, ensure it's not nil (closing) and update
// Received a new event, ensure it's not nil (closing) and update
if
!
ok
{
if
!
ok
{
errc
:=
<-
c
.
quit
errc
:=
<-
c
.
quit
errc
<-
nil
errc
<-
nil
return
return
}
}
header
:=
ev
.
Data
.
(
ChainEvent
)
.
Block
.
Header
()
header
:=
ev
.
Block
.
Header
()
if
header
.
ParentHash
!=
prevHash
{
if
header
.
ParentHash
!=
prevHash
{
c
.
newHead
(
FindCommonAncestor
(
c
.
chainDb
,
prevHeader
,
header
)
.
Number
.
Uint64
(),
true
)
c
.
newHead
(
FindCommonAncestor
(
c
.
chainDb
,
prevHeader
,
header
)
.
Number
.
Uint64
(),
true
)
}
}
...
@@ -226,7 +256,10 @@ func (c *ChainIndexer) newHead(head uint64, reorg bool) {
...
@@ -226,7 +256,10 @@ func (c *ChainIndexer) newHead(head uint64, reorg bool) {
// updateLoop is the main event loop of the indexer which pushes chain segments
// updateLoop is the main event loop of the indexer which pushes chain segments
// down into the processing backend.
// down into the processing backend.
func
(
c
*
ChainIndexer
)
updateLoop
()
{
func
(
c
*
ChainIndexer
)
updateLoop
()
{
var
updated
time
.
Time
var
(
updated
time
.
Time
updateMsg
bool
)
for
{
for
{
select
{
select
{
...
@@ -242,6 +275,7 @@ func (c *ChainIndexer) updateLoop() {
...
@@ -242,6 +275,7 @@ func (c *ChainIndexer) updateLoop() {
// Periodically print an upgrade log message to the user
// Periodically print an upgrade log message to the user
if
time
.
Since
(
updated
)
>
8
*
time
.
Second
{
if
time
.
Since
(
updated
)
>
8
*
time
.
Second
{
if
c
.
knownSections
>
c
.
storedSections
+
1
{
if
c
.
knownSections
>
c
.
storedSections
+
1
{
updateMsg
=
true
c
.
log
.
Info
(
"Upgrading chain index"
,
"percentage"
,
c
.
storedSections
*
100
/
c
.
knownSections
)
c
.
log
.
Info
(
"Upgrading chain index"
,
"percentage"
,
c
.
storedSections
*
100
/
c
.
knownSections
)
}
}
updated
=
time
.
Now
()
updated
=
time
.
Now
()
...
@@ -250,17 +284,24 @@ func (c *ChainIndexer) updateLoop() {
...
@@ -250,17 +284,24 @@ func (c *ChainIndexer) updateLoop() {
section
:=
c
.
storedSections
section
:=
c
.
storedSections
var
oldHead
common
.
Hash
var
oldHead
common
.
Hash
if
section
>
0
{
if
section
>
0
{
oldHead
=
c
.
s
ectionHead
(
section
-
1
)
oldHead
=
c
.
S
ectionHead
(
section
-
1
)
}
}
// Process the newly defined section in the background
// Process the newly defined section in the background
c
.
lock
.
Unlock
()
c
.
lock
.
Unlock
()
newHead
,
err
:=
c
.
processSection
(
section
,
oldHead
)
newHead
,
err
:=
c
.
processSection
(
section
,
oldHead
)
if
err
!=
nil
{
c
.
log
.
Error
(
"Section processing failed"
,
"error"
,
err
)
}
c
.
lock
.
Lock
()
c
.
lock
.
Lock
()
// If processing succeeded and no reorgs occcurred, mark the section completed
// If processing succeeded and no reorgs occcurred, mark the section completed
if
err
==
nil
&&
oldHead
==
c
.
s
ectionHead
(
section
-
1
)
{
if
err
==
nil
&&
oldHead
==
c
.
S
ectionHead
(
section
-
1
)
{
c
.
setSectionHead
(
section
,
newHead
)
c
.
setSectionHead
(
section
,
newHead
)
c
.
setValidSections
(
section
+
1
)
c
.
setValidSections
(
section
+
1
)
if
c
.
storedSections
==
c
.
knownSections
&&
updateMsg
{
updateMsg
=
false
c
.
log
.
Info
(
"Finished upgrading chain index"
)
}
c
.
cascadedHead
=
c
.
storedSections
*
c
.
sectionSize
-
1
c
.
cascadedHead
=
c
.
storedSections
*
c
.
sectionSize
-
1
for
_
,
child
:=
range
c
.
children
{
for
_
,
child
:=
range
c
.
children
{
...
@@ -295,7 +336,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
...
@@ -295,7 +336,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
c
.
log
.
Trace
(
"Processing new chain section"
,
"section"
,
section
)
c
.
log
.
Trace
(
"Processing new chain section"
,
"section"
,
section
)
// Reset and partial processing
// Reset and partial processing
c
.
backend
.
Reset
(
section
)
c
.
backend
.
Reset
(
section
,
lastHead
)
for
number
:=
section
*
c
.
sectionSize
;
number
<
(
section
+
1
)
*
c
.
sectionSize
;
number
++
{
for
number
:=
section
*
c
.
sectionSize
;
number
<
(
section
+
1
)
*
c
.
sectionSize
;
number
++
{
hash
:=
GetCanonicalHash
(
c
.
chainDb
,
number
)
hash
:=
GetCanonicalHash
(
c
.
chainDb
,
number
)
...
@@ -311,7 +352,8 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
...
@@ -311,7 +352,8 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
c
.
backend
.
Process
(
header
)
c
.
backend
.
Process
(
header
)
lastHead
=
header
.
Hash
()
lastHead
=
header
.
Hash
()
}
}
if
err
:=
c
.
backend
.
Commit
(
c
.
chainDb
);
err
!=
nil
{
if
err
:=
c
.
backend
.
Commit
();
err
!=
nil
{
c
.
log
.
Error
(
"Section commit failed"
,
"error"
,
err
)
return
common
.
Hash
{},
err
return
common
.
Hash
{},
err
}
}
return
lastHead
,
nil
return
lastHead
,
nil
...
@@ -324,7 +366,7 @@ func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
...
@@ -324,7 +366,7 @@ func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
c
.
lock
.
Lock
()
c
.
lock
.
Lock
()
defer
c
.
lock
.
Unlock
()
defer
c
.
lock
.
Unlock
()
return
c
.
storedSections
,
c
.
storedSections
*
c
.
sectionSize
-
1
,
c
.
s
ectionHead
(
c
.
storedSections
-
1
)
return
c
.
storedSections
,
c
.
storedSections
*
c
.
sectionSize
-
1
,
c
.
S
ectionHead
(
c
.
storedSections
-
1
)
}
}
// AddChildIndexer adds a child ChainIndexer that can use the output of this one
// AddChildIndexer adds a child ChainIndexer that can use the output of this one
...
@@ -366,7 +408,7 @@ func (c *ChainIndexer) setValidSections(sections uint64) {
...
@@ -366,7 +408,7 @@ func (c *ChainIndexer) setValidSections(sections uint64) {
// sectionHead retrieves the last block hash of a processed section from the
// sectionHead retrieves the last block hash of a processed section from the
// index database.
// index database.
func
(
c
*
ChainIndexer
)
s
ectionHead
(
section
uint64
)
common
.
Hash
{
func
(
c
*
ChainIndexer
)
S
ectionHead
(
section
uint64
)
common
.
Hash
{
var
data
[
8
]
byte
var
data
[
8
]
byte
binary
.
BigEndian
.
PutUint64
(
data
[
:
],
section
)
binary
.
BigEndian
.
PutUint64
(
data
[
:
],
section
)
...
...
core/chain_indexer_test.go
View file @
4ea4d2dc
...
@@ -23,6 +23,7 @@ import (
...
@@ -23,6 +23,7 @@ import (
"testing"
"testing"
"time"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb"
)
)
...
@@ -58,7 +59,6 @@ func testChainIndexer(t *testing.T, count int) {
...
@@ -58,7 +59,6 @@ func testChainIndexer(t *testing.T, count int) {
)
)
backends
[
i
]
=
&
testChainIndexBackend
{
t
:
t
,
processCh
:
make
(
chan
uint64
)}
backends
[
i
]
=
&
testChainIndexBackend
{
t
:
t
,
processCh
:
make
(
chan
uint64
)}
backends
[
i
]
.
indexer
=
NewChainIndexer
(
db
,
ethdb
.
NewTable
(
db
,
string
([]
byte
{
byte
(
i
)})),
backends
[
i
],
sectionSize
,
confirmsReq
,
0
,
fmt
.
Sprintf
(
"indexer-%d"
,
i
))
backends
[
i
]
.
indexer
=
NewChainIndexer
(
db
,
ethdb
.
NewTable
(
db
,
string
([]
byte
{
byte
(
i
)})),
backends
[
i
],
sectionSize
,
confirmsReq
,
0
,
fmt
.
Sprintf
(
"indexer-%d"
,
i
))
defer
backends
[
i
]
.
indexer
.
Close
()
if
sections
,
_
,
_
:=
backends
[
i
]
.
indexer
.
Sections
();
sections
!=
0
{
if
sections
,
_
,
_
:=
backends
[
i
]
.
indexer
.
Sections
();
sections
!=
0
{
t
.
Fatalf
(
"Canonical section count mismatch: have %v, want %v"
,
sections
,
0
)
t
.
Fatalf
(
"Canonical section count mismatch: have %v, want %v"
,
sections
,
0
)
...
@@ -67,6 +67,7 @@ func testChainIndexer(t *testing.T, count int) {
...
@@ -67,6 +67,7 @@ func testChainIndexer(t *testing.T, count int) {
backends
[
i
-
1
]
.
indexer
.
AddChildIndexer
(
backends
[
i
]
.
indexer
)
backends
[
i
-
1
]
.
indexer
.
AddChildIndexer
(
backends
[
i
]
.
indexer
)
}
}
}
}
defer
backends
[
0
]
.
indexer
.
Close
()
// parent indexer shuts down children
// notify pings the root indexer about a new head or reorg, then expect
// notify pings the root indexer about a new head or reorg, then expect
// processed blocks if a section is processable
// processed blocks if a section is processable
notify
:=
func
(
headNum
,
failNum
uint64
,
reorg
bool
)
{
notify
:=
func
(
headNum
,
failNum
uint64
,
reorg
bool
)
{
...
@@ -208,7 +209,7 @@ func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
...
@@ -208,7 +209,7 @@ func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
return
b
.
stored
*
b
.
indexer
.
sectionSize
return
b
.
stored
*
b
.
indexer
.
sectionSize
}
}
func
(
b
*
testChainIndexBackend
)
Reset
(
section
uint64
)
{
func
(
b
*
testChainIndexBackend
)
Reset
(
section
uint64
,
lastSectionHead
common
.
Hash
)
{
b
.
section
=
section
b
.
section
=
section
b
.
headerCnt
=
0
b
.
headerCnt
=
0
}
}
...
@@ -226,7 +227,7 @@ func (b *testChainIndexBackend) Process(header *types.Header) {
...
@@ -226,7 +227,7 @@ func (b *testChainIndexBackend) Process(header *types.Header) {
}
}
}
}
func
(
b
*
testChainIndexBackend
)
Commit
(
db
ethdb
.
Database
)
error
{
func
(
b
*
testChainIndexBackend
)
Commit
()
error
{
if
b
.
headerCnt
!=
b
.
indexer
.
sectionSize
{
if
b
.
headerCnt
!=
b
.
indexer
.
sectionSize
{
b
.
t
.
Error
(
"Not enough headers processed"
)
b
.
t
.
Error
(
"Not enough headers processed"
)
}
}
...
...
core/database_util.go
View file @
4ea4d2dc
...
@@ -23,7 +23,6 @@ import (
...
@@ -23,7 +23,6 @@ import (
"errors"
"errors"
"fmt"
"fmt"
"math/big"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types"
...
@@ -48,9 +47,6 @@ var (
...
@@ -48,9 +47,6 @@ var (
lookupPrefix
=
[]
byte
(
"l"
)
// lookupPrefix + hash -> transaction/receipt lookup metadata
lookupPrefix
=
[]
byte
(
"l"
)
// lookupPrefix + hash -> transaction/receipt lookup metadata
preimagePrefix
=
"secure-key-"
// preimagePrefix + hash -> preimage
preimagePrefix
=
"secure-key-"
// preimagePrefix + hash -> preimage
mipmapPre
=
[]
byte
(
"mipmap-log-bloom-"
)
MIPMapLevels
=
[]
uint64
{
1000000
,
500000
,
100000
,
50000
,
1000
}
configPrefix
=
[]
byte
(
"ethereum-config-"
)
// config prefix for the db
configPrefix
=
[]
byte
(
"ethereum-config-"
)
// config prefix for the db
// used by old db, now only used for conversion
// used by old db, now only used for conversion
...
@@ -59,10 +55,10 @@ var (
...
@@ -59,10 +55,10 @@ var (
ErrChainConfigNotFound
=
errors
.
New
(
"ChainConfig not found"
)
// general config not found error
ErrChainConfigNotFound
=
errors
.
New
(
"ChainConfig not found"
)
// general config not found error
mipmapBloomMu
sync
.
Mutex
// protect against race condition when updating mipmap blooms
preimageCounter
=
metrics
.
NewCounter
(
"db/preimage/total"
)
preimageCounter
=
metrics
.
NewCounter
(
"db/preimage/total"
)
preimageHitCounter
=
metrics
.
NewCounter
(
"db/preimage/hits"
)
preimageHitCounter
=
metrics
.
NewCounter
(
"db/preimage/hits"
)
bloomBitsPrefix
=
[]
byte
(
"bloomBits-"
)
)
)
// txLookupEntry is a positional metadata to help looking up the data content of
// txLookupEntry is a positional metadata to help looking up the data content of
...
@@ -497,48 +493,6 @@ func DeleteTxLookupEntry(db ethdb.Database, hash common.Hash) {
...
@@ -497,48 +493,6 @@ func DeleteTxLookupEntry(db ethdb.Database, hash common.Hash) {
db
.
Delete
(
append
(
lookupPrefix
,
hash
.
Bytes
()
...
))
db
.
Delete
(
append
(
lookupPrefix
,
hash
.
Bytes
()
...
))
}
}
// returns a formatted MIP mapped key by adding prefix, canonical number and level
//
// ex. fn(98, 1000) = (prefix || 1000 || 0)
func
mipmapKey
(
num
,
level
uint64
)
[]
byte
{
lkey
:=
make
([]
byte
,
8
)
binary
.
BigEndian
.
PutUint64
(
lkey
,
level
)
key
:=
new
(
big
.
Int
)
.
SetUint64
(
num
/
level
*
level
)
return
append
(
mipmapPre
,
append
(
lkey
,
key
.
Bytes
()
...
)
...
)
}
// WriteMipmapBloom writes each address included in the receipts' logs to the
// MIP bloom bin.
func
WriteMipmapBloom
(
db
ethdb
.
Database
,
number
uint64
,
receipts
types
.
Receipts
)
error
{
mipmapBloomMu
.
Lock
()
defer
mipmapBloomMu
.
Unlock
()
batch
:=
db
.
NewBatch
()
for
_
,
level
:=
range
MIPMapLevels
{
key
:=
mipmapKey
(
number
,
level
)
bloomDat
,
_
:=
db
.
Get
(
key
)
bloom
:=
types
.
BytesToBloom
(
bloomDat
)
for
_
,
receipt
:=
range
receipts
{
for
_
,
log
:=
range
receipt
.
Logs
{
bloom
.
Add
(
log
.
Address
.
Big
())
}
}
batch
.
Put
(
key
,
bloom
.
Bytes
())
}
if
err
:=
batch
.
Write
();
err
!=
nil
{
return
fmt
.
Errorf
(
"mipmap write fail for: %d: %v"
,
number
,
err
)
}
return
nil
}
// GetMipmapBloom returns a bloom filter using the number and level as input
// parameters. For available levels see MIPMapLevels.
func
GetMipmapBloom
(
db
ethdb
.
Database
,
number
,
level
uint64
)
types
.
Bloom
{
bloomDat
,
_
:=
db
.
Get
(
mipmapKey
(
number
,
level
))
return
types
.
BytesToBloom
(
bloomDat
)
}
// PreimageTable returns a Database instance with the key prefix for preimage entries.
// PreimageTable returns a Database instance with the key prefix for preimage entries.
func
PreimageTable
(
db
ethdb
.
Database
)
ethdb
.
Database
{
func
PreimageTable
(
db
ethdb
.
Database
)
ethdb
.
Database
{
return
ethdb
.
NewTable
(
db
,
preimagePrefix
)
return
ethdb
.
NewTable
(
db
,
preimagePrefix
)
...
@@ -637,3 +591,22 @@ func FindCommonAncestor(db ethdb.Database, a, b *types.Header) *types.Header {
...
@@ -637,3 +591,22 @@ func FindCommonAncestor(db ethdb.Database, a, b *types.Header) *types.Header {
}
}
return
a
return
a
}
}
// GetBloomBits reads the compressed bloomBits vector belonging to the given section and bit index from the db
func
GetBloomBits
(
db
ethdb
.
Database
,
bitIdx
,
sectionIdx
uint64
,
sectionHead
common
.
Hash
)
([]
byte
,
error
)
{
var
encKey
[
10
]
byte
binary
.
BigEndian
.
PutUint16
(
encKey
[
0
:
2
],
uint16
(
bitIdx
))
binary
.
BigEndian
.
PutUint64
(
encKey
[
2
:
10
],
sectionIdx
)
key
:=
append
(
append
(
bloomBitsPrefix
,
encKey
[
:
]
...
),
sectionHead
.
Bytes
()
...
)
bloomBits
,
err
:=
db
.
Get
(
key
)
return
bloomBits
,
err
}
// StoreBloomBits writes the compressed bloomBits vector belonging to the given section and bit index to the db
func
StoreBloomBits
(
db
ethdb
.
Database
,
bitIdx
,
sectionIdx
uint64
,
sectionHead
common
.
Hash
,
bloomBits
[]
byte
)
{
var
encKey
[
10
]
byte
binary
.
BigEndian
.
PutUint16
(
encKey
[
0
:
2
],
uint16
(
bitIdx
))
binary
.
BigEndian
.
PutUint64
(
encKey
[
2
:
10
],
sectionIdx
)
key
:=
append
(
append
(
bloomBitsPrefix
,
encKey
[
:
]
...
),
sectionHead
.
Bytes
()
...
)
db
.
Put
(
key
,
bloomBits
)
}
core/database_util_test.go
View file @
4ea4d2dc
...
@@ -18,17 +18,13 @@ package core
...
@@ -18,17 +18,13 @@ package core
import
(
import
(
"bytes"
"bytes"
"io/ioutil"
"math/big"
"math/big"
"os"
"testing"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rlp"
)
)
...
@@ -390,107 +386,3 @@ func TestBlockReceiptStorage(t *testing.T) {
...
@@ -390,107 +386,3 @@ func TestBlockReceiptStorage(t *testing.T) {
t
.
Fatalf
(
"deleted receipts returned: %v"
,
rs
)
t
.
Fatalf
(
"deleted receipts returned: %v"
,
rs
)
}
}
}
}
func
TestMipmapBloom
(
t
*
testing
.
T
)
{
db
,
_
:=
ethdb
.
NewMemDatabase
()
receipt1
:=
new
(
types
.
Receipt
)
receipt1
.
Logs
=
[]
*
types
.
Log
{
{
Address
:
common
.
BytesToAddress
([]
byte
(
"test"
))},
{
Address
:
common
.
BytesToAddress
([]
byte
(
"address"
))},
}
receipt2
:=
new
(
types
.
Receipt
)
receipt2
.
Logs
=
[]
*
types
.
Log
{
{
Address
:
common
.
BytesToAddress
([]
byte
(
"test"
))},
{
Address
:
common
.
BytesToAddress
([]
byte
(
"address1"
))},
}
WriteMipmapBloom
(
db
,
1
,
types
.
Receipts
{
receipt1
})
WriteMipmapBloom
(
db
,
2
,
types
.
Receipts
{
receipt2
})
for
_
,
level
:=
range
MIPMapLevels
{
bloom
:=
GetMipmapBloom
(
db
,
2
,
level
)
if
!
bloom
.
Test
(
new
(
big
.
Int
)
.
SetBytes
([]
byte
(
"address1"
)))
{
t
.
Error
(
"expected test to be included on level:"
,
level
)
}
}
// reset
db
,
_
=
ethdb
.
NewMemDatabase
()
receipt
:=
new
(
types
.
Receipt
)
receipt
.
Logs
=
[]
*
types
.
Log
{
{
Address
:
common
.
BytesToAddress
([]
byte
(
"test"
))},
}
WriteMipmapBloom
(
db
,
999
,
types
.
Receipts
{
receipt1
})
receipt
=
new
(
types
.
Receipt
)
receipt
.
Logs
=
[]
*
types
.
Log
{
{
Address
:
common
.
BytesToAddress
([]
byte
(
"test 1"
))},
}
WriteMipmapBloom
(
db
,
1000
,
types
.
Receipts
{
receipt
})
bloom
:=
GetMipmapBloom
(
db
,
1000
,
1000
)
if
bloom
.
TestBytes
([]
byte
(
"test"
))
{
t
.
Error
(
"test should not have been included"
)
}
}
func
TestMipmapChain
(
t
*
testing
.
T
)
{
dir
,
err
:=
ioutil
.
TempDir
(
""
,
"mipmap"
)
if
err
!=
nil
{
t
.
Fatal
(
err
)
}
defer
os
.
RemoveAll
(
dir
)
var
(
db
,
_
=
ethdb
.
NewLDBDatabase
(
dir
,
0
,
0
)
key1
,
_
=
crypto
.
HexToECDSA
(
"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"
)
addr
=
crypto
.
PubkeyToAddress
(
key1
.
PublicKey
)
addr2
=
common
.
BytesToAddress
([]
byte
(
"jeff"
))
hash1
=
common
.
BytesToHash
([]
byte
(
"topic1"
))
)
defer
db
.
Close
()
gspec
:=
&
Genesis
{
Config
:
params
.
TestChainConfig
,
Alloc
:
GenesisAlloc
{
addr
:
{
Balance
:
big
.
NewInt
(
1000000
)}},
}
genesis
:=
gspec
.
MustCommit
(
db
)
chain
,
receipts
:=
GenerateChain
(
params
.
TestChainConfig
,
genesis
,
db
,
1010
,
func
(
i
int
,
gen
*
BlockGen
)
{
var
receipts
types
.
Receipts
switch
i
{
case
1
:
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
.
Logs
=
[]
*
types
.
Log
{{
Address
:
addr
,
Topics
:
[]
common
.
Hash
{
hash1
}}}
gen
.
AddUncheckedReceipt
(
receipt
)
receipts
=
types
.
Receipts
{
receipt
}
case
1000
:
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
.
Logs
=
[]
*
types
.
Log
{{
Address
:
addr2
}}
gen
.
AddUncheckedReceipt
(
receipt
)
receipts
=
types
.
Receipts
{
receipt
}
}
// store the receipts
WriteMipmapBloom
(
db
,
uint64
(
i
+
1
),
receipts
)
})
for
i
,
block
:=
range
chain
{
WriteBlock
(
db
,
block
)
if
err
:=
WriteCanonicalHash
(
db
,
block
.
Hash
(),
block
.
NumberU64
());
err
!=
nil
{
t
.
Fatalf
(
"failed to insert block number: %v"
,
err
)
}
if
err
:=
WriteHeadBlockHash
(
db
,
block
.
Hash
());
err
!=
nil
{
t
.
Fatalf
(
"failed to insert block number: %v"
,
err
)
}
if
err
:=
WriteBlockReceipts
(
db
,
block
.
Hash
(),
block
.
NumberU64
(),
receipts
[
i
]);
err
!=
nil
{
t
.
Fatal
(
"error writing block receipts:"
,
err
)
}
}
bloom
:=
GetMipmapBloom
(
db
,
0
,
1000
)
if
bloom
.
TestBytes
(
addr2
[
:
])
{
t
.
Error
(
"address was included in bloom and should not have"
)
}
}
core/types/bloom9.go
View file @
4ea4d2dc
...
@@ -106,6 +106,20 @@ func LogsBloom(logs []*Log) *big.Int {
...
@@ -106,6 +106,20 @@ func LogsBloom(logs []*Log) *big.Int {
return
bin
return
bin
}
}
type
BloomIndexList
[
3
]
uint
// BloomIndexes returns the bloom filter bit indexes belonging to the given key
func
BloomIndexes
(
b
[]
byte
)
BloomIndexList
{
b
=
crypto
.
Keccak256
(
b
[
:
])
var
r
[
3
]
uint
for
i
,
_
:=
range
r
{
r
[
i
]
=
(
uint
(
b
[
i
+
i
+
1
])
+
(
uint
(
b
[
i
+
i
])
<<
8
))
&
2047
}
return
r
}
func
bloom9
(
b
[]
byte
)
*
big
.
Int
{
func
bloom9
(
b
[]
byte
)
*
big
.
Int
{
b
=
crypto
.
Keccak256
(
b
[
:
])
b
=
crypto
.
Keccak256
(
b
[
:
])
...
...
eth/api_backend.go
View file @
4ea4d2dc
...
@@ -28,6 +28,7 @@ import (
...
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/event"
...
@@ -194,3 +195,29 @@ func (b *EthApiBackend) EventMux() *event.TypeMux {
...
@@ -194,3 +195,29 @@ func (b *EthApiBackend) EventMux() *event.TypeMux {
func
(
b
*
EthApiBackend
)
AccountManager
()
*
accounts
.
Manager
{
func
(
b
*
EthApiBackend
)
AccountManager
()
*
accounts
.
Manager
{
return
b
.
eth
.
AccountManager
()
return
b
.
eth
.
AccountManager
()
}
}
func
(
b
*
EthApiBackend
)
GetBloomBits
(
ctx
context
.
Context
,
bitIdx
uint64
,
sectionIdxList
[]
uint64
)
([][]
byte
,
error
)
{
results
:=
make
([][]
byte
,
len
(
sectionIdxList
))
var
err
error
for
i
,
sectionIdx
:=
range
sectionIdxList
{
sectionHead
:=
core
.
GetCanonicalHash
(
b
.
eth
.
chainDb
,
(
sectionIdx
+
1
)
*
bloomBitsSection
-
1
)
results
[
i
],
err
=
core
.
GetBloomBits
(
b
.
eth
.
chainDb
,
bitIdx
,
sectionIdx
,
sectionHead
)
if
err
!=
nil
{
return
nil
,
err
}
}
return
results
,
nil
}
func
(
b
*
EthApiBackend
)
BloomBitsSections
()
uint64
{
sections
,
_
,
_
:=
b
.
eth
.
bbIndexer
.
Sections
()
return
sections
}
func
(
b
*
EthApiBackend
)
BloomBitsConfig
()
filters
.
BloomConfig
{
return
filters
.
BloomConfig
{
SectionSize
:
bloomBitsSection
,
MaxRequestLen
:
16
,
MaxRequestWait
:
0
,
}
}
eth/backend.go
View file @
4ea4d2dc
...
@@ -77,6 +77,8 @@ type Ethereum struct {
...
@@ -77,6 +77,8 @@ type Ethereum struct {
engine
consensus
.
Engine
engine
consensus
.
Engine
accountManager
*
accounts
.
Manager
accountManager
*
accounts
.
Manager
bbIndexer
*
core
.
ChainIndexer
ApiBackend
*
EthApiBackend
ApiBackend
*
EthApiBackend
miner
*
miner
.
Miner
miner
*
miner
.
Miner
...
@@ -125,11 +127,9 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
...
@@ -125,11 +127,9 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
networkId
:
config
.
NetworkId
,
networkId
:
config
.
NetworkId
,
gasPrice
:
config
.
GasPrice
,
gasPrice
:
config
.
GasPrice
,
etherbase
:
config
.
Etherbase
,
etherbase
:
config
.
Etherbase
,
bbIndexer
:
NewBloomBitsProcessor
(
chainDb
,
bloomBitsSection
),
}
}
if
err
:=
addMipmapBloomBins
(
chainDb
);
err
!=
nil
{
return
nil
,
err
}
log
.
Info
(
"Initialising Ethereum protocol"
,
"versions"
,
ProtocolVersions
,
"network"
,
config
.
NetworkId
)
log
.
Info
(
"Initialising Ethereum protocol"
,
"versions"
,
ProtocolVersions
,
"network"
,
config
.
NetworkId
)
if
!
config
.
SkipBcVersionCheck
{
if
!
config
.
SkipBcVersionCheck
{
...
@@ -151,6 +151,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
...
@@ -151,6 +151,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
eth
.
blockchain
.
SetHead
(
compat
.
RewindTo
)
eth
.
blockchain
.
SetHead
(
compat
.
RewindTo
)
core
.
WriteChainConfig
(
chainDb
,
genesisHash
,
chainConfig
)
core
.
WriteChainConfig
(
chainDb
,
genesisHash
,
chainConfig
)
}
}
eth
.
bbIndexer
.
Start
(
eth
.
blockchain
)
if
config
.
TxPool
.
Journal
!=
""
{
if
config
.
TxPool
.
Journal
!=
""
{
config
.
TxPool
.
Journal
=
ctx
.
ResolvePath
(
config
.
TxPool
.
Journal
)
config
.
TxPool
.
Journal
=
ctx
.
ResolvePath
(
config
.
TxPool
.
Journal
)
...
@@ -260,7 +261,7 @@ func (s *Ethereum) APIs() []rpc.API {
...
@@ -260,7 +261,7 @@ func (s *Ethereum) APIs() []rpc.API {
},
{
},
{
Namespace
:
"eth"
,
Namespace
:
"eth"
,
Version
:
"1.0"
,
Version
:
"1.0"
,
Service
:
filters
.
NewPublicFilterAPI
(
s
.
ApiBackend
,
false
),
Service
:
filters
.
NewPublicFilterAPI
(
s
.
ApiBackend
,
false
,
bloomBitsSection
),
Public
:
true
,
Public
:
true
,
},
{
},
{
Namespace
:
"admin"
,
Namespace
:
"admin"
,
...
@@ -389,6 +390,7 @@ func (s *Ethereum) Stop() error {
...
@@ -389,6 +390,7 @@ func (s *Ethereum) Stop() error {
if
s
.
stopDbUpgrade
!=
nil
{
if
s
.
stopDbUpgrade
!=
nil
{
s
.
stopDbUpgrade
()
s
.
stopDbUpgrade
()
}
}
s
.
bbIndexer
.
Close
()
s
.
blockchain
.
Stop
()
s
.
blockchain
.
Stop
()
s
.
protocolManager
.
Stop
()
s
.
protocolManager
.
Stop
()
if
s
.
lesServer
!=
nil
{
if
s
.
lesServer
!=
nil
{
...
...
eth/db_upgrade.go
View file @
4ea4d2dc
...
@@ -19,11 +19,13 @@ package eth
...
@@ -19,11 +19,13 @@ package eth
import
(
import
(
"bytes"
"bytes"
"fmt"
"time"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rlp"
...
@@ -135,45 +137,37 @@ func upgradeDeduplicateData(db ethdb.Database) func() error {
...
@@ -135,45 +137,37 @@ func upgradeDeduplicateData(db ethdb.Database) func() error {
}
}
}
}
func
addMipmapBloomBins
(
db
ethdb
.
Database
)
(
err
error
)
{
// BloomBitsIndex implements ChainIndex
const
mipmapVersion
uint
=
2
type
BloomBitsIndex
struct
{
db
ethdb
.
Database
// check if the version is set. We ignore data for now since there's
bc
*
bloombits
.
BloomBitsCreator
// only one version so we can easily ignore it for now
section
,
sectionSize
uint64
var
data
[]
byte
sectionHead
common
.
Hash
data
,
_
=
db
.
Get
([]
byte
(
"setting-mipmap-version"
))
}
if
len
(
data
)
>
0
{
var
version
uint
if
err
:=
rlp
.
DecodeBytes
(
data
,
&
version
);
err
==
nil
&&
version
==
mipmapVersion
{
return
nil
}
}
defer
func
()
{
// number of confirmation blocks before a section is considered probably final and its bloom bits are calculated
if
err
==
nil
{
const
bloomBitsConfirmations
=
256
var
val
[]
byte
val
,
err
=
rlp
.
EncodeToBytes
(
mipmapVersion
)
if
err
==
nil
{
err
=
db
.
Put
([]
byte
(
"setting-mipmap-version"
),
val
)
}
return
}
}()
latestHash
:=
core
.
GetHeadBlockHash
(
db
)
latestBlock
:=
core
.
GetBlock
(
db
,
latestHash
,
core
.
GetBlockNumber
(
db
,
latestHash
))
if
latestBlock
==
nil
{
// clean database
return
}
tstart
:=
time
.
Now
()
// NewBloomBitsProcessor returns a chain processor that generates bloom bits data for the canonical chain
log
.
Warn
(
"Upgrading db log bloom bins"
)
func
NewBloomBitsProcessor
(
db
ethdb
.
Database
,
sectionSize
uint64
)
*
core
.
ChainIndexer
{
for
i
:=
uint64
(
0
);
i
<=
latestBlock
.
NumberU64
();
i
++
{
backend
:=
&
BloomBitsIndex
{
db
:
db
,
sectionSize
:
sectionSize
}
hash
:=
core
.
GetCanonicalHash
(
db
,
i
)
return
core
.
NewChainIndexer
(
db
,
ethdb
.
NewTable
(
db
,
"bbIndex-"
),
backend
,
sectionSize
,
bloomBitsConfirmations
,
time
.
Millisecond
*
100
,
"bloombits"
)
if
(
hash
==
common
.
Hash
{})
{
}
return
fmt
.
Errorf
(
"chain db corrupted. Could not find block %d."
,
i
)
}
func
(
b
*
BloomBitsIndex
)
Reset
(
section
uint64
,
lastSectionHead
common
.
Hash
)
{
core
.
WriteMipmapBloom
(
db
,
i
,
core
.
GetBlockReceipts
(
db
,
hash
,
i
))
b
.
bc
=
bloombits
.
NewBloomBitsCreator
(
b
.
sectionSize
)
b
.
section
=
section
}
func
(
b
*
BloomBitsIndex
)
Process
(
header
*
types
.
Header
)
{
b
.
bc
.
AddHeaderBloom
(
header
.
Bloom
)
b
.
sectionHead
=
header
.
Hash
()
}
func
(
b
*
BloomBitsIndex
)
Commit
()
error
{
for
i
:=
0
;
i
<
bloombits
.
BloomLength
;
i
++
{
compVector
:=
bitutil
.
CompressBytes
(
b
.
bc
.
GetBitVector
(
uint
(
i
)))
core
.
StoreBloomBits
(
b
.
db
,
uint64
(
i
),
b
.
section
,
b
.
sectionHead
,
compVector
)
}
}
log
.
Info
(
"Bloom-bin upgrade completed"
,
"elapsed"
,
common
.
PrettyDuration
(
time
.
Since
(
tstart
)))
return
nil
return
nil
}
}
eth/filters/api.go
View file @
4ea4d2dc
...
@@ -51,24 +51,25 @@ type filter struct {
...
@@ -51,24 +51,25 @@ type filter struct {
// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such als blocks, transactions and logs.
// information related to the Ethereum protocol such als blocks, transactions and logs.
type
PublicFilterAPI
struct
{
type
PublicFilterAPI
struct
{
backend
Backend
backend
Backend
useMipMap
bool
bloomBitsSection
uint64
mux
*
event
.
TypeMux
mux
*
event
.
TypeMux
chainDb
ethdb
.
Database
quit
chan
struct
{}
events
*
EventSystem
chainDb
ethdb
.
Database
filtersMu
sync
.
Mutex
events
*
EventSystem
filters
map
[
rpc
.
ID
]
*
filter
filtersMu
sync
.
Mutex
filters
map
[
rpc
.
ID
]
*
filter
}
}
// NewPublicFilterAPI returns a new PublicFilterAPI instance.
// NewPublicFilterAPI returns a new PublicFilterAPI instance.
func
NewPublicFilterAPI
(
backend
Backend
,
lightMode
bool
)
*
PublicFilterAPI
{
func
NewPublicFilterAPI
(
backend
Backend
,
lightMode
bool
,
bloomBitsSection
uint64
)
*
PublicFilterAPI
{
api
:=
&
PublicFilterAPI
{
api
:=
&
PublicFilterAPI
{
backend
:
backend
,
backend
:
backend
,
useMipMap
:
!
lightMode
,
bloomBitsSection
:
bloomBitsSection
,
mux
:
backend
.
EventMux
(),
mux
:
backend
.
EventMux
(),
chainDb
:
backend
.
ChainDb
(),
chainDb
:
backend
.
ChainDb
(),
events
:
NewEventSystem
(
backend
.
EventMux
(),
backend
,
lightMode
),
events
:
NewEventSystem
(
backend
.
EventMux
(),
backend
,
lightMode
),
filters
:
make
(
map
[
rpc
.
ID
]
*
filter
),
filters
:
make
(
map
[
rpc
.
ID
]
*
filter
),
}
}
go
api
.
timeoutLoop
()
go
api
.
timeoutLoop
()
...
@@ -332,11 +333,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([
...
@@ -332,11 +333,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([
crit
.
ToBlock
=
big
.
NewInt
(
rpc
.
LatestBlockNumber
.
Int64
())
crit
.
ToBlock
=
big
.
NewInt
(
rpc
.
LatestBlockNumber
.
Int64
())
}
}
filter
:=
New
(
api
.
backend
,
api
.
useMipMap
)
filter
:=
New
(
api
.
backend
,
crit
.
FromBlock
.
Int64
(),
crit
.
ToBlock
.
Int64
(),
crit
.
Addresses
,
crit
.
Topics
)
filter
.
SetBeginBlock
(
crit
.
FromBlock
.
Int64
())
filter
.
SetEndBlock
(
crit
.
ToBlock
.
Int64
())
filter
.
SetAddresses
(
crit
.
Addresses
)
filter
.
SetTopics
(
crit
.
Topics
)
logs
,
err
:=
filter
.
Find
(
ctx
)
logs
,
err
:=
filter
.
Find
(
ctx
)
return
returnLogs
(
logs
),
err
return
returnLogs
(
logs
),
err
...
@@ -372,19 +369,18 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty
...
@@ -372,19 +369,18 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty
return
nil
,
fmt
.
Errorf
(
"filter not found"
)
return
nil
,
fmt
.
Errorf
(
"filter not found"
)
}
}
filter
:=
New
(
api
.
backend
,
api
.
useMipMap
)
var
begin
,
end
int64
if
f
.
crit
.
FromBlock
!=
nil
{
if
f
.
crit
.
FromBlock
!=
nil
{
filter
.
SetBeginBlock
(
f
.
crit
.
FromBlock
.
Int64
()
)
begin
=
f
.
crit
.
FromBlock
.
Int64
(
)
}
else
{
}
else
{
filter
.
SetBeginBlock
(
rpc
.
LatestBlockNumber
.
Int64
()
)
begin
=
rpc
.
LatestBlockNumber
.
Int64
(
)
}
}
if
f
.
crit
.
ToBlock
!=
nil
{
if
f
.
crit
.
ToBlock
!=
nil
{
filter
.
SetEndBlock
(
f
.
crit
.
ToBlock
.
Int64
()
)
end
=
f
.
crit
.
ToBlock
.
Int64
(
)
}
else
{
}
else
{
filter
.
SetEndBlock
(
rpc
.
LatestBlockNumber
.
Int64
()
)
end
=
rpc
.
LatestBlockNumber
.
Int64
(
)
}
}
filter
.
SetAddresses
(
f
.
crit
.
Addresses
)
filter
:=
New
(
api
.
backend
,
begin
,
end
,
f
.
crit
.
Addresses
,
f
.
crit
.
Topics
)
filter
.
SetTopics
(
f
.
crit
.
Topics
)
logs
,
err
:=
filter
.
Find
(
ctx
)
logs
,
err
:=
filter
.
Find
(
ctx
)
if
err
!=
nil
{
if
err
!=
nil
{
...
...
eth/filters/bench_test.go
0 → 100644
View file @
4ea4d2dc
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package
filters
import
(
"bytes"
"context"
"fmt"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/node"
"github.com/golang/snappy"
)
func
BenchmarkBloomBits512
(
b
*
testing
.
B
)
{
benchmarkBloomBitsForSize
(
b
,
512
)
}
func
BenchmarkBloomBits1k
(
b
*
testing
.
B
)
{
benchmarkBloomBitsForSize
(
b
,
1024
)
}
func
BenchmarkBloomBits2k
(
b
*
testing
.
B
)
{
benchmarkBloomBitsForSize
(
b
,
2048
)
}
func
BenchmarkBloomBits4k
(
b
*
testing
.
B
)
{
benchmarkBloomBitsForSize
(
b
,
4096
)
}
func
BenchmarkBloomBits8k
(
b
*
testing
.
B
)
{
benchmarkBloomBitsForSize
(
b
,
8192
)
}
func
BenchmarkBloomBits16k
(
b
*
testing
.
B
)
{
benchmarkBloomBitsForSize
(
b
,
16384
)
}
func
BenchmarkBloomBits32k
(
b
*
testing
.
B
)
{
benchmarkBloomBitsForSize
(
b
,
32768
)
}
func
benchmarkBloomBitsForSize
(
b
*
testing
.
B
,
sectionSize
uint64
)
{
benchmarkBloomBits
(
b
,
sectionSize
,
0
)
benchmarkBloomBits
(
b
,
sectionSize
,
1
)
benchmarkBloomBits
(
b
,
sectionSize
,
2
)
}
const
benchFilterCnt
=
2000
func
benchmarkBloomBits
(
b
*
testing
.
B
,
sectionSize
uint64
,
comp
int
)
{
benchDataDir
:=
node
.
DefaultDataDir
()
+
"/geth/chaindata"
fmt
.
Println
(
"Running bloombits benchmark section size:"
,
sectionSize
,
" compression method:"
,
comp
)
var
(
compressFn
func
([]
byte
)
[]
byte
decompressFn
func
([]
byte
,
int
)
([]
byte
,
error
)
)
switch
comp
{
case
0
:
// no compression
compressFn
=
func
(
data
[]
byte
)
[]
byte
{
return
data
}
decompressFn
=
func
(
data
[]
byte
,
target
int
)
([]
byte
,
error
)
{
if
len
(
data
)
!=
target
{
panic
(
nil
)
}
return
data
,
nil
}
case
1
:
// bitutil/compress.go
compressFn
=
bitutil
.
CompressBytes
decompressFn
=
bitutil
.
DecompressBytes
case
2
:
// go snappy
compressFn
=
func
(
data
[]
byte
)
[]
byte
{
return
snappy
.
Encode
(
nil
,
data
)
}
decompressFn
=
func
(
data
[]
byte
,
target
int
)
([]
byte
,
error
)
{
decomp
,
err
:=
snappy
.
Decode
(
nil
,
data
)
if
err
!=
nil
||
len
(
decomp
)
!=
target
{
panic
(
err
)
}
return
decomp
,
nil
}
}
db
,
err
:=
ethdb
.
NewLDBDatabase
(
benchDataDir
,
128
,
1024
)
if
err
!=
nil
{
b
.
Fatalf
(
"error opening database at %v: %v"
,
benchDataDir
,
err
)
}
head
:=
core
.
GetHeadBlockHash
(
db
)
if
head
==
(
common
.
Hash
{})
{
b
.
Fatalf
(
"chain data not found at %v"
,
benchDataDir
)
}
clearBloomBits
(
db
)
fmt
.
Println
(
"Generating bloombits data..."
)
headNum
:=
core
.
GetBlockNumber
(
db
,
head
)
if
headNum
<
sectionSize
+
512
{
b
.
Fatalf
(
"not enough blocks for running a benchmark"
)
}
start
:=
time
.
Now
()
cnt
:=
(
headNum
-
512
)
/
sectionSize
var
dataSize
,
compSize
uint64
for
sectionIdx
:=
uint64
(
0
);
sectionIdx
<
cnt
;
sectionIdx
++
{
bc
:=
bloombits
.
NewBloomBitsCreator
(
sectionSize
)
var
header
*
types
.
Header
for
i
:=
sectionIdx
*
sectionSize
;
i
<
(
sectionIdx
+
1
)
*
sectionSize
;
i
++
{
hash
:=
core
.
GetCanonicalHash
(
db
,
i
)
header
=
core
.
GetHeader
(
db
,
hash
,
i
)
if
header
==
nil
{
b
.
Fatalf
(
"Error creating bloomBits data"
)
}
bc
.
AddHeaderBloom
(
header
.
Bloom
)
}
sectionHead
:=
core
.
GetCanonicalHash
(
db
,
(
sectionIdx
+
1
)
*
sectionSize
-
1
)
for
i
:=
0
;
i
<
bloombits
.
BloomLength
;
i
++
{
data
:=
bc
.
GetBitVector
(
uint
(
i
))
comp
:=
compressFn
(
data
)
dataSize
+=
uint64
(
len
(
data
))
compSize
+=
uint64
(
len
(
comp
))
core
.
StoreBloomBits
(
db
,
uint64
(
i
),
sectionIdx
,
sectionHead
,
comp
)
}
//if sectionIdx%50 == 0 {
// fmt.Println(" section", sectionIdx, "/", cnt)
//}
}
d
:=
time
.
Since
(
start
)
fmt
.
Println
(
"Finished generating bloombits data"
)
fmt
.
Println
(
" "
,
d
,
"total "
,
d
/
time
.
Duration
(
cnt
*
sectionSize
),
"per block"
)
fmt
.
Println
(
" data size:"
,
dataSize
,
" compressed size:"
,
compSize
,
" compression ratio:"
,
float64
(
compSize
)
/
float64
(
dataSize
))
fmt
.
Println
(
"Running filter benchmarks..."
)
start
=
time
.
Now
()
mux
:=
new
(
event
.
TypeMux
)
var
backend
*
testBackend
for
i
:=
0
;
i
<
benchFilterCnt
;
i
++
{
if
i
%
20
==
0
{
db
.
Close
()
db
,
_
=
ethdb
.
NewLDBDatabase
(
benchDataDir
,
128
,
1024
)
backend
=
&
testBackend
{
mux
,
db
,
cnt
,
new
(
event
.
Feed
),
new
(
event
.
Feed
),
new
(
event
.
Feed
),
new
(
event
.
Feed
)}
}
var
addr
common
.
Address
addr
[
0
]
=
byte
(
i
)
addr
[
1
]
=
byte
(
i
/
256
)
filter
:=
New
(
backend
,
0
,
int64
(
cnt
*
sectionSize
-
1
),
[]
common
.
Address
{
addr
},
nil
)
filter
.
decompress
=
decompressFn
if
_
,
err
:=
filter
.
Find
(
context
.
Background
());
err
!=
nil
{
b
.
Error
(
"filter.Find error:"
,
err
)
}
}
d
=
time
.
Since
(
start
)
fmt
.
Println
(
"Finished running filter benchmarks"
)
fmt
.
Println
(
" "
,
d
,
"total "
,
d
/
time
.
Duration
(
benchFilterCnt
),
"per address"
,
d
*
time
.
Duration
(
1000000
)
/
time
.
Duration
(
benchFilterCnt
*
cnt
*
sectionSize
),
"per million blocks"
)
db
.
Close
()
}
func
forEachKey
(
db
ethdb
.
Database
,
startPrefix
,
endPrefix
[]
byte
,
fn
func
(
key
[]
byte
))
{
it
:=
db
.
(
*
ethdb
.
LDBDatabase
)
.
NewIterator
()
it
.
Seek
(
startPrefix
)
for
it
.
Valid
()
{
key
:=
it
.
Key
()
cmpLen
:=
len
(
key
)
if
len
(
endPrefix
)
<
cmpLen
{
cmpLen
=
len
(
endPrefix
)
}
if
bytes
.
Compare
(
key
[
:
cmpLen
],
endPrefix
)
==
1
{
break
}
fn
(
common
.
CopyBytes
(
key
))
it
.
Next
()
}
it
.
Release
()
}
var
bloomBitsPrefix
=
[]
byte
(
"bloomBits-"
)
func
clearBloomBits
(
db
ethdb
.
Database
)
{
fmt
.
Println
(
"Clearing bloombits data..."
)
forEachKey
(
db
,
bloomBitsPrefix
,
bloomBitsPrefix
,
func
(
key
[]
byte
)
{
db
.
Delete
(
key
)
})
}
func
BenchmarkNoBloomBits
(
b
*
testing
.
B
)
{
benchDataDir
:=
node
.
DefaultDataDir
()
+
"/geth/chaindata"
fmt
.
Println
(
"Running benchmark without bloombits"
)
db
,
err
:=
ethdb
.
NewLDBDatabase
(
benchDataDir
,
128
,
1024
)
if
err
!=
nil
{
b
.
Fatalf
(
"error opening database at %v: %v"
,
benchDataDir
,
err
)
}
head
:=
core
.
GetHeadBlockHash
(
db
)
if
head
==
(
common
.
Hash
{})
{
b
.
Fatalf
(
"chain data not found at %v"
,
benchDataDir
)
}
headNum
:=
core
.
GetBlockNumber
(
db
,
head
)
clearBloomBits
(
db
)
fmt
.
Println
(
"Running filter benchmarks..."
)
start
:=
time
.
Now
()
mux
:=
new
(
event
.
TypeMux
)
backend
:=
&
testBackend
{
mux
,
db
,
0
,
new
(
event
.
Feed
),
new
(
event
.
Feed
),
new
(
event
.
Feed
),
new
(
event
.
Feed
)}
filter
:=
New
(
backend
,
0
,
int64
(
headNum
),
[]
common
.
Address
{
common
.
Address
{}},
nil
)
filter
.
Find
(
context
.
Background
())
d
:=
time
.
Since
(
start
)
fmt
.
Println
(
"Finished running filter benchmarks"
)
fmt
.
Println
(
" "
,
d
,
"total "
,
d
*
time
.
Duration
(
1000000
)
/
time
.
Duration
(
headNum
+
1
),
"per million blocks"
)
db
.
Close
()
}
eth/filters/filter.go
View file @
4ea4d2dc
...
@@ -18,11 +18,14 @@ package filters
...
@@ -18,11 +18,14 @@ package filters
import
(
import
(
"context"
"context"
"math"
"math/big"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/event"
...
@@ -34,58 +37,51 @@ type Backend interface {
...
@@ -34,58 +37,51 @@ type Backend interface {
EventMux
()
*
event
.
TypeMux
EventMux
()
*
event
.
TypeMux
HeaderByNumber
(
ctx
context
.
Context
,
blockNr
rpc
.
BlockNumber
)
(
*
types
.
Header
,
error
)
HeaderByNumber
(
ctx
context
.
Context
,
blockNr
rpc
.
BlockNumber
)
(
*
types
.
Header
,
error
)
GetReceipts
(
ctx
context
.
Context
,
blockHash
common
.
Hash
)
(
types
.
Receipts
,
error
)
GetReceipts
(
ctx
context
.
Context
,
blockHash
common
.
Hash
)
(
types
.
Receipts
,
error
)
BloomBitsSections
()
uint64
BloomBitsConfig
()
BloomConfig
SubscribeTxPreEvent
(
chan
<-
core
.
TxPreEvent
)
event
.
Subscription
SubscribeTxPreEvent
(
chan
<-
core
.
TxPreEvent
)
event
.
Subscription
SubscribeChainEvent
(
ch
chan
<-
core
.
ChainEvent
)
event
.
Subscription
SubscribeChainEvent
(
ch
chan
<-
core
.
ChainEvent
)
event
.
Subscription
SubscribeRemovedLogsEvent
(
ch
chan
<-
core
.
RemovedLogsEvent
)
event
.
Subscription
SubscribeRemovedLogsEvent
(
ch
chan
<-
core
.
RemovedLogsEvent
)
event
.
Subscription
SubscribeLogsEvent
(
ch
chan
<-
[]
*
types
.
Log
)
event
.
Subscription
SubscribeLogsEvent
(
ch
chan
<-
[]
*
types
.
Log
)
event
.
Subscription
GetBloomBits
(
ctx
context
.
Context
,
bitIdx
uint64
,
sectionIdxList
[]
uint64
)
([][]
byte
,
error
)
}
type
BloomConfig
struct
{
SectionSize
uint64
MaxRequestLen
int
MaxRequestWait
time
.
Duration
}
}
// Filter can be used to retrieve and filter logs.
// Filter can be used to retrieve and filter logs.
type
Filter
struct
{
type
Filter
struct
{
backend
Backend
backend
Backend
useMipMap
bool
bloomBitsConfig
BloomConfig
db
ethdb
.
Database
db
ethdb
.
Database
begin
,
end
int64
begin
,
end
int64
addresses
[]
common
.
Address
addresses
[]
common
.
Address
topics
[][]
common
.
Hash
topics
[][]
common
.
Hash
decompress
func
([]
byte
,
int
)
([]
byte
,
error
)
matcher
*
bloombits
.
Matcher
}
}
// New creates a new filter which uses a bloom filter on blocks to figure out whether
// New creates a new filter which uses a bloom filter on blocks to figure out whether
// a particular block is interesting or not.
// a particular block is interesting or not.
// MipMaps allow past blocks to be searched much more efficiently, but are not available
func
New
(
backend
Backend
,
begin
,
end
int64
,
addresses
[]
common
.
Address
,
topics
[][]
common
.
Hash
)
*
Filter
{
// to light clients.
func
New
(
backend
Backend
,
useMipMap
bool
)
*
Filter
{
return
&
Filter
{
return
&
Filter
{
backend
:
backend
,
backend
:
backend
,
useMipMap
:
useMipMap
,
begin
:
begin
,
db
:
backend
.
ChainDb
(),
end
:
end
,
addresses
:
addresses
,
topics
:
topics
,
bloomBitsConfig
:
backend
.
BloomBitsConfig
(),
db
:
backend
.
ChainDb
(),
matcher
:
bloombits
.
NewMatcher
(
backend
.
BloomBitsConfig
()
.
SectionSize
,
addresses
,
topics
),
decompress
:
bitutil
.
DecompressBytes
,
}
}
}
}
// SetBeginBlock sets the earliest block for filtering.
// -1 = latest block (i.e., the current block)
// hash = particular hash from-to
func
(
f
*
Filter
)
SetBeginBlock
(
begin
int64
)
{
f
.
begin
=
begin
}
// SetEndBlock sets the latest block for filtering.
func
(
f
*
Filter
)
SetEndBlock
(
end
int64
)
{
f
.
end
=
end
}
// SetAddresses matches only logs that are generated from addresses that are included
// in the given addresses.
func
(
f
*
Filter
)
SetAddresses
(
addr
[]
common
.
Address
)
{
f
.
addresses
=
addr
}
// SetTopics matches only logs that have topics matching the given topics.
func
(
f
*
Filter
)
SetTopics
(
topics
[][]
common
.
Hash
)
{
f
.
topics
=
topics
}
// FindOnce searches the blockchain for matching log entries, returning
// FindOnce searches the blockchain for matching log entries, returning
// all matching entries from the first block that contains matches,
// all matching entries from the first block that contains matches,
// updating the start point of the filter accordingly. If no results are
// updating the start point of the filter accordingly. If no results are
...
@@ -106,18 +102,9 @@ func (f *Filter) FindOnce(ctx context.Context) ([]*types.Log, error) {
...
@@ -106,18 +102,9 @@ func (f *Filter) FindOnce(ctx context.Context) ([]*types.Log, error) {
endBlockNo
=
headBlockNumber
endBlockNo
=
headBlockNumber
}
}
// if no addresses are present we can't make use of fast search which
logs
,
blockNumber
,
err
:=
f
.
getLogs
(
ctx
,
beginBlockNo
,
endBlockNo
)
// uses the mipmap bloom filters to check for fast inclusion and uses
// higher range probability in order to ensure at least a false positive
if
!
f
.
useMipMap
||
len
(
f
.
addresses
)
==
0
{
logs
,
blockNumber
,
err
:=
f
.
getLogs
(
ctx
,
beginBlockNo
,
endBlockNo
)
f
.
begin
=
int64
(
blockNumber
+
1
)
return
logs
,
err
}
logs
,
blockNumber
:=
f
.
mipFind
(
beginBlockNo
,
endBlockNo
,
0
)
f
.
begin
=
int64
(
blockNumber
+
1
)
f
.
begin
=
int64
(
blockNumber
+
1
)
return
logs
,
nil
return
logs
,
err
}
}
// Run filters logs with the current parameters set
// Run filters logs with the current parameters set
...
@@ -131,43 +118,134 @@ func (f *Filter) Find(ctx context.Context) (logs []*types.Log, err error) {
...
@@ -131,43 +118,134 @@ func (f *Filter) Find(ctx context.Context) (logs []*types.Log, err error) {
}
}
}
}
func
(
f
*
Filter
)
mipFind
(
start
,
end
uint64
,
depth
int
)
(
logs
[]
*
types
.
Log
,
blockNumber
uint64
)
{
// nextRequest returns the next request to retrieve for the bloombits matcher
level
:=
core
.
MIPMapLevels
[
depth
]
func
(
f
*
Filter
)
nextRequest
()
(
bloombits
uint
,
sections
[]
uint64
)
{
// normalise numerator so we can work in level specific batches and
bloomIndex
,
ok
:=
f
.
matcher
.
AllocSectionQueue
()
// work with the proper range checks
if
!
ok
{
for
num
:=
start
/
level
*
level
;
num
<=
end
;
num
+=
level
{
return
0
,
nil
// find addresses in bloom filters
}
bloom
:=
core
.
GetMipmapBloom
(
f
.
db
,
num
,
level
)
if
f
.
bloomBitsConfig
.
MaxRequestWait
>
0
&&
// Don't bother checking the first time through the loop - we're probably picking
(
f
.
bloomBitsConfig
.
MaxRequestLen
<=
1
||
// SectionCount is always greater than zero after a successful alloc
// up where a previous run left off.
f
.
matcher
.
SectionCount
(
bloomIndex
)
<
f
.
bloomBitsConfig
.
MaxRequestLen
)
{
first
:=
true
time
.
Sleep
(
f
.
bloomBitsConfig
.
MaxRequestWait
)
for
_
,
addr
:=
range
f
.
addresses
{
}
if
first
||
bloom
.
TestBytes
(
addr
[
:
])
{
return
bloomIndex
,
f
.
matcher
.
FetchSections
(
bloomIndex
,
f
.
bloomBitsConfig
.
MaxRequestLen
)
first
=
false
}
// range check normalised values and make sure that
// we're resolving the correct range instead of the
// serveMatcher serves the bloombits matcher by fetching the requested vectors
// normalised values.
// through the filter backend
start
:=
uint64
(
math
.
Max
(
float64
(
num
),
float64
(
start
)))
func
(
f
*
Filter
)
serveMatcher
(
ctx
context
.
Context
,
stop
chan
struct
{},
wg
*
sync
.
WaitGroup
)
chan
error
{
end
:=
uint64
(
math
.
Min
(
float64
(
num
+
level
-
1
),
float64
(
end
)))
errChn
:=
make
(
chan
error
,
1
)
if
depth
+
1
==
len
(
core
.
MIPMapLevels
)
{
wg
.
Add
(
10
)
l
,
blockNumber
,
_
:=
f
.
getLogs
(
context
.
Background
(),
start
,
end
)
for
i
:=
0
;
i
<
10
;
i
++
{
if
len
(
l
)
>
0
{
go
func
(
i
int
)
{
return
l
,
blockNumber
defer
wg
.
Done
()
for
{
b
,
s
:=
f
.
nextRequest
()
if
s
==
nil
{
return
}
data
,
err
:=
f
.
backend
.
GetBloomBits
(
ctx
,
uint64
(
b
),
s
)
if
err
!=
nil
{
select
{
case
errChn
<-
err
:
case
<-
stop
:
}
}
}
else
{
return
l
,
blockNumber
:=
f
.
mipFind
(
start
,
end
,
depth
+
1
)
}
if
len
(
l
)
>
0
{
decomp
:=
make
([][]
byte
,
len
(
data
))
return
l
,
blockNumber
for
i
,
d
:=
range
data
{
var
err
error
if
decomp
[
i
],
err
=
f
.
decompress
(
d
,
int
(
f
.
bloomBitsConfig
.
SectionSize
/
8
));
err
!=
nil
{
select
{
case
errChn
<-
err
:
case
<-
stop
:
}
return
}
}
}
}
f
.
matcher
.
Deliver
(
b
,
s
,
decomp
)
}
}
}
}
(
i
)
}
}
return
nil
,
end
return
errChn
}
// checkMatches checks if the receipts belonging to the given header contain any log events that
// match the filter criteria. This function is called when the bloom filter signals a potential match.
func
(
f
*
Filter
)
checkMatches
(
ctx
context
.
Context
,
header
*
types
.
Header
)
(
logs
[]
*
types
.
Log
,
err
error
)
{
// Get the logs of the block
receipts
,
err
:=
f
.
backend
.
GetReceipts
(
ctx
,
header
.
Hash
())
if
err
!=
nil
{
return
nil
,
err
}
var
unfiltered
[]
*
types
.
Log
for
_
,
receipt
:=
range
receipts
{
unfiltered
=
append
(
unfiltered
,
([]
*
types
.
Log
)(
receipt
.
Logs
)
...
)
}
logs
=
filterLogs
(
unfiltered
,
nil
,
nil
,
f
.
addresses
,
f
.
topics
)
if
len
(
logs
)
>
0
{
return
logs
,
nil
}
return
nil
,
nil
}
}
func
(
f
*
Filter
)
getLogs
(
ctx
context
.
Context
,
start
,
end
uint64
)
(
logs
[]
*
types
.
Log
,
blockNumber
uint64
,
err
error
)
{
func
(
f
*
Filter
)
getLogs
(
ctx
context
.
Context
,
start
,
end
uint64
)
(
logs
[]
*
types
.
Log
,
blockNumber
uint64
,
err
error
)
{
haveBloomBitsBefore
:=
f
.
backend
.
BloomBitsSections
()
*
f
.
bloomBitsConfig
.
SectionSize
if
haveBloomBitsBefore
>
start
{
e
:=
end
if
haveBloomBitsBefore
<=
e
{
e
=
haveBloomBitsBefore
-
1
}
stop
:=
make
(
chan
struct
{})
var
wg
sync
.
WaitGroup
matches
:=
f
.
matcher
.
Start
(
start
,
e
)
errChn
:=
f
.
serveMatcher
(
ctx
,
stop
,
&
wg
)
defer
func
()
{
f
.
matcher
.
Stop
()
close
(
stop
)
wg
.
Wait
()
}()
loop
:
for
{
select
{
case
i
,
ok
:=
<-
matches
:
if
!
ok
{
break
loop
}
blockNumber
:=
rpc
.
BlockNumber
(
i
)
header
,
err
:=
f
.
backend
.
HeaderByNumber
(
ctx
,
blockNumber
)
if
header
==
nil
||
err
!=
nil
{
return
logs
,
end
,
err
}
logs
,
err
:=
f
.
checkMatches
(
ctx
,
header
)
if
err
!=
nil
{
return
nil
,
end
,
err
}
if
logs
!=
nil
{
return
logs
,
i
,
nil
}
case
err
:=
<-
errChn
:
return
logs
,
end
,
err
case
<-
ctx
.
Done
()
:
return
nil
,
end
,
ctx
.
Err
()
}
}
if
end
<
haveBloomBitsBefore
{
return
logs
,
end
,
nil
}
start
=
haveBloomBitsBefore
}
// search the rest with regular block-by-block bloom filtering
for
i
:=
start
;
i
<=
end
;
i
++
{
for
i
:=
start
;
i
<=
end
;
i
++
{
blockNumber
:=
rpc
.
BlockNumber
(
i
)
blockNumber
:=
rpc
.
BlockNumber
(
i
)
header
,
err
:=
f
.
backend
.
HeaderByNumber
(
ctx
,
blockNumber
)
header
,
err
:=
f
.
backend
.
HeaderByNumber
(
ctx
,
blockNumber
)
...
@@ -178,18 +256,12 @@ func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*types.
...
@@ -178,18 +256,12 @@ func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*types.
// Use bloom filtering to see if this block is interesting given the
// Use bloom filtering to see if this block is interesting given the
// current parameters
// current parameters
if
f
.
bloomFilter
(
header
.
Bloom
)
{
if
f
.
bloomFilter
(
header
.
Bloom
)
{
// Get the logs of the block
logs
,
err
:=
f
.
checkMatches
(
ctx
,
header
)
receipts
,
err
:=
f
.
backend
.
GetReceipts
(
ctx
,
header
.
Hash
())
if
err
!=
nil
{
if
err
!=
nil
{
return
nil
,
end
,
err
return
nil
,
end
,
err
}
}
var
unfiltered
[]
*
types
.
Log
if
logs
!=
nil
{
for
_
,
receipt
:=
range
receipts
{
return
logs
,
i
,
nil
unfiltered
=
append
(
unfiltered
,
([]
*
types
.
Log
)(
receipt
.
Logs
)
...
)
}
logs
=
filterLogs
(
unfiltered
,
nil
,
nil
,
f
.
addresses
,
f
.
topics
)
if
len
(
logs
)
>
0
{
return
logs
,
uint64
(
blockNumber
),
nil
}
}
}
}
}
}
...
...
eth/filters/filter_system_test.go
View file @
4ea4d2dc
...
@@ -36,6 +36,7 @@ import (
...
@@ -36,6 +36,7 @@ import (
type
testBackend
struct
{
type
testBackend
struct
{
mux
*
event
.
TypeMux
mux
*
event
.
TypeMux
db
ethdb
.
Database
db
ethdb
.
Database
sections
uint64
txFeed
*
event
.
Feed
txFeed
*
event
.
Feed
rmLogsFeed
*
event
.
Feed
rmLogsFeed
*
event
.
Feed
logsFeed
*
event
.
Feed
logsFeed
*
event
.
Feed
...
@@ -84,6 +85,31 @@ func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subsc
...
@@ -84,6 +85,31 @@ func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subsc
return
b
.
chainFeed
.
Subscribe
(
ch
)
return
b
.
chainFeed
.
Subscribe
(
ch
)
}
}
func
(
b
*
testBackend
)
GetBloomBits
(
ctx
context
.
Context
,
bitIdx
uint64
,
sectionIdxList
[]
uint64
)
([][]
byte
,
error
)
{
results
:=
make
([][]
byte
,
len
(
sectionIdxList
))
var
err
error
for
i
,
sectionIdx
:=
range
sectionIdxList
{
sectionHead
:=
core
.
GetCanonicalHash
(
b
.
db
,
(
sectionIdx
+
1
)
*
testBloomBitsSection
-
1
)
results
[
i
],
err
=
core
.
GetBloomBits
(
b
.
db
,
bitIdx
,
sectionIdx
,
sectionHead
)
if
err
!=
nil
{
return
nil
,
err
}
}
return
results
,
nil
}
func
(
b
*
testBackend
)
BloomBitsSections
()
uint64
{
return
b
.
sections
}
func
(
b
*
testBackend
)
BloomBitsConfig
()
BloomConfig
{
return
BloomConfig
{
SectionSize
:
testBloomBitsSection
,
MaxRequestLen
:
16
,
MaxRequestWait
:
0
,
}
}
// TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
// TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
// It creates multiple subscriptions:
// It creates multiple subscriptions:
// - one at the start and should receive all posted chain events and a second (blockHashes)
// - one at the start and should receive all posted chain events and a second (blockHashes)
...
@@ -99,8 +125,8 @@ func TestBlockSubscription(t *testing.T) {
...
@@ -99,8 +125,8 @@ func TestBlockSubscription(t *testing.T) {
rmLogsFeed
=
new
(
event
.
Feed
)
rmLogsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
backend
=
&
testBackend
{
mux
,
db
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
backend
=
&
testBackend
{
mux
,
db
,
0
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
api
=
NewPublicFilterAPI
(
backend
,
false
)
api
=
NewPublicFilterAPI
(
backend
,
false
,
0
)
genesis
=
new
(
core
.
Genesis
)
.
MustCommit
(
db
)
genesis
=
new
(
core
.
Genesis
)
.
MustCommit
(
db
)
chain
,
_
=
core
.
GenerateChain
(
params
.
TestChainConfig
,
genesis
,
db
,
10
,
func
(
i
int
,
gen
*
core
.
BlockGen
)
{})
chain
,
_
=
core
.
GenerateChain
(
params
.
TestChainConfig
,
genesis
,
db
,
10
,
func
(
i
int
,
gen
*
core
.
BlockGen
)
{})
chainEvents
=
[]
core
.
ChainEvent
{}
chainEvents
=
[]
core
.
ChainEvent
{}
...
@@ -156,8 +182,8 @@ func TestPendingTxFilter(t *testing.T) {
...
@@ -156,8 +182,8 @@ func TestPendingTxFilter(t *testing.T) {
rmLogsFeed
=
new
(
event
.
Feed
)
rmLogsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
backend
=
&
testBackend
{
mux
,
db
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
backend
=
&
testBackend
{
mux
,
db
,
0
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
api
=
NewPublicFilterAPI
(
backend
,
false
)
api
=
NewPublicFilterAPI
(
backend
,
false
,
0
)
transactions
=
[]
*
types
.
Transaction
{
transactions
=
[]
*
types
.
Transaction
{
types
.
NewTransaction
(
0
,
common
.
HexToAddress
(
"0xb794f5ea0ba39494ce83a213fffba74279579268"
),
new
(
big
.
Int
),
new
(
big
.
Int
),
new
(
big
.
Int
),
nil
),
types
.
NewTransaction
(
0
,
common
.
HexToAddress
(
"0xb794f5ea0ba39494ce83a213fffba74279579268"
),
new
(
big
.
Int
),
new
(
big
.
Int
),
new
(
big
.
Int
),
nil
),
...
@@ -219,8 +245,8 @@ func TestLogFilterCreation(t *testing.T) {
...
@@ -219,8 +245,8 @@ func TestLogFilterCreation(t *testing.T) {
rmLogsFeed
=
new
(
event
.
Feed
)
rmLogsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
backend
=
&
testBackend
{
mux
,
db
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
backend
=
&
testBackend
{
mux
,
db
,
0
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
api
=
NewPublicFilterAPI
(
backend
,
false
)
api
=
NewPublicFilterAPI
(
backend
,
false
,
0
)
testCases
=
[]
struct
{
testCases
=
[]
struct
{
crit
FilterCriteria
crit
FilterCriteria
...
@@ -268,8 +294,8 @@ func TestInvalidLogFilterCreation(t *testing.T) {
...
@@ -268,8 +294,8 @@ func TestInvalidLogFilterCreation(t *testing.T) {
rmLogsFeed
=
new
(
event
.
Feed
)
rmLogsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
backend
=
&
testBackend
{
mux
,
db
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
backend
=
&
testBackend
{
mux
,
db
,
0
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
api
=
NewPublicFilterAPI
(
backend
,
false
)
api
=
NewPublicFilterAPI
(
backend
,
false
,
0
)
)
)
// different situations where log filter creation should fail.
// different situations where log filter creation should fail.
...
@@ -298,8 +324,8 @@ func TestLogFilter(t *testing.T) {
...
@@ -298,8 +324,8 @@ func TestLogFilter(t *testing.T) {
rmLogsFeed
=
new
(
event
.
Feed
)
rmLogsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
backend
=
&
testBackend
{
mux
,
db
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
backend
=
&
testBackend
{
mux
,
db
,
0
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
api
=
NewPublicFilterAPI
(
backend
,
false
)
api
=
NewPublicFilterAPI
(
backend
,
false
,
0
)
firstAddr
=
common
.
HexToAddress
(
"0x1111111111111111111111111111111111111111"
)
firstAddr
=
common
.
HexToAddress
(
"0x1111111111111111111111111111111111111111"
)
secondAddr
=
common
.
HexToAddress
(
"0x2222222222222222222222222222222222222222"
)
secondAddr
=
common
.
HexToAddress
(
"0x2222222222222222222222222222222222222222"
)
...
@@ -415,8 +441,8 @@ func TestPendingLogsSubscription(t *testing.T) {
...
@@ -415,8 +441,8 @@ func TestPendingLogsSubscription(t *testing.T) {
rmLogsFeed
=
new
(
event
.
Feed
)
rmLogsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
backend
=
&
testBackend
{
mux
,
db
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
backend
=
&
testBackend
{
mux
,
db
,
0
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
api
=
NewPublicFilterAPI
(
backend
,
false
)
api
=
NewPublicFilterAPI
(
backend
,
false
,
0
)
firstAddr
=
common
.
HexToAddress
(
"0x1111111111111111111111111111111111111111"
)
firstAddr
=
common
.
HexToAddress
(
"0x1111111111111111111111111111111111111111"
)
secondAddr
=
common
.
HexToAddress
(
"0x2222222222222222222222222222222222222222"
)
secondAddr
=
common
.
HexToAddress
(
"0x2222222222222222222222222222222222222222"
)
...
...
eth/filters/filter_test.go
View file @
4ea4d2dc
...
@@ -32,6 +32,8 @@ import (
...
@@ -32,6 +32,8 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/params"
)
)
// testBloomBitsSection is the bloombits section size used by the filter tests.
const testBloomBitsSection = 4096
func
makeReceipt
(
addr
common
.
Address
)
*
types
.
Receipt
{
func
makeReceipt
(
addr
common
.
Address
)
*
types
.
Receipt
{
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
.
Logs
=
[]
*
types
.
Log
{
receipt
.
Logs
=
[]
*
types
.
Log
{
...
@@ -41,8 +43,8 @@ func makeReceipt(addr common.Address) *types.Receipt {
...
@@ -41,8 +43,8 @@ func makeReceipt(addr common.Address) *types.Receipt {
return
receipt
return
receipt
}
}
func
Benchmark
Mipmap
s
(
b
*
testing
.
B
)
{
func
Benchmark
Filter
s
(
b
*
testing
.
B
)
{
dir
,
err
:=
ioutil
.
TempDir
(
""
,
"
mipmap
"
)
dir
,
err
:=
ioutil
.
TempDir
(
""
,
"
filtertest
"
)
if
err
!=
nil
{
if
err
!=
nil
{
b
.
Fatal
(
err
)
b
.
Fatal
(
err
)
}
}
...
@@ -55,7 +57,7 @@ func BenchmarkMipmaps(b *testing.B) {
...
@@ -55,7 +57,7 @@ func BenchmarkMipmaps(b *testing.B) {
rmLogsFeed
=
new
(
event
.
Feed
)
rmLogsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
backend
=
&
testBackend
{
mux
,
db
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
backend
=
&
testBackend
{
mux
,
db
,
0
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
key1
,
_
=
crypto
.
HexToECDSA
(
"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"
)
key1
,
_
=
crypto
.
HexToECDSA
(
"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"
)
addr1
=
crypto
.
PubkeyToAddress
(
key1
.
PublicKey
)
addr1
=
crypto
.
PubkeyToAddress
(
key1
.
PublicKey
)
addr2
=
common
.
BytesToAddress
([]
byte
(
"jeff"
))
addr2
=
common
.
BytesToAddress
([]
byte
(
"jeff"
))
...
@@ -66,27 +68,21 @@ func BenchmarkMipmaps(b *testing.B) {
...
@@ -66,27 +68,21 @@ func BenchmarkMipmaps(b *testing.B) {
genesis
:=
core
.
GenesisBlockForTesting
(
db
,
addr1
,
big
.
NewInt
(
1000000
))
genesis
:=
core
.
GenesisBlockForTesting
(
db
,
addr1
,
big
.
NewInt
(
1000000
))
chain
,
receipts
:=
core
.
GenerateChain
(
params
.
TestChainConfig
,
genesis
,
db
,
100010
,
func
(
i
int
,
gen
*
core
.
BlockGen
)
{
chain
,
receipts
:=
core
.
GenerateChain
(
params
.
TestChainConfig
,
genesis
,
db
,
100010
,
func
(
i
int
,
gen
*
core
.
BlockGen
)
{
var
receipts
types
.
Receipts
switch
i
{
switch
i
{
case
2403
:
case
2403
:
receipt
:=
makeReceipt
(
addr1
)
receipt
:=
makeReceipt
(
addr1
)
receipts
=
types
.
Receipts
{
receipt
}
gen
.
AddUncheckedReceipt
(
receipt
)
gen
.
AddUncheckedReceipt
(
receipt
)
case
1034
:
case
1034
:
receipt
:=
makeReceipt
(
addr2
)
receipt
:=
makeReceipt
(
addr2
)
receipts
=
types
.
Receipts
{
receipt
}
gen
.
AddUncheckedReceipt
(
receipt
)
gen
.
AddUncheckedReceipt
(
receipt
)
case
34
:
case
34
:
receipt
:=
makeReceipt
(
addr3
)
receipt
:=
makeReceipt
(
addr3
)
receipts
=
types
.
Receipts
{
receipt
}
gen
.
AddUncheckedReceipt
(
receipt
)
gen
.
AddUncheckedReceipt
(
receipt
)
case
99999
:
case
99999
:
receipt
:=
makeReceipt
(
addr4
)
receipt
:=
makeReceipt
(
addr4
)
receipts
=
types
.
Receipts
{
receipt
}
gen
.
AddUncheckedReceipt
(
receipt
)
gen
.
AddUncheckedReceipt
(
receipt
)
}
}
core
.
WriteMipmapBloom
(
db
,
uint64
(
i
+
1
),
receipts
)
})
})
for
i
,
block
:=
range
chain
{
for
i
,
block
:=
range
chain
{
core
.
WriteBlock
(
db
,
block
)
core
.
WriteBlock
(
db
,
block
)
...
@@ -102,10 +98,7 @@ func BenchmarkMipmaps(b *testing.B) {
...
@@ -102,10 +98,7 @@ func BenchmarkMipmaps(b *testing.B) {
}
}
b
.
ResetTimer
()
b
.
ResetTimer
()
filter
:=
New
(
backend
,
true
)
filter
:=
New
(
backend
,
0
,
-
1
,
[]
common
.
Address
{
addr1
,
addr2
,
addr3
,
addr4
},
nil
)
filter
.
SetAddresses
([]
common
.
Address
{
addr1
,
addr2
,
addr3
,
addr4
})
filter
.
SetBeginBlock
(
0
)
filter
.
SetEndBlock
(
-
1
)
for
i
:=
0
;
i
<
b
.
N
;
i
++
{
for
i
:=
0
;
i
<
b
.
N
;
i
++
{
logs
,
_
:=
filter
.
Find
(
context
.
Background
())
logs
,
_
:=
filter
.
Find
(
context
.
Background
())
...
@@ -116,7 +109,7 @@ func BenchmarkMipmaps(b *testing.B) {
...
@@ -116,7 +109,7 @@ func BenchmarkMipmaps(b *testing.B) {
}
}
func
TestFilters
(
t
*
testing
.
T
)
{
func
TestFilters
(
t
*
testing
.
T
)
{
dir
,
err
:=
ioutil
.
TempDir
(
""
,
"
mipmap
"
)
dir
,
err
:=
ioutil
.
TempDir
(
""
,
"
filtertest
"
)
if
err
!=
nil
{
if
err
!=
nil
{
t
.
Fatal
(
err
)
t
.
Fatal
(
err
)
}
}
...
@@ -129,7 +122,7 @@ func TestFilters(t *testing.T) {
...
@@ -129,7 +122,7 @@ func TestFilters(t *testing.T) {
rmLogsFeed
=
new
(
event
.
Feed
)
rmLogsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
logsFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
chainFeed
=
new
(
event
.
Feed
)
backend
=
&
testBackend
{
mux
,
db
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
backend
=
&
testBackend
{
mux
,
db
,
0
,
txFeed
,
rmLogsFeed
,
logsFeed
,
chainFeed
}
key1
,
_
=
crypto
.
HexToECDSA
(
"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"
)
key1
,
_
=
crypto
.
HexToECDSA
(
"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"
)
addr
=
crypto
.
PubkeyToAddress
(
key1
.
PublicKey
)
addr
=
crypto
.
PubkeyToAddress
(
key1
.
PublicKey
)
...
@@ -142,7 +135,6 @@ func TestFilters(t *testing.T) {
...
@@ -142,7 +135,6 @@ func TestFilters(t *testing.T) {
genesis
:=
core
.
GenesisBlockForTesting
(
db
,
addr
,
big
.
NewInt
(
1000000
))
genesis
:=
core
.
GenesisBlockForTesting
(
db
,
addr
,
big
.
NewInt
(
1000000
))
chain
,
receipts
:=
core
.
GenerateChain
(
params
.
TestChainConfig
,
genesis
,
db
,
1000
,
func
(
i
int
,
gen
*
core
.
BlockGen
)
{
chain
,
receipts
:=
core
.
GenerateChain
(
params
.
TestChainConfig
,
genesis
,
db
,
1000
,
func
(
i
int
,
gen
*
core
.
BlockGen
)
{
var
receipts
types
.
Receipts
switch
i
{
switch
i
{
case
1
:
case
1
:
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
...
@@ -153,7 +145,6 @@ func TestFilters(t *testing.T) {
...
@@ -153,7 +145,6 @@ func TestFilters(t *testing.T) {
},
},
}
}
gen
.
AddUncheckedReceipt
(
receipt
)
gen
.
AddUncheckedReceipt
(
receipt
)
receipts
=
types
.
Receipts
{
receipt
}
case
2
:
case
2
:
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
.
Logs
=
[]
*
types
.
Log
{
receipt
.
Logs
=
[]
*
types
.
Log
{
...
@@ -163,7 +154,6 @@ func TestFilters(t *testing.T) {
...
@@ -163,7 +154,6 @@ func TestFilters(t *testing.T) {
},
},
}
}
gen
.
AddUncheckedReceipt
(
receipt
)
gen
.
AddUncheckedReceipt
(
receipt
)
receipts
=
types
.
Receipts
{
receipt
}
case
998
:
case
998
:
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
.
Logs
=
[]
*
types
.
Log
{
receipt
.
Logs
=
[]
*
types
.
Log
{
...
@@ -173,7 +163,6 @@ func TestFilters(t *testing.T) {
...
@@ -173,7 +163,6 @@ func TestFilters(t *testing.T) {
},
},
}
}
gen
.
AddUncheckedReceipt
(
receipt
)
gen
.
AddUncheckedReceipt
(
receipt
)
receipts
=
types
.
Receipts
{
receipt
}
case
999
:
case
999
:
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
:=
types
.
NewReceipt
(
nil
,
false
,
new
(
big
.
Int
))
receipt
.
Logs
=
[]
*
types
.
Log
{
receipt
.
Logs
=
[]
*
types
.
Log
{
...
@@ -183,12 +172,7 @@ func TestFilters(t *testing.T) {
...
@@ -183,12 +172,7 @@ func TestFilters(t *testing.T) {
},
},
}
}
gen
.
AddUncheckedReceipt
(
receipt
)
gen
.
AddUncheckedReceipt
(
receipt
)
receipts
=
types
.
Receipts
{
receipt
}
}
}
// i is used as block number for the writes but since the i
// starts at 0 and block 0 (genesis) is already present increment
// by one
core
.
WriteMipmapBloom
(
db
,
uint64
(
i
+
1
),
receipts
)
})
})
for
i
,
block
:=
range
chain
{
for
i
,
block
:=
range
chain
{
core
.
WriteBlock
(
db
,
block
)
core
.
WriteBlock
(
db
,
block
)
...
@@ -203,22 +187,14 @@ func TestFilters(t *testing.T) {
...
@@ -203,22 +187,14 @@ func TestFilters(t *testing.T) {
}
}
}
}
filter
:=
New
(
backend
,
true
)
filter
:=
New
(
backend
,
0
,
-
1
,
[]
common
.
Address
{
addr
},
[][]
common
.
Hash
{{
hash1
,
hash2
,
hash3
,
hash4
}})
filter
.
SetAddresses
([]
common
.
Address
{
addr
})
filter
.
SetTopics
([][]
common
.
Hash
{{
hash1
,
hash2
,
hash3
,
hash4
}})
filter
.
SetBeginBlock
(
0
)
filter
.
SetEndBlock
(
-
1
)
logs
,
_
:=
filter
.
Find
(
context
.
Background
())
logs
,
_
:=
filter
.
Find
(
context
.
Background
())
if
len
(
logs
)
!=
4
{
if
len
(
logs
)
!=
4
{
t
.
Error
(
"expected 4 log, got"
,
len
(
logs
))
t
.
Error
(
"expected 4 log, got"
,
len
(
logs
))
}
}
filter
=
New
(
backend
,
true
)
filter
=
New
(
backend
,
900
,
999
,
[]
common
.
Address
{
addr
},
[][]
common
.
Hash
{{
hash3
}})
filter
.
SetAddresses
([]
common
.
Address
{
addr
})
filter
.
SetTopics
([][]
common
.
Hash
{{
hash3
}})
filter
.
SetBeginBlock
(
900
)
filter
.
SetEndBlock
(
999
)
logs
,
_
=
filter
.
Find
(
context
.
Background
())
logs
,
_
=
filter
.
Find
(
context
.
Background
())
if
len
(
logs
)
!=
1
{
if
len
(
logs
)
!=
1
{
t
.
Error
(
"expected 1 log, got"
,
len
(
logs
))
t
.
Error
(
"expected 1 log, got"
,
len
(
logs
))
...
@@ -227,11 +203,7 @@ func TestFilters(t *testing.T) {
...
@@ -227,11 +203,7 @@ func TestFilters(t *testing.T) {
t
.
Errorf
(
"expected log[0].Topics[0] to be %x, got %x"
,
hash3
,
logs
[
0
]
.
Topics
[
0
])
t
.
Errorf
(
"expected log[0].Topics[0] to be %x, got %x"
,
hash3
,
logs
[
0
]
.
Topics
[
0
])
}
}
filter
=
New
(
backend
,
true
)
filter
=
New
(
backend
,
990
,
-
1
,
[]
common
.
Address
{
addr
},
[][]
common
.
Hash
{{
hash3
}})
filter
.
SetAddresses
([]
common
.
Address
{
addr
})
filter
.
SetTopics
([][]
common
.
Hash
{{
hash3
}})
filter
.
SetBeginBlock
(
990
)
filter
.
SetEndBlock
(
-
1
)
logs
,
_
=
filter
.
Find
(
context
.
Background
())
logs
,
_
=
filter
.
Find
(
context
.
Background
())
if
len
(
logs
)
!=
1
{
if
len
(
logs
)
!=
1
{
t
.
Error
(
"expected 1 log, got"
,
len
(
logs
))
t
.
Error
(
"expected 1 log, got"
,
len
(
logs
))
...
@@ -240,10 +212,7 @@ func TestFilters(t *testing.T) {
...
@@ -240,10 +212,7 @@ func TestFilters(t *testing.T) {
t
.
Errorf
(
"expected log[0].Topics[0] to be %x, got %x"
,
hash3
,
logs
[
0
]
.
Topics
[
0
])
t
.
Errorf
(
"expected log[0].Topics[0] to be %x, got %x"
,
hash3
,
logs
[
0
]
.
Topics
[
0
])
}
}
filter
=
New
(
backend
,
true
)
filter
=
New
(
backend
,
1
,
10
,
nil
,
[][]
common
.
Hash
{{
hash1
,
hash2
}})
filter
.
SetTopics
([][]
common
.
Hash
{{
hash1
,
hash2
}})
filter
.
SetBeginBlock
(
1
)
filter
.
SetEndBlock
(
10
)
logs
,
_
=
filter
.
Find
(
context
.
Background
())
logs
,
_
=
filter
.
Find
(
context
.
Background
())
if
len
(
logs
)
!=
2
{
if
len
(
logs
)
!=
2
{
...
@@ -251,10 +220,7 @@ func TestFilters(t *testing.T) {
...
@@ -251,10 +220,7 @@ func TestFilters(t *testing.T) {
}
}
failHash
:=
common
.
BytesToHash
([]
byte
(
"fail"
))
failHash
:=
common
.
BytesToHash
([]
byte
(
"fail"
))
filter
=
New
(
backend
,
true
)
filter
=
New
(
backend
,
0
,
-
1
,
nil
,
[][]
common
.
Hash
{{
failHash
}})
filter
.
SetTopics
([][]
common
.
Hash
{{
failHash
}})
filter
.
SetBeginBlock
(
0
)
filter
.
SetEndBlock
(
-
1
)
logs
,
_
=
filter
.
Find
(
context
.
Background
())
logs
,
_
=
filter
.
Find
(
context
.
Background
())
if
len
(
logs
)
!=
0
{
if
len
(
logs
)
!=
0
{
...
@@ -262,20 +228,14 @@ func TestFilters(t *testing.T) {
...
@@ -262,20 +228,14 @@ func TestFilters(t *testing.T) {
}
}
failAddr
:=
common
.
BytesToAddress
([]
byte
(
"failmenow"
))
failAddr
:=
common
.
BytesToAddress
([]
byte
(
"failmenow"
))
filter
=
New
(
backend
,
true
)
filter
=
New
(
backend
,
0
,
-
1
,
[]
common
.
Address
{
failAddr
},
nil
)
filter
.
SetAddresses
([]
common
.
Address
{
failAddr
})
filter
.
SetBeginBlock
(
0
)
filter
.
SetEndBlock
(
-
1
)
logs
,
_
=
filter
.
Find
(
context
.
Background
())
logs
,
_
=
filter
.
Find
(
context
.
Background
())
if
len
(
logs
)
!=
0
{
if
len
(
logs
)
!=
0
{
t
.
Error
(
"expected 0 log, got"
,
len
(
logs
))
t
.
Error
(
"expected 0 log, got"
,
len
(
logs
))
}
}
filter
=
New
(
backend
,
true
)
filter
=
New
(
backend
,
0
,
-
1
,
nil
,
[][]
common
.
Hash
{{
failHash
},
{
hash1
}})
filter
.
SetTopics
([][]
common
.
Hash
{{
failHash
},
{
hash1
}})
filter
.
SetBeginBlock
(
0
)
filter
.
SetEndBlock
(
-
1
)
logs
,
_
=
filter
.
Find
(
context
.
Background
())
logs
,
_
=
filter
.
Find
(
context
.
Background
())
if
len
(
logs
)
!=
0
{
if
len
(
logs
)
!=
0
{
...
...
eth/handler.go
View file @
4ea4d2dc
...
@@ -49,6 +49,8 @@ const (
...
@@ -49,6 +49,8 @@ const (
// txChanSize is the size of channel listening to TxPreEvent.
// txChanSize is the size of channel listening to TxPreEvent.
// The number is referenced from the size of tx pool.
// The number is referenced from the size of tx pool.
txChanSize
=
4096
txChanSize
=
4096
bloomBitsSection
=
4096
)
)
var
(
var
(
...
@@ -92,6 +94,8 @@ type ProtocolManager struct {
...
@@ -92,6 +94,8 @@ type ProtocolManager struct {
quitSync
chan
struct
{}
quitSync
chan
struct
{}
noMorePeers
chan
struct
{}
noMorePeers
chan
struct
{}
lesServer
LesServer
// wait group is used for graceful shutdowns during downloading
// wait group is used for graceful shutdowns during downloading
// and processing
// and processing
wg
sync
.
WaitGroup
wg
sync
.
WaitGroup
...
@@ -114,6 +118,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
...
@@ -114,6 +118,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
txsyncCh
:
make
(
chan
*
txsync
),
txsyncCh
:
make
(
chan
*
txsync
),
quitSync
:
make
(
chan
struct
{}),
quitSync
:
make
(
chan
struct
{}),
}
}
// Figure out whether to allow fast sync or not
// Figure out whether to allow fast sync or not
if
mode
==
downloader
.
FastSync
&&
blockchain
.
CurrentBlock
()
.
NumberU64
()
>
0
{
if
mode
==
downloader
.
FastSync
&&
blockchain
.
CurrentBlock
()
.
NumberU64
()
>
0
{
log
.
Warn
(
"Blockchain not empty, fast sync disabled"
)
log
.
Warn
(
"Blockchain not empty, fast sync disabled"
)
...
...
les/api_backend.go
View file @
4ea4d2dc
...
@@ -19,6 +19,7 @@ package les
...
@@ -19,6 +19,7 @@ package les
import
(
import
(
"context"
"context"
"math/big"
"math/big"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common"
...
@@ -28,6 +29,7 @@ import (
...
@@ -28,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/event"
...
@@ -171,3 +173,19 @@ func (b *LesApiBackend) EventMux() *event.TypeMux {
...
@@ -171,3 +173,19 @@ func (b *LesApiBackend) EventMux() *event.TypeMux {
func
(
b
*
LesApiBackend
)
AccountManager
()
*
accounts
.
Manager
{
func
(
b
*
LesApiBackend
)
AccountManager
()
*
accounts
.
Manager
{
return
b
.
eth
.
accountManager
return
b
.
eth
.
accountManager
}
}
func
(
b
*
LesApiBackend
)
GetBloomBits
(
ctx
context
.
Context
,
bitIdx
uint64
,
sectionIdxList
[]
uint64
)
([][]
byte
,
error
)
{
return
nil
,
nil
// implemented in a subsequent PR
}
func
(
b
*
LesApiBackend
)
BloomBitsSections
()
uint64
{
return
0
}
func
(
b
*
LesApiBackend
)
BloomBitsConfig
()
filters
.
BloomConfig
{
return
filters
.
BloomConfig
{
SectionSize
:
32768
,
MaxRequestLen
:
16
,
MaxRequestWait
:
time
.
Microsecond
*
100
,
}
}
les/backend.go
View file @
4ea4d2dc
...
@@ -169,7 +169,7 @@ func (s *LightEthereum) APIs() []rpc.API {
...
@@ -169,7 +169,7 @@ func (s *LightEthereum) APIs() []rpc.API {
},
{
},
{
Namespace
:
"eth"
,
Namespace
:
"eth"
,
Version
:
"1.0"
,
Version
:
"1.0"
,
Service
:
filters
.
NewPublicFilterAPI
(
s
.
ApiBackend
,
true
),
Service
:
filters
.
NewPublicFilterAPI
(
s
.
ApiBackend
,
true
,
0
),
Public
:
true
,
Public
:
true
,
},
{
},
{
Namespace
:
"net"
,
Namespace
:
"net"
,
...
...
miner/worker.go
View file @
4ea4d2dc
...
@@ -324,8 +324,6 @@ func (self *worker) wait() {
...
@@ -324,8 +324,6 @@ func (self *worker) wait() {
if
stat
==
core
.
CanonStatTy
{
if
stat
==
core
.
CanonStatTy
{
// This puts transactions in a extra db for rpc
// This puts transactions in a extra db for rpc
core
.
WriteTxLookupEntries
(
self
.
chainDb
,
block
)
core
.
WriteTxLookupEntries
(
self
.
chainDb
,
block
)
// Write map map bloom filters
core
.
WriteMipmapBloom
(
self
.
chainDb
,
block
.
NumberU64
(),
work
.
receipts
)
// implicit by posting ChainHeadEvent
// implicit by posting ChainHeadEvent
mustCommitNewWork
=
false
mustCommitNewWork
=
false
}
}
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment