Commit 451ffdb6 (Unverified)
Authored Sep 06, 2017 by Zsolt Felfoldi; committed by Péter Szilágyi, Sep 06, 2017
core/bloombits: use general filters instead of addresses and topics
Parent: 6ff2c029
Showing 3 changed files with 33 additions and 46 deletions
core/bloombits/matcher.go       +13 -41
core/bloombits/matcher_test.go   +2 -4
eth/filters/filter.go           +18 -1
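At a glance, the commit replaces NewMatcher's dedicated address and topic parameters with a single generic filter system of type [][][]byte, and eth/filters now flattens its address and topic clauses into that shape before constructing the matcher. A minimal call-shape sketch against the post-commit API (the address, topic and section-size values below are illustrative placeholders, not values from the commit):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/bloombits"
)

func main() {
	// Illustrative values only.
	addr := common.HexToAddress("0x0000000000000000000000000000000000000001")
	topic := common.HexToHash("0x00000000000000000000000000000000000000000000000000000000deadbeef")

	// Post-commit shape: one generic filter system. Each outer element is a
	// filter group that the matcher chains into its pipeline, each inner
	// element is a clause within that group, given as raw bytes.
	filters := [][][]byte{
		{addr.Bytes()},  // address clause
		{topic.Bytes()}, // topic clause (position 0)
	}

	// Pre-commit this call was NewMatcher(sectionSize, addresses, topics);
	// the section size here (4096) is just an example value.
	matcher := bloombits.NewMatcher(4096, filters)
	fmt.Printf("%T\n", matcher)
}

The outer grouping mirrors the flattening added to eth/filters.New below: one group for the address clauses, then one group per topic position, which the matcher daisy-chains in run().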
core/bloombits/matcher.go
@@ -24,7 +24,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/bitutil"
 	"github.com/ethereum/go-ethereum/crypto"
 )
@@ -68,8 +67,7 @@ type Retrieval struct {
 type Matcher struct {
 	sectionSize uint64 // Size of the data batches to filter on
 
-	addresses  []bloomIndexes      // Addresses the system is filtering for
-	topics     [][]bloomIndexes    // Topics the system is filtering for
+	filters    [][]bloomIndexes    // Filter the system is matching for
 	schedulers map[uint]*scheduler // Retrieval schedulers for loading bloom bits
 
 	retrievers chan chan uint       // Retriever processes waiting for bit allocations
@@ -82,7 +80,8 @@ type Matcher struct {
 // NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
 // address and topic filtering on them.
-func NewMatcher(sectionSize uint64, addresses []common.Address, topics [][]common.Hash) *Matcher {
+func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher {
+	// Create the matcher instance
 	m := &Matcher{
 		sectionSize: sectionSize,
 		schedulers:  make(map[uint]*scheduler),
@@ -91,48 +90,25 @@ func NewMatcher(sectionSize uint64, addresses []common.Address, topics [][]commo
 		retrievals:  make(chan chan *Retrieval),
 		deliveries:  make(chan *Retrieval),
 	}
-	m.setAddresses(addresses)
-	m.setTopics(topics)
-
-	return m
-}
-
-// setAddresses configures the matcher to only return logs that are generated
-// from addresses that are included in the given list.
-func (m *Matcher) setAddresses(addresses []common.Address) {
-	// Calculate the bloom bit indexes for the addresses we're interested in
-	m.addresses = make([]bloomIndexes, len(addresses))
-	for i, address := range addresses {
-		m.addresses[i] = calcBloomIndexes(address.Bytes())
-	}
-	// For every bit, create a scheduler to load/download the bit vectors
-	for _, bloomIndexList := range m.addresses {
-		for _, bloomIndex := range bloomIndexList {
-			m.addScheduler(bloomIndex)
-		}
-	}
-}
-
-// setTopics configures the matcher to only return logs that have topics matching
-// the given list.
-func (m *Matcher) setTopics(topicsList [][]common.Hash) {
-	// Calculate the bloom bit indexes for the topics we're interested in
-	m.topics = nil
-
-	for _, topics := range topicsList {
-		bloomBits := make([]bloomIndexes, len(topics))
-		for i, topic := range topics {
-			bloomBits[i] = calcBloomIndexes(topic.Bytes())
+	// Calculate the bloom bit indexes for the groups we're interested in
+	m.filters = nil
+
+	for _, filter := range filters {
+		bloomBits := make([]bloomIndexes, len(filter))
+		for i, clause := range filter {
+			bloomBits[i] = calcBloomIndexes(clause)
 		}
-		m.topics = append(m.topics, bloomBits)
+		m.filters = append(m.filters, bloomBits)
 	}
 	// For every bit, create a scheduler to load/download the bit vectors
-	for _, bloomIndexLists := range m.topics {
+	for _, bloomIndexLists := range m.filters {
 		for _, bloomIndexList := range bloomIndexLists {
 			for _, bloomIndex := range bloomIndexList {
 				m.addScheduler(bloomIndex)
 			}
 		}
 	}
+	return m
 }
 
 // addScheduler adds a bit stream retrieval scheduler for the given bit index if
@@ -250,14 +226,10 @@ func (m *Matcher) run(begin, end uint64, buffer int, session *MatcherSession) ch
 		}
 	}()
 	// Assemble the daisy-chained filtering pipeline
-	blooms := m.topics
-	if len(m.addresses) > 0 {
-		blooms = append([][]bloomIndexes{m.addresses}, blooms...)
-	}
 	next := source
 	dist := make(chan *request, buffer)
 
-	for _, bloom := range blooms {
+	for _, bloom := range m.filters {
 		next = m.subMatch(next, dist, bloom, session)
 	}
 	// Start the request distribution
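For context on what calcBloomIndexes does with each raw clause: Ethereum's 2048-bit log bloom sets three bit positions per value, derived from its Keccak-256 hash. The standalone sketch below restates that derivation under that assumption; it is a paraphrase for illustration, not code taken from this commit, and the bloomIndexes type here only mirrors the package's unexported one.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// bloomIndexes mirrors the unexported [3]uint type in core/bloombits: the
// three bit positions a value occupies in the 2048-bit Ethereum log bloom.
type bloomIndexes [3]uint

// calcBloomIndexes restates (as an assumption, not a verbatim copy) the
// derivation the matcher applies to every filter clause: Keccak-256 the raw
// bytes and take one 11-bit index from each of the first three byte pairs.
func calcBloomIndexes(b []byte) bloomIndexes {
	h := crypto.Keccak256(b)

	var idxs bloomIndexes
	for i := 0; i < len(idxs); i++ {
		idxs[i] = (uint(h[2*i])<<8)&2047 + uint(h[2*i+1])
	}
	return idxs
}

func main() {
	clause := []byte{0xde, 0xad, 0xbe, 0xef} // e.g. an address or topic, as raw bytes
	fmt.Println(calcBloomIndexes(clause))    // three indexes in [0, 2048)
}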
core/bloombits/matcher_test.go
@@ -94,10 +94,8 @@ func testMatcherBothModes(t *testing.T, filter [][]bloomIndexes, blocks uint64,
 // number of requests made for cross validation between different modes.
 func testMatcher(t *testing.T, filter [][]bloomIndexes, blocks uint64, intermittent bool, retrievals uint32, maxReqCount int) uint32 {
 	// Create a new matcher an simulate our explicit random bitsets
-	matcher := NewMatcher(testSectionSize, nil, nil)
-
-	matcher.addresses = filter[0]
-	matcher.topics = filter[1:]
+	matcher := NewMatcher(testSectionSize, nil)
+	matcher.filters = filter
 
 	for _, rule := range filter {
 		for _, topic := range rule {
eth/filters/filter.go
@@ -60,6 +60,23 @@ type Filter struct {
 // New creates a new filter which uses a bloom filter on blocks to figure out whether
 // a particular block is interesting or not.
 func New(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
+	// Flatten the address and topic filter clauses into a single filter system
+	var filters [][][]byte
+	if len(addresses) > 0 {
+		filter := make([][]byte, len(addresses))
+		for i, address := range addresses {
+			filter[i] = address.Bytes()
+		}
+		filters = append(filters, filter)
+	}
+	for _, topicList := range topics {
+		filter := make([][]byte, len(topicList))
+		for i, topic := range topicList {
+			filter[i] = topic.Bytes()
+		}
+		filters = append(filters, filter)
+	}
+	// Assemble and return the filter
 	size, _ := backend.BloomStatus()
 
 	return &Filter{
@@ -69,7 +86,7 @@ func New(backend Backend, begin, end int64, addresses []common.Address, topics [
 		addresses: addresses,
 		topics:    topics,
 		db:        backend.ChainDb(),
-		matcher:   bloombits.NewMatcher(size, addresses, topics),
+		matcher:   bloombits.NewMatcher(size, filters),
 	}
 }
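The block added to eth/filters.New above is the only place the flattening happens. Pulled out into a standalone helper purely for illustration (flattenFilters is a hypothetical name, not one from the codebase), the conversion looks like this:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// flattenFilters mirrors, for illustration only, the inline flattening that
// eth/filters.New performs after this commit: the address list becomes the
// first filter group (if non-empty) and every topic position becomes a
// further group, each clause reduced to its raw bytes.
func flattenFilters(addresses []common.Address, topics [][]common.Hash) [][][]byte {
	var filters [][][]byte
	if len(addresses) > 0 {
		filter := make([][]byte, len(addresses))
		for i, address := range addresses {
			filter[i] = address.Bytes()
		}
		filters = append(filters, filter)
	}
	for _, topicList := range topics {
		filter := make([][]byte, len(topicList))
		for i, topic := range topicList {
			filter[i] = topic.Bytes()
		}
		filters = append(filters, filter)
	}
	return filters
}

func main() {
	addresses := []common.Address{common.HexToAddress("0x01")}
	topics := [][]common.Hash{{common.HexToHash("0xaa"), common.HexToHash("0xbb")}}

	filters := flattenFilters(addresses, topics)
	fmt.Println(len(filters))    // 2 groups: one for addresses, one for topic position 0
	fmt.Println(len(filters[1])) // 2 clauses in the topic group
}

Note that an empty address list contributes no group at all, matching the old behaviour where the matcher only prepended the address stage when len(m.addresses) > 0.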