author     Zsolt Felfoldi <zsfelfoldi@gmail.com>  2017-08-19 03:52:20 +0800
committer  Péter Szilágyi <peterke@gmail.com>     2017-09-06 16:13:13 +0800
commit     4ea4d2dc3473afd9d2eda6ef6b359accce1f0946 (patch)
tree       e651cfc2e3aa36083b333bf34dc3cccef2623f26
parent     1e67378df879b1ce566f17dd95a3b126056254b5 (diff)
core, eth: add bloombit indexer, filter based on it
-rw-r--r--  core/blockchain.go  15
-rw-r--r--  core/bloombits/fetcher_test.go  101
-rw-r--r--  core/bloombits/matcher.go  579
-rw-r--r--  core/bloombits/matcher_test.go  196
-rw-r--r--  core/bloombits/utils.go  63
-rw-r--r--  core/chain_indexer.go  76
-rw-r--r--  core/chain_indexer_test.go  7
-rw-r--r--  core/database_util.go  69
-rw-r--r--  core/database_util_test.go  108
-rw-r--r--  core/types/bloom9.go  14
-rw-r--r--  eth/api_backend.go  27
-rw-r--r--  eth/backend.go  10
-rw-r--r--  eth/backend_test.go  74
-rw-r--r--  eth/db_upgrade.go  70
-rw-r--r--  eth/filters/api.go  48
-rw-r--r--  eth/filters/bench_test.go  237
-rw-r--r--  eth/filters/filter.go  232
-rw-r--r--  eth/filters/filter_system_test.go  50
-rw-r--r--  eth/filters/filter_test.go  70
-rw-r--r--  eth/handler.go  5
-rw-r--r--  les/api_backend.go  18
-rw-r--r--  les/backend.go  2
-rw-r--r--  miner/worker.go  2
23 files changed, 1590 insertions, 483 deletions
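The idea behind the new index, implemented by the bloombits package added below, is to store each section's header blooms "rotated": instead of one 2048-bit bloom per block, the database keeps 2048 per-section bit vectors, one per bloom bit position, so a filter only has to fetch the three vectors belonging to each address or topic. A minimal, self-contained sketch of that transposition (the 8-block section size is a toy value for illustration, not the real section size):

```go
package main

import "fmt"

const bloomLength = 2048 // bits in one header bloom filter

// transpose turns per-block bloom filters (sectionSize x 2048 bits) into
// per-bit-position section vectors (2048 x sectionSize bits), which is the
// transformation BloomBitsCreator performs for a chain section.
func transpose(blooms [][bloomLength / 8]byte) [bloomLength][]byte {
	sectionSize := len(blooms) // assumed to be a multiple of 8
	var out [bloomLength][]byte
	for i := range out {
		out[i] = make([]byte, sectionSize/8)
	}
	for blockIdx, bloom := range blooms {
		byteIdx := blockIdx / 8
		bitMask := byte(1) << byte(7-blockIdx%8)
		for bit := 0; bit < bloomLength; bit++ {
			// bloom bytes are big-endian: bloom bit 0 lives in the last byte
			if bloom[bloomLength/8-1-bit/8]&(byte(1)<<byte(bit%8)) != 0 {
				out[bit][byteIdx] |= bitMask
			}
		}
	}
	return out
}

func main() {
	blooms := make([][bloomLength / 8]byte, 8) // a toy 8-block "section"
	blooms[3][bloomLength/8-1] = 0x01          // set bloom bit 0 in block 3
	vectors := transpose(blooms)
	fmt.Printf("%08b\n", vectors[0][0]) // 00010000: block 3 is flagged for bit 0
}
```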
diff --git a/core/blockchain.go b/core/blockchain.go
index 0bb12fc19..d74b3520b 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -759,12 +759,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
log.Crit("Failed to write block receipts", "err", err)
return
}
- if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
- errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
- atomic.AddInt32(&failed, 1)
- log.Crit("Failed to write log blooms", "err", err)
- return
- }
if err := WriteTxLookupEntries(bc.chainDb, block); err != nil {
errs[index] = fmt.Errorf("failed to write lookup metadata: %v", err)
atomic.AddInt32(&failed, 1)
@@ -1017,10 +1011,6 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
if err := WriteTxLookupEntries(bc.chainDb, block); err != nil {
return i, err
}
- // Write map map bloom filters
- if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
- return i, err
- }
// Write hash preimages
if err := WritePreimages(bc.chainDb, block.NumberU64(), state.Preimages()); err != nil {
return i, err
@@ -1178,11 +1168,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
if err := WriteTxLookupEntries(bc.chainDb, block); err != nil {
return err
}
- // Write map map bloom filters
- receipts := GetBlockReceipts(bc.chainDb, block.Hash(), block.NumberU64())
- if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
- return err
- }
addedTxs = append(addedTxs, block.Transactions()...)
}
diff --git a/core/bloombits/fetcher_test.go b/core/bloombits/fetcher_test.go
new file mode 100644
index 000000000..9c229cf8d
--- /dev/null
+++ b/core/bloombits/fetcher_test.go
@@ -0,0 +1,101 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+package bloombits
+
+import (
+ "bytes"
+ "encoding/binary"
+ "math/rand"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+const testFetcherReqCount = 5000
+
+func fetcherTestVector(b uint, s uint64) []byte {
+ r := make([]byte, 10)
+ binary.BigEndian.PutUint16(r[0:2], uint16(b))
+ binary.BigEndian.PutUint64(r[2:10], s)
+ return r
+}
+
+func TestFetcher(t *testing.T) {
+ testFetcher(t, 1)
+}
+
+func TestFetcherMultipleReaders(t *testing.T) {
+ testFetcher(t, 10)
+}
+
+func testFetcher(t *testing.T, cnt int) {
+ f := &fetcher{
+ requestMap: make(map[uint64]fetchRequest),
+ }
+ distCh := make(chan distRequest, channelCap)
+ stop := make(chan struct{})
+ var reqCount uint32
+
+ for i := 0; i < 10; i++ {
+ go func() {
+ for {
+ req, ok := <-distCh
+ if !ok {
+ return
+ }
+ time.Sleep(time.Duration(rand.Intn(100000)))
+ atomic.AddUint32(&reqCount, 1)
+ f.deliver([]uint64{req.sectionIndex}, [][]byte{fetcherTestVector(req.bloomIndex, req.sectionIndex)})
+ }
+ }()
+ }
+
+ var wg, wg2 sync.WaitGroup
+ for cc := 0; cc < cnt; cc++ {
+ wg.Add(1)
+ in := make(chan uint64, channelCap)
+ out := f.fetch(in, distCh, stop, &wg2)
+
+ time.Sleep(time.Millisecond * 10 * time.Duration(cc))
+ go func() {
+ for i := uint64(0); i < testFetcherReqCount; i++ {
+ in <- i
+ }
+ }()
+
+ go func() {
+ for i := uint64(0); i < testFetcherReqCount; i++ {
+ bv := <-out
+ if !bytes.Equal(bv, fetcherTestVector(0, i)) {
+ if len(bv) != 10 {
+ t.Errorf("Vector #%d length is %d, expected 10", i, len(bv))
+ } else {
+ j := binary.BigEndian.Uint64(bv[2:10])
+ t.Errorf("Expected vector #%d, fetched #%d", i, j)
+ }
+ }
+ }
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+ close(stop)
+ if reqCount != testFetcherReqCount {
+ t.Errorf("Request count mismatch: expected %v, got %v", testFetcherReqCount, reqCount)
+ }
+}
diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go
new file mode 100644
index 000000000..5a7df6b1c
--- /dev/null
+++ b/core/bloombits/matcher.go
@@ -0,0 +1,579 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+package bloombits
+
+import (
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/bitutil"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+const channelCap = 100
+
+// fetcher handles bit vector retrieval pipelines for a single bit index
+type fetcher struct {
+ bloomIndex uint
+ requestMap map[uint64]fetchRequest
+ requestLock sync.RWMutex
+}
+
+// fetchRequest represents the state of a bit vector requested from a fetcher. When a distRequest has been sent to the distributor but
+// the data has not been delivered yet, queued is true. When delivered, it is stored in the data field and the delivered channel is closed.
+type fetchRequest struct {
+ data []byte
+ queued bool
+ delivered chan struct{}
+}
+
+// distRequest is sent by the fetcher to the distributor which groups and prioritizes these requests.
+type distRequest struct {
+ bloomIndex uint
+ sectionIndex uint64
+}
+
+// fetch creates a retrieval pipeline, receiving section indexes from sectionCh and returning the results
+// in the same order through the returned channel. Multiple fetch instances of the same fetcher are allowed
+// to run in parallel, in case the same bit index appears multiple times in the filter structure. Each section
+// is requested only once; requests are sent to the request distributor (part of Matcher) through distCh.
+func (f *fetcher) fetch(sectionCh chan uint64, distCh chan distRequest, stop chan struct{}, wg *sync.WaitGroup) chan []byte {
+ dataCh := make(chan []byte, channelCap)
+ returnCh := make(chan uint64, channelCap)
+ wg.Add(2)
+
+ go func() {
+ defer wg.Done()
+ defer close(returnCh)
+
+ for {
+ select {
+ case <-stop:
+ return
+ case idx, ok := <-sectionCh:
+ if !ok {
+ return
+ }
+
+ req := false
+ f.requestLock.Lock()
+ r := f.requestMap[idx]
+ if r.data == nil {
+ req = !r.queued
+ r.queued = true
+ if r.delivered == nil {
+ r.delivered = make(chan struct{})
+ }
+ f.requestMap[idx] = r
+ }
+ f.requestLock.Unlock()
+ if req {
+ distCh <- distRequest{bloomIndex: f.bloomIndex, sectionIndex: idx} // success is guaranteed, distributeRequests shuts down after fetch
+ }
+ select {
+ case <-stop:
+ return
+ case returnCh <- idx:
+ }
+ }
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+ defer close(dataCh)
+
+ for {
+ select {
+ case <-stop:
+ return
+ case idx, ok := <-returnCh:
+ if !ok {
+ return
+ }
+
+ f.requestLock.RLock()
+ r := f.requestMap[idx]
+ f.requestLock.RUnlock()
+
+ if r.data == nil {
+ select {
+ case <-stop:
+ return
+ case <-r.delivered:
+ f.requestLock.RLock()
+ r = f.requestMap[idx]
+ f.requestLock.RUnlock()
+ }
+ }
+ select {
+ case <-stop:
+ return
+ case dataCh <- r.data:
+ }
+ }
+ }
+ }()
+
+ return dataCh
+}
+
+// deliver is called by the request distributor when a reply to a request has
+// arrived
+func (f *fetcher) deliver(sectionIdxList []uint64, data [][]byte) {
+ f.requestLock.Lock()
+ defer f.requestLock.Unlock()
+
+ for i, sectionIdx := range sectionIdxList {
+ r := f.requestMap[sectionIdx]
+ if r.data != nil {
+ panic("BloomBits section data delivered twice")
+ }
+ r.data = data[i]
+ close(r.delivered)
+ f.requestMap[sectionIdx] = r
+ }
+}
+
+// Matcher is a pipelined structure of fetchers and logic matchers which perform
+// binary AND/OR operations on the bitstreams, finally creating a stream of potential matches.
+type Matcher struct {
+ addresses []types.BloomIndexList
+ topics [][]types.BloomIndexList
+ fetchers map[uint]*fetcher
+ sectionSize uint64
+
+ distCh chan distRequest
+ reqs map[uint][]uint64
+ freeQueues map[uint]struct{}
+ allocQueue []chan uint
+ running bool
+ stop chan struct{}
+ lock sync.Mutex
+ wg, distWg sync.WaitGroup
+}
+
+// NewMatcher creates a new Matcher instance
+func NewMatcher(sectionSize uint64, addresses []common.Address, topics [][]common.Hash) *Matcher {
+ m := &Matcher{
+ fetchers: make(map[uint]*fetcher),
+ reqs: make(map[uint][]uint64),
+ freeQueues: make(map[uint]struct{}),
+ distCh: make(chan distRequest, channelCap),
+ sectionSize: sectionSize,
+ }
+ m.setAddresses(addresses)
+ m.setTopics(topics)
+ return m
+}
+
+// setAddresses matches only logs that are generated from addresses that are included
+// in the given addresses.
+func (m *Matcher) setAddresses(addresses []common.Address) {
+ m.addresses = make([]types.BloomIndexList, len(addresses))
+ for i, address := range addresses {
+ m.addresses[i] = types.BloomIndexes(address.Bytes())
+ }
+
+ for _, bloomIndexList := range m.addresses {
+ for _, bloomIndex := range bloomIndexList {
+ m.newFetcher(bloomIndex)
+ }
+ }
+}
+
+// setTopics matches only logs that have topics matching the given topics.
+func (m *Matcher) setTopics(topics [][]common.Hash) {
+ m.topics = nil
+loop:
+ for _, topicList := range topics {
+ t := make([]types.BloomIndexList, len(topicList))
+ for i, topic := range topicList {
+ if (topic == common.Hash{}) {
+ continue loop
+ }
+ t[i] = types.BloomIndexes(topic.Bytes())
+ }
+ m.topics = append(m.topics, t)
+ }
+
+ for _, bloomIndexLists := range m.topics {
+ for _, bloomIndexList := range bloomIndexLists {
+ for _, bloomIndex := range bloomIndexList {
+ m.newFetcher(bloomIndex)
+ }
+ }
+ }
+}
+
+// match creates a daisy-chain of sub-matchers, one for the address set and one for each topic set, each
+// sub-matcher receiving a section only if the previous ones have all found a potential match in one of
+// the blocks of the section, then binary AND-ing its own matches and forwarding the result to the next one
+func (m *Matcher) match(processCh chan partialMatches) chan partialMatches {
+ indexLists := m.topics
+ if len(m.addresses) > 0 {
+ indexLists = append([][]types.BloomIndexList{m.addresses}, indexLists...)
+ }
+ m.distributeRequests()
+
+ for _, subIndexList := range indexLists {
+ processCh = m.subMatch(processCh, subIndexList)
+ }
+ return processCh
+}
+
+// partialMatches with a non-nil vector represents a section in which some sub-matchers have already
+// found potential matches. Subsequent sub-matchers will binary AND their matches with this vector.
+// If vector is nil, it represents a section to be processed by the first sub-matcher.
+type partialMatches struct {
+ sectionIndex uint64
+ vector []byte
+}
+
+// newFetcher adds a fetcher for the given bit index if one does not already exist
+func (m *Matcher) newFetcher(idx uint) {
+ if _, ok := m.fetchers[idx]; ok {
+ return
+ }
+ f := &fetcher{
+ bloomIndex: idx,
+ requestMap: make(map[uint64]fetchRequest),
+ }
+ m.fetchers[idx] = f
+}
+
+// subMatch creates a sub-matcher that filters for a set of addresses or topics, binary OR-s those matches, then
+// binary AND-s the result to the daisy-chain input (processCh) and forwards it to the daisy-chain output.
+// The matches of each address/topic are calculated by fetching the given sections of the three bloom bit indexes belonging to
+// that address/topic, and binary AND-ing those vectors together.
+func (m *Matcher) subMatch(processCh chan partialMatches, bloomIndexLists []types.BloomIndexList) chan partialMatches {
+ // set up fetchers
+ fetchIndexChannels := make([][3]chan uint64, len(bloomIndexLists))
+ fetchDataChannels := make([][3]chan []byte, len(bloomIndexLists))
+ for i, bloomIndexList := range bloomIndexLists {
+ for j, bloomIndex := range bloomIndexList {
+ fetchIndexChannels[i][j] = make(chan uint64, channelCap)
+ fetchDataChannels[i][j] = m.fetchers[bloomIndex].fetch(fetchIndexChannels[i][j], m.distCh, m.stop, &m.wg)
+ }
+ }
+
+ fetchedCh := make(chan partialMatches, channelCap) // entries from processCh are forwarded here after fetches have been initiated
+ resultsCh := make(chan partialMatches, channelCap)
+
+ m.wg.Add(2)
+ // goroutine for starting retrievals
+ go func() {
+ defer m.wg.Done()
+
+ for {
+ select {
+ case <-m.stop:
+ return
+ case s, ok := <-processCh:
+ if !ok {
+ close(fetchedCh)
+ for _, fetchIndexChs := range fetchIndexChannels {
+ for _, fetchIndexCh := range fetchIndexChs {
+ close(fetchIndexCh)
+ }
+ }
+ return
+ }
+
+ for _, fetchIndexChs := range fetchIndexChannels {
+ for _, fetchIndexCh := range fetchIndexChs {
+ select {
+ case <-m.stop:
+ return
+ case fetchIndexCh <- s.sectionIndex:
+ }
+ }
+ }
+ select {
+ case <-m.stop:
+ return
+ case fetchedCh <- s:
+ }
+ }
+ }
+ }()
+
+ // goroutine for processing retrieved data
+ go func() {
+ defer m.wg.Done()
+
+ for {
+ select {
+ case <-m.stop:
+ return
+ case s, ok := <-fetchedCh:
+ if !ok {
+ close(resultsCh)
+ return
+ }
+
+ var orVector []byte
+ for _, fetchDataChs := range fetchDataChannels {
+ var andVector []byte
+ for _, fetchDataCh := range fetchDataChs {
+ var data []byte
+ select {
+ case <-m.stop:
+ return
+ case data = <-fetchDataCh:
+ }
+ if andVector == nil {
+ andVector = make([]byte, int(m.sectionSize/8))
+ copy(andVector, data)
+ } else {
+ bitutil.ANDBytes(andVector, andVector, data)
+ }
+ }
+ if orVector == nil {
+ orVector = andVector
+ } else {
+ bitutil.ORBytes(orVector, orVector, andVector)
+ }
+ }
+
+ if orVector == nil {
+ orVector = make([]byte, int(m.sectionSize/8))
+ }
+ if s.vector != nil {
+ bitutil.ANDBytes(orVector, orVector, s.vector)
+ }
+ if bitutil.TestBytes(orVector) {
+ select {
+ case <-m.stop:
+ return
+ case resultsCh <- partialMatches{s.sectionIndex, orVector}:
+ }
+ }
+ }
+ }
+ }()
+
+ return resultsCh
+}
+
+// Start starts the matching process and returns a stream of bloom matches in
+// a given range of blocks.
+// It returns a results channel immediately and stops if Stop is called or there
+// are no more matches in the range (in which case the results channel is closed).
+// Start/Stop can be called multiple times for different ranges, in which case already
+// delivered bit vectors are not requested again.
+func (m *Matcher) Start(begin, end uint64) chan uint64 {
+ m.stop = make(chan struct{})
+ processCh := make(chan partialMatches, channelCap)
+ resultsCh := make(chan uint64, channelCap)
+
+ res := m.match(processCh)
+
+ startSection := begin / m.sectionSize
+ endSection := end / m.sectionSize
+
+ m.wg.Add(2)
+ go func() {
+ defer m.wg.Done()
+ defer close(processCh)
+
+ for i := startSection; i <= endSection; i++ {
+ select {
+ case processCh <- partialMatches{i, nil}:
+ case <-m.stop:
+ return
+ }
+ }
+ }()
+
+ go func() {
+ defer m.wg.Done()
+ defer close(resultsCh)
+
+ for {
+ select {
+ case r, ok := <-res:
+ if !ok {
+ return
+ }
+ sectionStart := r.sectionIndex * m.sectionSize
+ s := sectionStart
+ if begin > s {
+ s = begin
+ }
+ e := sectionStart + m.sectionSize - 1
+ if end < e {
+ e = end
+ }
+ for i := s; i <= e; i++ {
+ b := r.vector[(i-sectionStart)/8]
+ bit := 7 - i%8
+ if b != 0 {
+ if b&(1<<bit) != 0 {
+ select {
+ case <-m.stop:
+ return
+ case resultsCh <- i:
+ }
+ }
+ } else {
+ i += bit
+ }
+ }
+
+ case <-m.stop:
+ return
+ }
+ }
+ }()
+
+ return resultsCh
+}
+
+// Stop stops the matching process
+func (m *Matcher) Stop() {
+ close(m.stop)
+ m.distWg.Wait()
+}
+
+// distributeRequests receives requests from the fetchers and either queues them
+// or immediately forwards them to one of the waiting AllocSectionQueue calls.
+// Requests with a lower section idx are always prioritized.
+func (m *Matcher) distributeRequests() {
+ m.distWg.Add(1)
+ stopDist := make(chan struct{})
+ go func() {
+ <-m.stop
+ m.wg.Wait()
+ close(stopDist)
+ }()
+
+ m.running = true
+
+ go func() {
+ for {
+ select {
+ case r := <-m.distCh:
+ m.lock.Lock()
+ queue := m.reqs[r.bloomIndex]
+ i := 0
+ for i < len(queue) && r.sectionIndex > queue[i] {
+ i++
+ }
+ queue = append(queue, 0)
+ copy(queue[i+1:], queue[i:len(queue)-1])
+ queue[i] = r.sectionIndex
+ m.reqs[r.bloomIndex] = queue
+ if len(queue) == 1 {
+ m.freeQueue(r.bloomIndex)
+ }
+ m.lock.Unlock()
+ case <-stopDist:
+ m.lock.Lock()
+ for _, ch := range m.allocQueue {
+ close(ch)
+ }
+ m.allocQueue = nil
+ m.running = false
+ m.lock.Unlock()
+ m.distWg.Done()
+ return
+ }
+ }
+ }()
+}
+
+// freeQueue marks a queue as free if there are no AllocSectionQueue functions
+// waiting for allocation. If there is someone waiting, the queue is immediately
+// allocated.
+func (m *Matcher) freeQueue(bloomIndex uint) {
+ if len(m.allocQueue) > 0 {
+ m.allocQueue[0] <- bloomIndex
+ m.allocQueue = m.allocQueue[1:]
+ } else {
+ m.freeQueues[bloomIndex] = struct{}{}
+ }
+}
+
+// AllocSectionQueue allocates a queue of requested section indexes belonging to the same
+// bloom bit index for a client process that can either immediately fetch the contents
+// of the queue or wait a little while for more section indexes to be requested.
+func (m *Matcher) AllocSectionQueue() (uint, bool) {
+ m.lock.Lock()
+ if !m.running {
+ m.lock.Unlock()
+ return 0, false
+ }
+
+ var allocCh chan uint
+ if len(m.freeQueues) > 0 {
+ var (
+ found bool
+ bestSection uint64
+ bestIndex uint
+ )
+ for bloomIndex, _ := range m.freeQueues {
+ if !found || m.reqs[bloomIndex][0] < bestSection {
+ found = true
+ bestIndex = bloomIndex
+ bestSection = m.reqs[bloomIndex][0]
+ }
+ }
+ delete(m.freeQueues, bestIndex)
+ m.lock.Unlock()
+ return bestIndex, true
+ } else {
+ allocCh = make(chan uint)
+ m.allocQueue = append(m.allocQueue, allocCh)
+ }
+ m.lock.Unlock()
+
+ bloomIndex, ok := <-allocCh
+ return bloomIndex, ok
+}
+
+// SectionCount returns the length of the section index queue belonging to the given bloom bit index
+func (m *Matcher) SectionCount(bloomIndex uint) int {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ return len(m.reqs[bloomIndex])
+}
+
+// FetchSections fetches all or part of an already allocated queue and deallocates it
+func (m *Matcher) FetchSections(bloomIndex uint, maxCount int) []uint64 {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ queue := m.reqs[bloomIndex]
+ if maxCount < len(queue) {
+ // return only part of the existing queue, mark the rest as free
+ m.reqs[bloomIndex] = queue[maxCount:]
+ m.freeQueue(bloomIndex)
+ return queue[:maxCount]
+ } else {
+ // return the entire queue
+ delete(m.reqs, bloomIndex)
+ return queue
+ }
+}
+
+// Deliver delivers a bit vector to the appropriate fetcher.
+// It is possible to deliver data even after Stop has been called. Once a vector has been
+// requested, the matcher will keep waiting for delivery.
+func (m *Matcher) Deliver(bloomIndex uint, sectionIdxList []uint64, data [][]byte) {
+ m.fetchers[bloomIndex].deliver(sectionIdxList, data)
+}
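To make the pipeline above concrete, here is a hedged usage sketch of the exported Matcher API, mirroring the serve loop used by the tests that follow: the matcher is started on a block range, a server goroutine hands queued section requests to a data source via AllocSectionQueue/FetchSections/Deliver, and potential matches are read from the returned channel. loadVector is a hypothetical stand-in for reading the stored (and decompressed) bloomBits vectors.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/bloombits"
)

// loadVector is a hypothetical data source; a real server would read the
// stored (and decompressed) bloomBits vector for the given bit/section.
func loadVector(bitIdx uint, sectionIdx uint64, sectionSize uint64) []byte {
	return make([]byte, sectionSize/8)
}

func main() {
	const sectionSize = 4096
	addresses := []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000001")}

	m := bloombits.NewMatcher(sectionSize, addresses, nil)
	matches := m.Start(0, 100000) // block range to filter

	// Server loop: hand queued section requests to the data source and
	// deliver the resulting bit vectors back to the matcher.
	go func() {
		for {
			bitIdx, ok := m.AllocSectionQueue()
			if !ok {
				return // matcher stopped
			}
			sections := m.FetchSections(bitIdx, 16)
			vectors := make([][]byte, len(sections))
			for i, s := range sections {
				vectors[i] = loadVector(bitIdx, s, sectionSize)
			}
			m.Deliver(bitIdx, sections, vectors)
		}
	}()

	for block := range matches {
		fmt.Println("potential match at block", block)
	}
	m.Stop()
}
```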
diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go
new file mode 100644
index 000000000..bef1491b8
--- /dev/null
+++ b/core/bloombits/matcher_test.go
@@ -0,0 +1,196 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+package bloombits
+
+import (
+ "math/rand"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+const testSectionSize = 4096
+
+func matcherTestVector(b uint, s uint64) []byte {
+ r := make([]byte, testSectionSize/8)
+ for i, _ := range r {
+ var bb byte
+ for bit := 0; bit < 8; bit++ {
+ blockIdx := s*testSectionSize + uint64(i*8+bit)
+ bb += bb
+ if (blockIdx % uint64(b)) == 0 {
+ bb++
+ }
+ }
+ r[i] = bb
+ }
+ return r
+}
+
+func expMatch1(idxs types.BloomIndexList, i uint64) bool {
+ for _, ii := range idxs {
+ if (i % uint64(ii)) != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func expMatch2(idxs []types.BloomIndexList, i uint64) bool {
+ for _, ii := range idxs {
+ if expMatch1(ii, i) {
+ return true
+ }
+ }
+ return false
+}
+
+func expMatch3(idxs [][]types.BloomIndexList, i uint64) bool {
+ for _, ii := range idxs {
+ if !expMatch2(ii, i) {
+ return false
+ }
+ }
+ return true
+}
+
+func testServeMatcher(m *Matcher, stop chan struct{}, cnt *uint32, maxRequestLen int) {
+ // serve matcher with test vectors
+ for i := 0; i < 10; i++ {
+ go func() {
+ for {
+ select {
+ case <-stop:
+ return
+ default:
+ }
+ b, ok := m.AllocSectionQueue()
+ if !ok {
+ return
+ }
+ if m.SectionCount(b) < maxRequestLen {
+ time.Sleep(time.Microsecond * 100)
+ }
+ s := m.FetchSections(b, maxRequestLen)
+ res := make([][]byte, len(s))
+ for i, ss := range s {
+ res[i] = matcherTestVector(b, ss)
+ atomic.AddUint32(cnt, 1)
+ }
+ m.Deliver(b, s, res)
+ }
+ }()
+ }
+}
+
+func testMatcher(t *testing.T, idxs [][]types.BloomIndexList, cnt uint64, stopOnMatches bool, expCount uint32) uint32 {
+ count1 := testMatcherWithReqCount(t, idxs, cnt, stopOnMatches, expCount, 1)
+ count16 := testMatcherWithReqCount(t, idxs, cnt, stopOnMatches, expCount, 16)
+ if count1 != count16 {
+ t.Errorf("Error matching idxs = %v count = %v stopOnMatches = %v: request count mismatch, %v with maxReqCount = 1 vs. %v with maxReqCount = 16", idxs, cnt, stopOnMatches, count1, count16)
+ }
+ return count1
+}
+
+func testMatcherWithReqCount(t *testing.T, idxs [][]types.BloomIndexList, cnt uint64, stopOnMatches bool, expCount uint32, maxReqCount int) uint32 {
+ m := NewMatcher(testSectionSize, nil, nil)
+
+ for _, idxss := range idxs {
+ for _, idxs := range idxss {
+ for _, idx := range idxs {
+ m.newFetcher(idx)
+ }
+ }
+ }
+
+ m.addresses = idxs[0]
+ m.topics = idxs[1:]
+ var reqCount uint32
+
+ stop := make(chan struct{})
+ chn := m.Start(0, cnt-1)
+ testServeMatcher(m, stop, &reqCount, maxReqCount)
+
+ for i := uint64(0); i < cnt; i++ {
+ if expMatch3(idxs, i) {
+ match, ok := <-chn
+ if !ok {
+ t.Errorf("Error matching idxs = %v count = %v stopOnMatches = %v: expected #%v, results channel closed", idxs, cnt, stopOnMatches, i)
+ return 0
+ }
+ if match != i {
+ t.Errorf("Error matching idxs = %v count = %v stopOnMatches = %v: expected #%v, got #%v", idxs, cnt, stopOnMatches, i, match)
+ }
+ if stopOnMatches {
+ m.Stop()
+ close(stop)
+ stop = make(chan struct{})
+ chn = m.Start(i+1, cnt-1)
+ testServeMatcher(m, stop, &reqCount, maxReqCount)
+ }
+ }
+ }
+ match, ok := <-chn
+ if ok {
+ t.Errorf("Error matching idxs = %v count = %v stopOnMatches = %v: expected closed channel, got #%v", idxs, cnt, stopOnMatches, match)
+ }
+ m.Stop()
+ close(stop)
+
+ if expCount != 0 && expCount != reqCount {
+ t.Errorf("Error matching idxs = %v count = %v stopOnMatches = %v: request count mismatch, expected #%v, got #%v", idxs, cnt, stopOnMatches, expCount, reqCount)
+ }
+
+ return reqCount
+}
+
+func testRandomIdxs(l []int, max int) [][]types.BloomIndexList {
+ res := make([][]types.BloomIndexList, len(l))
+ for i, ll := range l {
+ res[i] = make([]types.BloomIndexList, ll)
+ for j, _ := range res[i] {
+ for k, _ := range res[i][j] {
+ res[i][j][k] = uint(rand.Intn(max-1) + 2)
+ }
+ }
+ }
+ return res
+}
+
+func TestMatcher(t *testing.T) {
+ testMatcher(t, [][]types.BloomIndexList{{{10, 20, 30}}}, 100000, false, 75)
+ testMatcher(t, [][]types.BloomIndexList{{{32, 3125, 100}}, {{40, 50, 10}}}, 100000, false, 81)
+ testMatcher(t, [][]types.BloomIndexList{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 10000, false, 36)
+}
+
+func TestMatcherStopOnMatches(t *testing.T) {
+ testMatcher(t, [][]types.BloomIndexList{{{10, 20, 30}}}, 100000, true, 75)
+ testMatcher(t, [][]types.BloomIndexList{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 10000, true, 36)
+}
+
+func TestMatcherRandom(t *testing.T) {
+ for i := 0; i < 20; i++ {
+ testMatcher(t, testRandomIdxs([]int{1}, 50), 100000, false, 0)
+ testMatcher(t, testRandomIdxs([]int{3}, 50), 100000, false, 0)
+ testMatcher(t, testRandomIdxs([]int{2, 2, 2}, 20), 100000, false, 0)
+ testMatcher(t, testRandomIdxs([]int{5, 5, 5}, 50), 100000, false, 0)
+ idxs := testRandomIdxs([]int{2, 2, 2}, 20)
+ reqCount := testMatcher(t, idxs, 10000, false, 0)
+ testMatcher(t, idxs, 10000, true, reqCount)
+ }
+}
diff --git a/core/bloombits/utils.go b/core/bloombits/utils.go
new file mode 100644
index 000000000..d0755cb65
--- /dev/null
+++ b/core/bloombits/utils.go
@@ -0,0 +1,63 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+package bloombits
+
+import (
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+const BloomLength = 2048
+
+// BloomBitsCreator takes sectionSize header bloom filters and calculates the bloomBits vectors of the section
+type BloomBitsCreator struct {
+ blooms [BloomLength][]byte
+ sectionSize, bitIndex uint64
+}
+
+func NewBloomBitsCreator(sectionSize uint64) *BloomBitsCreator {
+ b := &BloomBitsCreator{sectionSize: sectionSize}
+ for i, _ := range b.blooms {
+ b.blooms[i] = make([]byte, sectionSize/8)
+ }
+ return b
+}
+
+// AddHeaderBloom takes a single bloom filter and sets the corresponding bit column in memory accordingly
+func (b *BloomBitsCreator) AddHeaderBloom(bloom types.Bloom) {
+ if b.bitIndex >= b.sectionSize {
+ panic("too many header blooms added")
+ }
+
+ byteIdx := b.bitIndex / 8
+ bitMask := byte(1) << byte(7-b.bitIndex%8)
+ for bloomBitIdx, _ := range b.blooms {
+ bloomByteIdx := BloomLength/8 - 1 - bloomBitIdx/8
+ bloomBitMask := byte(1) << byte(bloomBitIdx%8)
+ if (bloom[bloomByteIdx] & bloomBitMask) != 0 {
+ b.blooms[bloomBitIdx][byteIdx] |= bitMask
+ }
+ }
+ b.bitIndex++
+}
+
+// GetBitVector returns the bit vector belonging to the given bit index after header blooms have been added
+func (b *BloomBitsCreator) GetBitVector(idx uint) []byte {
+ if b.bitIndex != b.sectionSize {
+ panic("not enough header blooms added")
+ }
+
+ return b.blooms[idx][:]
+}
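A short sketch of how the creator is intended to be driven (mirroring the BloomBitsIndex backend added in eth/db_upgrade.go later in this diff): feed exactly sectionSize header blooms in block order, then read out the per-bit-index vectors. The empty blooms here are placeholders; a real indexer would pass header.Bloom for each canonical header of the section.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	const sectionSize = 4096

	bc := bloombits.NewBloomBitsCreator(sectionSize)

	// Feed exactly sectionSize header blooms, in block order. Empty blooms
	// stand in for real header.Bloom values here.
	for i := uint64(0); i < sectionSize; i++ {
		bc.AddHeaderBloom(types.Bloom{})
	}

	// Each of the 2048 bloom bit positions now has a sectionSize-bit vector,
	// one bit per block of the section.
	v := bc.GetBitVector(0)
	fmt.Println("vector length in bytes:", len(v)) // sectionSize/8 = 512
}
```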
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index 9a88a5b1b..56360b59a 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -36,7 +36,7 @@ import (
type ChainIndexerBackend interface {
// Reset initiates the processing of a new chain segment, potentially terminating
// any partially completed operations (in case of a reorg).
- Reset(section uint64)
+ Reset(section uint64, lastSectionHead common.Hash)
// Process crunches through the next header in the chain segment. The caller
// will ensure a sequential order of headers.
@@ -44,7 +44,7 @@ type ChainIndexerBackend interface {
// Commit finalizes the section metadata and stores it into the database. This
// interface will usually be a batch writer.
- Commit(db ethdb.Database) error
+ Commit() error
}
// ChainIndexer does a post-processing job for equally sized sections of the
@@ -101,10 +101,34 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken
return c
}
+// AddKnownSectionHead marks a new section head as known/processed if it is newer
+// than the already known best section head
+func (c *ChainIndexer) AddKnownSectionHead(section uint64, shead common.Hash) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if section < c.storedSections {
+ return
+ }
+ c.setSectionHead(section, shead)
+ c.setValidSections(section + 1)
+}
+
+// IndexerChain interface is used for connecting the indexer to a blockchain
+type IndexerChain interface {
+ CurrentHeader() *types.Header
+ SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription
+}
+
// Start creates a goroutine to feed chain head events into the indexer for
-// cascading background processing.
-func (c *ChainIndexer) Start(currentHeader *types.Header, eventMux *event.TypeMux) {
- go c.eventLoop(currentHeader, eventMux)
+// cascading background processing. Children do not need to be started; they
+// are notified about new events by their parents.
+func (c *ChainIndexer) Start(chain IndexerChain) {
+ ch := make(chan ChainEvent, 10)
+ sub := chain.SubscribeChainEvent(ch)
+ currentHeader := chain.CurrentHeader()
+
+ go c.eventLoop(currentHeader, ch, sub)
}
// Close tears down all goroutines belonging to the indexer and returns any error
@@ -125,6 +149,14 @@ func (c *ChainIndexer) Close() error {
errs = append(errs, err)
}
}
+
+ // Close all children
+ for _, child := range c.children {
+ if err := child.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
// Return any failures
switch {
case len(errs) == 0:
@@ -141,12 +173,10 @@ func (c *ChainIndexer) Close() error {
// eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing
// queue.
-func (c *ChainIndexer) eventLoop(currentHeader *types.Header, eventMux *event.TypeMux) {
+func (c *ChainIndexer) eventLoop(currentHeader *types.Header, ch chan ChainEvent, sub event.Subscription) {
// Mark the chain indexer as active, requiring an additional teardown
atomic.StoreUint32(&c.active, 1)
- // Subscribe to chain head events
- sub := eventMux.Subscribe(ChainEvent{})
defer sub.Unsubscribe()
// Fire the initial new head event to start any outstanding processing
@@ -163,14 +193,14 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, eventMux *event.Ty
errc <- nil
return
- case ev, ok := <-sub.Chan():
+ case ev, ok := <-ch:
// Received a new event, ensure it's not nil (closing) and update
if !ok {
errc := <-c.quit
errc <- nil
return
}
- header := ev.Data.(ChainEvent).Block.Header()
+ header := ev.Block.Header()
if header.ParentHash != prevHash {
c.newHead(FindCommonAncestor(c.chainDb, prevHeader, header).Number.Uint64(), true)
}
@@ -226,7 +256,10 @@ func (c *ChainIndexer) newHead(head uint64, reorg bool) {
// updateLoop is the main event loop of the indexer which pushes chain segments
// down into the processing backend.
func (c *ChainIndexer) updateLoop() {
- var updated time.Time
+ var (
+ updated time.Time
+ updateMsg bool
+ )
for {
select {
@@ -242,6 +275,7 @@ func (c *ChainIndexer) updateLoop() {
// Periodically print an upgrade log message to the user
if time.Since(updated) > 8*time.Second {
if c.knownSections > c.storedSections+1 {
+ updateMsg = true
c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
}
updated = time.Now()
@@ -250,17 +284,24 @@ func (c *ChainIndexer) updateLoop() {
section := c.storedSections
var oldHead common.Hash
if section > 0 {
- oldHead = c.sectionHead(section - 1)
+ oldHead = c.SectionHead(section - 1)
}
// Process the newly defined section in the background
c.lock.Unlock()
newHead, err := c.processSection(section, oldHead)
+ if err != nil {
+ c.log.Error("Section processing failed", "error", err)
+ }
c.lock.Lock()
// If processing succeeded and no reorgs occurred, mark the section completed
- if err == nil && oldHead == c.sectionHead(section-1) {
+ if err == nil && oldHead == c.SectionHead(section-1) {
c.setSectionHead(section, newHead)
c.setValidSections(section + 1)
+ if c.storedSections == c.knownSections && updateMsg {
+ updateMsg = false
+ c.log.Info("Finished upgrading chain index")
+ }
c.cascadedHead = c.storedSections*c.sectionSize - 1
for _, child := range c.children {
@@ -295,7 +336,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
c.log.Trace("Processing new chain section", "section", section)
// Reset and partial processing
- c.backend.Reset(section)
+ c.backend.Reset(section, lastHead)
for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
hash := GetCanonicalHash(c.chainDb, number)
@@ -311,7 +352,8 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
c.backend.Process(header)
lastHead = header.Hash()
}
- if err := c.backend.Commit(c.chainDb); err != nil {
+ if err := c.backend.Commit(); err != nil {
+ c.log.Error("Section commit failed", "error", err)
return common.Hash{}, err
}
return lastHead, nil
@@ -324,7 +366,7 @@ func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
c.lock.Lock()
defer c.lock.Unlock()
- return c.storedSections, c.storedSections*c.sectionSize - 1, c.sectionHead(c.storedSections - 1)
+ return c.storedSections, c.storedSections*c.sectionSize - 1, c.SectionHead(c.storedSections - 1)
}
// AddChildIndexer adds a child ChainIndexer that can use the output of this one
@@ -366,7 +408,7 @@ func (c *ChainIndexer) setValidSections(sections uint64) {
// sectionHead retrieves the last block hash of a processed section from the
// index database.
-func (c *ChainIndexer) sectionHead(section uint64) common.Hash {
+func (c *ChainIndexer) SectionHead(section uint64) common.Hash {
var data [8]byte
binary.BigEndian.PutUint64(data[:], section)
diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go
index 780e46e43..247f52cf9 100644
--- a/core/chain_indexer_test.go
+++ b/core/chain_indexer_test.go
@@ -23,6 +23,7 @@ import (
"testing"
"time"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
)
@@ -58,7 +59,6 @@ func testChainIndexer(t *testing.T, count int) {
)
backends[i] = &testChainIndexBackend{t: t, processCh: make(chan uint64)}
backends[i].indexer = NewChainIndexer(db, ethdb.NewTable(db, string([]byte{byte(i)})), backends[i], sectionSize, confirmsReq, 0, fmt.Sprintf("indexer-%d", i))
- defer backends[i].indexer.Close()
if sections, _, _ := backends[i].indexer.Sections(); sections != 0 {
t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, 0)
@@ -67,6 +67,7 @@ func testChainIndexer(t *testing.T, count int) {
backends[i-1].indexer.AddChildIndexer(backends[i].indexer)
}
}
+ defer backends[0].indexer.Close() // parent indexer shuts down children
// notify pings the root indexer about a new head or reorg, then expect
// processed blocks if a section is processable
notify := func(headNum, failNum uint64, reorg bool) {
@@ -208,7 +209,7 @@ func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
return b.stored * b.indexer.sectionSize
}
-func (b *testChainIndexBackend) Reset(section uint64) {
+func (b *testChainIndexBackend) Reset(section uint64, lastSectionHead common.Hash) {
b.section = section
b.headerCnt = 0
}
@@ -226,7 +227,7 @@ func (b *testChainIndexBackend) Process(header *types.Header) {
}
}
-func (b *testChainIndexBackend) Commit(db ethdb.Database) error {
+func (b *testChainIndexBackend) Commit() error {
if b.headerCnt != b.indexer.sectionSize {
b.t.Error("Not enough headers processed")
}
diff --git a/core/database_util.go b/core/database_util.go
index 697111394..179d6f1b2 100644
--- a/core/database_util.go
+++ b/core/database_util.go
@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"math/big"
- "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -48,9 +47,6 @@ var (
lookupPrefix = []byte("l") // lookupPrefix + hash -> transaction/receipt lookup metadata
preimagePrefix = "secure-key-" // preimagePrefix + hash -> preimage
- mipmapPre = []byte("mipmap-log-bloom-")
- MIPMapLevels = []uint64{1000000, 500000, 100000, 50000, 1000}
-
configPrefix = []byte("ethereum-config-") // config prefix for the db
// used by old db, now only used for conversion
@@ -59,10 +55,10 @@ var (
ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
- mipmapBloomMu sync.Mutex // protect against race condition when updating mipmap blooms
-
preimageCounter = metrics.NewCounter("db/preimage/total")
preimageHitCounter = metrics.NewCounter("db/preimage/hits")
+
+ bloomBitsPrefix = []byte("bloomBits-")
)
// txLookupEntry is a positional metadata to help looking up the data content of
@@ -497,48 +493,6 @@ func DeleteTxLookupEntry(db ethdb.Database, hash common.Hash) {
db.Delete(append(lookupPrefix, hash.Bytes()...))
}
-// returns a formatted MIP mapped key by adding prefix, canonical number and level
-//
-// ex. fn(98, 1000) = (prefix || 1000 || 0)
-func mipmapKey(num, level uint64) []byte {
- lkey := make([]byte, 8)
- binary.BigEndian.PutUint64(lkey, level)
- key := new(big.Int).SetUint64(num / level * level)
-
- return append(mipmapPre, append(lkey, key.Bytes()...)...)
-}
-
-// WriteMipmapBloom writes each address included in the receipts' logs to the
-// MIP bloom bin.
-func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
- mipmapBloomMu.Lock()
- defer mipmapBloomMu.Unlock()
-
- batch := db.NewBatch()
- for _, level := range MIPMapLevels {
- key := mipmapKey(number, level)
- bloomDat, _ := db.Get(key)
- bloom := types.BytesToBloom(bloomDat)
- for _, receipt := range receipts {
- for _, log := range receipt.Logs {
- bloom.Add(log.Address.Big())
- }
- }
- batch.Put(key, bloom.Bytes())
- }
- if err := batch.Write(); err != nil {
- return fmt.Errorf("mipmap write fail for: %d: %v", number, err)
- }
- return nil
-}
-
-// GetMipmapBloom returns a bloom filter using the number and level as input
-// parameters. For available levels see MIPMapLevels.
-func GetMipmapBloom(db ethdb.Database, number, level uint64) types.Bloom {
- bloomDat, _ := db.Get(mipmapKey(number, level))
- return types.BytesToBloom(bloomDat)
-}
-
// PreimageTable returns a Database instance with the key prefix for preimage entries.
func PreimageTable(db ethdb.Database) ethdb.Database {
return ethdb.NewTable(db, preimagePrefix)
@@ -637,3 +591,22 @@ func FindCommonAncestor(db ethdb.Database, a, b *types.Header) *types.Header {
}
return a
}
+
+// GetBloomBits reads the compressed bloomBits vector belonging to the given section and bit index from the db
+func GetBloomBits(db ethdb.Database, bitIdx, sectionIdx uint64, sectionHead common.Hash) ([]byte, error) {
+ var encKey [10]byte
+ binary.BigEndian.PutUint16(encKey[0:2], uint16(bitIdx))
+ binary.BigEndian.PutUint64(encKey[2:10], sectionIdx)
+ key := append(append(bloomBitsPrefix, encKey[:]...), sectionHead.Bytes()...)
+ bloomBits, err := db.Get(key)
+ return bloomBits, err
+}
+
+// StoreBloomBits writes the compressed bloomBits vector belonging to the given section and bit index to the db
+func StoreBloomBits(db ethdb.Database, bitIdx, sectionIdx uint64, sectionHead common.Hash, bloomBits []byte) {
+ var encKey [10]byte
+ binary.BigEndian.PutUint16(encKey[0:2], uint16(bitIdx))
+ binary.BigEndian.PutUint64(encKey[2:10], sectionIdx)
+ key := append(append(bloomBitsPrefix, encKey[:]...), sectionHead.Bytes()...)
+ db.Put(key, bloomBits)
+}
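For reference, the key layout used by the two helpers above is the "bloomBits-" prefix, a 2-byte big-endian bit index, an 8-byte big-endian section index, and the 32-byte hash of the section's last block; keying on the section head presumably keeps vectors computed for reorged section variants from clashing. A small sketch reconstructing the key:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// bloomBitsKey mirrors the key layout of GetBloomBits/StoreBloomBits:
// "bloomBits-" + 2-byte bit index + 8-byte section index + section head hash.
func bloomBitsKey(bitIdx, sectionIdx uint64, sectionHead common.Hash) []byte {
	var encKey [10]byte
	binary.BigEndian.PutUint16(encKey[0:2], uint16(bitIdx))
	binary.BigEndian.PutUint64(encKey[2:10], sectionIdx)
	return append(append([]byte("bloomBits-"), encKey[:]...), sectionHead.Bytes()...)
}

func main() {
	key := bloomBitsKey(7, 42, common.Hash{})
	fmt.Println("key length:", len(key)) // 10 prefix + 10 index + 32 hash = 52 bytes
}
```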
diff --git a/core/database_util_test.go b/core/database_util_test.go
index e91f1b593..940221a29 100644
--- a/core/database_util_test.go
+++ b/core/database_util_test.go
@@ -18,17 +18,13 @@ package core
import (
"bytes"
- "io/ioutil"
"math/big"
- "os"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -390,107 +386,3 @@ func TestBlockReceiptStorage(t *testing.T) {
t.Fatalf("deleted receipts returned: %v", rs)
}
}
-
-func TestMipmapBloom(t *testing.T) {
- db, _ := ethdb.NewMemDatabase()
-
- receipt1 := new(types.Receipt)
- receipt1.Logs = []*types.Log{
- {Address: common.BytesToAddress([]byte("test"))},
- {Address: common.BytesToAddress([]byte("address"))},
- }
- receipt2 := new(types.Receipt)
- receipt2.Logs = []*types.Log{
- {Address: common.BytesToAddress([]byte("test"))},
- {Address: common.BytesToAddress([]byte("address1"))},
- }
-
- WriteMipmapBloom(db, 1, types.Receipts{receipt1})
- WriteMipmapBloom(db, 2, types.Receipts{receipt2})
-
- for _, level := range MIPMapLevels {
- bloom := GetMipmapBloom(db, 2, level)
- if !bloom.Test(new(big.Int).SetBytes([]byte("address1"))) {
- t.Error("expected test to be included on level:", level)
- }
- }
-
- // reset
- db, _ = ethdb.NewMemDatabase()
- receipt := new(types.Receipt)
- receipt.Logs = []*types.Log{
- {Address: common.BytesToAddress([]byte("test"))},
- }
- WriteMipmapBloom(db, 999, types.Receipts{receipt1})
-
- receipt = new(types.Receipt)
- receipt.Logs = []*types.Log{
- {Address: common.BytesToAddress([]byte("test 1"))},
- }
- WriteMipmapBloom(db, 1000, types.Receipts{receipt})
-
- bloom := GetMipmapBloom(db, 1000, 1000)
- if bloom.TestBytes([]byte("test")) {
- t.Error("test should not have been included")
- }
-}
-
-func TestMipmapChain(t *testing.T) {
- dir, err := ioutil.TempDir("", "mipmap")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
-
- var (
- db, _ = ethdb.NewLDBDatabase(dir, 0, 0)
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr = crypto.PubkeyToAddress(key1.PublicKey)
- addr2 = common.BytesToAddress([]byte("jeff"))
-
- hash1 = common.BytesToHash([]byte("topic1"))
- )
- defer db.Close()
-
- gspec := &Genesis{
- Config: params.TestChainConfig,
- Alloc: GenesisAlloc{addr: {Balance: big.NewInt(1000000)}},
- }
- genesis := gspec.MustCommit(db)
- chain, receipts := GenerateChain(params.TestChainConfig, genesis, db, 1010, func(i int, gen *BlockGen) {
- var receipts types.Receipts
- switch i {
- case 1:
- receipt := types.NewReceipt(nil, false, new(big.Int))
- receipt.Logs = []*types.Log{{Address: addr, Topics: []common.Hash{hash1}}}
- gen.AddUncheckedReceipt(receipt)
- receipts = types.Receipts{receipt}
- case 1000:
- receipt := types.NewReceipt(nil, false, new(big.Int))
- receipt.Logs = []*types.Log{{Address: addr2}}
- gen.AddUncheckedReceipt(receipt)
- receipts = types.Receipts{receipt}
-
- }
-
- // store the receipts
- WriteMipmapBloom(db, uint64(i+1), receipts)
- })
- for i, block := range chain {
- WriteBlock(db, block)
- if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
- t.Fatalf("failed to insert block number: %v", err)
- }
- if err := WriteHeadBlockHash(db, block.Hash()); err != nil {
- t.Fatalf("failed to insert block number: %v", err)
- }
- if err := WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil {
- t.Fatal("error writing block receipts:", err)
- }
- }
-
- bloom := GetMipmapBloom(db, 0, 1000)
- if bloom.TestBytes(addr2[:]) {
- t.Error("address was included in bloom and should not have")
- }
-}
diff --git a/core/types/bloom9.go b/core/types/bloom9.go
index 60aacc301..bdc6e60e7 100644
--- a/core/types/bloom9.go
+++ b/core/types/bloom9.go
@@ -106,6 +106,20 @@ func LogsBloom(logs []*Log) *big.Int {
return bin
}
+type BloomIndexList [3]uint
+
+// BloomIndexes returns the bloom filter bit indexes belonging to the given key
+func BloomIndexes(b []byte) BloomIndexList {
+ b = crypto.Keccak256(b[:])
+
+ var r [3]uint
+ for i, _ := range r {
+ r[i] = (uint(b[i+i+1]) + (uint(b[i+i]) << 8)) & 2047
+ }
+
+ return r
+}
+
func bloom9(b []byte) *big.Int {
b = crypto.Keccak256(b[:])
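To illustrate the relation to the per-header blooms: the three indexes returned by BloomIndexes are exactly the bit positions that bloom9 sets for the same key, so a key can only be present in a header bloom if all three bits are set. A hedged, self-contained sketch of that membership check against a raw 2048-bit bloom (big-endian bit layout, as used by types.Bloom):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// bloomIndexes mirrors types.BloomIndexes: the three bit positions (0..2047)
// that a key sets in a header bloom filter.
func bloomIndexes(b []byte) [3]uint {
	h := crypto.Keccak256(b)
	var r [3]uint
	for i := range r {
		r[i] = (uint(h[i+i+1]) + (uint(h[i+i]) << 8)) & 2047
	}
	return r
}

// bloomHasKey reports whether all three index bits are set in a raw
// 2048-bit bloom (bit 0 lives in the last byte).
func bloomHasKey(bloom [256]byte, key []byte) bool {
	for _, bit := range bloomIndexes(key) {
		if bloom[256-1-bit/8]&(byte(1)<<(bit%8)) == 0 {
			return false
		}
	}
	return true
}

func main() {
	var bloom [256]byte
	key := []byte("some address or topic")
	for _, bit := range bloomIndexes(key) {
		bloom[256-1-bit/8] |= byte(1) << (bit % 8) // simulate bloom9 setting the bits
	}
	fmt.Println(bloomHasKey(bloom, key)) // true
}
```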
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 19ef79f23..fa3cf3f80 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -194,3 +195,29 @@ func (b *EthApiBackend) EventMux() *event.TypeMux {
func (b *EthApiBackend) AccountManager() *accounts.Manager {
return b.eth.AccountManager()
}
+
+func (b *EthApiBackend) GetBloomBits(ctx context.Context, bitIdx uint64, sectionIdxList []uint64) ([][]byte, error) {
+ results := make([][]byte, len(sectionIdxList))
+ var err error
+ for i, sectionIdx := range sectionIdxList {
+ sectionHead := core.GetCanonicalHash(b.eth.chainDb, (sectionIdx+1)*bloomBitsSection-1)
+ results[i], err = core.GetBloomBits(b.eth.chainDb, bitIdx, sectionIdx, sectionHead)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return results, nil
+}
+
+func (b *EthApiBackend) BloomBitsSections() uint64 {
+ sections, _, _ := b.eth.bbIndexer.Sections()
+ return sections
+}
+
+func (b *EthApiBackend) BloomBitsConfig() filters.BloomConfig {
+ return filters.BloomConfig{
+ SectionSize: bloomBitsSection,
+ MaxRequestLen: 16,
+ MaxRequestWait: 0,
+ }
+}
diff --git a/eth/backend.go b/eth/backend.go
index 5837c8564..efc0a2317 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -77,6 +77,8 @@ type Ethereum struct {
engine consensus.Engine
accountManager *accounts.Manager
+ bbIndexer *core.ChainIndexer
+
ApiBackend *EthApiBackend
miner *miner.Miner
@@ -125,11 +127,9 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
networkId: config.NetworkId,
gasPrice: config.GasPrice,
etherbase: config.Etherbase,
+ bbIndexer: NewBloomBitsProcessor(chainDb, bloomBitsSection),
}
- if err := addMipmapBloomBins(chainDb); err != nil {
- return nil, err
- }
log.Info("Initialising Ethereum protocol", "versions", ProtocolVersions, "network", config.NetworkId)
if !config.SkipBcVersionCheck {
@@ -151,6 +151,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
eth.blockchain.SetHead(compat.RewindTo)
core.WriteChainConfig(chainDb, genesisHash, chainConfig)
}
+ eth.bbIndexer.Start(eth.blockchain)
if config.TxPool.Journal != "" {
config.TxPool.Journal = ctx.ResolvePath(config.TxPool.Journal)
@@ -260,7 +261,7 @@ func (s *Ethereum) APIs() []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
- Service: filters.NewPublicFilterAPI(s.ApiBackend, false),
+ Service: filters.NewPublicFilterAPI(s.ApiBackend, false, bloomBitsSection),
Public: true,
}, {
Namespace: "admin",
@@ -389,6 +390,7 @@ func (s *Ethereum) Stop() error {
if s.stopDbUpgrade != nil {
s.stopDbUpgrade()
}
+ s.bbIndexer.Close()
s.blockchain.Stop()
s.protocolManager.Stop()
if s.lesServer != nil {
diff --git a/eth/backend_test.go b/eth/backend_test.go
deleted file mode 100644
index 1fd25e95a..000000000
--- a/eth/backend_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package eth
-
-import (
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/params"
-)
-
-func TestMipmapUpgrade(t *testing.T) {
- db, _ := ethdb.NewMemDatabase()
- addr := common.BytesToAddress([]byte("jeff"))
- genesis := new(core.Genesis).MustCommit(db)
-
- chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, db, 10, func(i int, gen *core.BlockGen) {
- switch i {
- case 1:
- receipt := types.NewReceipt(nil, false, new(big.Int))
- receipt.Logs = []*types.Log{{Address: addr}}
- gen.AddUncheckedReceipt(receipt)
- case 2:
- receipt := types.NewReceipt(nil, false, new(big.Int))
- receipt.Logs = []*types.Log{{Address: addr}}
- gen.AddUncheckedReceipt(receipt)
- }
- })
- for i, block := range chain {
- core.WriteBlock(db, block)
- if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
- t.Fatalf("failed to insert block number: %v", err)
- }
- if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
- t.Fatalf("failed to insert block number: %v", err)
- }
- if err := core.WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil {
- t.Fatal("error writing block receipts:", err)
- }
- }
-
- err := addMipmapBloomBins(db)
- if err != nil {
- t.Fatal(err)
- }
-
- bloom := core.GetMipmapBloom(db, 1, core.MIPMapLevels[0])
- if (bloom == types.Bloom{}) {
- t.Error("got empty bloom filter")
- }
-
- data, _ := db.Get([]byte("setting-mipmap-version"))
- if len(data) == 0 {
- t.Error("setting-mipmap-version not written to database")
- }
-}
diff --git a/eth/db_upgrade.go b/eth/db_upgrade.go
index 90111b2b3..ce8ce699a 100644
--- a/eth/db_upgrade.go
+++ b/eth/db_upgrade.go
@@ -19,11 +19,13 @@ package eth
import (
"bytes"
- "fmt"
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/bloombits"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
@@ -135,45 +137,37 @@ func upgradeDeduplicateData(db ethdb.Database) func() error {
}
}
-func addMipmapBloomBins(db ethdb.Database) (err error) {
- const mipmapVersion uint = 2
-
- // check if the version is set. We ignore data for now since there's
- // only one version so we can easily ignore it for now
- var data []byte
- data, _ = db.Get([]byte("setting-mipmap-version"))
- if len(data) > 0 {
- var version uint
- if err := rlp.DecodeBytes(data, &version); err == nil && version == mipmapVersion {
- return nil
- }
- }
+// BloomBitsIndex implements core.ChainIndexerBackend
+type BloomBitsIndex struct {
+ db ethdb.Database
+ bc *bloombits.BloomBitsCreator
+ section, sectionSize uint64
+ sectionHead common.Hash
+}
- defer func() {
- if err == nil {
- var val []byte
- val, err = rlp.EncodeToBytes(mipmapVersion)
- if err == nil {
- err = db.Put([]byte("setting-mipmap-version"), val)
- }
- return
- }
- }()
- latestHash := core.GetHeadBlockHash(db)
- latestBlock := core.GetBlock(db, latestHash, core.GetBlockNumber(db, latestHash))
- if latestBlock == nil { // clean database
- return
- }
+// number of confirmation blocks before a section is considered probably final and its bloom bits are calculated
+const bloomBitsConfirmations = 256
- tstart := time.Now()
- log.Warn("Upgrading db log bloom bins")
- for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
- hash := core.GetCanonicalHash(db, i)
- if (hash == common.Hash{}) {
- return fmt.Errorf("chain db corrupted. Could not find block %d.", i)
- }
- core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash, i))
+// NewBloomBitsProcessor returns a chain processor that generates bloom bits data for the canonical chain
+func NewBloomBitsProcessor(db ethdb.Database, sectionSize uint64) *core.ChainIndexer {
+ backend := &BloomBitsIndex{db: db, sectionSize: sectionSize}
+ return core.NewChainIndexer(db, ethdb.NewTable(db, "bbIndex-"), backend, sectionSize, bloomBitsConfirmations, time.Millisecond*100, "bloombits")
+}
+
+func (b *BloomBitsIndex) Reset(section uint64, lastSectionHead common.Hash) {
+ b.bc = bloombits.NewBloomBitsCreator(b.sectionSize)
+ b.section = section
+}
+
+func (b *BloomBitsIndex) Process(header *types.Header) {
+ b.bc.AddHeaderBloom(header.Bloom)
+ b.sectionHead = header.Hash()
+}
+
+func (b *BloomBitsIndex) Commit() error {
+ for i := 0; i < bloombits.BloomLength; i++ {
+ compVector := bitutil.CompressBytes(b.bc.GetBitVector(uint(i)))
+ core.StoreBloomBits(b.db, uint64(i), b.section, b.sectionHead, compVector)
}
- log.Info("Bloom-bin upgrade completed", "elapsed", common.PrettyDuration(time.Since(tstart)))
return nil
}
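Commit stores each of the 2048 bit vectors in compressed form. Reading one back for the matcher needs the reverse step; the sketch below assumes bitutil.DecompressBytes is the counterpart of the CompressBytes call above and takes the expected decompressed length (sectionSize/8 bytes) explicitly.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
)

func main() {
	const sectionSize = 4096

	vector := make([]byte, sectionSize/8) // sparse vector, compresses well
	vector[0] = 0x80

	stored := bitutil.CompressBytes(vector)
	// DecompressBytes is assumed here; the target length is sectionSize/8.
	restored, err := bitutil.DecompressBytes(stored, sectionSize/8)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(stored), len(restored)) // compressed size, 512
}
```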
diff --git a/eth/filters/api.go b/eth/filters/api.go
index fff58a268..11767753e 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -51,24 +51,25 @@ type filter struct {
// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such as blocks, transactions and logs.
type PublicFilterAPI struct {
- backend Backend
- useMipMap bool
- mux *event.TypeMux
- chainDb ethdb.Database
- events *EventSystem
- filtersMu sync.Mutex
- filters map[rpc.ID]*filter
+ backend Backend
+ bloomBitsSection uint64
+ mux *event.TypeMux
+ quit chan struct{}
+ chainDb ethdb.Database
+ events *EventSystem
+ filtersMu sync.Mutex
+ filters map[rpc.ID]*filter
}
// NewPublicFilterAPI returns a new PublicFilterAPI instance.
-func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI {
+func NewPublicFilterAPI(backend Backend, lightMode bool, bloomBitsSection uint64) *PublicFilterAPI {
api := &PublicFilterAPI{
- backend: backend,
- useMipMap: !lightMode,
- mux: backend.EventMux(),
- chainDb: backend.ChainDb(),
- events: NewEventSystem(backend.EventMux(), backend, lightMode),
- filters: make(map[rpc.ID]*filter),
+ backend: backend,
+ bloomBitsSection: bloomBitsSection,
+ mux: backend.EventMux(),
+ chainDb: backend.ChainDb(),
+ events: NewEventSystem(backend.EventMux(), backend, lightMode),
+ filters: make(map[rpc.ID]*filter),
}
go api.timeoutLoop()
@@ -332,11 +333,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([
crit.ToBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
}
- filter := New(api.backend, api.useMipMap)
- filter.SetBeginBlock(crit.FromBlock.Int64())
- filter.SetEndBlock(crit.ToBlock.Int64())
- filter.SetAddresses(crit.Addresses)
- filter.SetTopics(crit.Topics)
+ filter := New(api.backend, crit.FromBlock.Int64(), crit.ToBlock.Int64(), crit.Addresses, crit.Topics)
logs, err := filter.Find(ctx)
return returnLogs(logs), err
@@ -372,19 +369,18 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty
return nil, fmt.Errorf("filter not found")
}
- filter := New(api.backend, api.useMipMap)
+ var begin, end int64
if f.crit.FromBlock != nil {
- filter.SetBeginBlock(f.crit.FromBlock.Int64())
+ begin = f.crit.FromBlock.Int64()
} else {
- filter.SetBeginBlock(rpc.LatestBlockNumber.Int64())
+ begin = rpc.LatestBlockNumber.Int64()
}
if f.crit.ToBlock != nil {
- filter.SetEndBlock(f.crit.ToBlock.Int64())
+ end = f.crit.ToBlock.Int64()
} else {
- filter.SetEndBlock(rpc.LatestBlockNumber.Int64())
+ end = rpc.LatestBlockNumber.Int64()
}
- filter.SetAddresses(f.crit.Addresses)
- filter.SetTopics(f.crit.Topics)
+ filter := New(api.backend, begin, end, f.crit.Addresses, f.crit.Topics)
logs, err := filter.Find(ctx)
if err != nil {
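With the removed setter methods gone, constructing and running a filter from request criteria collapses into a single call; a minimal sketch with backend and ctx assumed in scope and a made-up address:

addr := common.HexToAddress("0x1111111111111111111111111111111111111111")
f := New(backend, 0, rpc.LatestBlockNumber.Int64(), []common.Address{addr}, nil)
logs, err := f.Find(ctx) // begin 0, end -1 (latest), one address, any topics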
diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go
new file mode 100644
index 000000000..2487bc0eb
--- /dev/null
+++ b/eth/filters/bench_test.go
@@ -0,0 +1,237 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package filters
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/bitutil"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/bloombits"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/golang/snappy"
+)
+
+func BenchmarkBloomBits512(b *testing.B) {
+ benchmarkBloomBitsForSize(b, 512)
+}
+
+func BenchmarkBloomBits1k(b *testing.B) {
+ benchmarkBloomBitsForSize(b, 1024)
+}
+
+func BenchmarkBloomBits2k(b *testing.B) {
+ benchmarkBloomBitsForSize(b, 2048)
+}
+
+func BenchmarkBloomBits4k(b *testing.B) {
+ benchmarkBloomBitsForSize(b, 4096)
+}
+
+func BenchmarkBloomBits8k(b *testing.B) {
+ benchmarkBloomBitsForSize(b, 8192)
+}
+
+func BenchmarkBloomBits16k(b *testing.B) {
+ benchmarkBloomBitsForSize(b, 16384)
+}
+
+func BenchmarkBloomBits32k(b *testing.B) {
+ benchmarkBloomBitsForSize(b, 32768)
+}
+
+func benchmarkBloomBitsForSize(b *testing.B, sectionSize uint64) {
+ benchmarkBloomBits(b, sectionSize, 0)
+ benchmarkBloomBits(b, sectionSize, 1)
+ benchmarkBloomBits(b, sectionSize, 2)
+}
+
+const benchFilterCnt = 2000
+
+func benchmarkBloomBits(b *testing.B, sectionSize uint64, comp int) {
+ benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
+ fmt.Println("Running bloombits benchmark section size:", sectionSize, " compression method:", comp)
+
+ var (
+ compressFn func([]byte) []byte
+ decompressFn func([]byte, int) ([]byte, error)
+ )
+ switch comp {
+ case 0:
+ // no compression
+ compressFn = func(data []byte) []byte {
+ return data
+ }
+ decompressFn = func(data []byte, target int) ([]byte, error) {
+ if len(data) != target {
+ panic(nil)
+ }
+ return data, nil
+ }
+ case 1:
+ // bitutil/compress.go
+ compressFn = bitutil.CompressBytes
+ decompressFn = bitutil.DecompressBytes
+ case 2:
+ // go snappy
+ compressFn = func(data []byte) []byte {
+ return snappy.Encode(nil, data)
+ }
+ decompressFn = func(data []byte, target int) ([]byte, error) {
+ decomp, err := snappy.Decode(nil, data)
+ if err != nil || len(decomp) != target {
+ panic(err)
+ }
+ return decomp, nil
+ }
+ }
+
+ db, err := ethdb.NewLDBDatabase(benchDataDir, 128, 1024)
+ if err != nil {
+ b.Fatalf("error opening database at %v: %v", benchDataDir, err)
+ }
+ head := core.GetHeadBlockHash(db)
+ if head == (common.Hash{}) {
+ b.Fatalf("chain data not found at %v", benchDataDir)
+ }
+
+ clearBloomBits(db)
+ fmt.Println("Generating bloombits data...")
+ headNum := core.GetBlockNumber(db, head)
+ if headNum < sectionSize+512 {
+ b.Fatalf("not enough blocks for running a benchmark")
+ }
+
+ start := time.Now()
+ cnt := (headNum - 512) / sectionSize
+ var dataSize, compSize uint64
+ for sectionIdx := uint64(0); sectionIdx < cnt; sectionIdx++ {
+ bc := bloombits.NewBloomBitsCreator(sectionSize)
+ var header *types.Header
+ for i := sectionIdx * sectionSize; i < (sectionIdx+1)*sectionSize; i++ {
+ hash := core.GetCanonicalHash(db, i)
+ header = core.GetHeader(db, hash, i)
+ if header == nil {
+ b.Fatalf("Error creating bloomBits data")
+ }
+ bc.AddHeaderBloom(header.Bloom)
+ }
+ sectionHead := core.GetCanonicalHash(db, (sectionIdx+1)*sectionSize-1)
+ for i := 0; i < bloombits.BloomLength; i++ {
+ data := bc.GetBitVector(uint(i))
+ comp := compressFn(data)
+ dataSize += uint64(len(data))
+ compSize += uint64(len(comp))
+ core.StoreBloomBits(db, uint64(i), sectionIdx, sectionHead, comp)
+ }
+ //if sectionIdx%50 == 0 {
+ // fmt.Println(" section", sectionIdx, "/", cnt)
+ //}
+ }
+
+ d := time.Since(start)
+ fmt.Println("Finished generating bloombits data")
+ fmt.Println(" ", d, "total ", d/time.Duration(cnt*sectionSize), "per block")
+ fmt.Println(" data size:", dataSize, " compressed size:", compSize, " compression ratio:", float64(compSize)/float64(dataSize))
+
+ fmt.Println("Running filter benchmarks...")
+ start = time.Now()
+ mux := new(event.TypeMux)
+ var backend *testBackend
+
+ for i := 0; i < benchFilterCnt; i++ {
+ if i%20 == 0 {
+ db.Close()
+ db, _ = ethdb.NewLDBDatabase(benchDataDir, 128, 1024)
+ backend = &testBackend{mux, db, cnt, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
+ }
+ var addr common.Address
+ addr[0] = byte(i)
+ addr[1] = byte(i / 256)
+ filter := New(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
+ filter.decompress = decompressFn
+ if _, err := filter.Find(context.Background()); err != nil {
+ b.Error("filter.Find error:", err)
+ }
+ }
+ d = time.Since(start)
+ fmt.Println("Finished running filter benchmarks")
+ fmt.Println(" ", d, "total ", d/time.Duration(benchFilterCnt), "per address", d*time.Duration(1000000)/time.Duration(benchFilterCnt*cnt*sectionSize), "per million blocks")
+ db.Close()
+}
+
+func forEachKey(db ethdb.Database, startPrefix, endPrefix []byte, fn func(key []byte)) {
+ it := db.(*ethdb.LDBDatabase).NewIterator()
+ it.Seek(startPrefix)
+ for it.Valid() {
+ key := it.Key()
+ cmpLen := len(key)
+ if len(endPrefix) < cmpLen {
+ cmpLen = len(endPrefix)
+ }
+ if bytes.Compare(key[:cmpLen], endPrefix) == 1 {
+ break
+ }
+ fn(common.CopyBytes(key))
+ it.Next()
+ }
+ it.Release()
+}
+
+var bloomBitsPrefix = []byte("bloomBits-")
+
+func clearBloomBits(db ethdb.Database) {
+ fmt.Println("Clearing bloombits data...")
+ forEachKey(db, bloomBitsPrefix, bloomBitsPrefix, func(key []byte) {
+ db.Delete(key)
+ })
+}
+
+func BenchmarkNoBloomBits(b *testing.B) {
+ benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
+ fmt.Println("Running benchmark without bloombits")
+ db, err := ethdb.NewLDBDatabase(benchDataDir, 128, 1024)
+ if err != nil {
+ b.Fatalf("error opening database at %v: %v", benchDataDir, err)
+ }
+ head := core.GetHeadBlockHash(db)
+ if head == (common.Hash{}) {
+ b.Fatalf("chain data not found at %v", benchDataDir)
+ }
+ headNum := core.GetBlockNumber(db, head)
+
+ clearBloomBits(db)
+
+ fmt.Println("Running filter benchmarks...")
+ start := time.Now()
+ mux := new(event.TypeMux)
+ backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
+ filter := New(backend, 0, int64(headNum), []common.Address{common.Address{}}, nil)
+ filter.Find(context.Background())
+ d := time.Since(start)
+ fmt.Println("Finished running filter benchmarks")
+ fmt.Println(" ", d, "total ", d*time.Duration(1000000)/time.Duration(headNum+1), "per million blocks")
+ db.Close()
+}
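The three compression modes exercised by the benchmark can also be compared in isolation; a self-contained sketch using only bitutil and snappy (the vector contents are made up and stand in for one row of section bloom bits):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
	"github.com/golang/snappy"
)

func main() {
	// A sparse 4096-bit (512-byte) vector, similar to one bloom bit row of a 4k section.
	vector := make([]byte, 512)
	vector[10] = 0x80
	vector[300] = 0x01

	comp := bitutil.CompressBytes(vector) // sparse bitset compression from common/bitutil
	snap := snappy.Encode(nil, vector)    // generic snappy compression
	fmt.Println("raw:", len(vector), "bitutil:", len(comp), "snappy:", len(snap))

	// Round-trip through bitutil, mirroring the filter's decompress hook.
	back, err := bitutil.DecompressBytes(comp, len(vector))
	fmt.Println("roundtrip ok:", err == nil && len(back) == len(vector))
}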
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index f848bc6af..ea9ccf2f9 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -18,11 +18,14 @@ package filters
import (
"context"
- "math"
"math/big"
+ "sync"
+ "time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -34,58 +37,51 @@ type Backend interface {
EventMux() *event.TypeMux
HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
+ BloomBitsSections() uint64
+ BloomBitsConfig() BloomConfig
SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
+ GetBloomBits(ctx context.Context, bitIdx uint64, sectionIdxList []uint64) ([][]byte, error)
+}
+
+type BloomConfig struct {
+ SectionSize uint64
+ MaxRequestLen int
+ MaxRequestWait time.Duration
}
// Filter can be used to retrieve and filter logs.
type Filter struct {
- backend Backend
- useMipMap bool
+ backend Backend
+ bloomBitsConfig BloomConfig
db ethdb.Database
begin, end int64
addresses []common.Address
topics [][]common.Hash
+
+ decompress func([]byte, int) ([]byte, error)
+ matcher *bloombits.Matcher
}
// New creates a new filter which uses a bloom filter on blocks to figure out whether
// a particular block is interesting or not.
-// MipMaps allow past blocks to be searched much more efficiently, but are not available
-// to light clients.
-func New(backend Backend, useMipMap bool) *Filter {
+func New(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
return &Filter{
- backend: backend,
- useMipMap: useMipMap,
- db: backend.ChainDb(),
+ backend: backend,
+ begin: begin,
+ end: end,
+ addresses: addresses,
+ topics: topics,
+ bloomBitsConfig: backend.BloomBitsConfig(),
+ db: backend.ChainDb(),
+ matcher: bloombits.NewMatcher(backend.BloomBitsConfig().SectionSize, addresses, topics),
+ decompress: bitutil.DecompressBytes,
}
}
-// SetBeginBlock sets the earliest block for filtering.
-// -1 = latest block (i.e., the current block)
-// hash = particular hash from-to
-func (f *Filter) SetBeginBlock(begin int64) {
- f.begin = begin
-}
-
-// SetEndBlock sets the latest block for filtering.
-func (f *Filter) SetEndBlock(end int64) {
- f.end = end
-}
-
-// SetAddresses matches only logs that are generated from addresses that are included
-// in the given addresses.
-func (f *Filter) SetAddresses(addr []common.Address) {
- f.addresses = addr
-}
-
-// SetTopics matches only logs that have topics matching the given topics.
-func (f *Filter) SetTopics(topics [][]common.Hash) {
- f.topics = topics
-}
-
// FindOnce searches the blockchain for matching log entries, returning
// all matching entries from the first block that contains matches,
// updating the start point of the filter accordingly. If no results are
@@ -106,18 +102,9 @@ func (f *Filter) FindOnce(ctx context.Context) ([]*types.Log, error) {
endBlockNo = headBlockNumber
}
- // if no addresses are present we can't make use of fast search which
- // uses the mipmap bloom filters to check for fast inclusion and uses
- // higher range probability in order to ensure at least a false positive
- if !f.useMipMap || len(f.addresses) == 0 {
- logs, blockNumber, err := f.getLogs(ctx, beginBlockNo, endBlockNo)
- f.begin = int64(blockNumber + 1)
- return logs, err
- }
-
- logs, blockNumber := f.mipFind(beginBlockNo, endBlockNo, 0)
+ logs, blockNumber, err := f.getLogs(ctx, beginBlockNo, endBlockNo)
f.begin = int64(blockNumber + 1)
- return logs, nil
+ return logs, err
}
// Run filters logs with the current parameters set
@@ -131,43 +118,134 @@ func (f *Filter) Find(ctx context.Context) (logs []*types.Log, err error) {
}
}
-func (f *Filter) mipFind(start, end uint64, depth int) (logs []*types.Log, blockNumber uint64) {
- level := core.MIPMapLevels[depth]
- // normalise numerator so we can work in level specific batches and
- // work with the proper range checks
- for num := start / level * level; num <= end; num += level {
- // find addresses in bloom filters
- bloom := core.GetMipmapBloom(f.db, num, level)
- // Don't bother checking the first time through the loop - we're probably picking
- // up where a previous run left off.
- first := true
- for _, addr := range f.addresses {
- if first || bloom.TestBytes(addr[:]) {
- first = false
- // range check normalised values and make sure that
- // we're resolving the correct range instead of the
- // normalised values.
- start := uint64(math.Max(float64(num), float64(start)))
- end := uint64(math.Min(float64(num+level-1), float64(end)))
- if depth+1 == len(core.MIPMapLevels) {
- l, blockNumber, _ := f.getLogs(context.Background(), start, end)
- if len(l) > 0 {
- return l, blockNumber
+// nextRequest returns the next request to retrieve for the bloombits matcher
+func (f *Filter) nextRequest() (bloombits uint, sections []uint64) {
+ bloomIndex, ok := f.matcher.AllocSectionQueue()
+ if !ok {
+ return 0, nil
+ }
+ if f.bloomBitsConfig.MaxRequestWait > 0 &&
+ (f.bloomBitsConfig.MaxRequestLen <= 1 || // SectionCount is always greater than zero after a successful alloc
+ f.matcher.SectionCount(bloomIndex) < f.bloomBitsConfig.MaxRequestLen) {
+ time.Sleep(f.bloomBitsConfig.MaxRequestWait)
+ }
+ return bloomIndex, f.matcher.FetchSections(bloomIndex, f.bloomBitsConfig.MaxRequestLen)
+}
+
+// serveMatcher serves the bloombits matcher by fetching the requested vectors
+// through the filter backend
+func (f *Filter) serveMatcher(ctx context.Context, stop chan struct{}, wg *sync.WaitGroup) chan error {
+ errChn := make(chan error, 1)
+ wg.Add(10)
+ for i := 0; i < 10; i++ {
+ go func(i int) {
+ defer wg.Done()
+
+ for {
+ b, s := f.nextRequest()
+ if s == nil {
+ return
+ }
+ data, err := f.backend.GetBloomBits(ctx, uint64(b), s)
+ if err != nil {
+ select {
+ case errChn <- err:
+ case <-stop:
}
- } else {
- l, blockNumber := f.mipFind(start, end, depth+1)
- if len(l) > 0 {
- return l, blockNumber
+ return
+ }
+ decomp := make([][]byte, len(data))
+ for i, d := range data {
+ var err error
+ if decomp[i], err = f.decompress(d, int(f.bloomBitsConfig.SectionSize/8)); err != nil {
+ select {
+ case errChn <- err:
+ case <-stop:
+ }
+ return
}
}
+ f.matcher.Deliver(b, s, decomp)
}
- }
+ }(i)
}
- return nil, end
+ return errChn
+}
+
+// checkMatches checks if the receipts belonging to the given header contain any log events that
+// match the filter criteria. This function is called when the bloom filter signals a potential match.
+func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs []*types.Log, err error) {
+ // Get the logs of the block
+ receipts, err := f.backend.GetReceipts(ctx, header.Hash())
+ if err != nil {
+ return nil, err
+ }
+ var unfiltered []*types.Log
+ for _, receipt := range receipts {
+ unfiltered = append(unfiltered, ([]*types.Log)(receipt.Logs)...)
+ }
+ logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
+ if len(logs) > 0 {
+ return logs, nil
+ }
+ return nil, nil
}
func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*types.Log, blockNumber uint64, err error) {
+ haveBloomBitsBefore := f.backend.BloomBitsSections() * f.bloomBitsConfig.SectionSize
+ if haveBloomBitsBefore > start {
+ e := end
+ if haveBloomBitsBefore <= e {
+ e = haveBloomBitsBefore - 1
+ }
+
+ stop := make(chan struct{})
+ var wg sync.WaitGroup
+ matches := f.matcher.Start(start, e)
+ errChn := f.serveMatcher(ctx, stop, &wg)
+
+ defer func() {
+ f.matcher.Stop()
+ close(stop)
+ wg.Wait()
+ }()
+
+ loop:
+ for {
+ select {
+ case i, ok := <-matches:
+ if !ok {
+ break loop
+ }
+
+ blockNumber := rpc.BlockNumber(i)
+ header, err := f.backend.HeaderByNumber(ctx, blockNumber)
+ if header == nil || err != nil {
+ return logs, end, err
+ }
+
+ logs, err := f.checkMatches(ctx, header)
+ if err != nil {
+ return nil, end, err
+ }
+ if logs != nil {
+ return logs, i, nil
+ }
+ case err := <-errChn:
+ return logs, end, err
+ case <-ctx.Done():
+ return nil, end, ctx.Err()
+ }
+ }
+
+ if end < haveBloomBitsBefore {
+ return logs, end, nil
+ }
+ start = haveBloomBitsBefore
+ }
+
+ // search the rest with regular block-by-block bloom filtering
for i := start; i <= end; i++ {
blockNumber := rpc.BlockNumber(i)
header, err := f.backend.HeaderByNumber(ctx, blockNumber)
@@ -178,18 +256,12 @@ func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*types.
// Use bloom filtering to see if this block is interesting given the
// current parameters
if f.bloomFilter(header.Bloom) {
- // Get the logs of the block
- receipts, err := f.backend.GetReceipts(ctx, header.Hash())
+ logs, err := f.checkMatches(ctx, header)
if err != nil {
return nil, end, err
}
- var unfiltered []*types.Log
- for _, receipt := range receipts {
- unfiltered = append(unfiltered, ([]*types.Log)(receipt.Logs)...)
- }
- logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
- if len(logs) > 0 {
- return logs, uint64(blockNumber), nil
+ if logs != nil {
+ return logs, i, nil
}
}
}
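Taken together, nextRequest, serveMatcher and getLogs form a small producer/consumer loop around bloombits.Matcher. A single-goroutine sketch of the serving side, using only the matcher calls visible above (serve, maxBatch and fetch are assumed names; m is a matcher already running via Start, and fetch must return already decompressed bit vectors):

func serve(m *bloombits.Matcher, maxBatch int, fetch func(bit uint, sections []uint64) [][]byte) {
	for {
		bit, ok := m.AllocSectionQueue() // claim a bloom bit index with pending sections
		if !ok {
			return // matcher finished or was stopped
		}
		sections := m.FetchSections(bit, maxBatch) // take up to maxBatch section indexes
		m.Deliver(bit, sections, fetch(bit, sections))
	}
}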
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index fcc888b8c..140fad555 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -36,6 +36,7 @@ import (
type testBackend struct {
mux *event.TypeMux
db ethdb.Database
+ sections uint64
txFeed *event.Feed
rmLogsFeed *event.Feed
logsFeed *event.Feed
@@ -84,6 +85,31 @@ func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subsc
return b.chainFeed.Subscribe(ch)
}
+func (b *testBackend) GetBloomBits(ctx context.Context, bitIdx uint64, sectionIdxList []uint64) ([][]byte, error) {
+ results := make([][]byte, len(sectionIdxList))
+ var err error
+ for i, sectionIdx := range sectionIdxList {
+ sectionHead := core.GetCanonicalHash(b.db, (sectionIdx+1)*testBloomBitsSection-1)
+ results[i], err = core.GetBloomBits(b.db, bitIdx, sectionIdx, sectionHead)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return results, nil
+}
+
+func (b *testBackend) BloomBitsSections() uint64 {
+ return b.sections
+}
+
+func (b *testBackend) BloomBitsConfig() BloomConfig {
+ return BloomConfig{
+ SectionSize: testBloomBitsSection,
+ MaxRequestLen: 16,
+ MaxRequestWait: 0,
+ }
+}
+
// TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
// It creates multiple subscriptions:
// - one at the start and should receive all posted chain events and a second (blockHashes)
@@ -99,8 +125,8 @@ func TestBlockSubscription(t *testing.T) {
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+ api = NewPublicFilterAPI(backend, false, 0)
genesis = new(core.Genesis).MustCommit(db)
chain, _ = core.GenerateChain(params.TestChainConfig, genesis, db, 10, func(i int, gen *core.BlockGen) {})
chainEvents = []core.ChainEvent{}
@@ -156,8 +182,8 @@ func TestPendingTxFilter(t *testing.T) {
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+ api = NewPublicFilterAPI(backend, false, 0)
transactions = []*types.Transaction{
types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), new(big.Int), new(big.Int), nil),
@@ -219,8 +245,8 @@ func TestLogFilterCreation(t *testing.T) {
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+ api = NewPublicFilterAPI(backend, false, 0)
testCases = []struct {
crit FilterCriteria
@@ -268,8 +294,8 @@ func TestInvalidLogFilterCreation(t *testing.T) {
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+ api = NewPublicFilterAPI(backend, false, 0)
)
// different situations where log filter creation should fail.
@@ -298,8 +324,8 @@ func TestLogFilter(t *testing.T) {
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+ api = NewPublicFilterAPI(backend, false, 0)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
@@ -415,8 +441,8 @@ func TestPendingLogsSubscription(t *testing.T) {
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+ api = NewPublicFilterAPI(backend, false, 0)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
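The test backend above resolves bloom bit data through the same (bitIdx, sectionIdx, sectionHead) key that core.StoreBloomBits writes. A sketch of that round trip (db, bitIdx, sectionIdx, sectionHead and bitVector are assumed to be in scope):

comp := bitutil.CompressBytes(bitVector)
core.StoreBloomBits(db, bitIdx, sectionIdx, sectionHead, comp)
if stored, err := core.GetBloomBits(db, bitIdx, sectionIdx, sectionHead); err == nil {
	vector, _ := bitutil.DecompressBytes(stored, len(bitVector))
	_ = vector // matches bitVector while the stored section data is intact
}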
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index cf508a218..f1c6481d7 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -32,6 +32,8 @@ import (
"github.com/ethereum/go-ethereum/params"
)
+const testBloomBitsSection = 4096
+
func makeReceipt(addr common.Address) *types.Receipt {
receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{
@@ -41,8 +43,8 @@ func makeReceipt(addr common.Address) *types.Receipt {
return receipt
}
-func BenchmarkMipmaps(b *testing.B) {
- dir, err := ioutil.TempDir("", "mipmap")
+func BenchmarkFilters(b *testing.B) {
+ dir, err := ioutil.TempDir("", "filtertest")
if err != nil {
b.Fatal(err)
}
@@ -55,7 +57,7 @@ func BenchmarkMipmaps(b *testing.B) {
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed}
+ backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
addr2 = common.BytesToAddress([]byte("jeff"))
@@ -66,27 +68,21 @@ func BenchmarkMipmaps(b *testing.B) {
genesis := core.GenesisBlockForTesting(db, addr1, big.NewInt(1000000))
chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, db, 100010, func(i int, gen *core.BlockGen) {
- var receipts types.Receipts
switch i {
case 2403:
receipt := makeReceipt(addr1)
- receipts = types.Receipts{receipt}
gen.AddUncheckedReceipt(receipt)
case 1034:
receipt := makeReceipt(addr2)
- receipts = types.Receipts{receipt}
gen.AddUncheckedReceipt(receipt)
case 34:
receipt := makeReceipt(addr3)
- receipts = types.Receipts{receipt}
gen.AddUncheckedReceipt(receipt)
case 99999:
receipt := makeReceipt(addr4)
- receipts = types.Receipts{receipt}
gen.AddUncheckedReceipt(receipt)
}
- core.WriteMipmapBloom(db, uint64(i+1), receipts)
})
for i, block := range chain {
core.WriteBlock(db, block)
@@ -102,10 +98,7 @@ func BenchmarkMipmaps(b *testing.B) {
}
b.ResetTimer()
- filter := New(backend, true)
- filter.SetAddresses([]common.Address{addr1, addr2, addr3, addr4})
- filter.SetBeginBlock(0)
- filter.SetEndBlock(-1)
+ filter := New(backend, 0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil)
for i := 0; i < b.N; i++ {
logs, _ := filter.Find(context.Background())
@@ -116,7 +109,7 @@ func BenchmarkMipmaps(b *testing.B) {
}
func TestFilters(t *testing.T) {
- dir, err := ioutil.TempDir("", "mipmap")
+ dir, err := ioutil.TempDir("", "filtertest")
if err != nil {
t.Fatal(err)
}
@@ -129,7 +122,7 @@ func TestFilters(t *testing.T) {
rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed)
chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed}
+ backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key1.PublicKey)
@@ -142,7 +135,6 @@ func TestFilters(t *testing.T) {
genesis := core.GenesisBlockForTesting(db, addr, big.NewInt(1000000))
chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, db, 1000, func(i int, gen *core.BlockGen) {
- var receipts types.Receipts
switch i {
case 1:
receipt := types.NewReceipt(nil, false, new(big.Int))
@@ -153,7 +145,6 @@ func TestFilters(t *testing.T) {
},
}
gen.AddUncheckedReceipt(receipt)
- receipts = types.Receipts{receipt}
case 2:
receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{
@@ -163,7 +154,6 @@ func TestFilters(t *testing.T) {
},
}
gen.AddUncheckedReceipt(receipt)
- receipts = types.Receipts{receipt}
case 998:
receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{
@@ -173,7 +163,6 @@ func TestFilters(t *testing.T) {
},
}
gen.AddUncheckedReceipt(receipt)
- receipts = types.Receipts{receipt}
case 999:
receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{
@@ -183,12 +172,7 @@ func TestFilters(t *testing.T) {
},
}
gen.AddUncheckedReceipt(receipt)
- receipts = types.Receipts{receipt}
}
- // i is used as block number for the writes but since the i
- // starts at 0 and block 0 (genesis) is already present increment
- // by one
- core.WriteMipmapBloom(db, uint64(i+1), receipts)
})
for i, block := range chain {
core.WriteBlock(db, block)
@@ -203,22 +187,14 @@ func TestFilters(t *testing.T) {
}
}
- filter := New(backend, true)
- filter.SetAddresses([]common.Address{addr})
- filter.SetTopics([][]common.Hash{{hash1, hash2, hash3, hash4}})
- filter.SetBeginBlock(0)
- filter.SetEndBlock(-1)
+ filter := New(backend, 0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}})
logs, _ := filter.Find(context.Background())
if len(logs) != 4 {
t.Error("expected 4 log, got", len(logs))
}
- filter = New(backend, true)
- filter.SetAddresses([]common.Address{addr})
- filter.SetTopics([][]common.Hash{{hash3}})
- filter.SetBeginBlock(900)
- filter.SetEndBlock(999)
+ filter = New(backend, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}})
logs, _ = filter.Find(context.Background())
if len(logs) != 1 {
t.Error("expected 1 log, got", len(logs))
@@ -227,11 +203,7 @@ func TestFilters(t *testing.T) {
t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0])
}
- filter = New(backend, true)
- filter.SetAddresses([]common.Address{addr})
- filter.SetTopics([][]common.Hash{{hash3}})
- filter.SetBeginBlock(990)
- filter.SetEndBlock(-1)
+ filter = New(backend, 990, -1, []common.Address{addr}, [][]common.Hash{{hash3}})
logs, _ = filter.Find(context.Background())
if len(logs) != 1 {
t.Error("expected 1 log, got", len(logs))
@@ -240,10 +212,7 @@ func TestFilters(t *testing.T) {
t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0])
}
- filter = New(backend, true)
- filter.SetTopics([][]common.Hash{{hash1, hash2}})
- filter.SetBeginBlock(1)
- filter.SetEndBlock(10)
+ filter = New(backend, 1, 10, nil, [][]common.Hash{{hash1, hash2}})
logs, _ = filter.Find(context.Background())
if len(logs) != 2 {
@@ -251,10 +220,7 @@ func TestFilters(t *testing.T) {
}
failHash := common.BytesToHash([]byte("fail"))
- filter = New(backend, true)
- filter.SetTopics([][]common.Hash{{failHash}})
- filter.SetBeginBlock(0)
- filter.SetEndBlock(-1)
+ filter = New(backend, 0, -1, nil, [][]common.Hash{{failHash}})
logs, _ = filter.Find(context.Background())
if len(logs) != 0 {
@@ -262,20 +228,14 @@ func TestFilters(t *testing.T) {
}
failAddr := common.BytesToAddress([]byte("failmenow"))
- filter = New(backend, true)
- filter.SetAddresses([]common.Address{failAddr})
- filter.SetBeginBlock(0)
- filter.SetEndBlock(-1)
+ filter = New(backend, 0, -1, []common.Address{failAddr}, nil)
logs, _ = filter.Find(context.Background())
if len(logs) != 0 {
t.Error("expected 0 log, got", len(logs))
}
- filter = New(backend, true)
- filter.SetTopics([][]common.Hash{{failHash}, {hash1}})
- filter.SetBeginBlock(0)
- filter.SetEndBlock(-1)
+ filter = New(backend, 0, -1, nil, [][]common.Hash{{failHash}, {hash1}})
logs, _ = filter.Find(context.Background())
if len(logs) != 0 {
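The rewritten cases above also document the topic matrix semantics that filterLogs preserves: inner slices are OR-ed alternatives for one topic position, outer positions are AND-ed. Two contrasting criteria, reusing the addr/hash3/failHash/hash1 values defined in this test:

// topic[0] must be hash3: matches the single log emitted in the 900-999 range.
_ = New(backend, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}})

// topic[0] must be failHash AND topic[1] must be hash1: no log in the test chain qualifies.
_ = New(backend, 0, -1, nil, [][]common.Hash{{failHash}, {hash1}})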
diff --git a/eth/handler.go b/eth/handler.go
index 28ae208c0..aadd771df 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -49,6 +49,8 @@ const (
// txChanSize is the size of channel listening to TxPreEvent.
// The number is referenced from the size of tx pool.
txChanSize = 4096
+
+ bloomBitsSection = 4096
)
var (
@@ -92,6 +94,8 @@ type ProtocolManager struct {
quitSync chan struct{}
noMorePeers chan struct{}
+ lesServer LesServer
+
// wait group is used for graceful shutdowns during downloading
// and processing
wg sync.WaitGroup
@@ -114,6 +118,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
txsyncCh: make(chan *txsync),
quitSync: make(chan struct{}),
}
+
// Figure out whether to allow fast sync or not
if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
log.Warn("Blockchain not empty, fast sync disabled")
diff --git a/les/api_backend.go b/les/api_backend.go
index 1323e8864..c2ba27028 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -19,6 +19,7 @@ package les
import (
"context"
"math/big"
+ "time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
@@ -28,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -171,3 +173,19 @@ func (b *LesApiBackend) EventMux() *event.TypeMux {
func (b *LesApiBackend) AccountManager() *accounts.Manager {
return b.eth.accountManager
}
+
+func (b *LesApiBackend) GetBloomBits(ctx context.Context, bitIdx uint64, sectionIdxList []uint64) ([][]byte, error) {
+ return nil, nil // implemented in a subsequent PR
+}
+
+func (b *LesApiBackend) BloomBitsSections() uint64 {
+ return 0
+}
+
+func (b *LesApiBackend) BloomBitsConfig() filters.BloomConfig {
+ return filters.BloomConfig{
+ SectionSize: 32768,
+ MaxRequestLen: 16,
+ MaxRequestWait: time.Microsecond * 100,
+ }
+}
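The placeholder config above feeds Filter.nextRequest: MaxRequestLen caps how many section indexes go into one GetBloomBits call, and MaxRequestWait lets the matcher queue a few more sections before a batch is cut. A hypothetical setting for a full node reading bits from local disk (values are illustrative only, except SectionSize which matches the constant added in eth/handler.go below):

cfg := filters.BloomConfig{
	SectionSize:    4096, // same section size the eth protocol manager indexes
	MaxRequestLen:  64,   // larger batches are cheap to serve locally
	MaxRequestWait: 0,    // no artificial batching delay
}
_ = cfg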
diff --git a/les/backend.go b/les/backend.go
index 4c33417c0..a3670b5ac 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -169,7 +169,7 @@ func (s *LightEthereum) APIs() []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
- Service: filters.NewPublicFilterAPI(s.ApiBackend, true),
+ Service: filters.NewPublicFilterAPI(s.ApiBackend, true, 0),
Public: true,
}, {
Namespace: "net",
diff --git a/miner/worker.go b/miner/worker.go
index 5bac5d6e8..e1154ac06 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -324,8 +324,6 @@ func (self *worker) wait() {
if stat == core.CanonStatTy {
			// This puts transactions in an extra db for rpc
core.WriteTxLookupEntries(self.chainDb, block)
- // Write map map bloom filters
- core.WriteMipmapBloom(self.chainDb, block.NumberU64(), work.receipts)
// implicit by posting ChainHeadEvent
mustCommitNewWork = false
}