author    Péter Szilágyi <peterke@gmail.com>  2015-06-17 04:18:01 +0800
committer Péter Szilágyi <peterke@gmail.com>  2015-06-18 20:56:08 +0800
commit    497a7f1717a798e59c9550fbb58504b08fe13b21 (patch)
tree      458d972505f6214acfb0597f3b4e56d3f01982b7 /eth/fetcher
parent    026ee40650bde909c962f15e32bdea6c1d6e978a (diff)
eth, eth/fetcher: define and enforce propagation boundaries
Diffstat (limited to 'eth/fetcher')
-rw-r--r--  eth/fetcher/fetcher.go       | 55
-rw-r--r--  eth/fetcher/fetcher_test.go  | 28
2 files changed, 60 insertions(+), 23 deletions(-)
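
This commit moves the fetcher's import queue and its presence set from loop-local variables
into fields on the Fetcher struct, and widens the sanity check in enqueue: instead of only
rejecting blocks more than maxQueueDist above the local head, it now discards any block whose
number differs from the current chain height by more than maxQueueDist in either direction.
Below is a minimal sketch of that boundary check; the maxQueueDist value is assumed purely
for illustration, since the constant's definition is not part of this diff.

package main

import (
	"fmt"
	"math"
)

// maxQueueDist is assumed here for illustration only; the real constant is
// defined elsewhere in eth/fetcher and is not shown in this diff.
const maxQueueDist = 32

// withinQueueDist reports whether a block number is close enough to the
// local chain height to be worth scheduling for import, mirroring the
// math.Abs check added to enqueue.
func withinQueueDist(chainHeight, blockNumber uint64) bool {
	return math.Abs(float64(chainHeight)-float64(blockNumber)) <= maxQueueDist
}

func main() {
	head := uint64(1000)
	for _, number := range []uint64{900, 990, 1001, 1100} {
		fmt.Printf("block %d: queue = %v\n", number, withinQueueDist(head, number))
	}
}
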
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go
index 207bd9323..76d3798cb 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/fetcher.go
@@ -3,6 +3,7 @@ package fetcher
import (
"errors"
+ "math"
"math/rand"
"time"
@@ -60,6 +61,10 @@ type Fetcher struct {
filter chan chan []*types.Block
quit chan struct{}
+ // Block cache
+ queue *prque.Prque // Queue containing the import operations (block number sorted)
+ queued map[common.Hash]struct{} // Presence set of already queued blocks (to dedup imports)
+
// Callbacks
hasBlock hashCheckFn // Checks if a block is present in the chain
importBlock blockImporterFn // Injects a block from an origin peer into the chain
@@ -73,6 +78,8 @@ func New(hasBlock hashCheckFn, importBlock blockImporterFn, chainHeight chainHei
insert: make(chan *inject),
filter: make(chan chan []*types.Block),
quit: make(chan struct{}),
+ queue: prque.New(),
+ queued: make(map[common.Hash]struct{}),
hasBlock: hasBlock,
importBlock: importBlock,
chainHeight: chainHeight,
@@ -154,23 +161,6 @@ func (f *Fetcher) loop() {
announced := make(map[common.Hash][]*announce)
fetching := make(map[common.Hash]*announce)
- // Create the priority queue and a matching presence set
- queue := prque.New()
- queued := make(map[common.Hash]struct{})
- enqueue := func(peer string, block *types.Block) {
- // Make sure the block isn't in some weird place
- if f.chainHeight()+maxQueueDist < block.NumberU64() {
- return
- }
- // If not, schedule the block for future import
- hash := block.Hash()
- if _, ok := queued[hash]; !ok {
- queued[hash] = struct{}{}
- queue.Push(&inject{origin: peer, block: block}, -float32(block.NumberU64()))
-
- glog.V(logger.Detail).Infof("Peer %s: queued block %x, total %v", peer, hash.Bytes()[:4], queue.Size())
- }
- }
// Iterate the block fetching until a quit is requested
fetch := time.NewTimer(0)
done := make(chan common.Hash)
@@ -185,16 +175,16 @@ func (f *Fetcher) loop() {
}
// Import any queued blocks that could potentially fit
height := f.chainHeight()
- for !queue.Empty() {
+ for !f.queue.Empty() {
// If too high up the chain, continue later
- op := queue.PopItem().(*inject)
+ op := f.queue.PopItem().(*inject)
if number := op.block.NumberU64(); number > height+1 {
- queue.Push(op, -float32(op.block.NumberU64()))
+ f.queue.Push(op, -float32(op.block.NumberU64()))
break
}
// Otherwise if not known yet, try and import
hash := op.block.Hash()
- delete(queued, hash)
+ delete(f.queued, hash)
if f.hasBlock(hash) {
continue
}
@@ -229,7 +219,7 @@ func (f *Fetcher) loop() {
case op := <-f.insert:
// A direct block insertion was requested, try and fill any pending gaps
- enqueue(op.origin, op.block)
+ f.enqueue(op.origin, op.block)
case hash := <-done:
// A pending import finished, remove all traces of the notification
@@ -301,9 +291,28 @@ func (f *Fetcher) loop() {
// Schedule the retrieved blocks for ordered import
for _, block := range explicit {
if announce := fetching[block.Hash()]; announce != nil {
- enqueue(announce.origin, block)
+ f.enqueue(announce.origin, block)
}
}
}
}
}
+
+// enqueue schedules a new future import operation, if the block to be imported
+// has not yet been seen.
+func (f *Fetcher) enqueue(peer string, block *types.Block) {
+ // Make sure the block isn't in some weird place
+ if math.Abs(float64(f.chainHeight())-float64(block.NumberU64())) > maxQueueDist {
+ return
+ }
+ // Schedule the block for future importing
+ hash := block.Hash()
+ if _, ok := f.queued[hash]; !ok {
+ f.queued[hash] = struct{}{}
+ f.queue.Push(&inject{origin: peer, block: block}, -float32(block.NumberU64()))
+
+ if glog.V(logger.Detail) {
+ glog.Infof("Peer %s: queued block %x, total %v", peer, hash.Bytes()[:4], f.queue.Size())
+ }
+ }
+}
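
In the new code, enqueue pairs a presence set keyed by block hash with a priority queue that
pushes each block at priority -float32(block.NumberU64()), so the lowest-numbered block is
popped first; the import loop then drains only blocks at most one ahead of the current chain
height. The following self-contained sketch illustrates that dedup-plus-ordered-queue pattern
using the standard library's container/heap as a stand-in for the prque package the fetcher
uses; the types and keys (a plain block number instead of a hash) are simplified for
illustration and are not part of the commit.

package main

import (
	"container/heap"
	"fmt"
)

// op mirrors the fetcher's inject pair of originating peer and block.
type op struct {
	origin string
	number uint64
}

// minHeap orders pending operations by block number, lowest first, which is
// what pushing with priority -float32(block.NumberU64()) achieves in prque.
type minHeap []*op

func (h minHeap) Len() int            { return len(h) }
func (h minHeap) Less(i, j int) bool  { return h[i].number < h[j].number }
func (h minHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x interface{}) { *h = append(*h, x.(*op)) }
func (h *minHeap) Pop() interface{} {
	old := *h
	item := old[len(old)-1]
	*h = old[:len(old)-1]
	return item
}

func main() {
	queue := &minHeap{}
	queued := make(map[uint64]struct{}) // presence set, dedups repeated enqueues

	for _, number := range []uint64{5, 3, 3, 7} {
		if _, ok := queued[number]; ok {
			continue // already scheduled, mirrors the f.queued check
		}
		queued[number] = struct{}{}
		heap.Push(queue, &op{origin: "peer", number: number})
	}
	// Drain in ascending block-number order, as the fetcher's import loop does.
	for queue.Len() > 0 {
		next := heap.Pop(queue).(*op)
		delete(queued, next.number)
		fmt.Println("importing block", next.number)
	}
}
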
diff --git a/eth/fetcher/fetcher_test.go b/eth/fetcher/fetcher_test.go
index e11a211a1..af2652a86 100644
--- a/eth/fetcher/fetcher_test.go
+++ b/eth/fetcher/fetcher_test.go
@@ -335,3 +335,31 @@ func TestImportDeduplication(t *testing.T) {
t.Fatalf("import invocation count mismatch: have %v, want %v", counter, 2)
}
}
+
+// Tests that blocks with numbers much lower or higher than our current head get
+// discarded to prevent wasting resources on useless blocks from faulty peers.
+func TestDistantDiscarding(t *testing.T) {
+ // Create a long chain to import
+ hashes := createHashes(3*maxQueueDist, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+
+ head := hashes[len(hashes)/2]
+
+ // Create a tester and simulate a head block being the middle of the above chain
+ tester := newTester()
+ tester.ownHashes = []common.Hash{head}
+ tester.ownBlocks = map[common.Hash]*types.Block{head: blocks[head]}
+
+ // Ensure that a block with a lower number than the threshold is discarded
+ tester.fetcher.Enqueue("lower", blocks[hashes[0]])
+ time.Sleep(10 * time.Millisecond)
+ if !tester.fetcher.queue.Empty() {
+ t.Fatalf("fetcher queued stale block")
+ }
+ // Ensure that a block with a higher number than the threshold is discarded
+ tester.fetcher.Enqueue("higher", blocks[hashes[len(hashes)-1]])
+ time.Sleep(10 * time.Millisecond)
+ if !tester.fetcher.queue.Empty() {
+ t.Fatalf("fetcher queued future block")
+ }
+}