Diffstat (limited to 'eth')
-rw-r--r--  eth/backend.go                        2
-rw-r--r--  eth/downloader/downloader.go         89
-rw-r--r--  eth/downloader/downloader_test.go    79
-rw-r--r--  eth/fetcher/fetcher.go              456
-rw-r--r--  eth/fetcher/fetcher_test.go         524
-rw-r--r--  eth/handler.go                      227
-rw-r--r--  eth/protocol_test.go                  2
-rw-r--r--  eth/sync.go                         147
8 files changed, 1188 insertions, 338 deletions
diff --git a/eth/backend.go b/eth/backend.go
index c621fa260..37fe66abf 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -313,7 +313,7 @@ func New(config *Config) (*Ethereum, error) {
eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.chainManager, eth.EventMux())
eth.chainManager.SetProcessor(eth.blockProcessor)
- eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.eventMux, eth.txPool, eth.chainManager)
+ eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.chainManager)
eth.miner = miner.New(eth, eth.EventMux(), eth.pow)
eth.miner.SetGasPrice(config.GasPrice)
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 306c4fd2d..39976aae1 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -1,3 +1,4 @@
+// Package downloader contains the manual full chain synchronisation.
package downloader
import (
@@ -33,23 +34,22 @@ var (
)
var (
- errBusy = errors.New("busy")
- errUnknownPeer = errors.New("peer is unknown or unhealthy")
- errBadPeer = errors.New("action from bad peer ignored")
- errStallingPeer = errors.New("peer is stalling")
- errBannedHead = errors.New("peer head hash already banned")
- errNoPeers = errors.New("no peers to keep download active")
- errPendingQueue = errors.New("pending items in queue")
- errTimeout = errors.New("timeout")
- errEmptyHashSet = errors.New("empty hash set by peer")
- errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
- errAlreadyInPool = errors.New("hash already in pool")
- errInvalidChain = errors.New("retrieved hash chain is invalid")
- errCrossCheckFailed = errors.New("block cross-check failed")
- errCancelHashFetch = errors.New("hash fetching canceled (requested)")
- errCancelBlockFetch = errors.New("block downloading canceled (requested)")
- errCancelChainImport = errors.New("chain importing canceled (requested)")
- errNoSyncActive = errors.New("no sync active")
+ errBusy = errors.New("busy")
+ errUnknownPeer = errors.New("peer is unknown or unhealthy")
+ errBadPeer = errors.New("action from bad peer ignored")
+ errStallingPeer = errors.New("peer is stalling")
+ errBannedHead = errors.New("peer head hash already banned")
+ errNoPeers = errors.New("no peers to keep download active")
+ errPendingQueue = errors.New("pending items in queue")
+ errTimeout = errors.New("timeout")
+ errEmptyHashSet = errors.New("empty hash set by peer")
+ errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
+ errAlreadyInPool = errors.New("hash already in pool")
+ errInvalidChain = errors.New("retrieved hash chain is invalid")
+ errCrossCheckFailed = errors.New("block cross-check failed")
+ errCancelHashFetch = errors.New("hash fetching canceled (requested)")
+ errCancelBlockFetch = errors.New("block downloading canceled (requested)")
+ errNoSyncActive = errors.New("no sync active")
)
// hashCheckFn is a callback type for verifying a hash's presence in the local chain.
@@ -87,6 +87,8 @@ type Downloader struct {
checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain
banned *set.Set // Set of hashes we've received and banned
+ interrupt int32 // Atomic boolean to signal termination
+
// Statistics
importStart time.Time // Instance when the last blocks were taken from the cache
importQueue []*Block // Previously taken blocks to check import progress
@@ -97,7 +99,7 @@ type Downloader struct {
hasBlock hashCheckFn // Checks if a block is present in the chain
getBlock blockRetrievalFn // Retrieves a block from the chain
insertChain chainInsertFn // Injects a batch of blocks into the chain
- dropPeer peerDropFn // Retrieved the TD of our own chain
+ dropPeer peerDropFn // Drops a peer for misbehaving
// Status
synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
@@ -245,12 +247,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash) error {
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
glog.V(logger.Info).Infoln("Block synchronisation started")
}
-
- // Create cancel channel for aborting mid-flight
- d.cancelLock.Lock()
- d.cancelCh = make(chan struct{})
- d.cancelLock.Unlock()
-
// Abort if the queue still contains some leftover data
if _, cached := d.queue.Size(); cached > 0 && d.queue.GetHeadBlock() != nil {
return errPendingQueue
@@ -260,12 +256,16 @@ func (d *Downloader) synchronise(id string, hash common.Hash) error {
d.peers.Reset()
d.checks = make(map[common.Hash]*crossCheck)
+ // Create cancel channel for aborting mid-flight
+ d.cancelLock.Lock()
+ d.cancelCh = make(chan struct{})
+ d.cancelLock.Unlock()
+
// Retrieve the origin peer and initiate the downloading process
p := d.peers.Peer(id)
if p == nil {
return errUnknownPeer
}
-
return d.syncWithPeer(p, hash)
}
@@ -282,7 +282,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash) (err error) {
defer func() {
// reset on error
if err != nil {
- d.Cancel()
+ d.cancel()
d.mux.Post(FailedEvent{err})
} else {
d.mux.Post(DoneEvent{})
@@ -301,9 +301,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash) (err error) {
return nil
}
-// Cancel cancels all of the operations and resets the queue. It returns true
-// if the cancel operation was completed.
+// cancel cancels all of the operations and resets the queue.
-func (d *Downloader) Cancel() {
+func (d *Downloader) cancel() {
// Close the current cancel channel
d.cancelLock.Lock()
if d.cancelCh != nil {
@@ -320,6 +320,12 @@ func (d *Downloader) Cancel() {
d.queue.Reset()
}
+// Terminate interrupts the downloader, canceling all pending operations.
+func (d *Downloader) Terminate() {
+ atomic.StoreInt32(&d.interrupt, 1)
+ d.cancel()
+}
+
// fetchHashes starts retrieving hashes backwards from a specific peer and hash,
// up until it finds a common ancestor. If the source peer times out, alternative
// ones are tried for continuation.
@@ -548,6 +554,7 @@ out:
peer.Demote()
peer.SetIdle()
glog.V(logger.Detail).Infof("%s: delivery partially failed: %v", peer, err)
+ go d.process()
}
}
@@ -712,7 +719,7 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error {
// between these state changes, a block may have arrived, but a processing
// attempt denied, so we need to re-enter to ensure the block isn't left
// to idle in the cache.
-func (d *Downloader) process() (err error) {
+func (d *Downloader) process() {
// Make sure only one goroutine is ever allowed to process blocks at once
if !atomic.CompareAndSwapInt32(&d.processing, 0, 1) {
return
@@ -722,8 +729,8 @@ func (d *Downloader) process() (err error) {
// the fresh blocks might have been rejected entry due to this present thread
// not yet releasing the `processing` state.
defer func() {
- if err == nil && d.queue.GetHeadBlock() != nil {
- err = d.process()
+ if atomic.LoadInt32(&d.interrupt) == 0 && d.queue.GetHeadBlock() != nil {
+ d.process()
}
}()
// Release the lock upon exit (note, before checking for reentry!), and set
@@ -736,18 +743,12 @@ func (d *Downloader) process() (err error) {
atomic.StoreInt32(&d.processing, 0)
}()
-
- // Fetch the current cancel channel to allow termination
- d.cancelLock.RLock()
- cancel := d.cancelCh
- d.cancelLock.RUnlock()
-
// Repeat the processing as long as there are blocks to import
for {
// Fetch the next batch of blocks
blocks := d.queue.TakeBlocks()
if len(blocks) == 0 {
- return nil
+ return
}
// Reset the import statistics
d.importLock.Lock()
@@ -758,12 +759,10 @@ func (d *Downloader) process() (err error) {
// Actually import the blocks
glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].RawBlock.Number(), blocks[len(blocks)-1].RawBlock.Number())
- for len(blocks) != 0 { // TODO: quit
+ for len(blocks) != 0 {
// Check for any termination requests
- select {
- case <-cancel:
- return errCancelChainImport
- default:
+ if atomic.LoadInt32(&d.interrupt) == 1 {
+ return
}
// Retrieve the first batch of blocks to insert
max := int(math.Min(float64(len(blocks)), float64(maxBlockProcess)))
@@ -776,8 +775,8 @@ func (d *Downloader) process() (err error) {
if err != nil {
glog.V(logger.Debug).Infof("Block #%d import failed: %v", raw[index].NumberU64(), err)
d.dropPeer(blocks[index].OriginPeer)
- d.Cancel()
- return errCancelChainImport
+ d.cancel()
+ return
}
blocks = blocks[max:]
}
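
The process() rework above swaps the cancel-channel check for an atomic interrupt flag, and keeps the compare-and-swap guard plus a deferred re-entry so that exactly one goroutine imports blocks while late deliveries are never stranded in the cache. A minimal, runnable sketch of that pattern follows; the names (worker, pending) are illustrative stand-ins, not the real Downloader fields:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type worker struct {
        processing int32 // Guard: only one goroutine may process at a time
        interrupt  int32 // Atomic boolean to signal termination
        pending    []int // Stand-in for the downloader's block cache
    }

    func (w *worker) process() {
        // Make sure only one goroutine is ever allowed to process at once
        if !atomic.CompareAndSwapInt32(&w.processing, 0, 1) {
            return
        }
        // Registered first so it runs last, after the guard is released: if
        // items arrived while the guard was held, re-enter and drain them too.
        defer func() {
            if atomic.LoadInt32(&w.interrupt) == 0 && len(w.pending) > 0 {
                w.process()
            }
        }()
        // Release the guard upon exit (note, before checking for re-entry!)
        defer atomic.StoreInt32(&w.processing, 0)

        for len(w.pending) > 0 {
            // Check for termination requests between batches
            if atomic.LoadInt32(&w.interrupt) == 1 {
                return
            }
            fmt.Println("importing", w.pending[0])
            w.pending = w.pending[1:]
        }
    }

    func main() {
        w := &worker{pending: []int{1, 2, 3}}
        w.process()

        atomic.StoreInt32(&w.interrupt, 1) // Terminate: further processing aborts
        w.process()
    }

As in the real code, the defer order matters: the guard is released before the re-entry check runs, so the recursive call's compare-and-swap can succeed.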
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index f71c16237..4fc4e1434 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -52,6 +52,8 @@ func copyBlock(block *types.Block) *types.Block {
return createBlock(int(block.Number().Int64()), block.ParentHeaderHash, block.HeaderHash)
}
+// createBlocksFromHashes assembles a collection of blocks, each having a correct
+// place in the given hash chain.
func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
blocks := make(map[common.Hash]*types.Block)
for i := 0; i < len(hashes); i++ {
@@ -64,6 +66,7 @@ func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
return blocks
}
+// downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct {
downloader *Downloader
@@ -75,6 +78,7 @@ type downloadTester struct {
maxHashFetch int // Overrides the maximum number of retrieved hashes
}
+// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
tester := &downloadTester{
ownHashes: []common.Hash{knownHash},
@@ -82,9 +86,7 @@ func newTester() *downloadTester {
peerHashes: make(map[string][]common.Hash),
peerBlocks: make(map[string]map[common.Hash]*types.Block),
}
- var mux event.TypeMux
- downloader := New(&mux, tester.hasBlock, tester.getBlock, tester.insertChain, tester.dropPeer)
- tester.downloader = downloader
+ tester.downloader = New(new(event.TypeMux), tester.hasBlock, tester.getBlock, tester.insertChain, tester.dropPeer)
return tester
}
@@ -247,7 +249,7 @@ func TestCancel(t *testing.T) {
tester.newPeer("peer", hashes, blocks)
// Make sure canceling works with a pristine downloader
- tester.downloader.Cancel()
+ tester.downloader.cancel()
hashCount, blockCount := tester.downloader.queue.Size()
if hashCount > 0 || blockCount > 0 {
t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
@@ -256,7 +258,7 @@ func TestCancel(t *testing.T) {
if err := tester.sync("peer"); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
- tester.downloader.Cancel()
+ tester.downloader.cancel()
hashCount, blockCount = tester.downloader.queue.Size()
if hashCount > 0 || blockCount > 0 {
t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
@@ -359,7 +361,7 @@ func TestSlowSynchronisation(t *testing.T) {
// Create a batch of blocks, with a slow and a full speed peer
targetCycles := 2
targetBlocks := targetCycles*blockCacheLimit - 15
- targetIODelay := 500 * time.Millisecond
+ targetIODelay := time.Second
hashes := createHashes(targetBlocks, knownHash)
blocks := createBlocksFromHashes(hashes)
@@ -708,6 +710,40 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
}
}
+// Tests a corner case (potential attack) where a peer delivers both good and
+// unrequested blocks in response to a hash request. This may trigger a different
+// code path than a fully correct or fully invalid delivery, potentially causing
+// internal state problems.
+//
+// No, don't delete this test, it actually did happen!
+func TestOverlappingDeliveryAttack(t *testing.T) {
+ // Create an arbitrary batch of blocks (< cache-size, so as not to block)
+ targetBlocks := blockCacheLimit - 23
+ hashes := createHashes(targetBlocks, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+
+ // Register an attacker that always returns non-requested blocks too
+ tester := newTester()
+ tester.newPeer("attack", hashes, blocks)
+
+ rawGetBlocks := tester.downloader.peers.Peer("attack").getBlocks
+ tester.downloader.peers.Peer("attack").getBlocks = func(request []common.Hash) error {
+ // Add a non-requested hash to screw up the delivery (genesis should be fine)
+ return rawGetBlocks(append(request, hashes[0]))
+ }
+ // Test that synchronisation can complete, check for import success
+ if err := tester.sync("attack"); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ start := time.Now()
+ for len(tester.ownHashes) != len(hashes) && time.Since(start) < time.Second {
+ time.Sleep(50 * time.Millisecond)
+ }
+ if len(tester.ownHashes) != len(hashes) {
+ t.Fatalf("chain length mismatch: have %v, want %v", len(tester.ownHashes), len(hashes))
+ }
+}
+
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestHashAttackerDropping(t *testing.T) {
// Define the disconnection requirement for individual hash fetch errors
@@ -715,22 +751,21 @@ func TestHashAttackerDropping(t *testing.T) {
result error
drop bool
}{
- {nil, false}, // Sync succeeded, all is well
- {errBusy, false}, // Sync is already in progress, no problem
- {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
- {errBadPeer, true}, // Peer was deemed bad for some reason, drop it
- {errStallingPeer, true}, // Peer was detected to be stalling, drop it
- {errBannedHead, true}, // Peer's head hash is a known bad hash, drop it
- {errNoPeers, false}, // No peers to download from, soft race, no issue
- {errPendingQueue, false}, // There are blocks still cached, wait to exhaust, no issue
- {errTimeout, true}, // No hashes received in due time, drop the peer
- {errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
- {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
- {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
- {errCrossCheckFailed, true}, // Hash-origin failed to pass a block cross check, drop
- {errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
- {errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
- {errCancelChainImport, false}, // Synchronisation was canceled, origin may be innocent, don't drop
+ {nil, false}, // Sync succeeded, all is well
+ {errBusy, false}, // Sync is already in progress, no problem
+ {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
+ {errBadPeer, true}, // Peer was deemed bad for some reason, drop it
+ {errStallingPeer, true}, // Peer was detected to be stalling, drop it
+ {errBannedHead, true}, // Peer's head hash is a known bad hash, drop it
+ {errNoPeers, false}, // No peers to download from, soft race, no issue
+ {errPendingQueue, false}, // There are blocks still cached, wait to exhaust, no issue
+ {errTimeout, true}, // No hashes received in due time, drop the peer
+ {errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
+ {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
+ {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
+ {errCrossCheckFailed, true}, // Hash-origin failed to pass a block cross check, drop
+ {errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
+ {errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
}
// Run the tests and check disconnection status
tester := newTester()
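
The tail of TestOverlappingDeliveryAttack above waits by polling shared state against a deadline; a reusable helper for that pattern might look like the following (waitFor is hypothetical, not part of the test suite, and assumes the "time" import already present in the file):

    // waitFor polls cond every 50ms until it holds or the timeout expires,
    // reporting whether the condition was eventually met.
    func waitFor(timeout time.Duration, cond func() bool) bool {
        start := time.Now()
        for !cond() {
            if time.Since(start) > timeout {
                return false
            }
            time.Sleep(50 * time.Millisecond)
        }
        return true
    }

With it, the import check would read: if !waitFor(time.Second, func() bool { return len(tester.ownHashes) == len(hashes) }) { t.Fatalf(...) }.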
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go
new file mode 100644
index 000000000..90a202235
--- /dev/null
+++ b/eth/fetcher/fetcher.go
@@ -0,0 +1,456 @@
+// Package fetcher contains the block announcement based synchronisation.
+package fetcher
+
+import (
+ "errors"
+ "fmt"
+ "math/rand"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/logger"
+ "github.com/ethereum/go-ethereum/logger/glog"
+ "gopkg.in/karalabe/cookiejar.v2/collections/prque"
+)
+
+const (
+ arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
+ gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
+ fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block
+ maxUncleDist = 7 // Maximum allowed backward distance from the chain head
+ maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
+ hashLimit = 256 // Maximum number of unique blocks a peer may have announced
+ blockLimit = 64 // Maximum number of unique blocks a peer may have delivered
+)
+
+var (
+ errTerminated = errors.New("terminated")
+)
+
+// blockRetrievalFn is a callback type for retrieving a block from the local chain.
+type blockRetrievalFn func(common.Hash) *types.Block
+
+// blockRequesterFn is a callback type for sending a block retrieval request.
+type blockRequesterFn func([]common.Hash) error
+
+// blockValidatorFn is a callback type to verify a block's header for fast propagation.
+type blockValidatorFn func(block *types.Block, parent *types.Block) error
+
+// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
+type blockBroadcasterFn func(block *types.Block, propagate bool)
+
+// chainHeightFn is a callback type to retrieve the current chain height.
+type chainHeightFn func() uint64
+
+// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
+type chainInsertFn func(types.Blocks) (int, error)
+
+// peerDropFn is a callback type for dropping a peer detected as malicious.
+type peerDropFn func(id string)
+
+// announce is the hash notification of the availability of a new block in the
+// network.
+type announce struct {
+ hash common.Hash // Hash of the block being announced
+ time time.Time // Timestamp of the announcement
+
+ origin string // Identifier of the peer originating the notification
+ fetch blockRequesterFn // Fetcher function to retrieve the announced block
+}
+
+// inject represents a scheduled import operation.
+type inject struct {
+ origin string
+ block *types.Block
+}
+
+// Fetcher is responsible for accumulating block announcements from various peers
+// and scheduling them for retrieval.
+type Fetcher struct {
+ // Various event channels
+ notify chan *announce
+ inject chan *inject
+ filter chan chan []*types.Block
+ done chan common.Hash
+ quit chan struct{}
+
+ // Announce states
+ announces map[string]int // Per peer announce counts to prevent memory exhaustion
+ announced map[common.Hash][]*announce // Announced blocks, scheduled for fetching
+ fetching map[common.Hash]*announce // Announced blocks, currently fetching
+
+ // Block cache
+ queue *prque.Prque // Queue containing the import operations (block number sorted)
+ queues map[string]int // Per peer block counts to prevent memory exhaustion
+ queued map[common.Hash]*inject // Set of already queued blocks (to dedup imports)
+
+ // Callbacks
+ getBlock blockRetrievalFn // Retrieves a block from the local chain
+ validateBlock blockValidatorFn // Checks if a block's headers have a valid proof of work
+ broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
+ chainHeight chainHeightFn // Retrieves the current chain's height
+ insertChain chainInsertFn // Injects a batch of blocks into the chain
+ dropPeer peerDropFn // Drops a peer for misbehaving
+
+ // Testing hooks
+ fetchingHook func([]common.Hash) // Method to call upon starting a block fetch
+ importedHook func(*types.Block) // Method to call upon successful block import
+}
+
+// New creates a block fetcher to retrieve blocks based on hash announcements.
+func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
+ return &Fetcher{
+ notify: make(chan *announce),
+ inject: make(chan *inject),
+ filter: make(chan chan []*types.Block),
+ done: make(chan common.Hash),
+ quit: make(chan struct{}),
+ announces: make(map[string]int),
+ announced: make(map[common.Hash][]*announce),
+ fetching: make(map[common.Hash]*announce),
+ queue: prque.New(),
+ queues: make(map[string]int),
+ queued: make(map[common.Hash]*inject),
+ getBlock: getBlock,
+ validateBlock: validateBlock,
+ broadcastBlock: broadcastBlock,
+ chainHeight: chainHeight,
+ insertChain: insertChain,
+ dropPeer: dropPeer,
+ }
+}
+
+// Start boots up the announcement based synchroniser, accepting and processing
+// hash notifications and block fetches until termination is requested.
+func (f *Fetcher) Start() {
+ go f.loop()
+}
+
+// Stop terminates the announcement based synchroniser, canceling all pending
+// operations.
+func (f *Fetcher) Stop() {
+ close(f.quit)
+}
+
+// Notify announces the fetcher about the potential availability of a new block in
+// the network.
+func (f *Fetcher) Notify(peer string, hash common.Hash, time time.Time, fetcher blockRequesterFn) error {
+ block := &announce{
+ hash: hash,
+ time: time,
+ origin: peer,
+ fetch: fetcher,
+ }
+ select {
+ case f.notify <- block:
+ return nil
+ case <-f.quit:
+ return errTerminated
+ }
+}
+
+// Enqueue tries to fill gaps in the fetcher's future import queue.
+func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
+ op := &inject{
+ origin: peer,
+ block: block,
+ }
+ select {
+ case f.inject <- op:
+ return nil
+ case <-f.quit:
+ return errTerminated
+ }
+}
+
+// Filter extracts all the blocks that were explicitly requested by the fetcher,
+// returning those that should be handled differently.
+func (f *Fetcher) Filter(blocks types.Blocks) types.Blocks {
+ // Send the filter channel to the fetcher
+ filter := make(chan []*types.Block)
+
+ select {
+ case f.filter <- filter:
+ case <-f.quit:
+ return nil
+ }
+ // Request the filtering of the block list
+ select {
+ case filter <- blocks:
+ case <-f.quit:
+ return nil
+ }
+ // Retrieve the blocks remaining after filtering
+ select {
+ case blocks := <-filter:
+ return blocks
+ case <-f.quit:
+ return nil
+ }
+}
+
+// loop is the main fetcher loop, checking and processing various notification
+// events.
+func (f *Fetcher) loop() {
+ // Iterate the block fetching until a quit is requested
+ fetch := time.NewTimer(0)
+ for {
+ // Clean up any expired block fetches
+ for hash, announce := range f.fetching {
+ if time.Since(announce.time) > fetchTimeout {
+ f.forgetHash(hash)
+ }
+ }
+ // Import any queued blocks that could potentially fit
+ height := f.chainHeight()
+ for !f.queue.Empty() {
+ op := f.queue.PopItem().(*inject)
+
+ // If too high up the chain or phase, continue later
+ number := op.block.NumberU64()
+ if number > height+1 {
+ f.queue.Push(op, -float32(op.block.NumberU64()))
+ break
+ }
+ // Otherwise if fresh and still unknown, try and import
+ hash := op.block.Hash()
+ if number+maxUncleDist < height || f.getBlock(hash) != nil {
+ f.forgetBlock(hash)
+ continue
+ }
+ f.insert(op.origin, op.block)
+ }
+ // Wait for an outside event to occur
+ select {
+ case <-f.quit:
+ // Fetcher terminating, abort all operations
+ return
+
+ case notification := <-f.notify:
+ // A block was announced, make sure the peer isn't DOSing us
+ count := f.announces[notification.origin] + 1
+ if count > hashLimit {
+ glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit)
+ break
+ }
+ // All is well, schedule the announce if block's not yet downloading
+ if _, ok := f.fetching[notification.hash]; ok {
+ break
+ }
+ f.announces[notification.origin] = count
+ f.announced[notification.hash] = append(f.announced[notification.hash], notification)
+ if len(f.announced) == 1 {
+ f.reschedule(fetch)
+ }
+
+ case op := <-f.inject:
+ // A direct block insertion was requested, try and fill any pending gaps
+ f.enqueue(op.origin, op.block)
+
+ case hash := <-f.done:
+ // A pending import finished, remove all traces of the notification
+ f.forgetHash(hash)
+ f.forgetBlock(hash)
+
+ case <-fetch.C:
+ // At least one block's timer ran out, check for blocks needing retrieval
+ request := make(map[string][]common.Hash)
+
+ for hash, announces := range f.announced {
+ if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
+ // Pick a random peer to retrieve from, reset all others
+ announce := announces[rand.Intn(len(announces))]
+ f.forgetHash(hash)
+
+ // If the block still didn't arrive, queue for fetching
+ if f.getBlock(hash) == nil {
+ request[announce.origin] = append(request[announce.origin], hash)
+ f.fetching[hash] = announce
+ }
+ }
+ }
+ // Send out all block requests
+ for peer, hashes := range request {
+ if glog.V(logger.Detail) && len(hashes) > 0 {
+ list := "["
+ for _, hash := range hashes {
+ list += fmt.Sprintf("%x, ", hash[:4])
+ }
+ list = list[:len(list)-2] + "]"
+
+ glog.V(logger.Detail).Infof("Peer %s: fetching %s", peer, list)
+ }
+ // Create a closure of the fetch and schedule it on a new thread
+ fetcher, hashes := f.fetching[hashes[0]].fetch, hashes
+ go func() {
+ if f.fetchingHook != nil {
+ f.fetchingHook(hashes)
+ }
+ fetcher(hashes)
+ }()
+ }
+ // Schedule the next fetch if blocks are still pending
+ f.reschedule(fetch)
+
+ case filter := <-f.filter:
+ // Blocks arrived, extract any explicit fetches, return all else
+ var blocks types.Blocks
+ select {
+ case blocks = <-filter:
+ case <-f.quit:
+ return
+ }
+
+ explicit, download := []*types.Block{}, []*types.Block{}
+ for _, block := range blocks {
+ hash := block.Hash()
+
+ // Filter explicitly requested blocks from hash announcements
+ if _, ok := f.fetching[hash]; ok {
+ // Discard if already imported by other means
+ if f.getBlock(hash) == nil {
+ explicit = append(explicit, block)
+ } else {
+ f.forgetHash(hash)
+ }
+ } else {
+ download = append(download, block)
+ }
+ }
+
+ select {
+ case filter <- download:
+ case <-f.quit:
+ return
+ }
+ // Schedule the retrieved blocks for ordered import
+ for _, block := range explicit {
+ if announce := f.fetching[block.Hash()]; announce != nil {
+ f.enqueue(announce.origin, block)
+ }
+ }
+ }
+ }
+}
+
+// reschedule resets the specified fetch timer to the next announce timeout.
+func (f *Fetcher) reschedule(fetch *time.Timer) {
+ // Short circuit if no blocks are announced
+ if len(f.announced) == 0 {
+ return
+ }
+ // Otherwise find the earliest expiring announcement
+ earliest := time.Now()
+ for _, announces := range f.announced {
+ if earliest.After(announces[0].time) {
+ earliest = announces[0].time
+ }
+ }
+ fetch.Reset(arriveTimeout - time.Since(earliest))
+}
+
+// enqueue schedules a new future import operation, if the block to be imported
+// has not yet been seen.
+func (f *Fetcher) enqueue(peer string, block *types.Block) {
+ hash := block.Hash()
+
+ // Ensure the peer isn't DOSing us
+ count := f.queues[peer] + 1
+ if count > blockLimit {
+ glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit)
+ return
+ }
+ // Discard any past or too distant blocks
+ if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
+ glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
+ return
+ }
+ // Schedule the block for future importing
+ if _, ok := f.queued[hash]; !ok {
+ op := &inject{
+ origin: peer,
+ block: block,
+ }
+ f.queues[peer] = count
+ f.queued[hash] = op
+ f.queue.Push(op, -float32(block.NumberU64()))
+
+ if glog.V(logger.Debug) {
+ glog.Infof("Peer %s: queued block #%d [%x], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
+ }
+ }
+}
+
+// insert spawns a new goroutine to run a block insertion into the chain. If the
+// block's number is at the same height as the current import phase, it updates
+// the phase states accordingly.
+func (f *Fetcher) insert(peer string, block *types.Block) {
+ hash := block.Hash()
+
+ // Run the import on a new thread
+ glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x]", peer, block.NumberU64(), hash[:4])
+ go func() {
+ defer func() { f.done <- hash }()
+
+ // If the parent's unknown, abort insertion
+ parent := f.getBlock(block.ParentHash())
+ if parent == nil {
+ return
+ }
+ // Quickly validate the header and propagate the block if it passes
+ if err := f.validateBlock(block, parent); err != nil {
+ glog.V(logger.Debug).Infof("Peer %s: block #%d [%x] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
+ f.dropPeer(peer)
+ return
+ }
+ go f.broadcastBlock(block, true)
+
+ // Run the actual import and log any issues
+ if _, err := f.insertChain(types.Blocks{block}); err != nil {
+ glog.V(logger.Warn).Infof("Peer %s: block #%d [%x] import failed: %v", peer, block.NumberU64(), hash[:4], err)
+ return
+ }
+ // If import succeeded, broadcast the block
+ go f.broadcastBlock(block, false)
+
+ // Invoke the testing hook if needed
+ if f.importedHook != nil {
+ f.importedHook(block)
+ }
+ }()
+}
+
+// forgetHash removes all traces of a block announcement from the fetcher's
+// internal state.
+func (f *Fetcher) forgetHash(hash common.Hash) {
+ // Remove all pending announces and decrement DOS counters
+ for _, announce := range f.announced[hash] {
+ f.announces[announce.origin]--
+ if f.announces[announce.origin] == 0 {
+ delete(f.announces, announce.origin)
+ }
+ }
+ delete(f.announced, hash)
+
+ // Remove any pending fetches and decrement the DOS counters
+ if announce := f.fetching[hash]; announce != nil {
+ f.announces[announce.origin]--
+ if f.announces[announce.origin] == 0 {
+ delete(f.announces, announce.origin)
+ }
+ delete(f.fetching, hash)
+ }
+}
+
+// forgetBlock removes all traces of a queued block from the fetcher's internal
+// state.
+func (f *Fetcher) forgetBlock(hash common.Hash) {
+ if insert := f.queued[hash]; insert != nil {
+ f.queues[insert.origin]--
+ if f.queues[insert.origin] == 0 {
+ delete(f.queues, insert.origin)
+ }
+ delete(f.queued, hash)
+ }
+}
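
Both loop and enqueue above push queue entries with the negated block number, turning prque's max-priority behaviour into a min-queue so the lowest numbered block is always imported first. A standalone illustration of the trick, using the same cookiejar package:

    package main

    import (
        "fmt"

        "gopkg.in/karalabe/cookiejar.v2/collections/prque"
    )

    func main() {
        queue := prque.New()
        for _, number := range []uint64{7, 3, 5} {
            // Negating the number makes smaller blocks the higher priority
            queue.Push(number, -float32(number))
        }
        for !queue.Empty() {
            fmt.Println(queue.PopItem().(uint64)) // Prints 3, 5, 7 in order
        }
    }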
diff --git a/eth/fetcher/fetcher_test.go b/eth/fetcher/fetcher_test.go
new file mode 100644
index 000000000..80247d9d2
--- /dev/null
+++ b/eth/fetcher/fetcher_test.go
@@ -0,0 +1,524 @@
+package fetcher
+
+import (
+ "encoding/binary"
+ "errors"
+ "math/big"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+var (
+ knownHash = common.Hash{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+ unknownHash = common.Hash{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
+ bannedHash = common.Hash{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}
+
+ genesis = createBlock(1, common.Hash{}, knownHash)
+)
+
+// idCounter is used by the createHashes method to generate deterministic but unique hashes
+var idCounter = int64(2) // #1 is the genesis block
+
+// createHashes generates a batch of hashes rooted at a specific point in the chain.
+func createHashes(amount int, root common.Hash) (hashes []common.Hash) {
+ hashes = make([]common.Hash, amount+1)
+ hashes[len(hashes)-1] = root
+
+ for i := 0; i < len(hashes)-1; i++ {
+ binary.BigEndian.PutUint64(hashes[i][:8], uint64(idCounter))
+ idCounter++
+ }
+ return
+}
+
+// createBlock assembles a new block at the given chain height.
+func createBlock(i int, parent, hash common.Hash) *types.Block {
+ header := &types.Header{Number: big.NewInt(int64(i))}
+ block := types.NewBlockWithHeader(header)
+ block.HeaderHash = hash
+ block.ParentHeaderHash = parent
+ return block
+}
+
+// copyBlock makes a deep copy of a block suitable for local modifications.
+func copyBlock(block *types.Block) *types.Block {
+ return createBlock(int(block.Number().Int64()), block.ParentHeaderHash, block.HeaderHash)
+}
+
+// createBlocksFromHashes assembles a collection of blocks, each having a correct
+// place in the given hash chain.
+func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
+ blocks := make(map[common.Hash]*types.Block)
+ for i := 0; i < len(hashes); i++ {
+ parent := knownHash
+ if i < len(hashes)-1 {
+ parent = hashes[i+1]
+ }
+ blocks[hashes[i]] = createBlock(len(hashes)-i, parent, hashes[i])
+ }
+ return blocks
+}
+
+// fetcherTester is a test simulator for mocking out the local block chain.
+type fetcherTester struct {
+ fetcher *Fetcher
+
+ hashes []common.Hash // Hash chain belonging to the tester
+ blocks map[common.Hash]*types.Block // Blocks belonging to the tester
+
+ lock sync.RWMutex
+}
+
+// newTester creates a new fetcher test mocker.
+func newTester() *fetcherTester {
+ tester := &fetcherTester{
+ hashes: []common.Hash{knownHash},
+ blocks: map[common.Hash]*types.Block{knownHash: genesis},
+ }
+ tester.fetcher = New(tester.getBlock, tester.verifyBlock, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
+ tester.fetcher.Start()
+
+ return tester
+}
+
+// getBlock retrieves a block from the tester's block chain.
+func (f *fetcherTester) getBlock(hash common.Hash) *types.Block {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.blocks[hash]
+}
+
+// verifyBlock is a nop placeholder for the block header verification.
+func (f *fetcherTester) verifyBlock(block *types.Block, parent *types.Block) error {
+ return nil
+}
+
+// broadcastBlock is a nop placeholder for the block broadcasting.
+func (f *fetcherTester) broadcastBlock(block *types.Block, propagate bool) {
+}
+
+// chainHeight retrieves the current height (block number) of the chain.
+func (f *fetcherTester) chainHeight() uint64 {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64()
+}
+
+// insertChain injects new blocks into the simulated chain.
+func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ for i, block := range blocks {
+ // Make sure the parent is known
+ if _, ok := f.blocks[block.ParentHash()]; !ok {
+ return i, errors.New("unknown parent")
+ }
+ // Discard any new blocks if the same height already exists
+ if block.NumberU64() <= f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() {
+ return i, nil
+ }
+ // Otherwise build our current chain
+ f.hashes = append(f.hashes, block.Hash())
+ f.blocks[block.Hash()] = block
+ }
+ return 0, nil
+}
+
+// dropPeer is a nop placeholder for the peer removal.
+func (f *fetcherTester) dropPeer(peer string) {
+}
+
+// makeFetcher retrieves a block fetcher associated with a simulated peer.
+func (f *fetcherTester) makeFetcher(blocks map[common.Hash]*types.Block) blockRequesterFn {
+ // Copy all the blocks to ensure they are not tampered with
+ closure := make(map[common.Hash]*types.Block)
+ for hash, block := range blocks {
+ closure[hash] = copyBlock(block)
+ }
+ // Create a function that returns blocks from the closure
+ return func(hashes []common.Hash) error {
+ // Gather the blocks to return
+ blocks := make([]*types.Block, 0, len(hashes))
+ for _, hash := range hashes {
+ if block, ok := closure[hash]; ok {
+ blocks = append(blocks, block)
+ }
+ }
+ // Return on a new thread
+ go f.fetcher.Filter(blocks)
+
+ return nil
+ }
+}
+
+// verifyImportEvent verifies that one single event arrives on an import channel.
+func verifyImportEvent(t *testing.T, imported chan *types.Block) {
+ select {
+ case <-imported:
+ case <-time.After(time.Second):
+ t.Fatalf("import timeout")
+ }
+}
+
+// verifyImportCount verifies that exactly count events arrive on an import
+// hook channel.
+func verifyImportCount(t *testing.T, imported chan *types.Block, count int) {
+ for i := 0; i < count; i++ {
+ select {
+ case <-imported:
+ case <-time.After(time.Second):
+ t.Fatalf("block %d: import timeout", i)
+ }
+ }
+ verifyImportDone(t, imported)
+}
+
+// verifyImportDone verifies that no more events are arriving on an import channel.
+func verifyImportDone(t *testing.T, imported chan *types.Block) {
+ select {
+ case <-imported:
+ t.Fatalf("extra block imported")
+ case <-time.After(50 * time.Millisecond):
+ }
+}
+
+// Tests that a fetcher accepts block announcements and initiates retrievals for
+// them, successfully importing into the local chain.
+func TestSequentialAnnouncements(t *testing.T) {
+ // Create a chain of blocks to import
+ targetBlocks := 4 * hashLimit
+ hashes := createHashes(targetBlocks, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+
+ tester := newTester()
+ fetcher := tester.makeFetcher(blocks)
+
+ // Iteratively announce blocks until all are imported
+ imported := make(chan *types.Block)
+ tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+ for i := len(hashes) - 2; i >= 0; i-- {
+ tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
+ verifyImportEvent(t, imported)
+ }
+ verifyImportDone(t, imported)
+}
+
+// Tests that if blocks are announced by multiple peers (or even the same buggy
+// peer), they will get downloaded at most once.
+func TestConcurrentAnnouncements(t *testing.T) {
+ // Create a chain of blocks to import
+ targetBlocks := 4 * hashLimit
+ hashes := createHashes(targetBlocks, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+
+ // Assemble a tester with a built in counter for the requests
+ tester := newTester()
+ fetcher := tester.makeFetcher(blocks)
+
+ counter := uint32(0)
+ wrapper := func(hashes []common.Hash) error {
+ atomic.AddUint32(&counter, uint32(len(hashes)))
+ return fetcher(hashes)
+ }
+ // Iteratively announce blocks until all are imported
+ imported := make(chan *types.Block)
+ tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+ for i := len(hashes) - 2; i >= 0; i-- {
+ tester.fetcher.Notify("first", hashes[i], time.Now().Add(-arriveTimeout), wrapper)
+ tester.fetcher.Notify("second", hashes[i], time.Now().Add(-arriveTimeout+time.Millisecond), wrapper)
+ tester.fetcher.Notify("second", hashes[i], time.Now().Add(-arriveTimeout-time.Millisecond), wrapper)
+
+ verifyImportEvent(t, imported)
+ }
+ verifyImportDone(t, imported)
+
+ // Make sure no blocks were retrieved twice
+ if int(counter) != targetBlocks {
+ t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks)
+ }
+}
+
+// Tests that announcements arriving while a previous one is being fetched still
+// result in a valid import.
+func TestOverlappingAnnouncements(t *testing.T) {
+ // Create a chain of blocks to import
+ targetBlocks := 4 * hashLimit
+ hashes := createHashes(targetBlocks, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+
+ tester := newTester()
+ fetcher := tester.makeFetcher(blocks)
+
+ // Iteratively announce blocks, but overlap them continuously
+ fetching := make(chan []common.Hash)
+ imported := make(chan *types.Block, len(hashes)-1)
+ tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
+ tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+ for i := len(hashes) - 2; i >= 0; i-- {
+ tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
+ select {
+ case <-fetching:
+ case <-time.After(time.Second):
+ t.Fatalf("hash %d: announce timeout", len(hashes)-i)
+ }
+ }
+ // Wait for all the imports to complete and check count
+ verifyImportCount(t, imported, len(hashes)-1)
+}
+
+// Tests that announces already being retrieved will not be duplicated.
+func TestPendingDeduplication(t *testing.T) {
+ // Create a hash and corresponding block
+ hashes := createHashes(1, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+
+ // Assemble a tester with a built in counter and delayed fetcher
+ tester := newTester()
+ fetcher := tester.makeFetcher(blocks)
+
+ delay := 50 * time.Millisecond
+ counter := uint32(0)
+ wrapper := func(hashes []common.Hash) error {
+ atomic.AddUint32(&counter, uint32(len(hashes)))
+
+ // Simulate a long running fetch
+ go func() {
+ time.Sleep(delay)
+ fetcher(hashes)
+ }()
+ return nil
+ }
+ // Announce the same block many times until it's fetched (wait for any pending ops)
+ for tester.getBlock(hashes[0]) == nil {
+ tester.fetcher.Notify("repeater", hashes[0], time.Now().Add(-arriveTimeout), wrapper)
+ time.Sleep(time.Millisecond)
+ }
+ time.Sleep(delay)
+
+ // Check that all blocks were imported and none fetched twice
+ if imported := len(tester.blocks); imported != 2 {
+ t.Fatalf("synchronised block mismatch: have %v, want %v", imported, 2)
+ }
+ if int(counter) != 1 {
+ t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1)
+ }
+}
+
+// Tests that announcements retrieved in a random order are cached and eventually
+// imported when all the gaps are filled in.
+func TestRandomArrivalImport(t *testing.T) {
+ // Create a chain of blocks to import, and choose one to delay
+ hashes := createHashes(maxQueueDist, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+ skip := maxQueueDist / 2
+
+ tester := newTester()
+ fetcher := tester.makeFetcher(blocks)
+
+ // Iteratively announce blocks, skipping one entry
+ imported := make(chan *types.Block, len(hashes)-1)
+ tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+ for i := len(hashes) - 1; i >= 0; i-- {
+ if i != skip {
+ tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
+ time.Sleep(time.Millisecond)
+ }
+ }
+ // Finally announce the skipped entry and check full import
+ tester.fetcher.Notify("valid", hashes[skip], time.Now().Add(-arriveTimeout), fetcher)
+ verifyImportCount(t, imported, len(hashes)-1)
+}
+
+// Tests that direct block enqueues (due to block propagation vs. hash announce)
+// are correctly scheduled, filling any import queue gaps.
+func TestQueueGapFill(t *testing.T) {
+ // Create a chain of blocks to import, and choose one to not announce at all
+ hashes := createHashes(maxQueueDist, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+ skip := maxQueueDist / 2
+
+ tester := newTester()
+ fetcher := tester.makeFetcher(blocks)
+
+ // Iteratively announce blocks, skipping one entry
+ imported := make(chan *types.Block, len(hashes)-1)
+ tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+ for i := len(hashes) - 1; i >= 0; i-- {
+ if i != skip {
+ tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), fetcher)
+ time.Sleep(time.Millisecond)
+ }
+ }
+ // Fill the missing block directly as if propagated
+ tester.fetcher.Enqueue("valid", blocks[hashes[skip]])
+ verifyImportCount(t, imported, len(hashes)-1)
+}
+
+// Tests that blocks arriving from various sources (multiple propagations, hash
+// announces, etc) do not get scheduled for import multiple times.
+func TestImportDeduplication(t *testing.T) {
+ // Create two blocks to import (one for duplication, the other for stalling)
+ hashes := createHashes(2, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+
+ // Create the tester and wrap the importer with a counter
+ tester := newTester()
+ fetcher := tester.makeFetcher(blocks)
+
+ counter := uint32(0)
+ tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) {
+ atomic.AddUint32(&counter, uint32(len(blocks)))
+ return tester.insertChain(blocks)
+ }
+ // Instrument the fetching and imported events
+ fetching := make(chan []common.Hash)
+ imported := make(chan *types.Block, len(hashes)-1)
+ tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
+ tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+ // Announce the duplicating block, wait for retrieval, and also propagate directly
+ tester.fetcher.Notify("valid", hashes[0], time.Now().Add(-arriveTimeout), fetcher)
+ <-fetching
+
+ tester.fetcher.Enqueue("valid", blocks[hashes[0]])
+ tester.fetcher.Enqueue("valid", blocks[hashes[0]])
+ tester.fetcher.Enqueue("valid", blocks[hashes[0]])
+
+ // Fill the missing block directly as if propagated, and check import uniqueness
+ tester.fetcher.Enqueue("valid", blocks[hashes[1]])
+ verifyImportCount(t, imported, 2)
+
+ if counter != 2 {
+ t.Fatalf("import invocation count mismatch: have %v, want %v", counter, 2)
+ }
+}
+
+// Tests that blocks with numbers much lower or higher than our current head get
+// discarded to prevent wasting resources on useless blocks from faulty peers.
+func TestDistantDiscarding(t *testing.T) {
+ // Create a long chain to import
+ hashes := createHashes(3*maxQueueDist, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+
+ head := hashes[len(hashes)/2]
+
+ // Create a tester and simulate a head block being the middle of the above chain
+ tester := newTester()
+ tester.hashes = []common.Hash{head}
+ tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
+
+ // Ensure that a block with a lower number than the threshold is discarded
+ tester.fetcher.Enqueue("lower", blocks[hashes[0]])
+ time.Sleep(10 * time.Millisecond)
+ if !tester.fetcher.queue.Empty() {
+ t.Fatalf("fetcher queued stale block")
+ }
+ // Ensure that a block with a higher number than the threshold is discarded
+ tester.fetcher.Enqueue("higher", blocks[hashes[len(hashes)-1]])
+ time.Sleep(10 * time.Millisecond)
+ if !tester.fetcher.queue.Empty() {
+ t.Fatalf("fetcher queued future block")
+ }
+}
+
+// Tests that a peer is unable to use unbounded memory by sending infinite
+// block announcements to a node, but that even in the face of such an attack,
+// the fetcher remains operational.
+func TestHashMemoryExhaustionAttack(t *testing.T) {
+ // Create a tester with instrumented import hooks
+ tester := newTester()
+
+ imported := make(chan *types.Block)
+ tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+ // Create a valid chain and an infinite junk chain
+ hashes := createHashes(hashLimit+2*maxQueueDist, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+ valid := tester.makeFetcher(blocks)
+
+ attack := createHashes(hashLimit+2*maxQueueDist, unknownHash)
+ attacker := tester.makeFetcher(nil)
+
+ // Feed the tester a huge hashset from the attacker, and a limited one from the valid peer
+ for i := 0; i < len(attack); i++ {
+ if i < maxQueueDist {
+ tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], time.Now(), valid)
+ }
+ tester.fetcher.Notify("attacker", attack[i], time.Now(), attacker)
+ }
+ if len(tester.fetcher.announced) != hashLimit+maxQueueDist {
+ t.Fatalf("queued announce count mismatch: have %d, want %d", len(tester.fetcher.announced), hashLimit+maxQueueDist)
+ }
+ // Wait for fetches to complete
+ verifyImportCount(t, imported, maxQueueDist)
+
+ // Feed the remaining valid hashes to ensure DOS protection state remains clean
+ for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- {
+ tester.fetcher.Notify("valid", hashes[i], time.Now().Add(-arriveTimeout), valid)
+ verifyImportEvent(t, imported)
+ }
+ verifyImportDone(t, imported)
+}
+
+// Tests that blocks sent to the fetcher (either through propagation or via hash
+// announces and retrievals) don't pile up indefinitely, exhausting available
+// system memory.
+func TestBlockMemoryExhaustionAttack(t *testing.T) {
+ // Create a tester with instrumented import hooks
+ tester := newTester()
+
+ imported := make(chan *types.Block)
+ tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
+
+ // Create a valid chain and a batch of dangling (but in range) blocks
+ hashes := createHashes(blockLimit+2*maxQueueDist, knownHash)
+ blocks := createBlocksFromHashes(hashes)
+
+ attack := make(map[common.Hash]*types.Block)
+ for len(attack) < blockLimit+2*maxQueueDist {
+ hashes := createHashes(maxQueueDist-1, unknownHash)
+ blocks := createBlocksFromHashes(hashes)
+ for _, hash := range hashes[:maxQueueDist-2] {
+ attack[hash] = blocks[hash]
+ }
+ }
+ // Try to feed all the attacker blocks and make sure only a limited batch is accepted
+ for _, block := range attack {
+ tester.fetcher.Enqueue("attacker", block)
+ }
+ time.Sleep(100 * time.Millisecond)
+ if queued := tester.fetcher.queue.Size(); queued != blockLimit {
+ t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit)
+ }
+ // Queue up a batch of valid blocks, and check that a new peer is allowed to do so
+ for i := 0; i < maxQueueDist-1; i++ {
+ tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]])
+ }
+ time.Sleep(100 * time.Millisecond)
+ if queued := tester.fetcher.queue.Size(); queued != blockLimit+maxQueueDist-1 {
+ t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1)
+ }
+ // Insert the missing piece (and sanity check the import)
+ tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2]])
+ verifyImportCount(t, imported, maxQueueDist)
+
+ // Insert the remaining blocks in chunks to ensure clean DOS protection
+ for i := maxQueueDist; i < len(hashes)-1; i++ {
+ tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2-i]])
+ verifyImportEvent(t, imported)
+ }
+ verifyImportDone(t, imported)
+}
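
The handler changes below wire the fetcher into the eth protocol handler; condensed, the integration amounts to the sketch here, where chainman, pow, p and removePeer stand in for the handler's real fields and the calls mirror the diff that follows:

    // Construct the fetcher with closures adapting the chain manager's API
    validator := func(block *types.Block, parent *types.Block) error {
        return core.ValidateHeader(pow, block.Header(), parent.Header(), true)
    }
    heighter := func() uint64 {
        return chainman.CurrentBlock().NumberU64()
    }
    f := fetcher.New(chainman.GetBlock, validator, broadcastBlock, heighter, chainman.InsertChain, removePeer)
    f.Start()

    // NewBlockHashesMsg: hand any unknown hashes to the fetcher
    f.Notify(p.id, hash, time.Now(), p.requestBlocks)

    // NewBlockMsg: schedule a directly propagated block for import
    f.Enqueue(p.id, request.Block)

    // BlocksMsg: keep explicitly fetched blocks, pass the rest to the downloader
    if rest := f.Filter(blocks); len(rest) > 0 {
        downloader.DeliverBlocks(p.id, rest)
    }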
diff --git a/eth/handler.go b/eth/handler.go
index ec4f2d53a..ad88e9c59 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -3,7 +3,6 @@ package eth
import (
"fmt"
"math"
- "math/big"
"sync"
"time"
@@ -11,10 +10,12 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/eth/fetcher"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -45,6 +46,7 @@ type ProtocolManager struct {
txpool txPool
chainman *core.ChainManager
downloader *downloader.Downloader
+ fetcher *fetcher.Fetcher
peers *peerSet
SubProtocol p2p.Protocol
@@ -54,11 +56,9 @@ type ProtocolManager struct {
minedBlockSub event.Subscription
// channels for fetcher, syncer, txsyncLoop
- newPeerCh chan *peer
- newHashCh chan []*blockAnnounce
- newBlockCh chan chan []*types.Block
- txsyncCh chan *txsync
- quitSync chan struct{}
+ newPeerCh chan *peer
+ txsyncCh chan *txsync
+ quitSync chan struct{}
// wait group is used for graceful shutdowns during downloading
// and processing
@@ -68,31 +68,37 @@ type ProtocolManager struct {
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum
// sub protocol manages peers capable of communicating with the ethereum network.
-func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpool txPool, chainman *core.ChainManager) *ProtocolManager {
+func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, chainman *core.ChainManager) *ProtocolManager {
+ // Create the protocol manager and initialize peer handlers
manager := &ProtocolManager{
- eventMux: mux,
- txpool: txpool,
- chainman: chainman,
- peers: newPeerSet(),
- newPeerCh: make(chan *peer, 1),
- newHashCh: make(chan []*blockAnnounce, 1),
- newBlockCh: make(chan chan []*types.Block),
- txsyncCh: make(chan *txsync),
- quitSync: make(chan struct{}),
+ eventMux: mux,
+ txpool: txpool,
+ chainman: chainman,
+ peers: newPeerSet(),
+ newPeerCh: make(chan *peer, 1),
+ txsyncCh: make(chan *txsync),
+ quitSync: make(chan struct{}),
}
- manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.InsertChain, manager.removePeer)
manager.SubProtocol = p2p.Protocol{
Name: "eth",
Version: uint(protocolVersion),
Length: ProtocolLength,
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(protocolVersion, networkId, p, rw)
-
manager.newPeerCh <- peer
-
return manager.handle(peer)
},
}
+ // Construct the different synchronisation mechanisms
+ manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.InsertChain, manager.removePeer)
+
+ validator := func(block *types.Block, parent *types.Block) error {
+ return core.ValidateHeader(pow, block.Header(), parent.Header(), true)
+ }
+ heighter := func() uint64 {
+ return manager.chainman.CurrentBlock().NumberU64()
+ }
+ manager.fetcher = fetcher.New(manager.chainman.GetBlock, validator, manager.BroadcastBlock, heighter, manager.chainman.InsertChain, manager.removePeer)
return manager
}
@@ -126,7 +132,6 @@ func (pm *ProtocolManager) Start() {
// start sync handlers
go pm.syncer()
- go pm.fetcher()
go pm.txsyncLoop()
}
@@ -185,7 +190,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
return nil
}
-func (self *ProtocolManager) handleMsg(p *peer) error {
+func (pm *ProtocolManager) handleMsg(p *peer) error {
msg, err := p.rw.ReadMsg()
if err != nil {
return err
@@ -215,7 +220,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
RemoteId: p.ID().String(),
})
}
- self.txpool.AddTransactions(txs)
+ pm.txpool.AddTransactions(txs)
case GetBlockHashesMsg:
var request getBlockHashesMsgData
@@ -227,7 +232,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
request.Amount = uint64(downloader.MaxHashFetch)
}
- hashes := self.chainman.GetBlockHashesFromHash(request.Hash, request.Amount)
+ hashes := pm.chainman.GetBlockHashesFromHash(request.Hash, request.Amount)
if glog.V(logger.Debug) {
if len(hashes) == 0 {
@@ -245,40 +250,50 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
if err := msgStream.Decode(&hashes); err != nil {
break
}
- err := self.downloader.DeliverHashes(p.id, hashes)
+ err := pm.downloader.DeliverHashes(p.id, hashes)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}
case GetBlocksMsg:
- var blocks []*types.Block
-
+ // Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
return err
}
+ // Gather blocks until the fetch or network limit is reached
var (
- i int
- totalsize common.StorageSize
+ hash common.Hash
+ bytes common.StorageSize
+ hashes []common.Hash
+ blocks []*types.Block
)
for {
- i++
- var hash common.Hash
err := msgStream.Decode(&hash)
if err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
+ hashes = append(hashes, hash)
- block := self.chainman.GetBlock(hash)
- if block != nil {
+ // Retrieve the requested block, stopping if enough was found
+ if block := pm.chainman.GetBlock(hash); block != nil {
blocks = append(blocks, block)
- totalsize += block.Size()
+ bytes += block.Size()
+ if len(blocks) >= downloader.MaxBlockFetch || bytes > maxBlockRespSize {
+ break
+ }
}
- if i == downloader.MaxBlockFetch || totalsize > maxBlockRespSize {
- break
+ }
+ if glog.V(logger.Detail) && len(blocks) == 0 && len(hashes) > 0 {
+ list := "["
+ for _, hash := range hashes {
+ list += fmt.Sprintf("%x, ", hash[:4])
}
+ list = list[:len(list)-2] + "]"
+
+ glog.Infof("Peer %s: no blocks found for requested hashes %s", p.id, list)
}
return p.sendBlocks(blocks)
@@ -291,20 +306,13 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
glog.V(logger.Detail).Infoln("Decode error", err)
blocks = nil
}
- // Filter out any explicitly requested blocks (cascading select to get blocking back to peer)
- filter := make(chan []*types.Block)
- select {
- case <-self.quitSync:
- case self.newBlockCh <- filter:
- select {
- case <-self.quitSync:
- case filter <- blocks:
- select {
- case <-self.quitSync:
- case blocks := <-filter:
- self.downloader.DeliverBlocks(p.id, blocks)
- }
- }
+ // Update the receive timestamp of each block
+ for i := 0; i < len(blocks); i++ {
+ blocks[i].ReceivedAt = msg.ReceivedAt
+ }
+ // Filter out any explicitly requested blocks, deliver the rest to the downloader
+ if blocks := pm.fetcher.Filter(blocks); len(blocks) > 0 {
+ pm.downloader.DeliverBlocks(p.id, blocks)
}
case NewBlockHashesMsg:
@@ -323,26 +331,16 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
// Schedule all the unknown hashes for retrieval
unknown := make([]common.Hash, 0, len(hashes))
for _, hash := range hashes {
- if !self.chainman.HasBlock(hash) {
+ if !pm.chainman.HasBlock(hash) {
unknown = append(unknown, hash)
}
}
- announces := make([]*blockAnnounce, len(unknown))
- for i, hash := range unknown {
- announces[i] = &blockAnnounce{
- hash: hash,
- peer: p,
- time: time.Now(),
- }
- }
- if len(announces) > 0 {
- select {
- case self.newHashCh <- announces:
- case <-self.quitSync:
- }
+ for _, hash := range unknown {
+ pm.fetcher.Notify(p.id, hash, time.Now(), p.requestBlocks)
}
case NewBlockMsg:
+ // Retrieve and decode the propagated block
var request newBlockMsgData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
@@ -352,9 +350,24 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
}
request.Block.ReceivedAt = msg.ReceivedAt
- if err := self.importBlock(p, request.Block, request.TD); err != nil {
- return err
- }
+ // Log the block's arrival, whatever its source
+ _, chainHead, _ := pm.chainman.Status()
+ jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{
+ BlockHash: request.Block.Hash().Hex(),
+ BlockNumber: request.Block.Number(),
+ ChainHeadHash: chainHead.Hex(),
+ BlockPrevHash: request.Block.ParentHash().Hex(),
+ RemoteId: p.ID().String(),
+ })
+ // Mark the peer as owning the block and schedule it for import
+ p.blockHashes.Add(request.Block.Hash())
+ p.SetHead(request.Block.Hash())
+
+ pm.fetcher.Enqueue(p.id, request.Block)
+
+ // TODO: Schedule a sync to cover potential gaps (this needs proto update)
+ p.SetTd(request.TD)
+ go pm.synchronise(p)
default:
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
@@ -362,76 +375,27 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
return nil
}
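
With importBlock gone, handleMsg feeds the fetcher through exactly two entry points: Notify for bare hash announcements (NewBlockHashesMsg) and Enqueue for fully propagated blocks (NewBlockMsg). The sketch below assumes signatures along the lines the diff suggests; the interface and stub are illustrative stand-ins, not the real fetcher.

package main

import (
	"fmt"
	"time"
)

type announceFetcher interface {
	Notify(peer string, hash string, at time.Time, fetch func(hashes []string) error) error
	Enqueue(peer string, hash string) error
}

type stubFetcher struct{}

// Notify schedules an announced hash for timed retrieval from its origin peer.
func (stubFetcher) Notify(peer string, hash string, at time.Time, fetch func(hashes []string) error) error {
	fmt.Printf("scheduled %s from %s for timed retrieval\n", hash, peer)
	return nil
}

// Enqueue queues an already-delivered block for ordered import.
func (stubFetcher) Enqueue(peer string, hash string) error {
	fmt.Printf("queued %s from %s for import\n", hash, peer)
	return nil
}

func main() {
	var f announceFetcher = stubFetcher{}
	f.Notify("peer1", "0xabcd", time.Now(), func([]string) error { return nil })
	f.Enqueue("peer1", "0xbeef")
}

Funneling both paths into one component lets the fetcher own all dedup, timeout, and import-ordering state that the deleted inline state machine used to juggle.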
-// importBlocks injects a new block retrieved from the given peer into the chain
-// manager.
-func (pm *ProtocolManager) importBlock(p *peer, block *types.Block, td *big.Int) error {
+// BroadcastBlock will either propagate a block to a subset of its peers, or
+// will only announce its availability (depending on what's requested).
+func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
hash := block.Hash()
+ peers := pm.peers.PeersWithoutBlock(hash)
- // Mark the block as present at the remote node (don't duplicate already held data)
- p.blockHashes.Add(hash)
- p.SetHead(hash)
- if td != nil {
- p.SetTd(td)
+ // If propagation is requested, send to a subset of the peers
+ if propagate {
+ transfer := peers[:int(math.Sqrt(float64(len(peers))))]
+ for _, peer := range transfer {
+ peer.sendNewBlock(block)
+ }
+ glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
}
- // Log the block's arrival
- _, chainHead, _ := pm.chainman.Status()
- jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{
- BlockHash: hash.Hex(),
- BlockNumber: block.Number(),
- ChainHeadHash: chainHead.Hex(),
- BlockPrevHash: block.ParentHash().Hex(),
- RemoteId: p.ID().String(),
- })
- // If the block's already known or its difficulty is lower than ours, drop
+ // Otherwise, if the block is indeed in our own chain, announce it
if pm.chainman.HasBlock(hash) {
- p.SetTd(pm.chainman.GetBlock(hash).Td) // update the peer's TD to the real value
- return nil
- }
- if td != nil && pm.chainman.Td().Cmp(td) > 0 && new(big.Int).Add(block.Number(), big.NewInt(7)).Cmp(pm.chainman.CurrentBlock().Number()) < 0 {
- glog.V(logger.Debug).Infof("[%s] dropped block %v due to low TD %v\n", p.id, block.Number(), td)
- return nil
- }
- // Attempt to insert the newly received block and propagate to our peers
- if pm.chainman.HasBlock(block.ParentHash()) {
- if _, err := pm.chainman.InsertChain(types.Blocks{block}); err != nil {
- glog.V(logger.Error).Infoln("removed peer (", p.id, ") due to block error", err)
- return err
+ for _, peer := range peers {
+ peer.sendNewBlockHashes([]common.Hash{hash})
}
- if td != nil && block.Td.Cmp(td) != 0 {
- err := fmt.Errorf("invalid TD on block(%v) from peer(%s): block.td=%v, request.td=%v", block.Number(), p.id, block.Td, td)
- glog.V(logger.Error).Infoln(err)
- return err
- }
- pm.BroadcastBlock(hash, block)
- return nil
- }
- // Parent of the block is unknown, try to sync with this peer if it seems to be good
- if td != nil {
- go pm.synchronise(p)
- }
- return nil
-}
-
-// BroadcastBlock will propagate the block to a subset of its connected peers,
-// only notifying the rest of the block's appearance.
-func (pm *ProtocolManager) BroadcastBlock(hash common.Hash, block *types.Block) {
- // Retrieve all the target peers and split between full broadcast or only notification
- peers := pm.peers.PeersWithoutBlock(hash)
- split := int(math.Sqrt(float64(len(peers))))
-
- transfer := peers[:split]
- notify := peers[split:]
-
- // Send out the data transfers and the notifications
- for _, peer := range notify {
- peer.sendNewBlockHashes([]common.Hash{hash})
- }
- glog.V(logger.Detail).Infoln("broadcast hash to", len(notify), "peers.")
-
- for _, peer := range transfer {
- peer.sendNewBlock(block)
+ glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
}
- glog.V(logger.Detail).Infoln("broadcast block to", len(transfer), "peers. Total processing time:", time.Since(block.ReceivedAt))
}
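
The propagate branch above forwards the full block to only sqrt(n) of the eligible peers, keeping upload cost sublinear in peer count, while the announce mode covers every listed peer with just the hash. A standalone sketch of that rule (peer names and the printing are placeholders):

package main

import (
	"fmt"
	"math"
)

// broadcast mirrors the two BroadcastBlock modes: full transfer to a
// square-root subset, or a hash-only announcement to every listed peer.
func broadcast(peers []string, propagate bool) {
	if propagate {
		for _, p := range peers[:int(math.Sqrt(float64(len(peers))))] {
			fmt.Println("full block ->", p)
		}
		return
	}
	for _, p := range peers {
		fmt.Println("hash only ->", p)
	}
}

func main() {
	peers := []string{"p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9"}
	broadcast(peers, true)  // block body reaches 3 of 9 peers
	broadcast(peers, false) // hash reaches all 9
}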
// BroadcastTx will propagate the transaction to its connected peers. It will sort
@@ -453,7 +417,8 @@ func (self *ProtocolManager) minedBroadcastLoop() {
for obj := range self.minedBlockSub.Chan() {
switch ev := obj.(type) {
case core.NewMinedBlockEvent:
- self.BroadcastBlock(ev.Block.Hash(), ev.Block)
+ self.BroadcastBlock(ev.Block, true) // First propagate block to peers
+ self.BroadcastBlock(ev.Block, false) // Only then announce to the rest
}
}
}
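
minedBroadcastLoop now calls BroadcastBlock twice per mined block: first with propagate=true so the block body starts spreading, then with propagate=false so the hash announcement reaches everyone else only once the data is already in flight. A small sketch of that two-phase loop over a channel of mined blocks (the types are stand-ins for the real event objects):

package main

import "fmt"

type block struct{ number uint64 }

func broadcastBlock(b block, propagate bool) {
	if propagate {
		fmt.Printf("block %d: full transfer to sqrt subset\n", b.number)
	} else {
		fmt.Printf("block %d: hash announcement to the rest\n", b.number)
	}
}

// minedLoop mirrors minedBroadcastLoop: propagate first, announce second.
func minedLoop(mined <-chan block) {
	for b := range mined {
		broadcastBlock(b, true)  // first propagate the block itself
		broadcastBlock(b, false) // only then announce it to everyone else
	}
}

func main() {
	mined := make(chan block, 1)
	mined <- block{42}
	close(mined)
	minedLoop(mined)
}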
diff --git a/eth/protocol_test.go b/eth/protocol_test.go
index 69d487c71..6e0eef59c 100644
--- a/eth/protocol_test.go
+++ b/eth/protocol_test.go
@@ -167,7 +167,7 @@ func newProtocolManagerForTesting(txAdded chan<- []*types.Transaction) *Protocol
db, _ = ethdb.NewMemDatabase()
chain, _ = core.NewChainManager(core.GenesisBlock(0, db), db, db, core.FakePow{}, em)
txpool = &fakeTxPool{added: txAdded}
- pm = NewProtocolManager(ProtocolVersion, 0, em, txpool, chain)
+ pm = NewProtocolManager(ProtocolVersion, 0, em, txpool, core.FakePow{}, chain)
)
pm.Start()
return pm
diff --git a/eth/sync.go b/eth/sync.go
index a3b177a4d..82abb725f 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -12,11 +12,8 @@ import (
)
const (
- forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available
- notifyCheckCycle = 100 * time.Millisecond // Time interval to allow hash notifies to fulfill before hard fetching
- notifyArriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
- notifyFetchTimeout = 5 * time.Second // Maximum alloted time to return an explicitly requested block
- minDesiredPeerCount = 5 // Amount of peers desired to start syncing
+ forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available
+ minDesiredPeerCount = 5 // Amount of peers desired to start syncing
// This is the target size for the packs of transactions sent by txsyncLoop.
// A pack can get larger than this if a single transaction exceeds this size.
@@ -119,140 +116,15 @@ func (pm *ProtocolManager) txsyncLoop() {
}
}
-// fetcher is responsible for collecting hash notifications, and periodically
-// checking all unknown ones and individually fetching them.
-func (pm *ProtocolManager) fetcher() {
- announces := make(map[common.Hash][]*blockAnnounce)
- request := make(map[*peer][]common.Hash)
- pending := make(map[common.Hash]*blockAnnounce)
- cycle := time.Tick(notifyCheckCycle)
- done := make(chan common.Hash)
-
- // Iterate the block fetching until a quit is requested
- for {
- select {
- case notifications := <-pm.newHashCh:
- // A batch of hashes the notified, schedule them for retrieval
- glog.V(logger.Debug).Infof("Scheduling %d hash announcements from %s", len(notifications), notifications[0].peer.id)
- for _, announce := range notifications {
- // Skip if it's already pending fetch
- if _, ok := pending[announce.hash]; ok {
- continue
- }
- // Otherwise queue up the peer as a potential source
- announces[announce.hash] = append(announces[announce.hash], announce)
- }
-
- case hash := <-done:
- // A pending import finished, remove all traces
- delete(pending, hash)
-
- case <-cycle:
- // Clean up any expired block fetches
- for hash, announce := range pending {
- if time.Since(announce.time) > notifyFetchTimeout {
- delete(pending, hash)
- }
- }
- // Check if any notified blocks failed to arrive
- for hash, all := range announces {
- if time.Since(all[0].time) > notifyArriveTimeout {
- announce := all[rand.Intn(len(all))]
- if !pm.chainman.HasBlock(hash) {
- request[announce.peer] = append(request[announce.peer], hash)
- pending[hash] = announce
- }
- delete(announces, hash)
- }
- }
- if len(request) == 0 {
- break
- }
- // Send out all block requests
- for peer, hashes := range request {
- glog.V(logger.Debug).Infof("Explicitly fetching %d blocks from %s", len(hashes), peer.id)
- go peer.requestBlocks(hashes)
- }
- request = make(map[*peer][]common.Hash)
-
- case filter := <-pm.newBlockCh:
- // Blocks arrived, extract any explicit fetches, return all else
- var blocks types.Blocks
- select {
- case blocks = <-filter:
- case <-pm.quitSync:
- return
- }
-
- explicit, download := []*types.Block{}, []*types.Block{}
- for _, block := range blocks {
- hash := block.Hash()
-
- // Filter explicitly requested blocks from hash announcements
- if _, ok := pending[hash]; ok {
- // Discard if already imported by other means
- if !pm.chainman.HasBlock(hash) {
- explicit = append(explicit, block)
- } else {
- delete(pending, hash)
- }
- } else {
- download = append(download, block)
- }
- }
-
- select {
- case filter <- download:
- case <-pm.quitSync:
- return
- }
- // Create a closure with the retrieved blocks and origin peers
- peers := make([]*peer, 0, len(explicit))
- blocks = make([]*types.Block, 0, len(explicit))
- for _, block := range explicit {
- hash := block.Hash()
- if announce := pending[hash]; announce != nil {
- // Drop the block if it surely cannot fit
- if pm.chainman.HasBlock(hash) || !pm.chainman.HasBlock(block.ParentHash()) {
- // delete(pending, hash) // if we drop, it will re-fetch it, wait for timeout?
- continue
- }
- // Otherwise accumulate for import
- peers = append(peers, announce.peer)
- blocks = append(blocks, block)
- }
- }
- // If any explicit fetches were replied to, import them
- if count := len(blocks); count > 0 {
- glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", len(blocks))
- go func() {
- // Make sure all hashes are cleaned up
- for _, block := range blocks {
- hash := block.Hash()
- defer func() { done <- hash }()
- }
- // Try and actually import the blocks
- for i := 0; i < len(blocks); i++ {
- if err := pm.importBlock(peers[i], blocks[i], nil); err != nil {
- glog.V(logger.Detail).Infof("Failed to import explicitly fetched block: %v", err)
- return
- }
- }
- }()
- }
-
- case <-pm.quitSync:
- return
- }
- }
-}
-
// syncer is responsible for periodically synchronising with the network, both
-// downloading hashes and blocks as well as retrieving cached ones.
+// downloading hashes and blocks, as well as driving the announcement based fetcher.
func (pm *ProtocolManager) syncer() {
- // Abort any pending syncs if we terminate
- defer pm.downloader.Cancel()
+ // Start and ensure cleanup of sync mechanisms
+ pm.fetcher.Start()
+ defer pm.fetcher.Stop()
+ defer pm.downloader.Terminate()
+ // Wait for different events to fire synchronisation operations
forceSync := time.Tick(forceSyncCycle)
for {
select {
@@ -273,8 +145,7 @@ func (pm *ProtocolManager) syncer() {
}
}
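
The reworked syncer owns the fetcher's lifecycle: start it up front, tear it down via defer alongside the downloader's Terminate, and let a forceSyncCycle ticker drive sync rounds even when peer events are quiet. A hedged sketch of that shape, with stand-in types in place of the real fetcher and downloader:

package main

import (
	"fmt"
	"time"
)

type lifecycle interface {
	Start()
	Stop()
}

type noop struct{ name string }

func (n noop) Start() { fmt.Println(n.name, "started") }
func (n noop) Stop()  { fmt.Println(n.name, "stopped") }

// syncer starts the fetcher, guarantees teardown of both sync mechanisms
// via defers, and forces a sync round on every ticker beat until quit.
func syncer(fetcher lifecycle, terminate func(), quit <-chan struct{}) {
	fetcher.Start()
	defer fetcher.Stop()
	defer terminate()

	forceSync := time.NewTicker(50 * time.Millisecond) // stand-in for forceSyncCycle
	defer forceSync.Stop()
	for {
		select {
		case <-forceSync.C:
			fmt.Println("forcing a sync round")
		case <-quit:
			return
		}
	}
}

func main() {
	quit := make(chan struct{})
	done := make(chan struct{})
	go func() {
		syncer(noop{"fetcher"}, func() { fmt.Println("downloader terminated") }, quit)
		close(done)
	}()
	time.Sleep(120 * time.Millisecond)
	close(quit)
	<-done
}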
-// synchronise tries to sync up our local block chain with a remote peer, both
-// adding various sanity checks as well as wrapping it with various log entries.
+// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
// Short circuit if no peers are available
if peer == nil {