author	Péter Szilágyi <peterke@gmail.com>	2015-08-14 22:48:26 +0800
committer	Péter Szilágyi <peterke@gmail.com>	2015-08-24 22:57:28 +0800
commit	ca88e18f59af84f34ad67da21fd27a6407eea87c (patch)
tree	d1943d61c1d6992f1eea83936e420acd639ecb29 /eth/downloader/downloader.go
parent	42f44dda5468000b3b2c005ec485529bc5da3674 (diff)
eth: kill off protocol eth/60 in preparation for eth/62
Diffstat (limited to 'eth/downloader/downloader.go')
-rw-r--r--	eth/downloader/downloader.go	397
1 file changed, 3 insertions(+), 394 deletions(-)
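After this commit, the protocol dispatch in syncWithPeer reduces to a two-way switch over the retained versions. A minimal, self-contained sketch of that dispatch pattern follows (syncWith, its signature, and the printed bodies are illustrative stand-ins, not the real downloader methods):

package main

import (
	"errors"
	"fmt"
)

const (
	eth61 = 61 // old protocol: forward, concurrent hash/block retrieval
	eth62 = 62 // new protocol: header/body based retrieval
)

var errBadPeer = errors.New("action from bad peer ignored")

// syncWith mirrors the switch retained in syncWithPeer once eth/60
// support was dropped: known versions dispatch to their sync strategy,
// anything else is rejected as a bad peer.
func syncWith(version int) error {
	switch version {
	case eth61:
		fmt.Println("eth/61: find ancestor, then fetch hashes/blocks forward")
	case eth62:
		fmt.Println("eth/62: header/body based synchronisation")
	default:
		return errBadPeer
	}
	return nil
}

func main() {
	for _, v := range []int{60, 61, 62} {
		fmt.Printf("eth/%d -> %v\n", v, syncWith(v))
	}
}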
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 6a6bce644..b28879ee6 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -18,11 +18,9 @@
package downloader
import (
- "bytes"
"errors"
"math"
"math/big"
- "math/rand"
"sync"
"sync/atomic"
"time"
@@ -37,8 +35,8 @@ import (
)
const (
- eth60 = 60 // Constant to check for old protocol support
- eth61 = 61 // Constant to check for new protocol support
+ eth61 = 61 // Constant to check for old protocol support
+ eth62 = 62 // Constant to check for new protocol support
)
var (
@@ -324,16 +322,8 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
glog.V(logger.Debug).Infof("Synchronizing with the network using: %s, eth/%d", p.id, p.version)
switch p.version {
- case eth60:
- // Old eth/60 version, use reverse hash retrieval algorithm
- if err = d.fetchHashes60(p, hash); err != nil {
- return err
- }
- if err = d.fetchBlocks60(); err != nil {
- return err
- }
case eth61:
- // New eth/61, use forward, concurrent hash and block retrieval algorithm
+ // Old eth/61, use forward, concurrent hash and block retrieval algorithm
number, err := d.findAncestor(p)
if err != nil {
return err
@@ -355,8 +345,6 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
glog.V(logger.Error).Infof("Unsupported eth protocol: %d", p.version)
return errBadPeer
}
- glog.V(logger.Debug).Infoln("Synchronization completed")
-
return nil
}
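The next hunk removes the eth/60 algorithms wholesale. fetchHashes60, below, drives its backward hash retrieval with a resettable timeout timer inside a select loop, switching to another peer when the active one stalls. Stripped of the downloader specifics, the pattern looks roughly like this (the channel, names, and durations are illustrative, not the downloader's actual API):

package main

import (
	"fmt"
	"time"
)

// fetchWithRetry sketches the timeout/retry loop of the removed
// fetchHashes60: issue a request, arm a deadline, and either consume
// the reply or time out and retry.
func fetchWithRetry(replies <-chan string, ttl time.Duration, retries int) (string, error) {
	timeout := time.NewTimer(0)
	defer timeout.Stop()
	<-timeout.C // drain the initial fire so Reset starts a clean cycle

	for i := 0; i <= retries; i++ {
		// (re)issue the request here, then arm the response deadline
		timeout.Reset(ttl)
		select {
		case r := <-replies:
			timeout.Stop()
			return r, nil
		case <-timeout.C:
			fmt.Println("request timed out, retrying")
		}
	}
	return "", fmt.Errorf("no reply after %d retries", retries)
}

func main() {
	replies := make(chan string, 1)
	go func() { time.Sleep(50 * time.Millisecond); replies <- "hashes" }()
	fmt.Println(fetchWithRetry(replies, 200*time.Millisecond, 3))
}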
@@ -385,299 +373,6 @@ func (d *Downloader) Terminate() {
d.cancel()
}
-// fetchHashes60 starts retrieving hashes backwards from a specific peer and hash,
-// up until it finds a common ancestor. If the source peer times out, alternative
-// ones are tried for continuation.
-func (d *Downloader) fetchHashes60(p *peer, h common.Hash) error {
- var (
- start = time.Now()
- active = p // peer currently serving the hash requests
- head = common.Hash{} // common and last hash
-
- timeout = time.NewTimer(0) // timer to dump a non-responsive active peer
- attempted = make(map[string]bool) // attempted peers will help with retries
- crossTicker = time.NewTicker(crossCheckCycle) // ticker to periodically check expired cross checks
- )
- defer crossTicker.Stop()
- defer timeout.Stop()
-
- glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", h[:4], p.id)
- <-timeout.C // timeout channel should be initially empty.
-
- getHashes := func(from common.Hash) {
- go active.getRelHashes(from)
- timeout.Reset(hashTTL)
- }
-
- // Add the hash to the queue, and start hash retrieval.
- d.queue.Insert([]common.Hash{h}, false)
- getHashes(h)
-
- attempted[p.id] = true
- for finished := false; !finished; {
- select {
- case <-d.cancelCh:
- return errCancelHashFetch
-
- case hashPack := <-d.hashCh:
- // Make sure the active peer is giving us the hashes
- if hashPack.peerId != active.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
- break
- }
- timeout.Stop()
-
- // Make sure the peer actually gave something valid
- if len(hashPack.hashes) == 0 {
- glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set", active.id)
- return errEmptyHashSet
- }
- for index, hash := range hashPack.hashes {
- if d.banned.Has(hash) {
- glog.V(logger.Debug).Infof("Peer (%s) sent a known invalid chain", active.id)
-
- d.queue.Insert(hashPack.hashes[:index+1], false)
- if err := d.banBlocks(active.id, hash); err != nil {
- glog.V(logger.Debug).Infof("Failed to ban batch of blocks: %v", err)
- }
- return errInvalidChain
- }
- }
- // Determine if we're done fetching hashes (queue up all pending), and continue if not done
- done, index := false, 0
- for index, head = range hashPack.hashes {
- if d.hasBlock(head) || d.queue.GetBlock(head) != nil {
- glog.V(logger.Debug).Infof("Found common hash %x", head[:4])
- hashPack.hashes = hashPack.hashes[:index]
- done = true
- break
- }
- }
- // Insert all the new hashes, but only continue if we got something useful
- inserts := d.queue.Insert(hashPack.hashes, false)
- if len(inserts) == 0 && !done {
- glog.V(logger.Debug).Infof("Peer (%s) responded with stale hashes", active.id)
- return errBadPeer
- }
- if !done {
- // Check that the peer is not stalling the sync
- if len(inserts) < MinHashFetch {
- return errStallingPeer
- }
- // Try and fetch a random block to verify the hash batch
- // Skip the last hash as the cross check races with the next hash fetch
- cross := rand.Intn(len(inserts) - 1)
- origin, parent := inserts[cross], inserts[cross+1]
- glog.V(logger.Detail).Infof("Cross checking (%s) with %x/%x", active.id, origin, parent)
-
- d.checks[origin] = &crossCheck{
- expire: time.Now().Add(blockSoftTTL),
- parent: parent,
- }
- go active.getBlocks([]common.Hash{origin})
-
- // Also fetch a fresh batch of hashes
- getHashes(head)
- continue
- }
- // We're done, prepare the download cache and proceed pulling the blocks
- offset := uint64(0)
- if block := d.getBlock(head); block != nil {
- offset = block.NumberU64() + 1
- }
- d.queue.Prepare(offset)
- finished = true
-
- case blockPack := <-d.blockCh:
- // Cross check the block with the random verifications
- if blockPack.peerId != active.id || len(blockPack.blocks) != 1 {
- continue
- }
- block := blockPack.blocks[0]
- if check, ok := d.checks[block.Hash()]; ok {
- if block.ParentHash() != check.parent {
- return errCrossCheckFailed
- }
- delete(d.checks, block.Hash())
- }
-
- case <-crossTicker.C:
- // Iterate over all the cross checks and fail the hash chain if they're not verified
- for hash, check := range d.checks {
- if time.Now().After(check.expire) {
- glog.V(logger.Debug).Infof("Cross check timeout for %x", hash)
- return errCrossCheckFailed
- }
- }
-
- case <-timeout.C:
- glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request", p.id)
-
- var p *peer // p will be set if a peer can be found
- // Attempt to find a new peer by checking whether a peer's best hash is in our
- // already fetched hash list. This can't guarantee 100% correctness, but does
- // a fair job: a match is always genuine, though a suitable peer may be missed.
- for _, peer := range d.peers.AllPeers() {
- if d.queue.Has(peer.head) && !attempted[peer.id] {
- p = peer
- break
- }
- }
- // If all peers have been tried, or the head is the zero hash, abort the
- // process entirely.
- if p == nil || (head == common.Hash{}) {
- return errTimeout
- }
- // Set p as the active peer. This will invalidate any hashes that may be returned
- // by our previous (delayed) peer.
- active = p
- getHashes(head)
- glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)", p.id)
- }
- }
- glog.V(logger.Debug).Infof("Downloaded hashes (%d) in %v", d.queue.Pending(), time.Since(start))
-
- return nil
-}
-
-// fetchBlocks60 iteratively downloads the entire scheduled block chain, taking
-// any available peers, reserving a chunk of blocks for each, waiting for delivery,
-// and periodically checking for timeouts.
-func (d *Downloader) fetchBlocks60() error {
- glog.V(logger.Debug).Infoln("Downloading", d.queue.Pending(), "block(s)")
- start := time.Now()
-
- // Start a ticker to continue throttled downloads and check for bad peers
- ticker := time.NewTicker(20 * time.Millisecond)
- defer ticker.Stop()
-
-out:
- for {
- select {
- case <-d.cancelCh:
- return errCancelBlockFetch
-
- case <-d.hashCh:
- // Out of bounds hashes received, ignore them
-
- case blockPack := <-d.blockCh:
- // Short circuit if it's a stale cross check
- if len(blockPack.blocks) == 1 {
- block := blockPack.blocks[0]
- if _, ok := d.checks[block.Hash()]; ok {
- delete(d.checks, block.Hash())
- break
- }
- }
- // If the peer was previously banned and failed to deliver its pack
- // in a reasonable time frame, ignore its message.
- if peer := d.peers.Peer(blockPack.peerId); peer != nil {
- // Deliver the received chunk of blocks, and demote in case of errors
- err := d.queue.Deliver(blockPack.peerId, blockPack.blocks)
- switch err {
- case nil:
- // If no blocks were delivered, demote the peer (need the delivery above)
- if len(blockPack.blocks) == 0 {
- peer.Demote()
- peer.SetIdle()
- glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
- break
- }
- // All was successful, promote the peer and potentially start processing
- peer.Promote()
- peer.SetIdle()
- glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks))
- go d.process()
-
- case errInvalidChain:
- // The hash chain is invalid (blocks are not ordered properly), abort
- return err
-
- case errNoFetchesPending:
- // Peer probably timed out with its delivery but came through
- // in the end, demote, but allow to pull from this peer.
- peer.Demote()
- peer.SetIdle()
- glog.V(logger.Detail).Infof("%s: out of bound delivery", peer)
-
- case errStaleDelivery:
- // Delivered something completely different than requested, usually
- // caused by a timeout and delivery during a new sync cycle.
- // Don't set it to idle as the original request should still be
- // in flight.
- peer.Demote()
- glog.V(logger.Detail).Infof("%s: stale delivery", peer)
-
- default:
- // Peer did something semi-useful, demote but keep it around
- peer.Demote()
- peer.SetIdle()
- glog.V(logger.Detail).Infof("%s: delivery partially failed: %v", peer, err)
- go d.process()
- }
- }
-
- case <-ticker.C:
- // Short circuit if we lost all our peers
- if d.peers.Len() == 0 {
- return errNoPeers
- }
- // Check for block request timeouts and demote the responsible peers
- badPeers := d.queue.Expire(blockHardTTL)
- for _, pid := range badPeers {
- if peer := d.peers.Peer(pid); peer != nil {
- peer.Demote()
- glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
- }
- }
- // If there are unrequested hashes left start fetching from the available peers
- if d.queue.Pending() > 0 {
- // Throttle the download if block cache is full and waiting processing
- if d.queue.Throttle() {
- break
- }
- // Send a download request to all idle peers, until throttled
- idlePeers := d.peers.IdlePeers()
- for _, peer := range idlePeers {
- // Short circuit if throttling activated since above
- if d.queue.Throttle() {
- break
- }
- // Reserve a chunk of hashes for the peer. If nil is returned, no
- // hashes are currently available for fetching.
- request := d.queue.Reserve(peer, peer.Capacity())
- if request == nil {
- continue
- }
- if glog.V(logger.Detail) {
- glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
- }
- // Fetch the chunk and check for error. If the peer was somehow
- // already fetching a chunk due to a bug, it will be returned to
- // the queue
- if err := peer.Fetch(request); err != nil {
- glog.V(logger.Error).Infof("Peer %s received double work", peer.id)
- d.queue.Cancel(request)
- }
- }
- // Make sure that we have peers available for fetching. If all peers have been tried
- // and all failed throw an error
- if d.queue.InFlight() == 0 {
- return errPeersUnavailable
- }
-
- } else if d.queue.InFlight() == 0 {
- // When the queue is empty and nothing is in flight, we can
- // safely assume we're done. Another part of the process will check
- // for parent errors and re-request anything that's missing.
- break out
- }
- }
- }
- glog.V(logger.Detail).Infoln("Downloaded block(s) in", time.Since(start))
- return nil
-}
-
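The fetch loop above hands out work on every tick: each idle peer reserves a chunk sized to its measured capacity, and the loop stops early once the block cache fills up and throttling kicks in. A toy sketch of that reservation step (the pending/cacheRoom bookkeeping is simplified; the real code delegates all of this to d.queue):

package main

import "fmt"

// peer is an illustrative stand-in for the downloader's peer type.
type peer struct {
	id       string
	capacity int
}

// reserveForIdle assigns each idle peer a chunk bounded by its capacity,
// the remaining pending work, and the remaining cache room (throttling).
func reserveForIdle(pending, cacheRoom int, idle []peer) map[string]int {
	assigned := make(map[string]int)
	for _, p := range idle {
		if pending == 0 || cacheRoom == 0 { // nothing left, or throttled
			break
		}
		chunk := p.capacity
		if chunk > pending {
			chunk = pending
		}
		if chunk > cacheRoom {
			chunk = cacheRoom
		}
		assigned[p.id] = chunk
		pending -= chunk
		cacheRoom -= chunk
	}
	return assigned
}

func main() {
	idle := []peer{{"a", 128}, {"b", 64}, {"c", 32}}
	fmt.Println(reserveForIdle(150, 160, idle))
}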
// findAncestor tries to locate the common ancestor block of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N blocks should already get us a match.
@@ -1023,92 +718,6 @@ func (d *Downloader) fetchBlocks(from uint64) error {
}
}
-// banBlocks retrieves a batch of blocks from a peer feeding us invalid hashes,
-// and bans the head of the retrieved batch.
-//
-// This method only fetches a single batch, as the goal is not to ban an entire
-// (potentially long) invalid chain - wasting a lot of time in the meanwhile -
-// but rather to gradually build up a blacklist if the peer keeps reconnecting.
-func (d *Downloader) banBlocks(peerId string, head common.Hash) error {
- glog.V(logger.Debug).Infof("Banning a batch out of %d blocks from %s", d.queue.Pending(), peerId)
-
- // Ask the peer being banned for a batch of blocks from the banning point
- peer := d.peers.Peer(peerId)
- if peer == nil {
- return nil
- }
- request := d.queue.Reserve(peer, MaxBlockFetch)
- if request == nil {
- return nil
- }
- if err := peer.Fetch(request); err != nil {
- return err
- }
- // Wait a bit for the reply to arrive, and perform the ban if it does
- timeout := time.After(blockHardTTL)
- for {
- select {
- case <-d.cancelCh:
- return errCancelBlockFetch
-
- case <-timeout:
- return errTimeout
-
- case <-d.hashCh:
- // Out of bounds hashes received, ignore them
-
- case blockPack := <-d.blockCh:
- blocks := blockPack.blocks
-
- // Short circuit if it's a stale cross check
- if len(blocks) == 1 {
- block := blocks[0]
- if _, ok := d.checks[block.Hash()]; ok {
- delete(d.checks, block.Hash())
- break
- }
- }
- // Short circuit if it's not from the peer being banned
- if blockPack.peerId != peerId {
- break
- }
- // Short circuit if no blocks were returned
- if len(blocks) == 0 {
- return errors.New("no blocks returned to ban")
- }
- // Reconstruct the original chain order and ensure we're banning the correct blocks
- types.BlockBy(types.Number).Sort(blocks)
- if bytes.Compare(blocks[0].Hash().Bytes(), head.Bytes()) != 0 {
- return errors.New("head block not the banned one")
- }
- index := 0
- for _, block := range blocks[1:] {
- if bytes.Compare(block.ParentHash().Bytes(), blocks[index].Hash().Bytes()) != 0 {
- break
- }
- index++
- }
- // Ban the head hash and phase out any excess
- d.banned.Add(blocks[index].Hash())
- for d.banned.Size() > maxBannedHashes {
- var evacuate common.Hash
-
- d.banned.Each(func(item interface{}) bool {
- // Skip any hard coded bans
- if core.BadHashes[item.(common.Hash)] {
- return true
- }
- evacuate = item.(common.Hash)
- return false
- })
- d.banned.Remove(evacuate)
- }
- glog.V(logger.Debug).Infof("Banned %d blocks from: %s", index+1, peerId)
- return nil
- }
- }
-}
-
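The eviction logic above caps the blacklist at maxBannedHashes while always skipping the hard coded bans in core.BadHashes. A self-contained sketch of such a bounded ban set (the type and field names are illustrative, not the downloader's):

package main

import "fmt"

// banlist sketches the bounded blacklist maintained by the removed banBlocks:
// hashes are added as bad chains are discovered, and once the set outgrows
// maxBanned, an arbitrary non-protected entry is evicted.
type banlist struct {
	protected map[string]bool // analogue of the hard coded core.BadHashes
	banned    map[string]bool
	maxBanned int
}

func (b *banlist) add(hash string) {
	b.banned[hash] = true
	for len(b.banned) > b.maxBanned {
		evicted := false
		for h := range b.banned {
			if !b.protected[h] { // never evict hard coded bans
				delete(b.banned, h)
				evicted = true
				break
			}
		}
		if !evicted { // everything left is protected; give up
			return
		}
	}
}

func main() {
	b := &banlist{
		protected: map[string]bool{"hardcoded": true},
		banned:    map[string]bool{"hardcoded": true},
		maxBanned: 2,
	}
	b.add("evil-1")
	b.add("evil-2") // forces eviction of a non-protected entry
	fmt.Println(b.banned)
}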
// process takes blocks from the queue and tries to import them into the chain.
//
// The algorithmic flow is as follows: