path: root/eth/downloader/downloader.go
author    Felix Lange <fjl@twurst.com>  2015-08-27 06:03:59 +0800
committer Felix Lange <fjl@twurst.com>  2015-08-27 06:03:59 +0800
commit    6ec13e7e2bab1ebdb580819a48629055bbbb5fb3 (patch)
tree      cd23c3deac1a41b34a5157c1f7c5361ca56b4137 /eth/downloader/downloader.go
parent    79b644c7a35bbc835b7e78ddf8a31c37e69b0784 (diff)
parent    17f65cd1e5e0fea6e4f7b96c60767aaa0ada366d (diff)
Merge pull request #1701 from karalabe/eth62-sync-rebase
eth: implement eth/62 synchronization logic
Diffstat (limited to 'eth/downloader/downloader.go')
-rw-r--r--  eth/downloader/downloader.go | 1026
1 file changed, 580 insertions(+), 446 deletions(-)
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index e3e22a784..574f2ba15 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -18,61 +18,64 @@
package downloader
import (
- "bytes"
"errors"
"math"
"math/big"
- "math/rand"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
- "gopkg.in/fatih/set.v0"
)
const (
- eth60 = 60 // Constant to check for old protocol support
- eth61 = 61 // Constant to check for new protocol support
+ eth61 = 61 // Constant to check for old protocol support
+ eth62 = 62 // Constant to check for new protocol support
)
var (
- MinHashFetch = 512 // Minimum amount of hashes to not consider a peer stalling
- MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request
- MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request
-
- hashTTL = 5 * time.Second // Time it takes for a hash request to time out
- blockSoftTTL = 3 * time.Second // Request completion threshold for increasing or decreasing a peer's bandwidth
- blockHardTTL = 3 * blockSoftTTL // Maximum time allowance before a block request is considered expired
- crossCheckCycle = time.Second // Period after which to check for expired cross checks
-
- maxQueuedHashes = 256 * 1024 // Maximum number of hashes to queue for import (DOS protection)
- maxBannedHashes = 4096 // Number of bannable hashes before phasing old ones out
- maxBlockProcess = 256 // Number of blocks to import at once into the chain
+ MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request
+ MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request
+ MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request
+ MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request
+ MaxStateFetch = 384 // Amount of node state values to allow fetching per request
+ MaxReceiptsFetch = 384 // Amount of transaction receipts to allow fetching per request
+
+ hashTTL = 5 * time.Second // [eth/61] Time it takes for a hash request to time out
+ blockSoftTTL = 3 * time.Second // [eth/61] Request completion threshold for increasing or decreasing a peer's bandwidth
+ blockHardTTL = 3 * blockSoftTTL // [eth/61] Maximum time allowance before a block request is considered expired
+ headerTTL = 5 * time.Second // [eth/62] Time it takes for a header request to time out
+ bodySoftTTL = 3 * time.Second // [eth/62] Request completion threshold for increasing or decreasing a peer's bandwidth
+ bodyHardTTL = 3 * bodySoftTTL // [eth/62] Maximum time allowance before a block body request is considered expired
+
+ maxQueuedHashes = 256 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection)
+ maxQueuedHeaders = 256 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
+ maxBlockProcess = 256 // Number of blocks to import at once into the chain
)
var (
- errBusy = errors.New("busy")
- errUnknownPeer = errors.New("peer is unknown or unhealthy")
- errBadPeer = errors.New("action from bad peer ignored")
- errStallingPeer = errors.New("peer is stalling")
- errBannedHead = errors.New("peer head hash already banned")
- errNoPeers = errors.New("no peers to keep download active")
- errPendingQueue = errors.New("pending items in queue")
- errTimeout = errors.New("timeout")
- errEmptyHashSet = errors.New("empty hash set by peer")
- errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
- errAlreadyInPool = errors.New("hash already in pool")
- errInvalidChain = errors.New("retrieved hash chain is invalid")
- errCrossCheckFailed = errors.New("block cross-check failed")
- errCancelHashFetch = errors.New("hash fetching canceled (requested)")
- errCancelBlockFetch = errors.New("block downloading canceled (requested)")
- errNoSyncActive = errors.New("no sync active")
+ errBusy = errors.New("busy")
+ errUnknownPeer = errors.New("peer is unknown or unhealthy")
+ errBadPeer = errors.New("action from bad peer ignored")
+ errStallingPeer = errors.New("peer is stalling")
+ errNoPeers = errors.New("no peers to keep download active")
+ errPendingQueue = errors.New("pending items in queue")
+ errTimeout = errors.New("timeout")
+ errEmptyHashSet = errors.New("empty hash set by peer")
+ errEmptyHeaderSet = errors.New("empty header set by peer")
+ errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
+ errAlreadyInPool = errors.New("hash already in pool")
+ errInvalidChain = errors.New("retrieved hash chain is invalid")
+ errInvalidBody = errors.New("retrieved block body is invalid")
+ errCancelHashFetch = errors.New("hash fetching canceled (requested)")
+ errCancelBlockFetch = errors.New("block downloading canceled (requested)")
+ errCancelHeaderFetch = errors.New("block header fetching canceled (requested)")
+ errCancelBodyFetch = errors.New("block body downloading canceled (requested)")
+ errNoSyncActive = errors.New("no sync active")
)
// hashCheckFn is a callback type for verifying a hash's presence in the local chain.
@@ -90,28 +93,36 @@ type chainInsertFn func(types.Blocks) (int, error)
// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)
+// hashPack is a batch of block hashes returned by a peer (eth/61).
+type hashPack struct {
+ peerId string
+ hashes []common.Hash
+}
+
+// blockPack is a batch of blocks returned by a peer (eth/61).
type blockPack struct {
peerId string
blocks []*types.Block
}
-type hashPack struct {
- peerId string
- hashes []common.Hash
+// headerPack is a batch of block headers returned by a peer.
+type headerPack struct {
+ peerId string
+ headers []*types.Header
}
-type crossCheck struct {
- expire time.Time
- parent common.Hash
+// bodyPack is a batch of block bodies returned by a peer.
+type bodyPack struct {
+ peerId string
+ transactions [][]*types.Transaction
+ uncles [][]*types.Header
}
type Downloader struct {
mux *event.TypeMux
- queue *queue // Scheduler for selecting the hashes to download
- peers *peerSet // Set of active peers from which download can proceed
- checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain
- banned *set.Set // Set of hashes we've received and banned
+ queue *queue // Scheduler for selecting the hashes to download
+ peers *peerSet // Set of active peers from which download can proceed
interrupt int32 // Atomic boolean to signal termination
@@ -136,12 +147,18 @@ type Downloader struct {
// Channels
newPeerCh chan *peer
- hashCh chan hashPack // Channel receiving inbound hashes
- blockCh chan blockPack // Channel receiving inbound blocks
- processCh chan bool // Channel to signal the block fetcher of new or finished work
+ hashCh chan hashPack // [eth/61] Channel receiving inbound hashes
+ blockCh chan blockPack // [eth/61] Channel receiving inbound blocks
+ headerCh chan headerPack // [eth/62] Channel receiving inbound block headers
+ bodyCh chan bodyPack // [eth/62] Channel receiving inbound block bodies
+ processCh chan bool // Channel to signal the block fetcher of new or finished work
cancelCh chan struct{} // Channel to cancel mid-flight syncs
cancelLock sync.RWMutex // Lock to protect the cancel channel in delivers
+
+ // Testing hooks
+ bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch
+ chainInsertHook func([]*Block) // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}
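
The two hooks added above exist purely so tests can observe body fetches and chain insertions without reaching into the sync internals. A minimal sketch of the pattern, with illustrative names (fetcher, fetchHook) rather than the downloader's actual types:

package main

import "fmt"

// fetcher mimics a component exposing a test hook, in the spirit of
// bodyFetchHook/chainInsertHook above. The field and types are illustrative.
type fetcher struct {
	fetchHook func(items []string) // called before each fetch, nil outside tests
}

func (f *fetcher) fetch(items []string) {
	if f.fetchHook != nil {
		f.fetchHook(items) // let tests observe the request
	}
	// ... real fetch logic would go here ...
}

func main() {
	var seen [][]string
	f := &fetcher{fetchHook: func(items []string) {
		seen = append(seen, items) // record what was requested
	}}
	f.fetch([]string{"header-1", "header-2"})
	fmt.Println(seen) // [[header-1 header-2]]
}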
// Block is an origin-tagged blockchain block.
@@ -152,8 +169,7 @@ type Block struct {
// New creates a new downloader to fetch hashes and blocks from remote peers.
func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, headBlock headRetrievalFn, insertChain chainInsertFn, dropPeer peerDropFn) *Downloader {
- // Create the base downloader
- downloader := &Downloader{
+ return &Downloader{
mux: mux,
queue: newQueue(),
peers: newPeerSet(),
@@ -165,14 +181,10 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, he
newPeerCh: make(chan *peer, 1),
hashCh: make(chan hashPack, 1),
blockCh: make(chan blockPack, 1),
+ headerCh: make(chan headerPack, 1),
+ bodyCh: make(chan bodyPack, 1),
processCh: make(chan bool, 1),
}
- // Inject all the known bad hashes
- downloader.banned = set.New()
- for hash, _ := range core.BadHashes {
- downloader.banned.Add(hash)
- }
- return downloader
}
// Stats retrieves the current status of the downloader.
@@ -205,15 +217,12 @@ func (d *Downloader) Synchronising() bool {
// RegisterPeer injects a new download peer into the set of block source to be
// used for fetching hashes and blocks from.
-func (d *Downloader) RegisterPeer(id string, version int, head common.Hash, getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn) error {
- // If the peer wants to send a banned hash, reject
- if d.banned.Has(head) {
- glog.V(logger.Debug).Infoln("Register rejected, head hash banned:", id)
- return errBannedHead
- }
- // Otherwise try to construct and register the peer
+func (d *Downloader) RegisterPeer(id string, version int, head common.Hash,
+ getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
+ getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn) error {
+
glog.V(logger.Detail).Infoln("Registering peer", id)
- if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks)); err != nil {
+ if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks, getRelHeaders, getAbsHeaders, getBlockBodies)); err != nil {
glog.V(logger.Error).Infoln("Register failed:", err)
return err
}
@@ -234,7 +243,7 @@ func (d *Downloader) UnregisterPeer(id string) error {
// Synchronise tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int) {
- glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head 0x%x, TD %v", id, head[:4], td)
+ glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td)
switch err := d.synchronise(id, head, td); err {
case nil:
@@ -243,7 +252,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int) {
case errBusy:
glog.V(logger.Detail).Infof("Synchronisation already in progress")
- case errTimeout, errBadPeer, errStallingPeer, errBannedHead, errEmptyHashSet, errPeersUnavailable, errInvalidChain, errCrossCheckFailed:
+ case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidChain:
glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
d.dropPeer(id)
@@ -269,10 +278,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int) error
}
defer atomic.StoreInt32(&d.synchronising, 0)
- // If the head hash is banned, terminate immediately
- if d.banned.Has(hash) {
- return errBannedHead
- }
// Post a user notification of the sync (only once per session)
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
glog.V(logger.Info).Infoln("Block synchronisation started")
@@ -284,7 +289,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int) error
// Reset the queue and peer set to clean any internal leftover state
d.queue.Reset()
d.peers.Reset()
- d.checks = make(map[common.Hash]*crossCheck)
// Create cancel channel for aborting mid-flight
d.cancelLock.Lock()
@@ -319,25 +323,37 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
}
}()
- glog.V(logger.Debug).Infof("Synchronizing with the network using: %s, eth/%d", p.id, p.version)
- switch p.version {
- case eth60:
- // Old eth/60 version, use reverse hash retrieval algorithm
- if err = d.fetchHashes60(p, hash); err != nil {
+ glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
+ defer glog.V(logger.Debug).Infof("Synchronisation terminated")
+
+ switch {
+ case p.version == eth61:
+ // Old eth/61, use forward, concurrent hash and block retrieval algorithm
+ number, err := d.findAncestor61(p)
+ if err != nil {
return err
}
- if err = d.fetchBlocks60(); err != nil {
+ errc := make(chan error, 2)
+ go func() { errc <- d.fetchHashes61(p, td, number+1) }()
+ go func() { errc <- d.fetchBlocks61(number + 1) }()
+
+ // If any fetcher fails, cancel the other
+ if err := <-errc; err != nil {
+ d.cancel()
+ <-errc
return err
}
- case eth61:
- // New eth/61, use forward, concurrent hash and block retrieval algorithm
+ return <-errc
+
+ case p.version >= eth62:
+ // New eth/62, use forward, concurrent header and block body retrieval algorithm
number, err := d.findAncestor(p)
if err != nil {
return err
}
errc := make(chan error, 2)
- go func() { errc <- d.fetchHashes(p, td, number+1) }()
- go func() { errc <- d.fetchBlocks(number + 1) }()
+ go func() { errc <- d.fetchHeaders(p, td, number+1) }()
+ go func() { errc <- d.fetchBodies(number + 1) }()
// If any fetcher fails, cancel the other
if err := <-errc; err != nil {
@@ -352,8 +368,6 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
glog.V(logger.Error).Infof("Unsupported eth protocol: %d", p.version)
return errBadPeer
}
- glog.V(logger.Debug).Infoln("Synchronization completed")
-
return nil
}
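
The new sync path runs the header/hash fetcher and the body/block fetcher as two goroutines sharing a buffered error channel, cancelling the survivor when either fails. A standalone sketch of that coordination, under illustrative names (runPair, fetchA, fetchB):

package main

import (
	"errors"
	"fmt"
)

// runPair starts both fetchers; if either fails, it cancels and drains the
// other before returning the first error, mirroring the errc logic above.
func runPair(fetchA, fetchB func(cancel <-chan struct{}) error) error {
	cancel := make(chan struct{})
	errc := make(chan error, 2) // buffered so neither goroutine leaks

	go func() { errc <- fetchA(cancel) }()
	go func() { errc <- fetchB(cancel) }()

	if err := <-errc; err != nil {
		close(cancel) // tell the other fetcher to stop
		<-errc        // wait for it to actually finish
		return err
	}
	return <-errc // propagate the second fetcher's result
}

func main() {
	err := runPair(
		func(<-chan struct{}) error { return nil },
		func(<-chan struct{}) error { return errors.New("body fetch failed") },
	)
	fmt.Println(err)
}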
@@ -382,207 +396,274 @@ func (d *Downloader) Terminate() {
d.cancel()
}
-// fetchHashes60 starts retrieving hashes backwards from a specific peer and hash,
-// up until it finds a common ancestor. If the source peer times out, alternative
-// ones are tried for continuation.
-func (d *Downloader) fetchHashes60(p *peer, h common.Hash) error {
- var (
- start = time.Now()
- active = p // active peer will help determine the current active peer
- head = common.Hash{} // common and last hash
-
- timeout = time.NewTimer(0) // timer to dump a non-responsive active peer
- attempted = make(map[string]bool) // attempted peers will help with retries
- crossTicker = time.NewTicker(crossCheckCycle) // ticker to periodically check expired cross checks
- )
- defer crossTicker.Stop()
- defer timeout.Stop()
-
- glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", h[:4], p.id)
- <-timeout.C // timeout channel should be initially empty.
+// findAncestor61 tries to locate the common ancestor block of the local chain and
+// a remote peers blockchain. In the general case when our node was in sync and
+// on the correct chain, checking the top N blocks should already get us a match.
+// In the rare scenario when we ended up on a long reorganization (i.e. none of
+// the head blocks match), we do a binary search to find the common ancestor.
+func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
+ glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
- getHashes := func(from common.Hash) {
- go active.getRelHashes(from)
- timeout.Reset(hashTTL)
+ // Request our head blocks to short circuit ancestor location
+ head := d.headBlock().NumberU64()
+ from := int64(head) - int64(MaxHashFetch) + 1
+ if from < 0 {
+ from = 0
}
+ go p.getAbsHashes(uint64(from), MaxHashFetch)
- // Add the hash to the queue, and start hash retrieval.
- d.queue.Insert([]common.Hash{h}, false)
- getHashes(h)
+ // Wait for the remote response to the head fetch
+ number, hash := uint64(0), common.Hash{}
+ timeout := time.After(hashTTL)
- attempted[p.id] = true
for finished := false; !finished; {
select {
case <-d.cancelCh:
- return errCancelHashFetch
+ return 0, errCancelHashFetch
case hashPack := <-d.hashCh:
- // Make sure the active peer is giving us the hashes
- if hashPack.peerId != active.id {
+ // Discard anything not from the origin peer
+ if hashPack.peerId != p.id {
glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
break
}
- timeout.Stop()
-
// Make sure the peer actually gave something valid
- if len(hashPack.hashes) == 0 {
- glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set", active.id)
- return errEmptyHashSet
+ hashes := hashPack.hashes
+ if len(hashes) == 0 {
+ glog.V(logger.Debug).Infof("%v: empty head hash set", p)
+ return 0, errEmptyHashSet
}
- for index, hash := range hashPack.hashes {
- if d.banned.Has(hash) {
- glog.V(logger.Debug).Infof("Peer (%s) sent a known invalid chain", active.id)
-
- d.queue.Insert(hashPack.hashes[:index+1], false)
- if err := d.banBlocks(active.id, hash); err != nil {
- glog.V(logger.Debug).Infof("Failed to ban batch of blocks: %v", err)
- }
- return errInvalidChain
+ // Check if a common ancestor was found
+ finished = true
+ for i := len(hashes) - 1; i >= 0; i-- {
+ if d.hasBlock(hashes[i]) {
+ number, hash = uint64(from)+uint64(i), hashes[i]
+ break
}
}
- // Determine if we're done fetching hashes (queue up all pending), and continue if not done
- done, index := false, 0
- for index, head = range hashPack.hashes {
- if d.hasBlock(head) || d.queue.GetBlock(head) != nil {
- glog.V(logger.Debug).Infof("Found common hash %x", head[:4])
- hashPack.hashes = hashPack.hashes[:index]
- done = true
+
+ case <-d.blockCh:
+ // Out of bounds blocks received, ignore them
+
+ case <-d.headerCh:
+ // Out of bounds eth/62 block headers received, ignore them
+
+ case <-d.bodyCh:
+ // Out of bounds eth/62 block bodies received, ignore them
+
+ case <-timeout:
+ glog.V(logger.Debug).Infof("%v: head hash timeout", p)
+ return 0, errTimeout
+ }
+ }
+ // If the head fetch already found an ancestor, return
+ if !common.EmptyHash(hash) {
+ glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
+ return number, nil
+ }
+ // Ancestor not found, we need to binary search over our chain
+ start, end := uint64(0), head
+ for start+1 < end {
+ // Split our chain interval in two, and request the hash to cross check
+ check := (start + end) / 2
+
+ timeout := time.After(hashTTL)
+ go p.getAbsHashes(uint64(check), 1)
+
+ // Wait until a reply arrives to this request
+ for arrived := false; !arrived; {
+ select {
+ case <-d.cancelCh:
+ return 0, errCancelHashFetch
+
+ case hashPack := <-d.hashCh:
+ // Discard anything not from the origin peer
+ if hashPack.peerId != p.id {
+ glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
break
}
- }
- // Insert all the new hashes, but only continue if got something useful
- inserts := d.queue.Insert(hashPack.hashes, false)
- if len(inserts) == 0 && !done {
- glog.V(logger.Debug).Infof("Peer (%s) responded with stale hashes", active.id)
- return errBadPeer
- }
- if !done {
- // Check that the peer is not stalling the sync
- if len(inserts) < MinHashFetch {
- return errStallingPeer
+ // Make sure the peer actually gave something valid
+ hashes := hashPack.hashes
+ if len(hashes) != 1 {
+ glog.V(logger.Debug).Infof("%v: invalid search hash set (%d)", p, len(hashes))
+ return 0, errBadPeer
+ }
+ arrived = true
+
+ // Modify the search interval based on the response
+ block := d.getBlock(hashes[0])
+ if block == nil {
+ end = check
+ break
}
- // Try and fetch a random block to verify the hash batch
- // Skip the last hash as the cross check races with the next hash fetch
- cross := rand.Intn(len(inserts) - 1)
- origin, parent := inserts[cross], inserts[cross+1]
- glog.V(logger.Detail).Infof("Cross checking (%s) with %x/%x", active.id, origin, parent)
-
- d.checks[origin] = &crossCheck{
- expire: time.Now().Add(blockSoftTTL),
- parent: parent,
+ if block.NumberU64() != check {
+ glog.V(logger.Debug).Infof("%v: non requested hash #%d [%x…], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check)
+ return 0, errBadPeer
}
- go active.getBlocks([]common.Hash{origin})
+ start = check
- // Also fetch a fresh batch of hashes
- getHashes(head)
- continue
- }
- // We're done, prepare the download cache and proceed pulling the blocks
- offset := uint64(0)
- if block := d.getBlock(head); block != nil {
- offset = block.NumberU64() + 1
- }
- d.queue.Prepare(offset)
- finished = true
+ case <-d.blockCh:
+ // Out of bounds blocks received, ignore them
- case blockPack := <-d.blockCh:
- // Cross check the block with the random verifications
- if blockPack.peerId != active.id || len(blockPack.blocks) != 1 {
- continue
+ case <-d.headerCh:
+ // Out of bounds eth/62 block headers received, ignore them
+
+ case <-d.bodyCh:
+ // Out of bounds eth/62 block bodies received, ignore them
+
+ case <-timeout:
+ glog.V(logger.Debug).Infof("%v: search hash timeout", p)
+ return 0, errTimeout
}
- block := blockPack.blocks[0]
- if check, ok := d.checks[block.Hash()]; ok {
- if block.ParentHash() != check.parent {
- return errCrossCheckFailed
- }
- delete(d.checks, block.Hash())
+ }
+ }
+ return start, nil
+}
+
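The binary search above repeatedly asks the peer for the hash at the midpoint of the remaining interval and checks it against the local chain. A condensed sketch of the same interval logic, with remoteHashAt standing in for the getAbsHashes round trip and hasLocally for getBlock (both names illustrative):

package main

import (
	"fmt"
	"strings"
)

// findAncestor binary-searches for the highest block number shared by the
// local and remote chains, mirroring the start/end interval logic above.
func findAncestor(head uint64, remoteHashAt func(uint64) string, hasLocally func(string) bool) uint64 {
	start, end := uint64(0), head
	for start+1 < end {
		check := (start + end) / 2
		if hasLocally(remoteHashAt(check)) {
			start = check // shared block, ancestor is at or above check
		} else {
			end = check // diverged, ancestor is below check
		}
	}
	return start
}

func main() {
	// Toy chains: local and remote agree up to block 6, diverge afterwards.
	remote := func(n uint64) string {
		if n <= 6 {
			return fmt.Sprintf("shared-%d", n)
		}
		return fmt.Sprintf("remote-%d", n)
	}
	local := func(h string) bool { return strings.HasPrefix(h, "shared-") }
	fmt.Println(findAncestor(10, remote, local)) // 6
}
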
+// fetchHashes61 keeps retrieving hashes from the requested number, until no more
+// are returned, potentially throttling on the way.
+func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
+ glog.V(logger.Debug).Infof("%v: downloading hashes from #%d", p, from)
+
+ // Create a timeout timer, and the associated hash fetcher
+ request := time.Now() // time of the last fetch request
+ timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
+ <-timeout.C // timeout channel should be initially empty
+ defer timeout.Stop()
+
+ getHashes := func(from uint64) {
+ glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from)
+
+ go p.getAbsHashes(from, MaxHashFetch)
+ request = time.Now()
+ timeout.Reset(hashTTL)
+ }
+ // Start pulling hashes, until all are exhausted
+ getHashes(from)
+ gotHashes := false
+
+ for {
+ select {
+ case <-d.cancelCh:
+ return errCancelHashFetch
+
+ case <-d.headerCh:
+ // Out of bounds eth/62 block headers received, ignore them
+
+ case <-d.bodyCh:
+ // Out of bounds eth/62 block bodies received, ignore them
+
+ case hashPack := <-d.hashCh:
+ // Make sure the active peer is giving us the hashes
+ if hashPack.peerId != p.id {
+ glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
+ break
}
+ hashReqTimer.UpdateSince(request)
+ timeout.Stop()
- case <-crossTicker.C:
- // Iterate over all the cross checks and fail the hash chain if they're not verified
- for hash, check := range d.checks {
- if time.Now().After(check.expire) {
- glog.V(logger.Debug).Infof("Cross check timeout for %x", hash)
- return errCrossCheckFailed
+ // If no more hashes are inbound, notify the block fetcher and return
+ if len(hashPack.hashes) == 0 {
+ glog.V(logger.Debug).Infof("%v: no available hashes", p)
+
+ select {
+ case d.processCh <- false:
+ case <-d.cancelCh:
+ }
+ // If no hashes were retrieved at all, the peer violated its TD promise that it had a
+ // better chain compared to ours. The only exception is if its promised blocks were
+ // already imported by other means (e.g. fetcher):
+ //
+ // R <remote peer>, L <local node>: Both at block 10
+ // R: Mine block 11, and propagate it to L
+ // L: Queue block 11 for import
+ // L: Notice that R's head and TD increased compared to ours, start sync
+ // L: Import of block 11 finishes
+ // L: Sync begins, and finds common ancestor at 11
+ // L: Request new hashes up from 11 (R's TD was higher, it must have something)
+ // R: Nothing to give
+ if !gotHashes && td.Cmp(d.headBlock().Td) > 0 {
+ return errStallingPeer
}
+ return nil
}
+ gotHashes = true
- case <-timeout.C:
- glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request", p.id)
-
- var p *peer // p will be set if a peer can be found
- // Attempt to find a new peer by checking inclusion of peers best hash in our
- // already fetched hash list. This can't guarantee 100% correctness but does
- // a fair job. This is always either correct or false incorrect.
- for _, peer := range d.peers.AllPeers() {
- if d.queue.Has(peer.head) && !attempted[peer.id] {
- p = peer
- break
- }
+ // Otherwise insert all the new hashes, aborting in case of junk
+ glog.V(logger.Detail).Infof("%v: inserting %d hashes from #%d", p, len(hashPack.hashes), from)
+
+ inserts := d.queue.Insert61(hashPack.hashes, true)
+ if len(inserts) != len(hashPack.hashes) {
+ glog.V(logger.Debug).Infof("%v: stale hashes", p)
+ return errBadPeer
+ }
+ // Notify the block fetcher of new hashes, but stop if queue is full
+ cont := d.queue.Pending() < maxQueuedHashes
+ select {
+ case d.processCh <- cont:
+ default:
}
- // if all peers have been tried, abort the process entirely or if the hash is
- // the zero hash.
- if p == nil || (head == common.Hash{}) {
- return errTimeout
+ if !cont {
+ return nil
}
- // set p to the active peer. this will invalidate any hashes that may be returned
- // by our previous (delayed) peer.
- active = p
- getHashes(head)
- glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)", p.id)
+ // Queue not yet full, fetch the next batch
+ from += uint64(len(hashPack.hashes))
+ getHashes(from)
+
+ case <-timeout.C:
+ glog.V(logger.Debug).Infof("%v: hash request timed out", p)
+ hashTimeoutMeter.Mark(1)
+ return errTimeout
}
}
- glog.V(logger.Debug).Infof("Downloaded hashes (%d) in %v", d.queue.Pending(), time.Since(start))
-
- return nil
}
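
The fetch loop above pairs every outbound request with a TTL timer reset and a request timestamp, stopping the timer and recording latency when the matching reply arrives. A minimal sketch of that bookkeeping (ttl and the simulated reply channel are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	const ttl = 100 * time.Millisecond

	timeout := time.NewTimer(0)
	<-timeout.C // drain so the first Reset starts from a clean timer
	defer timeout.Stop()

	replies := make(chan string, 1)

	request := time.Now() // time of the last fetch request
	timeout.Reset(ttl)
	go func() { replies <- "hashes 0..511" }() // simulated peer response

	select {
	case reply := <-replies:
		timeout.Stop() // matching reply arrived, disarm the TTL
		fmt.Printf("got %q after %v\n", reply, time.Since(request))
	case <-timeout.C:
		fmt.Println("request timed out, peer considered non-responsive")
	}
}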
-// fetchBlocks60 iteratively downloads the entire schedules block-chain, taking
-// any available peers, reserving a chunk of blocks for each, wait for delivery
-// and periodically checking for timeouts.
-func (d *Downloader) fetchBlocks60() error {
- glog.V(logger.Debug).Infoln("Downloading", d.queue.Pending(), "block(s)")
- start := time.Now()
+// fetchBlocks61 iteratively downloads the scheduled hashes, taking any available
+// peers, reserving a chunk of blocks for each, waiting for delivery and also
+// periodically checking for timeouts.
+func (d *Downloader) fetchBlocks61(from uint64) error {
+ glog.V(logger.Debug).Infof("Downloading blocks from #%d", from)
+ defer glog.V(logger.Debug).Infof("Block download terminated")
- // Start a ticker to continue throttled downloads and check for bad peers
- ticker := time.NewTicker(20 * time.Millisecond)
+ // Create a timeout timer for scheduling expiration tasks
+ ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
-out:
+ update := make(chan struct{}, 1)
+
+ // Prepare the queue and fetch blocks until the hash fetcher's done
+ d.queue.Prepare(from)
+ finished := false
+
for {
select {
case <-d.cancelCh:
return errCancelBlockFetch
- case <-d.hashCh:
- // Out of bounds hashes received, ignore them
+ case <-d.headerCh:
+ // Out of bounds eth/62 block headers received, ignore them
+
+ case <-d.bodyCh:
+ // Out of bounds eth/62 block bodies received, ignore them
case blockPack := <-d.blockCh:
- // Short circuit if it's a stale cross check
- if len(blockPack.blocks) == 1 {
- block := blockPack.blocks[0]
- if _, ok := d.checks[block.Hash()]; ok {
- delete(d.checks, block.Hash())
- break
- }
- }
// If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(blockPack.peerId); peer != nil {
// Deliver the received chunk of blocks, and demote in case of errors
- err := d.queue.Deliver(blockPack.peerId, blockPack.blocks)
+ err := d.queue.Deliver61(blockPack.peerId, blockPack.blocks)
switch err {
case nil:
// If no blocks were delivered, demote the peer (need the delivery above)
if len(blockPack.blocks) == 0 {
peer.Demote()
- peer.SetIdle()
+ peer.SetIdle61()
glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
break
}
// All was successful, promote the peer and potentially start processing
peer.Promote()
- peer.SetIdle()
+ peer.SetIdle61()
glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks))
go d.process()
@@ -594,7 +675,7 @@ out:
// Peer probably timed out with its delivery but came through
// in the end, demote, but allow it to pull from this peer.
peer.Demote()
- peer.SetIdle()
+ peer.SetIdle61()
glog.V(logger.Detail).Infof("%s: out of bound delivery", peer)
case errStaleDelivery:
@@ -608,88 +689,101 @@ out:
default:
// Peer did something semi-useful, demote but keep it around
peer.Demote()
- peer.SetIdle()
+ peer.SetIdle61()
glog.V(logger.Detail).Infof("%s: delivery partially failed: %v", peer, err)
go d.process()
}
}
+ // Blocks arrived, try to update the progress
+ select {
+ case update <- struct{}{}:
+ default:
+ }
+
+ case cont := <-d.processCh:
+ // The hash fetcher sent a continuation flag, check if it's done
+ if !cont {
+ finished = true
+ }
+ // Hashes arrive, try to update the progress
+ select {
+ case update <- struct{}{}:
+ default:
+ }
case <-ticker.C:
+ // Sanity check: update the progress
+ select {
+ case update <- struct{}{}:
+ default:
+ }
+
+ case <-update:
// Short circuit if we lost all our peers
if d.peers.Len() == 0 {
return errNoPeers
}
// Check for block request timeouts and demote the responsible peers
- badPeers := d.queue.Expire(blockHardTTL)
- for _, pid := range badPeers {
+ for _, pid := range d.queue.Expire(blockHardTTL) {
if peer := d.peers.Peer(pid); peer != nil {
peer.Demote()
glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
}
}
- // If there are unrequested hashes left start fetching from the available peers
- if d.queue.Pending() > 0 {
- // Throttle the download if block cache is full and waiting processing
+ // If there's nothing more to fetch, wait or terminate
+ if d.queue.Pending() == 0 {
+ if d.queue.InFlight() == 0 && finished {
+ glog.V(logger.Debug).Infof("Block fetching completed")
+ return nil
+ }
+ break
+ }
+ // Send a download request to all idle peers, until throttled
+ for _, peer := range d.peers.IdlePeers() {
+ // Short circuit if throttling activated
if d.queue.Throttle() {
break
}
- // Send a download request to all idle peers, until throttled
- idlePeers := d.peers.IdlePeers()
- for _, peer := range idlePeers {
- // Short circuit if throttling activated since above
- if d.queue.Throttle() {
- break
- }
- // Get a possible chunk. If nil is returned no chunk
- // could be returned due to no hashes available.
- request := d.queue.Reserve(peer, peer.Capacity())
- if request == nil {
- continue
- }
- if glog.V(logger.Detail) {
- glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
- }
- // Fetch the chunk and check for error. If the peer was somehow
- // already fetching a chunk due to a bug, it will be returned to
- // the queue
- if err := peer.Fetch(request); err != nil {
- glog.V(logger.Error).Infof("Peer %s received double work", peer.id)
- d.queue.Cancel(request)
- }
+ // Reserve a chunk of hashes for a peer. A nil can mean either that
+ // no more hashes are available, or that the peer is known not to
+ // have them.
+ request := d.queue.Reserve61(peer, peer.Capacity())
+ if request == nil {
+ continue
}
- // Make sure that we have peers available for fetching. If all peers have been tried
- // and all failed throw an error
- if d.queue.InFlight() == 0 {
- return errPeersUnavailable
+ if glog.V(logger.Detail) {
+ glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
}
-
- } else if d.queue.InFlight() == 0 {
- // When there are no more queue and no more in flight, We can
- // safely assume we're done. Another part of the process will check
- // for parent errors and will re-request anything that's missing
- break out
+ // Fetch the chunk and make sure any errors return the hashes to the queue
+ if err := peer.Fetch61(request); err != nil {
+ glog.V(logger.Error).Infof("%v: fetch failed, rescheduling", peer)
+ d.queue.Cancel(request)
+ }
+ }
+ // Make sure that we have peers available for fetching. If all peers have been tried
+ // and all failed throw an error
+ if !d.queue.Throttle() && d.queue.InFlight() == 0 {
+ return errPeersUnavailable
}
}
}
- glog.V(logger.Detail).Infoln("Downloaded block(s) in", time.Since(start))
- return nil
}
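
fetchBlocks61 funnels every event source (deliveries, continuation flags, the sanity ticker) through a non-blocking send into a one-slot update channel, so bursts of events collapse into a single scheduling pass. A sketch of that coalescing idiom:

package main

import (
	"fmt"
	"time"
)

func main() {
	update := make(chan struct{}, 1)

	notify := func() {
		select {
		case update <- struct{}{}: // wake the scheduler
		default: // a wakeup is already pending, drop this one
		}
	}

	// Three events arrive back to back...
	notify()
	notify()
	notify()

	// ...but the scheduler runs only once for the whole burst.
	passes := 0
	for {
		select {
		case <-update:
			passes++
		case <-time.After(50 * time.Millisecond):
			fmt.Println("scheduling passes:", passes) // 1
			return
		}
	}
}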
// findAncestor tries to locate the common ancestor block of the local chain and
// a remote peers blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N blocks should already get us a match.
-// In the rare scenario when we ended up on a long soft fork (i.e. none of the
-// head blocks match), we do a binary search to find the common ancestor.
+// In the rare scenario when we ended up on a long reorganization (i.e. none of
+// the head blocks match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peer) (uint64, error) {
glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
- // Request out head blocks to short circuit ancestor location
+ // Request our head blocks to short circuit ancestor location
head := d.headBlock().NumberU64()
- from := int64(head) - int64(MaxHashFetch)
+ from := int64(head) - int64(MaxHeaderFetch) + 1
if from < 0 {
from = 0
}
- go p.getAbsHashes(uint64(from), MaxHashFetch)
+ go p.getAbsHeaders(uint64(from), MaxHeaderFetch, 0, false)
// Wait for the remote response to the head fetch
number, hash := uint64(0), common.Hash{}
@@ -700,38 +794,44 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
case <-d.cancelCh:
return 0, errCancelHashFetch
- case hashPack := <-d.hashCh:
+ case headerPack := <-d.headerCh:
// Discard anything not from the origin peer
- if hashPack.peerId != p.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
+ if headerPack.peerId != p.id {
+ glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", headerPack.peerId)
break
}
// Make sure the peer actually gave something valid
- hashes := hashPack.hashes
- if len(hashes) == 0 {
- glog.V(logger.Debug).Infof("%v: empty head hash set", p)
- return 0, errEmptyHashSet
+ headers := headerPack.headers
+ if len(headers) == 0 {
+ glog.V(logger.Debug).Infof("%v: empty head header set", p)
+ return 0, errEmptyHeaderSet
}
// Check if a common ancestor was found
finished = true
- for i := len(hashes) - 1; i >= 0; i-- {
- if d.hasBlock(hashes[i]) {
- number, hash = uint64(from)+uint64(i), hashes[i]
+ for i := len(headers) - 1; i >= 0; i-- {
+ if d.hasBlock(headers[i].Hash()) {
+ number, hash = headers[i].Number.Uint64(), headers[i].Hash()
break
}
}
+ case <-d.bodyCh:
+ // Out of bounds block bodies received, ignore them
+
+ case <-d.hashCh:
+ // Out of bounds eth/61 hashes received, ignore them
+
case <-d.blockCh:
- // Out of bounds blocks received, ignore them
+ // Out of bounds eth/61 blocks received, ignore them
case <-timeout:
- glog.V(logger.Debug).Infof("%v: head hash timeout", p)
+ glog.V(logger.Debug).Infof("%v: head header timeout", p)
return 0, errTimeout
}
}
// If the head fetch already found an ancestor, return
if !common.EmptyHash(hash) {
- glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x]", p, number, hash[:4])
+ glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
return number, nil
}
// Ancestor not found, we need to binary search over our chain
@@ -741,7 +841,7 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
check := (start + end) / 2
timeout := time.After(hashTTL)
- go p.getAbsHashes(uint64(check), 1)
+ go p.getAbsHeaders(uint64(check), 1, 0, false)
// Wait until a reply arrives to this request
for arrived := false; !arrived; {
@@ -749,37 +849,43 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
case <-d.cancelCh:
return 0, errCancelHashFetch
- case hashPack := <-d.hashCh:
+ case headerPack := <-d.headerCh:
// Discard anything not from the origin peer
- if hashPack.peerId != p.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
+ if headerPack.peerId != p.id {
+ glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", headerPack.peerId)
break
}
// Make sure the peer actually gave something valid
- hashes := hashPack.hashes
- if len(hashes) != 1 {
- glog.V(logger.Debug).Infof("%v: invalid search hash set (%d)", p, len(hashes))
+ headers := headerPack.headers
+ if len(headers) != 1 {
+ glog.V(logger.Debug).Infof("%v: invalid search header set (%d)", p, len(headers))
return 0, errBadPeer
}
arrived = true
// Modify the search interval based on the response
- block := d.getBlock(hashes[0])
+ block := d.getBlock(headers[0].Hash())
if block == nil {
end = check
break
}
if block.NumberU64() != check {
- glog.V(logger.Debug).Infof("%v: non requested hash #%d [%x], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check)
+ glog.V(logger.Debug).Infof("%v: non requested header #%d [%x…], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check)
return 0, errBadPeer
}
start = check
+ case <-d.bodyCh:
+ // Out of bounds block bodies received, ignore them
+
+ case <-d.hashCh:
+ // Out of bounds eth/61 hashes received, ignore them
+
case <-d.blockCh:
- // Out of bounds blocks received, ignore them
+ // Out of bounds eth/61 blocks received, ignore them
case <-timeout:
- glog.V(logger.Debug).Infof("%v: search hash timeout", p)
+ glog.V(logger.Debug).Infof("%v: search header timeout", p)
return 0, errTimeout
}
}
@@ -787,48 +893,58 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
return start, nil
}
-// fetchHashes keeps retrieving hashes from the requested number, until no more
+// fetchHeaders keeps retrieving headers from the requested number, until no more
// are returned, potentially throttling on the way.
-func (d *Downloader) fetchHashes(p *peer, td *big.Int, from uint64) error {
- glog.V(logger.Debug).Infof("%v: downloading hashes from #%d", p, from)
+func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
+ glog.V(logger.Debug).Infof("%v: downloading headers from #%d", p, from)
+ defer glog.V(logger.Debug).Infof("%v: header download terminated", p)
// Create a timeout timer, and the associated hash fetcher
+ request := time.Now() // time of the last fetch request
timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
<-timeout.C // timeout channel should be initially empty
defer timeout.Stop()
- getHashes := func(from uint64) {
- glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from)
+ getHeaders := func(from uint64) {
+ glog.V(logger.Detail).Infof("%v: fetching %d headers from #%d", p, MaxHeaderFetch, from)
- go p.getAbsHashes(from, MaxHashFetch)
- timeout.Reset(hashTTL)
+ go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
+ request = time.Now()
+ timeout.Reset(headerTTL)
}
- // Start pulling hashes, until all are exhausted
- getHashes(from)
- gotHashes := false
+ // Start pulling headers, until all are exhausted
+ getHeaders(from)
+ gotHeaders := false
for {
select {
case <-d.cancelCh:
- return errCancelHashFetch
+ return errCancelHeaderFetch
- case hashPack := <-d.hashCh:
- // Make sure the active peer is giving us the hashes
- if hashPack.peerId != p.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
+ case <-d.hashCh:
+ // Out of bounds eth/61 hashes received, ignore them
+
+ case <-d.blockCh:
+ // Out of bounds eth/61 blocks received, ignore them
+
+ case headerPack := <-d.headerCh:
+ // Make sure the active peer is giving us the headers
+ if headerPack.peerId != p.id {
+ glog.V(logger.Debug).Infof("Received headers from incorrect peer (%s)", headerPack.peerId)
break
}
+ headerReqTimer.UpdateSince(request)
timeout.Stop()
- // If no more hashes are inbound, notify the block fetcher and return
- if len(hashPack.hashes) == 0 {
- glog.V(logger.Debug).Infof("%v: no available hashes", p)
+ // If no more headers are inbound, notify the body fetcher and return
+ if len(headerPack.headers) == 0 {
+ glog.V(logger.Debug).Infof("%v: no available headers", p)
select {
case d.processCh <- false:
case <-d.cancelCh:
}
- // If no hashes were retrieved at all, the peer violated it's TD promise that it had a
+ // If no headers were retrieved at all, the peer violated it's TD promise that it had a
// better chain compared to ours. The only exception is if its promised blocks were
// already imported by other means (e.g. fetcher):
//
@@ -838,25 +954,25 @@ func (d *Downloader) fetchHashes(p *peer, td *big.Int, from uint64) error {
// L: Notice that R's head and TD increased compared to ours, start sync
// L: Import of block 11 finishes
// L: Sync begins, and finds common ancestor at 11
- // L: Request new hashes up from 11 (R's TD was higher, it must have something)
+ // L: Request new headers up from 11 (R's TD was higher, it must have something)
// R: Nothing to give
- if !gotHashes && td.Cmp(d.headBlock().Td) > 0 {
+ if !gotHeaders && td.Cmp(d.headBlock().Td) > 0 {
return errStallingPeer
}
return nil
}
- gotHashes = true
+ gotHeaders = true
- // Otherwise insert all the new hashes, aborting in case of junk
- glog.V(logger.Detail).Infof("%v: inserting %d hashes from #%d", p, len(hashPack.hashes), from)
+ // Otherwise insert all the new headers, aborting in case of junk
+ glog.V(logger.Detail).Infof("%v: inserting %d headers from #%d", p, len(headerPack.headers), from)
- inserts := d.queue.Insert(hashPack.hashes, true)
- if len(inserts) != len(hashPack.hashes) {
- glog.V(logger.Debug).Infof("%v: stale hashes", p)
+ inserts := d.queue.Insert(headerPack.headers)
+ if len(inserts) != len(headerPack.headers) {
+ glog.V(logger.Debug).Infof("%v: stale headers", p)
return errBadPeer
}
- // Notify the block fetcher of new hashes, but stop if queue is full
- cont := d.queue.Pending() < maxQueuedHashes
+ // Notify the block fetcher of new headers, but stop if queue is full
+ cont := d.queue.Pending() < maxQueuedHeaders
select {
case d.processCh <- cont:
default:
@@ -865,22 +981,31 @@ func (d *Downloader) fetchHashes(p *peer, td *big.Int, from uint64) error {
return nil
}
// Queue not yet full, fetch the next batch
- from += uint64(len(hashPack.hashes))
- getHashes(from)
+ from += uint64(len(headerPack.headers))
+ getHeaders(from)
case <-timeout.C:
- glog.V(logger.Debug).Infof("%v: hash request timed out", p)
- return errTimeout
+ // Header retrieval timed out, consider the peer bad and drop
+ glog.V(logger.Debug).Infof("%v: header request timed out", p)
+ headerTimeoutMeter.Mark(1)
+ d.dropPeer(p.id)
+
+ // Finish the sync gracefully instead of dumping the gathered data though
+ select {
+ case d.processCh <- false:
+ default:
+ }
+ return nil
}
}
}
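
Note the changed timeout behaviour above: rather than aborting the sync, a header timeout drops the peer and signals the body fetcher to finish with what it already has. A sketch of that graceful wind-down (dropPeer and processCh are stand-ins for the downloader's fields):

package main

import "fmt"

// headerTimeout drops the offending peer, then sends a final `false` on the
// process channel so the body fetcher completes the work already queued,
// instead of returning an error and discarding the gathered data.
func headerTimeout(dropPeer func(), processCh chan bool) error {
	dropPeer()
	// Non-blocking send: if the body fetcher already has an unread signal,
	// it will observe the shutdown on its next scheduling pass anyway.
	select {
	case processCh <- false:
	default:
	}
	return nil // graceful: keep the gathered data
}

func main() {
	processCh := make(chan bool, 1)
	_ = headerTimeout(func() { fmt.Println("peer dropped") }, processCh)
	fmt.Println("continue flag:", <-processCh) // false → wind down
}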
-// fetchBlocks iteratively downloads the scheduled hashes, taking any available
-// peers, reserving a chunk of blocks for each, waiting for delivery and also
-// periodically checking for timeouts.
-func (d *Downloader) fetchBlocks(from uint64) error {
- glog.V(logger.Debug).Infof("Downloading blocks from #%d", from)
- defer glog.V(logger.Debug).Infof("Block download terminated")
+// fetchBodies iteratively downloads the scheduled block bodies, taking any
+// available peers, reserving a chunk of blocks for each, waiting for delivery
+// and also periodically checking for timeouts.
+func (d *Downloader) fetchBodies(from uint64) error {
+ glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from)
+ defer glog.V(logger.Debug).Infof("Block body download terminated")
// Create a timeout timer for scheduling expiration tasks
ticker := time.NewTicker(100 * time.Millisecond)
@@ -888,7 +1013,7 @@ func (d *Downloader) fetchBlocks(from uint64) error {
update := make(chan struct{}, 1)
- // Prepare the queue and fetch blocks until the hash fetcher's done
+ // Prepare the queue and fetch block bodies until the block header fetcher's done
d.queue.Prepare(from)
finished := false
@@ -897,31 +1022,42 @@ func (d *Downloader) fetchBlocks(from uint64) error {
case <-d.cancelCh:
return errCancelBlockFetch
- case blockPack := <-d.blockCh:
+ case <-d.hashCh:
+ // Out of bounds eth/61 hashes received, ignore them
+
+ case <-d.blockCh:
+ // Out of bounds eth/61 blocks received, ignore them
+
+ case bodyPack := <-d.bodyCh:
// If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore its message.
- if peer := d.peers.Peer(blockPack.peerId); peer != nil {
- // Deliver the received chunk of blocks, and demote in case of errors
- err := d.queue.Deliver(blockPack.peerId, blockPack.blocks)
+ if peer := d.peers.Peer(bodyPack.peerId); peer != nil {
+ // Deliver the received chunk of bodies, and demote in case of errors
+ err := d.queue.Deliver(bodyPack.peerId, bodyPack.transactions, bodyPack.uncles)
switch err {
case nil:
// If no blocks were delivered, demote the peer (need the delivery above)
- if len(blockPack.blocks) == 0 {
+ if len(bodyPack.transactions) == 0 || len(bodyPack.uncles) == 0 {
peer.Demote()
peer.SetIdle()
- glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
+ glog.V(logger.Detail).Infof("%s: no block bodies delivered", peer)
break
}
// All was successful, promote the peer and potentially start processing
peer.Promote()
peer.SetIdle()
- glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks))
+ glog.V(logger.Detail).Infof("%s: delivered %d:%d block bodies", peer, len(bodyPack.transactions), len(bodyPack.uncles))
go d.process()
case errInvalidChain:
// The hash chain is invalid (blocks are not ordered properly), abort
return err
+ case errInvalidBody:
+ // The peer delivered something very bad, drop immediately
+ glog.V(logger.Error).Infof("%s: delivered invalid block, dropping", peer)
+ d.dropPeer(peer.id)
+
case errNoFetchesPending:
// Peer probably timed out with its delivery but came through
// in the end, demote, but allow it to pull from this peer.
@@ -945,18 +1081,18 @@ func (d *Downloader) fetchBlocks(from uint64) error {
go d.process()
}
}
- // Blocks arrived, try to update the progress
+ // Blocks assembled, try to update the progress
select {
case update <- struct{}{}:
default:
}
case cont := <-d.processCh:
- // The hash fetcher sent a continuation flag, check if it's done
+ // The header fetcher sent a continuation flag, check if it's done
if !cont {
finished = true
}
- // Hashes arrive, try to update the progress
+ // Headers arrive, try to update the progress
select {
case update <- struct{}{}:
default:
@@ -974,38 +1110,50 @@ func (d *Downloader) fetchBlocks(from uint64) error {
if d.peers.Len() == 0 {
return errNoPeers
}
- // Check for block request timeouts and demote the responsible peers
- for _, pid := range d.queue.Expire(blockHardTTL) {
+ // Check for block body request timeouts and demote the responsible peers
+ for _, pid := range d.queue.Expire(bodyHardTTL) {
if peer := d.peers.Peer(pid); peer != nil {
peer.Demote()
- glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
+ glog.V(logger.Detail).Infof("%s: block body delivery timeout", peer)
}
}
// If there's nothing more to fetch, wait or terminate
if d.queue.Pending() == 0 {
if d.queue.InFlight() == 0 && finished {
- glog.V(logger.Debug).Infof("Block fetching completed")
+ glog.V(logger.Debug).Infof("Block body fetching completed")
return nil
}
break
}
// Send a download request to all idle peers, until throttled
+ queuedEmptyBlocks, throttled := false, false
for _, peer := range d.peers.IdlePeers() {
// Short circuit if throttling activated
if d.queue.Throttle() {
+ throttled = true
break
}
// Reserve a chunk of hashes for a peer. A nil can mean either that
// no more hashes are available, or that the peer is known not to
// have them.
- request := d.queue.Reserve(peer, peer.Capacity())
+ request, process, err := d.queue.Reserve(peer, peer.Capacity())
+ if err != nil {
+ return err
+ }
+ if process {
+ queuedEmptyBlocks = true
+ go d.process()
+ }
if request == nil {
continue
}
if glog.V(logger.Detail) {
- glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
+ glog.Infof("%s: requesting %d block bodies", peer, len(request.Headers))
}
// Fetch the chunk and make sure any errors return the hashes to the queue
+ if d.bodyFetchHook != nil {
+ d.bodyFetchHook(request.Headers)
+ }
if err := peer.Fetch(request); err != nil {
glog.V(logger.Error).Infof("%v: fetch failed, rescheduling", peer)
d.queue.Cancel(request)
@@ -1013,99 +1161,13 @@ func (d *Downloader) fetchBlocks(from uint64) error {
}
// Make sure that we have peers available for fetching. If all peers have been tried
// and all failed throw an error
- if !d.queue.Throttle() && d.queue.InFlight() == 0 {
+ if !queuedEmptyBlocks && !throttled && d.queue.InFlight() == 0 {
return errPeersUnavailable
}
}
}
}
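
Reserve now returns a request, a process flag, and an error: headers with no transactions and no uncles complete immediately as empty bodies, so the queue may have blocks ready to import even when nothing needs fetching. A toy sketch of that three-way result (types and names are illustrative, not the queue's actual API):

package main

import "fmt"

type header struct {
	number  uint64
	hasBody bool
}

type request struct{ headers []header }

// reserve splits pending headers into ones that need a network fetch and
// ones that finish immediately as empty blocks; the bool asks the caller to
// kick off processing, mirroring the queuedEmptyBlocks handling above.
func reserve(pending []header) (*request, bool, error) {
	var fetch []header
	process := false
	for _, h := range pending {
		if !h.hasBody {
			process = true // empty block completed without a round trip
			continue
		}
		fetch = append(fetch, h)
	}
	if len(fetch) == 0 {
		return nil, process, nil // nothing to download, maybe something to import
	}
	return &request{headers: fetch}, process, nil
}

func main() {
	req, process, _ := reserve([]header{{1, false}, {2, true}, {3, false}})
	fmt.Println(len(req.headers), process) // 1 true
}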
-// banBlocks retrieves a batch of blocks from a peer feeding us invalid hashes,
-// and bans the head of the retrieved batch.
-//
-// This method only fetches one single batch as the goal is not ban an entire
-// (potentially long) invalid chain - wasting a lot of time in the meanwhile -,
-// but rather to gradually build up a blacklist if the peer keeps reconnecting.
-func (d *Downloader) banBlocks(peerId string, head common.Hash) error {
- glog.V(logger.Debug).Infof("Banning a batch out of %d blocks from %s", d.queue.Pending(), peerId)
-
- // Ask the peer being banned for a batch of blocks from the banning point
- peer := d.peers.Peer(peerId)
- if peer == nil {
- return nil
- }
- request := d.queue.Reserve(peer, MaxBlockFetch)
- if request == nil {
- return nil
- }
- if err := peer.Fetch(request); err != nil {
- return err
- }
- // Wait a bit for the reply to arrive, and ban if done so
- timeout := time.After(blockHardTTL)
- for {
- select {
- case <-d.cancelCh:
- return errCancelBlockFetch
-
- case <-timeout:
- return errTimeout
-
- case <-d.hashCh:
- // Out of bounds hashes received, ignore them
-
- case blockPack := <-d.blockCh:
- blocks := blockPack.blocks
-
- // Short circuit if it's a stale cross check
- if len(blocks) == 1 {
- block := blocks[0]
- if _, ok := d.checks[block.Hash()]; ok {
- delete(d.checks, block.Hash())
- break
- }
- }
- // Short circuit if it's not from the peer being banned
- if blockPack.peerId != peerId {
- break
- }
- // Short circuit if no blocks were returned
- if len(blocks) == 0 {
- return errors.New("no blocks returned to ban")
- }
- // Reconstruct the original chain order and ensure we're banning the correct blocks
- types.BlockBy(types.Number).Sort(blocks)
- if bytes.Compare(blocks[0].Hash().Bytes(), head.Bytes()) != 0 {
- return errors.New("head block not the banned one")
- }
- index := 0
- for _, block := range blocks[1:] {
- if bytes.Compare(block.ParentHash().Bytes(), blocks[index].Hash().Bytes()) != 0 {
- break
- }
- index++
- }
- // Ban the head hash and phase out any excess
- d.banned.Add(blocks[index].Hash())
- for d.banned.Size() > maxBannedHashes {
- var evacuate common.Hash
-
- d.banned.Each(func(item interface{}) bool {
- // Skip any hard coded bans
- if core.BadHashes[item.(common.Hash)] {
- return true
- }
- evacuate = item.(common.Hash)
- return false
- })
- d.banned.Remove(evacuate)
- }
- glog.V(logger.Debug).Infof("Banned %d blocks from: %s", index+1, peerId)
- return nil
- }
- }
-}
-
// process takes blocks from the queue and tries to import them into the chain.
//
// The algorithmic flow is as follows:
@@ -1151,6 +1213,9 @@ func (d *Downloader) process() {
if len(blocks) == 0 {
return
}
+ if d.chainInsertHook != nil {
+ d.chainInsertHook(blocks)
+ }
// Reset the import statistics
d.importLock.Lock()
d.importStart = time.Now()
@@ -1184,9 +1249,45 @@ func (d *Downloader) process() {
}
}
-// DeliverBlocks injects a new batch of blocks received from a remote node.
+// DeliverHashes61 injects a new batch of hashes received from a remote node into
+// the download schedule. This is usually invoked through the BlockHashesMsg by
+// the protocol handler.
+func (d *Downloader) DeliverHashes61(id string, hashes []common.Hash) (err error) {
+ // Update the delivery metrics for both good and failed deliveries
+ hashInMeter.Mark(int64(len(hashes)))
+ defer func() {
+ if err != nil {
+ hashDropMeter.Mark(int64(len(hashes)))
+ }
+ }()
+ // Make sure the downloader is active
+ if atomic.LoadInt32(&d.synchronising) == 0 {
+ return errNoSyncActive
+ }
+ // Deliver or abort if the sync is canceled while queuing
+ d.cancelLock.RLock()
+ cancel := d.cancelCh
+ d.cancelLock.RUnlock()
+
+ select {
+ case d.hashCh <- hashPack{id, hashes}:
+ return nil
+
+ case <-cancel:
+ return errNoSyncActive
+ }
+}
+
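All four Deliver* methods share the same shape: refuse when no sync is active, snapshot the cancel channel under the read lock, then either hand the payload to the fetcher or abort if the sync is cancelled while queuing. A self-contained sketch of the pattern, metrics omitted (deliverer mirrors the relevant downloader fields):

package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

var errNoSyncActive = errors.New("no sync active")

type deliverer struct {
	synchronising int32
	cancelLock    sync.RWMutex
	cancelCh      chan struct{}
	payloadCh     chan []string
}

func (d *deliverer) deliver(payload []string) error {
	// Make sure a sync is actually running
	if atomic.LoadInt32(&d.synchronising) == 0 {
		return errNoSyncActive
	}
	d.cancelLock.RLock()
	cancel := d.cancelCh // snapshot: the channel may be swapped concurrently
	d.cancelLock.RUnlock()

	// Deliver or abort if the sync is cancelled while queuing
	select {
	case d.payloadCh <- payload:
		return nil
	case <-cancel:
		return errNoSyncActive
	}
}

func main() {
	d := &deliverer{cancelCh: make(chan struct{}), payloadCh: make(chan []string, 1)}
	atomic.StoreInt32(&d.synchronising, 1)
	fmt.Println(d.deliver([]string{"0xdead", "0xbeef"})) // <nil>
}
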
+// DeliverBlocks61 injects a new batch of blocks received from a remote node.
// This is usually invoked through the BlocksMsg by the protocol handler.
-func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) error {
+func (d *Downloader) DeliverBlocks61(id string, blocks []*types.Block) (err error) {
+ // Update the delivery metrics for both good and failed deliveries
+ blockInMeter.Mark(int64(len(blocks)))
+ defer func() {
+ if err != nil {
+ blockDropMeter.Mark(int64(len(blocks)))
+ }
+ }()
// Make sure the downloader is active
if atomic.LoadInt32(&d.synchronising) == 0 {
return errNoSyncActive
@@ -1205,10 +1306,16 @@ func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) error {
}
}
-// DeliverHashes injects a new batch of hashes received from a remote node into
-// the download schedule. This is usually invoked through the BlockHashesMsg by
-// the protocol handler.
-func (d *Downloader) DeliverHashes(id string, hashes []common.Hash) error {
+// DeliverHeaders injects a new batch of block headers received from a remote
+// node into the download schedule.
+func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
+ // Update the delivery metrics for both good and failed deliveries
+ headerInMeter.Mark(int64(len(headers)))
+ defer func() {
+ if err != nil {
+ headerDropMeter.Mark(int64(len(headers)))
+ }
+ }()
// Make sure the downloader is active
if atomic.LoadInt32(&d.synchronising) == 0 {
return errNoSyncActive
@@ -1219,7 +1326,34 @@ func (d *Downloader) DeliverHashes(id string, hashes []common.Hash) error {
d.cancelLock.RUnlock()
select {
- case d.hashCh <- hashPack{id, hashes}:
+ case d.headerCh <- headerPack{id, headers}:
+ return nil
+
+ case <-cancel:
+ return errNoSyncActive
+ }
+}
+
+// DeliverBodies injects a new batch of block bodies received from a remote node.
+func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
+ // Update the delivery metrics for both good and failed deliveries
+ bodyInMeter.Mark(int64(len(transactions)))
+ defer func() {
+ if err != nil {
+ bodyDropMeter.Mark(int64(len(transactions)))
+ }
+ }()
+ // Make sure the downloader is active
+ if atomic.LoadInt32(&d.synchronising) == 0 {
+ return errNoSyncActive
+ }
+ // Deliver or abort if the sync is canceled while queuing
+ d.cancelLock.RLock()
+ cancel := d.cancelCh
+ d.cancelLock.RUnlock()
+
+ select {
+ case d.bodyCh <- bodyPack{id, transactions, uncles}:
return nil
case <-cancel: