author    Felix Lange <fjl@twurst.com>  2016-07-21 17:36:38 +0800
committer Felix Lange <fjl@twurst.com>  2016-07-22 19:17:19 +0800
commit    016007bd25f2b5e597c2ac2f7256c4e73574f70e (patch)
tree      cf32a5d2edb8f8683ada17fa6bf71f878af50999 /eth/downloader
parent    a4c4125b1155d9276614029163b498a17643f0f2 (diff)
eth, eth/downloader, eth/fetcher: delete eth/61 code
The eth/61 protocol was disabled in #2776; this commit removes its message handlers and hash-chain sync logic.
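With the hash-chain path gone, the downloader is left with a single header-driven sync algorithm, and peers that cannot serve headers are rejected up front via the new errTooOld. The following is a condensed sketch (not the verbatim function) of the control flow that remains in syncWithPeer after this commit, using only names that appear in the diff below; event posting, sync-stats bookkeeping and the fast-sync pivot handling are elided:

func (d *Downloader) syncWithPeerSketch(p *peer, td *big.Int) error {
	// Only one sync algorithm is left, and it requires header
	// retrieval, so eth/61 and older peers are turned away.
	if p.version < 62 {
		return errTooOld
	}
	// Look up the sync boundaries: the remote head and the common ancestor.
	latest, err := d.fetchHeight(p) // head header now, not head block
	if err != nil {
		return err
	}
	origin, err := d.findAncestor(p, latest.Number.Uint64())
	if err != nil {
		return err
	}
	// Headers drive the sync; bodies, receipts and state fill in behind.
	return d.spawnSync(origin+1,
		func() error { return d.fetchHeaders(p, origin+1) },
		func() error { return d.processHeaders(origin+1, td) },
		func() error { return d.fetchBodies(origin + 1) },
		func() error { return d.fetchReceipts(origin + 1) },
		func() error { return d.fetchNodeData() },
	)
}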
Diffstat (limited to 'eth/downloader')
-rw-r--r--  eth/downloader/downloader.go       | 667
-rw-r--r--  eth/downloader/downloader_test.go  | 122
-rw-r--r--  eth/downloader/metrics.go          |  10
-rw-r--r--  eth/downloader/peer.go             |  50
-rw-r--r--  eth/downloader/queue.go            | 138
-rw-r--r--  eth/downloader/types.go            |  20
6 files changed, 75 insertions, 932 deletions
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 01c0818a0..1e9bc27bc 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -48,23 +48,17 @@ var (
MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
MaxStateFetch = 384 // Amount of node state values to allow fetching per request
- MaxForkAncestry = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation
-
- hashTTL = 3 * time.Second // [eth/61] Time it takes for a hash request to time out
- blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request
- blockTTL = 3 * blockTargetRTT // [eth/61] Maximum time allowance before a block request is considered expired
-
- rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
- rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
- rttMinConfidence = 0.1 // Worst confidence factor in our estimated RTT value
- ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
- ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
+ MaxForkAncestry = 3 * params.EpochDuration.Uint64() // Maximum chain reorganisation
+ rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
+ rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
+ rttMinConfidence = 0.1 // Worst confidence factor in our estimated RTT value
+ ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
+ ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
qosTuningPeers = 5 // Number of peers to tune based on (best peers)
qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence
qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value
- maxQueuedHashes = 32 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection)
maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
maxResultsProcess = 2048 // Number of content download results to import at once into the chain
@@ -84,16 +78,13 @@ var (
errStallingPeer = errors.New("peer is stalling")
errNoPeers = errors.New("no peers to keep download active")
errTimeout = errors.New("timeout")
- errEmptyHashSet = errors.New("empty hash set by peer")
errEmptyHeaderSet = errors.New("empty header set by peer")
errPeersUnavailable = errors.New("no peers available or all tried for download")
- errAlreadyInPool = errors.New("hash already in pool")
errInvalidAncestor = errors.New("retrieved ancestor is invalid")
errInvalidChain = errors.New("retrieved hash chain is invalid")
errInvalidBlock = errors.New("retrieved block is invalid")
errInvalidBody = errors.New("retrieved block body is invalid")
errInvalidReceipt = errors.New("retrieved receipt is invalid")
- errCancelHashFetch = errors.New("hash download canceled (requested)")
errCancelBlockFetch = errors.New("block download canceled (requested)")
errCancelHeaderFetch = errors.New("block header download canceled (requested)")
errCancelBodyFetch = errors.New("block body download canceled (requested)")
@@ -102,6 +93,7 @@ var (
errCancelHeaderProcessing = errors.New("header processing canceled (requested)")
errCancelContentProcessing = errors.New("content processing canceled (requested)")
errNoSyncActive = errors.New("no sync active")
+ errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
)
type Downloader struct {
@@ -146,13 +138,10 @@ type Downloader struct {
// Channels
newPeerCh chan *peer
- hashCh chan dataPack // [eth/61] Channel receiving inbound hashes
- blockCh chan dataPack // [eth/61] Channel receiving inbound blocks
headerCh chan dataPack // [eth/62] Channel receiving inbound block headers
bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies
receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts
stateCh chan dataPack // [eth/63] Channel receiving inbound node state data
- blockWakeCh chan bool // [eth/61] Channel to signal the block fetcher of new tasks
bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks
receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks
stateWakeCh chan bool // [eth/63] Channel to signal the state fetcher of new tasks
@@ -199,13 +188,10 @@ func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, ha
rollback: rollback,
dropPeer: dropPeer,
newPeerCh: make(chan *peer, 1),
- hashCh: make(chan dataPack, 1),
- blockCh: make(chan dataPack, 1),
headerCh: make(chan dataPack, 1),
bodyCh: make(chan dataPack, 1),
receiptCh: make(chan dataPack, 1),
stateCh: make(chan dataPack, 1),
- blockWakeCh: make(chan bool, 1),
bodyWakeCh: make(chan bool, 1),
receiptWakeCh: make(chan bool, 1),
stateWakeCh: make(chan bool, 1),
@@ -251,12 +237,11 @@ func (d *Downloader) Synchronising() bool {
// RegisterPeer injects a new download peer into the set of block sources to be
// used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version int, head common.Hash,
- getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
glog.V(logger.Detail).Infoln("Registering peer", id)
- if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
+ if err := d.peers.Register(newPeer(id, version, head, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
glog.V(logger.Error).Infoln("Register failed:", err)
return err
}
@@ -291,7 +276,9 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
case errBusy:
glog.V(logger.Detail).Infof("Synchronisation already in progress")
- case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidAncestor, errInvalidChain:
+ case errTimeout, errBadPeer, errStallingPeer,
+ errEmptyHeaderSet, errPeersUnavailable, errTooOld,
+ errInvalidAncestor, errInvalidChain:
glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
d.dropPeer(id)
@@ -323,13 +310,13 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
d.queue.Reset()
d.peers.Reset()
- for _, ch := range []chan bool{d.blockWakeCh, d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
+ for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
select {
case <-ch:
default:
}
}
- for _, ch := range []chan dataPack{d.hashCh, d.blockCh, d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} {
+ for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh, d.stateCh} {
for empty := false; !empty; {
select {
case <-ch:
@@ -377,105 +364,73 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
d.mux.Post(DoneEvent{})
}
}()
+ if p.version < 62 {
+ return errTooOld
+ }
glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
defer func(start time.Time) {
glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
}(time.Now())
- switch {
- case p.version == 61:
- // Look up the sync boundaries: the common ancestor and the target block
- latest, err := d.fetchHeight61(p)
- if err != nil {
- return err
- }
- origin, err := d.findAncestor61(p, latest)
- if err != nil {
- return err
- }
- d.syncStatsLock.Lock()
- if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
- d.syncStatsChainOrigin = origin
- }
- d.syncStatsChainHeight = latest
- d.syncStatsLock.Unlock()
+ // Look up the sync boundaries: the common ancestor and the target block
+ latest, err := d.fetchHeight(p)
+ if err != nil {
+ return err
+ }
+ height := latest.Number.Uint64()
- // Initiate the sync using a concurrent hash and block retrieval algorithm
- d.queue.Prepare(origin+1, d.mode, 0, nil)
- if d.syncInitHook != nil {
- d.syncInitHook(origin, latest)
- }
- return d.spawnSync(origin+1,
- func() error { return d.fetchHashes61(p, td, origin+1) },
- func() error { return d.fetchBlocks61(origin + 1) },
- )
-
- case p.version >= 62:
- // Look up the sync boundaries: the common ancestor and the target block
- latest, err := d.fetchHeight(p)
- if err != nil {
- return err
- }
- height := latest.Number.Uint64()
+ origin, err := d.findAncestor(p, height)
+ if err != nil {
+ return err
+ }
+ d.syncStatsLock.Lock()
+ if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
+ d.syncStatsChainOrigin = origin
+ }
+ d.syncStatsChainHeight = height
+ d.syncStatsLock.Unlock()
- origin, err := d.findAncestor(p, height)
- if err != nil {
- return err
- }
- d.syncStatsLock.Lock()
- if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
- d.syncStatsChainOrigin = origin
- }
- d.syncStatsChainHeight = height
- d.syncStatsLock.Unlock()
-
- // Initiate the sync using a concurrent header and content retrieval algorithm
- pivot := uint64(0)
- switch d.mode {
- case LightSync:
- pivot = height
- case FastSync:
- // Calculate the new fast/slow sync pivot point
- if d.fsPivotLock == nil {
- pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
- if err != nil {
- panic(fmt.Sprintf("Failed to access crypto random source: %v", err))
- }
- if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() {
- pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64()
- }
- } else {
- // Pivot point locked in, use this and do not pick a new one!
- pivot = d.fsPivotLock.Number.Uint64()
+ // Initiate the sync using a concurrent header and content retrieval algorithm
+ pivot := uint64(0)
+ switch d.mode {
+ case LightSync:
+ pivot = height
+ case FastSync:
+ // Calculate the new fast/slow sync pivot point
+ if d.fsPivotLock == nil {
+ pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
+ if err != nil {
+ panic(fmt.Sprintf("Failed to access crypto random source: %v", err))
}
- // If the point is below the origin, move origin back to ensure state download
- if pivot < origin {
- if pivot > 0 {
- origin = pivot - 1
- } else {
- origin = 0
- }
+ if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() {
+ pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64()
}
- glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
+ } else {
+ // Pivot point locked in, use this and do not pick a new one!
+ pivot = d.fsPivotLock.Number.Uint64()
}
- d.queue.Prepare(origin+1, d.mode, pivot, latest)
- if d.syncInitHook != nil {
- d.syncInitHook(origin, height)
+ // If the point is below the origin, move origin back to ensure state download
+ if pivot < origin {
+ if pivot > 0 {
+ origin = pivot - 1
+ } else {
+ origin = 0
+ }
}
- return d.spawnSync(origin+1,
- func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved
- func() error { return d.processHeaders(origin+1, td) }, // Headers are always retrieved
- func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync
- func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync
- func() error { return d.fetchNodeData() }, // Node state data is retrieved during fast sync
- )
-
- default:
- // Something very wrong, stop right here
- glog.V(logger.Error).Infof("Unsupported eth protocol: %d", p.version)
- return errBadPeer
+ glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
+ }
+ d.queue.Prepare(origin+1, d.mode, pivot, latest)
+ if d.syncInitHook != nil {
+ d.syncInitHook(origin, height)
}
+ return d.spawnSync(origin+1,
+ func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved
+ func() error { return d.processHeaders(origin+1, td) }, // Headers are always retrieved
+ func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync
+ func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync
+ func() error { return d.fetchNodeData() }, // Node state data is retrieved during fast sync
+ )
}
// spawnSync runs d.process and all given fetcher functions to completion in
@@ -540,452 +495,6 @@ func (d *Downloader) Terminate() {
d.cancel()
}
-// fetchHeight61 retrieves the head block of the remote peer to aid in estimating
-// the total time a pending synchronisation would take.
-func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
- glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
-
- // Request the advertised remote head block and wait for the response
- go p.getBlocks([]common.Hash{p.head})
-
- timeout := time.After(hashTTL)
- for {
- select {
- case <-d.cancelCh:
- return 0, errCancelBlockFetch
-
- case packet := <-d.blockCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received blocks from incorrect peer(%s)", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- blocks := packet.(*blockPack).blocks
- if len(blocks) != 1 {
- glog.V(logger.Debug).Infof("%v: invalid number of head blocks: %d != 1", p, len(blocks))
- return 0, errBadPeer
- }
- return blocks[0].NumberU64(), nil
-
- case <-timeout:
- glog.V(logger.Debug).Infof("%v: head block timeout", p)
- return 0, errTimeout
-
- case <-d.hashCh:
- // Out of bounds hashes received, ignore them
-
- case <-d.headerCh:
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Ignore eth/{62,63} packets because this is eth/61.
- // These can arrive as a late delivery from a previous sync.
- }
- }
-}
-
-// findAncestor61 tries to locate the common ancestor block of the local chain and
-// a remote peers blockchain. In the general case when our node was in sync and
-// on the correct chain, checking the top N blocks should already get us a match.
-// In the rare scenario when we ended up on a long reorganisation (i.e. none of
-// the head blocks match), we do a binary search to find the common ancestor.
-func (d *Downloader) findAncestor61(p *peer, height uint64) (uint64, error) {
- glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
-
- // Figure out the valid ancestor range to prevent rewrite attacks
- floor, ceil := int64(-1), d.headBlock().NumberU64()
- if ceil >= MaxForkAncestry {
- floor = int64(ceil - MaxForkAncestry)
- }
- // Request the topmost blocks to short circuit binary ancestor lookup
- head := ceil
- if head > height {
- head = height
- }
- from := int64(head) - int64(MaxHashFetch) + 1
- if from < 0 {
- from = 0
- }
- go p.getAbsHashes(uint64(from), MaxHashFetch)
-
- // Wait for the remote response to the head fetch
- number, hash := uint64(0), common.Hash{}
- timeout := time.After(hashTTL)
-
- for finished := false; !finished; {
- select {
- case <-d.cancelCh:
- return 0, errCancelHashFetch
-
- case packet := <-d.hashCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- hashes := packet.(*hashPack).hashes
- if len(hashes) == 0 {
- glog.V(logger.Debug).Infof("%v: empty head hash set", p)
- return 0, errEmptyHashSet
- }
- // Check if a common ancestor was found
- finished = true
- for i := len(hashes) - 1; i >= 0; i-- {
- // Skip any headers that underflow/overflow our requested set
- header := d.getHeader(hashes[i])
- if header == nil || header.Number.Int64() < from || header.Number.Uint64() > head {
- continue
- }
- // Otherwise check if we already know the header or not
- if d.hasBlockAndState(hashes[i]) {
- number, hash = header.Number.Uint64(), header.Hash()
- break
- }
- }
-
- case <-timeout:
- glog.V(logger.Debug).Infof("%v: head hash timeout", p)
- return 0, errTimeout
-
- case <-d.blockCh:
- // Out of bounds blocks received, ignore them
-
- case <-d.headerCh:
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Ignore eth/{62,63} packets because this is eth/61.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- // If the head fetch already found an ancestor, return
- if !common.EmptyHash(hash) {
- if int64(number) <= floor {
- glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor)
- return 0, errInvalidAncestor
- }
- glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
- return number, nil
- }
- // Ancestor not found, we need to binary search over our chain
- start, end := uint64(0), head
- if floor > 0 {
- start = uint64(floor)
- }
- for start+1 < end {
- // Split our chain interval in two, and request the hash to cross check
- check := (start + end) / 2
-
- timeout := time.After(hashTTL)
- go p.getAbsHashes(uint64(check), 1)
-
- // Wait until a reply arrives to this request
- for arrived := false; !arrived; {
- select {
- case <-d.cancelCh:
- return 0, errCancelHashFetch
-
- case packet := <-d.hashCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- hashes := packet.(*hashPack).hashes
- if len(hashes) != 1 {
- glog.V(logger.Debug).Infof("%v: invalid search hash set (%d)", p, len(hashes))
- return 0, errBadPeer
- }
- arrived = true
-
- // Modify the search interval based on the response
- if !d.hasBlockAndState(hashes[0]) {
- end = check
- break
- }
- block := d.getBlock(hashes[0]) // this doesn't check state, hence the above explicit check
- if block.NumberU64() != check {
- glog.V(logger.Debug).Infof("%v: non requested hash #%d [%x…], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check)
- return 0, errBadPeer
- }
- start = check
-
- case <-timeout:
- glog.V(logger.Debug).Infof("%v: search hash timeout", p)
- return 0, errTimeout
-
- case <-d.blockCh:
- // Out of bounds blocks received, ignore them
-
- case <-d.headerCh:
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Ignore eth/{62,63} packets because this is eth/61.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- }
- // Ensure valid ancestry and return
- if int64(start) <= floor {
- glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor)
- return 0, errInvalidAncestor
- }
- glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4])
- return start, nil
-}
-
-// fetchHashes61 keeps retrieving hashes from the requested number, until no more
-// are returned, potentially throttling on the way.
-func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
- glog.V(logger.Debug).Infof("%v: downloading hashes from #%d", p, from)
-
- // Create a timeout timer, and the associated hash fetcher
- request := time.Now() // time of the last fetch request
- timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
- <-timeout.C // timeout channel should be initially empty
- defer timeout.Stop()
-
- getHashes := func(from uint64) {
- glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from)
-
- request = time.Now()
- timeout.Reset(hashTTL)
- go p.getAbsHashes(from, MaxHashFetch)
- }
- // Start pulling hashes, until all are exhausted
- getHashes(from)
- gotHashes := false
-
- for {
- select {
- case <-d.cancelCh:
- return errCancelHashFetch
-
- case packet := <-d.hashCh:
- // Make sure the active peer is giving us the hashes
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
- break
- }
- hashReqTimer.UpdateSince(request)
- timeout.Stop()
-
- // If no more hashes are inbound, notify the block fetcher and return
- if packet.Items() == 0 {
- glog.V(logger.Debug).Infof("%v: no available hashes", p)
-
- select {
- case d.blockWakeCh <- false:
- case <-d.cancelCh:
- }
- // If no hashes were retrieved at all, the peer violated it's TD promise that it had a
- // better chain compared to ours. The only exception is if it's promised blocks were
- // already imported by other means (e.g. fetcher):
- //
- // R <remote peer>, L <local node>: Both at block 10
- // R: Mine block 11, and propagate it to L
- // L: Queue block 11 for import
- // L: Notice that R's head and TD increased compared to ours, start sync
- // L: Import of block 11 finishes
- // L: Sync begins, and finds common ancestor at 11
- // L: Request new hashes up from 11 (R's TD was higher, it must have something)
- // R: Nothing to give
- if !gotHashes && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
- return errStallingPeer
- }
- return nil
- }
- gotHashes = true
- hashes := packet.(*hashPack).hashes
-
- // Otherwise insert all the new hashes, aborting in case of junk
- glog.V(logger.Detail).Infof("%v: scheduling %d hashes from #%d", p, len(hashes), from)
-
- inserts := d.queue.Schedule61(hashes, true)
- if len(inserts) != len(hashes) {
- glog.V(logger.Debug).Infof("%v: stale hashes", p)
- return errBadPeer
- }
- // Notify the block fetcher of new hashes, but stop if queue is full
- if d.queue.PendingBlocks() < maxQueuedHashes {
- // We still have hashes to fetch, send continuation wake signal (potential)
- select {
- case d.blockWakeCh <- true:
- default:
- }
- } else {
- // Hash limit reached, send a termination wake signal (enforced)
- select {
- case d.blockWakeCh <- false:
- case <-d.cancelCh:
- }
- return nil
- }
- // Queue not yet full, fetch the next batch
- from += uint64(len(hashes))
- getHashes(from)
-
- case <-timeout.C:
- glog.V(logger.Debug).Infof("%v: hash request timed out", p)
- hashTimeoutMeter.Mark(1)
- return errTimeout
-
- case <-d.headerCh:
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Ignore eth/{62,63} packets because this is eth/61.
- // These can arrive as a late delivery from a previous sync.
- }
- }
-}
-
-// fetchBlocks61 iteratively downloads the scheduled hashes, taking any available
-// peers, reserving a chunk of blocks for each, waiting for delivery and also
-// periodically checking for timeouts.
-func (d *Downloader) fetchBlocks61(from uint64) error {
- glog.V(logger.Debug).Infof("Downloading blocks from #%d", from)
- defer glog.V(logger.Debug).Infof("Block download terminated")
-
- // Create a timeout timer for scheduling expiration tasks
- ticker := time.NewTicker(100 * time.Millisecond)
- defer ticker.Stop()
-
- update := make(chan struct{}, 1)
-
- // Fetch blocks until the hash fetcher's done
- finished := false
- for {
- select {
- case <-d.cancelCh:
- return errCancelBlockFetch
-
- case packet := <-d.blockCh:
- // If the peer was previously banned and failed to deliver it's pack
- // in a reasonable time frame, ignore it's message.
- if peer := d.peers.Peer(packet.PeerId()); peer != nil {
- blocks := packet.(*blockPack).blocks
-
- // Deliver the received chunk of blocks and check chain validity
- accepted, err := d.queue.DeliverBlocks(peer.id, blocks)
- if err == errInvalidChain {
- return err
- }
- // Unless a peer delivered something completely else than requested (usually
- // caused by a timed out request which came through in the end), set it to
- // idle. If the delivery's stale, the peer should have already been idled.
- if err != errStaleDelivery {
- peer.SetBlocksIdle(accepted)
- }
- // Issue a log to the user to see what's going on
- switch {
- case err == nil && len(blocks) == 0:
- glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
- case err == nil:
- glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blocks))
- default:
- glog.V(logger.Detail).Infof("%s: delivery failed: %v", peer, err)
- }
- }
- // Blocks arrived, try to update the progress
- select {
- case update <- struct{}{}:
- default:
- }
-
- case cont := <-d.blockWakeCh:
- // The hash fetcher sent a continuation flag, check if it's done
- if !cont {
- finished = true
- }
- // Hashes arrive, try to update the progress
- select {
- case update <- struct{}{}:
- default:
- }
-
- case <-ticker.C:
- // Sanity check update the progress
- select {
- case update <- struct{}{}:
- default:
- }
-
- case <-update:
- // Short circuit if we lost all our peers
- if d.peers.Len() == 0 {
- return errNoPeers
- }
- // Check for block request timeouts and demote the responsible peers
- for pid, fails := range d.queue.ExpireBlocks(blockTTL) {
- if peer := d.peers.Peer(pid); peer != nil {
- if fails > 1 {
- glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
- peer.SetBlocksIdle(0)
- } else {
- glog.V(logger.Debug).Infof("%s: stalling block delivery, dropping", peer)
- d.dropPeer(pid)
- }
- }
- }
- // If there's nothing more to fetch, wait or terminate
- if d.queue.PendingBlocks() == 0 {
- if !d.queue.InFlightBlocks() && finished {
- glog.V(logger.Debug).Infof("Block fetching completed")
- return nil
- }
- break
- }
- // Send a download request to all idle peers, until throttled
- throttled := false
- idles, total := d.peers.BlockIdlePeers()
-
- for _, peer := range idles {
- // Short circuit if throttling activated
- if d.queue.ShouldThrottleBlocks() {
- throttled = true
- break
- }
- // Reserve a chunk of hashes for a peer. A nil can mean either that
- // no more hashes are available, or that the peer is known not to
- // have them.
- request := d.queue.ReserveBlocks(peer, peer.BlockCapacity(blockTargetRTT))
- if request == nil {
- continue
- }
- if glog.V(logger.Detail) {
- glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
- }
- // Fetch the chunk and make sure any errors return the hashes to the queue
- if err := peer.Fetch61(request); err != nil {
- // Although we could try and make an attempt to fix this, this error really
- // means that we've double allocated a fetch task to a peer. If that is the
- // case, the internal state of the downloader and the queue is very wrong so
- // better hard crash and note the error instead of silently accumulating into
- // a much bigger issue.
- panic(fmt.Sprintf("%v: fetch assignment failed", peer))
- }
- }
- // Make sure that we have peers available for fetching. If all peers have been tried
- // and all failed throw an error
- if !throttled && !d.queue.InFlightBlocks() && len(idles) == total {
- return errPeersUnavailable
- }
-
- case <-d.headerCh:
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Ignore eth/{62,63} packets because this is eth/61.
- // These can arrive as a late delivery from a previous sync.
- }
- }
-}
-
// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
@@ -1022,11 +531,6 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
case <-d.stateCh:
case <-d.receiptCh:
// Out of bounds delivery, ignore
-
- case <-d.hashCh:
- case <-d.blockCh:
- // Ignore eth/61 packets because this is eth/62+.
- // These can arrive as a late delivery from a previous sync.
}
}
}
@@ -1067,7 +571,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
for finished := false; !finished; {
select {
case <-d.cancelCh:
- return 0, errCancelHashFetch
+ return 0, errCancelHeaderFetch
case packet := <-d.headerCh:
// Discard anything not from the origin peer
@@ -1114,11 +618,6 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
case <-d.stateCh:
case <-d.receiptCh:
// Out of bounds delivery, ignore
-
- case <-d.hashCh:
- case <-d.blockCh:
- // Ignore eth/61 packets because this is eth/62+.
- // These can arrive as a late delivery from a previous sync.
}
}
// If the head fetch already found an ancestor, return
@@ -1146,7 +645,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
for arrived := false; !arrived; {
select {
case <-d.cancelCh:
- return 0, errCancelHashFetch
+ return 0, errCancelHeaderFetch
case packer := <-d.headerCh:
// Discard anything not from the origin peer
@@ -1182,11 +681,6 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
case <-d.stateCh:
case <-d.receiptCh:
// Out of bounds delivery, ignore
-
- case <-d.hashCh:
- case <-d.blockCh:
- // Ignore eth/61 packets because this is eth/62+.
- // These can arrive as a late delivery from a previous sync.
}
}
}
@@ -1305,11 +799,6 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
case <-d.cancelCh:
}
return errBadPeer
-
- case <-d.hashCh:
- case <-d.blockCh:
- // Ignore eth/61 packets because this is eth/62+.
- // These can arrive as a late delivery from a previous sync.
}
}
}
@@ -1623,11 +1112,6 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
return errPeersUnavailable
}
-
- case <-d.hashCh:
- case <-d.blockCh:
- // Ignore eth/61 packets because this is eth/62+.
- // These can arrive as a late delivery from a previous sync.
}
}
}
@@ -1867,19 +1351,6 @@ func (d *Downloader) processContent() error {
}
}
-// DeliverHashes injects a new batch of hashes received from a remote node into
-// the download schedule. This is usually invoked through the BlockHashesMsg by
-// the protocol handler.
-func (d *Downloader) DeliverHashes(id string, hashes []common.Hash) (err error) {
- return d.deliver(id, d.hashCh, &hashPack{id, hashes}, hashInMeter, hashDropMeter)
-}
-
-// DeliverBlocks injects a new batch of blocks received from a remote node.
-// This is usually invoked through the BlocksMsg by the protocol handler.
-func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) (err error) {
- return d.deliver(id, d.blockCh, &blockPack{id, blocks}, blockInMeter, blockDropMeter)
-}
-
// DeliverHeaders injects a new batch of block headers received from a remote
// node into the download schedule.
func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
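The fast-sync pivot selection survives this commit unchanged apart from indentation, which makes it awkward to read in the hunk above. Restated as a hedged standalone sketch (pivotSketch is an illustrative name; fsPivotInterval, fsMinFullBlocks and SyncMode are the package identifiers referenced in the diff, and the real code panics rather than returning an error when the crypto random source fails):

// pivotSketch mirrors the pivot computation above: light sync pivots at
// the remote head, while fast sync places the pivot fsMinFullBlocks plus
// a random offset below the head, unless a pivot was already locked in.
func pivotSketch(mode SyncMode, height uint64, locked *types.Header) (uint64, error) {
	pivot := uint64(0)
	switch mode {
	case LightSync:
		pivot = height
	case FastSync:
		if locked != nil {
			return locked.Number.Uint64(), nil // pivot locked in, keep it
		}
		offset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
		if err != nil {
			return 0, err
		}
		if height > uint64(fsMinFullBlocks)+offset.Uint64() {
			pivot = height - uint64(fsMinFullBlocks) - offset.Uint64()
		}
	}
	return pivot, nil
}

Note that when the chosen pivot falls at or below the common ancestor, the hunk above also pulls origin back to pivot-1 (or 0) so that the pivot block's state is still scheduled for download.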
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index fac6ef81c..4ca28091c 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -399,14 +399,12 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
var err error
switch version {
- case 61:
- err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay), nil, nil, nil, nil, nil)
case 62:
- err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil)
+ err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil)
case 63:
- err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
+ err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
case 64:
- err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
+ err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay))
}
if err == nil {
// Assign the owned hashes, headers and blocks to the peer (deep copy)
@@ -465,86 +463,6 @@ func (dl *downloadTester) dropPeer(id string) {
dl.downloader.UnregisterPeer(id)
}
-// peerGetRelHashesFn constructs a GetHashes function associated with a specific
-// peer in the download tester. The returned function can be used to retrieve
-// batches of hashes from the particularly requested peer.
-func (dl *downloadTester) peerGetRelHashesFn(id string, delay time.Duration) func(head common.Hash) error {
- return func(head common.Hash) error {
- time.Sleep(delay)
-
- dl.lock.RLock()
- defer dl.lock.RUnlock()
-
- // Gather the next batch of hashes
- hashes := dl.peerHashes[id]
- result := make([]common.Hash, 0, MaxHashFetch)
- for i, hash := range hashes {
- if hash == head {
- i++
- for len(result) < cap(result) && i < len(hashes) {
- result = append(result, hashes[i])
- i++
- }
- break
- }
- }
- // Delay delivery a bit to allow attacks to unfold
- go func() {
- time.Sleep(time.Millisecond)
- dl.downloader.DeliverHashes(id, result)
- }()
- return nil
- }
-}
-
-// peerGetAbsHashesFn constructs a GetHashesFromNumber function associated with
-// a particular peer in the download tester. The returned function can be used to
-// retrieve batches of hashes from the particularly requested peer.
-func (dl *downloadTester) peerGetAbsHashesFn(id string, delay time.Duration) func(uint64, int) error {
- return func(head uint64, count int) error {
- time.Sleep(delay)
-
- dl.lock.RLock()
- defer dl.lock.RUnlock()
-
- // Gather the next batch of hashes
- hashes := dl.peerHashes[id]
- result := make([]common.Hash, 0, count)
- for i := 0; i < count && len(hashes)-int(head)-1-i >= 0; i++ {
- result = append(result, hashes[len(hashes)-int(head)-1-i])
- }
- // Delay delivery a bit to allow attacks to unfold
- go func() {
- time.Sleep(time.Millisecond)
- dl.downloader.DeliverHashes(id, result)
- }()
- return nil
- }
-}
-
-// peerGetBlocksFn constructs a getBlocks function associated with a particular
-// peer in the download tester. The returned function can be used to retrieve
-// batches of blocks from the particularly requested peer.
-func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([]common.Hash) error {
- return func(hashes []common.Hash) error {
- time.Sleep(delay)
-
- dl.lock.RLock()
- defer dl.lock.RUnlock()
-
- blocks := dl.peerBlocks[id]
- result := make([]*types.Block, 0, len(hashes))
- for _, hash := range hashes {
- if block, ok := blocks[hash]; ok {
- result = append(result, block)
- }
- }
- go dl.downloader.DeliverBlocks(id, result)
-
- return nil
- }
-}
-
// peerGetRelHeadersFn constructs a GetBlockHeaders function based on a hashed
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
@@ -730,7 +648,6 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng
// Tests that simple synchronization against a canonical chain works correctly.
// In this test common ancestor lookup should be short circuited and not require
// binary searching.
-func TestCanonicalSynchronisation61(t *testing.T) { testCanonicalSynchronisation(t, 61, FullSync) }
func TestCanonicalSynchronisation62(t *testing.T) { testCanonicalSynchronisation(t, 62, FullSync) }
func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) }
func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) }
@@ -759,7 +676,6 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved.
-func TestThrottling61(t *testing.T) { testThrottling(t, 61, FullSync) }
func TestThrottling62(t *testing.T) { testThrottling(t, 62, FullSync) }
func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
@@ -845,7 +761,6 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
-func TestForkedSync61(t *testing.T) { testForkedSync(t, 61, FullSync) }
func TestForkedSync62(t *testing.T) { testForkedSync(t, 62, FullSync) }
func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) }
func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
@@ -881,7 +796,6 @@ func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
-func TestHeavyForkedSync61(t *testing.T) { testHeavyForkedSync(t, 61, FullSync) }
func TestHeavyForkedSync62(t *testing.T) { testHeavyForkedSync(t, 62, FullSync) }
func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) }
func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) }
@@ -915,24 +829,9 @@ func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork/2 + 1})
}
-// Tests that an inactive downloader will not accept incoming hashes and blocks.
-func TestInactiveDownloader61(t *testing.T) {
- t.Parallel()
- tester := newTester()
-
- // Check that neither hashes nor blocks are accepted
- if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive {
- t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
- }
- if err := tester.downloader.DeliverBlocks("bad peer", []*types.Block{}); err != errNoSyncActive {
- t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
- }
-}
-
// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
-func TestBoundedForkedSync61(t *testing.T) { testBoundedForkedSync(t, 61, FullSync) }
func TestBoundedForkedSync62(t *testing.T) { testBoundedForkedSync(t, 62, FullSync) }
func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) }
func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) }
@@ -968,7 +867,6 @@ func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync61(t *testing.T) { testBoundedHeavyForkedSync(t, 61, FullSync) }
func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
@@ -1039,7 +937,6 @@ func TestInactiveDownloader63(t *testing.T) {
}
// Tests that a canceled download wipes all previously accumulated state.
-func TestCancel61(t *testing.T) { testCancel(t, 61, FullSync) }
func TestCancel62(t *testing.T) { testCancel(t, 62, FullSync) }
func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) }
func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) }
@@ -1081,7 +978,6 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
}
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation61(t *testing.T) { testMultiSynchronisation(t, 61, FullSync) }
func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62, FullSync) }
func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) }
func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) }
@@ -1112,7 +1008,6 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation61(t *testing.T) { testMultiProtoSync(t, 61, FullSync) }
func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) }
func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) }
@@ -1131,7 +1026,6 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
tester := newTester()
defer tester.terminate()
- tester.newPeer("peer 61", 61, hashes, nil, blocks, nil)
tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
@@ -1143,7 +1037,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
assertOwnChain(t, tester, targetBlocks+1)
// Check that no peers have been dropped off
- for _, version := range []int{61, 62, 63, 64} {
+ for _, version := range []int{62, 63, 64} {
peer := fmt.Sprintf("peer %d", version)
if _, ok := tester.peerHashes[peer]; !ok {
t.Errorf("%s dropped", peer)
@@ -1368,7 +1262,6 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
-func TestHighTDStarvationAttack61(t *testing.T) { testHighTDStarvationAttack(t, 61, FullSync) }
func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62, FullSync) }
func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) }
func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) }
@@ -1391,7 +1284,6 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
}
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping61(t *testing.T) { testBlockHeaderAttackerDropping(t, 61) }
func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
@@ -1409,7 +1301,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
{errStallingPeer, true}, // Peer was detected to be stalling, drop it
{errNoPeers, false}, // No peers to download from, soft race, no issue
{errTimeout, true}, // No hashes received in due time, drop the peer
- {errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
{errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
{errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
@@ -1417,7 +1308,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
{errInvalidBlock, false}, // A bad peer was detected, but not the sync origin
{errInvalidBody, false}, // A bad peer was detected, but not the sync origin
{errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
- {errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
{errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
@@ -1450,7 +1340,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
-func TestSyncProgress61(t *testing.T) { testSyncProgress(t, 61, FullSync) }
func TestSyncProgress62(t *testing.T) { testSyncProgress(t, 62, FullSync) }
func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) }
func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) }
@@ -1524,7 +1413,6 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversal).
-func TestForkedSyncProgress61(t *testing.T) { testForkedSyncProgress(t, 61, FullSync) }
func TestForkedSyncProgress62(t *testing.T) { testForkedSyncProgress(t, 62, FullSync) }
func TestForkedSyncProgress63Full(t *testing.T) { testForkedSyncProgress(t, 63, FullSync) }
func TestForkedSyncProgress63Fast(t *testing.T) { testForkedSyncProgress(t, 63, FastSync) }
@@ -1601,7 +1489,6 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress61(t *testing.T) { testFailedSyncProgress(t, 61, FullSync) }
func TestFailedSyncProgress62(t *testing.T) { testFailedSyncProgress(t, 62, FullSync) }
func TestFailedSyncProgress63Full(t *testing.T) { testFailedSyncProgress(t, 63, FullSync) }
func TestFailedSyncProgress63Fast(t *testing.T) { testFailedSyncProgress(t, 63, FastSync) }
@@ -1679,7 +1566,6 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress61(t *testing.T) { testFakedSyncProgress(t, 61, FullSync) }
func TestFakedSyncProgress62(t *testing.T) { testFakedSyncProgress(t, 62, FullSync) }
func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) }
func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) }
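With the three eth/61 callbacks deleted, every RegisterPeer call in the tests (and in the eth protocol handler) shrinks from eight retrieval callbacks to five, and the placeholder nils disappear. A sketch of the new call shape for an eth/63 test peer, using the tester helpers named in the hunks above:

// Registering an eth/63 peer after this change: headers (relative and
// absolute), bodies, receipts and node data; no hash/block fetchers.
err := dl.downloader.RegisterPeer(id, 63, hashes[0],
	dl.peerGetRelHeadersFn(id, delay), // GetBlockHeaders from an origin hash
	dl.peerGetAbsHeadersFn(id, delay), // GetBlockHeaders from a block number
	dl.peerGetBodiesFn(id, delay),     // GetBlockBodies
	dl.peerGetReceiptsFn(id, delay),   // GetReceipts (eth/63+)
	dl.peerGetNodeDataFn(id, delay),   // GetNodeData (eth/63+)
)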
diff --git a/eth/downloader/metrics.go b/eth/downloader/metrics.go
index d6fcfa25c..0d76c7dfd 100644
--- a/eth/downloader/metrics.go
+++ b/eth/downloader/metrics.go
@@ -23,16 +23,6 @@ import (
)
var (
- hashInMeter = metrics.NewMeter("eth/downloader/hashes/in")
- hashReqTimer = metrics.NewTimer("eth/downloader/hashes/req")
- hashDropMeter = metrics.NewMeter("eth/downloader/hashes/drop")
- hashTimeoutMeter = metrics.NewMeter("eth/downloader/hashes/timeout")
-
- blockInMeter = metrics.NewMeter("eth/downloader/blocks/in")
- blockReqTimer = metrics.NewTimer("eth/downloader/blocks/req")
- blockDropMeter = metrics.NewMeter("eth/downloader/blocks/drop")
- blockTimeoutMeter = metrics.NewMeter("eth/downloader/blocks/timeout")
-
headerInMeter = metrics.NewMeter("eth/downloader/headers/in")
headerReqTimer = metrics.NewTimer("eth/downloader/headers/req")
headerDropMeter = metrics.NewMeter("eth/downloader/headers/drop")
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 94d44fca4..c2b7a52d0 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -37,11 +37,6 @@ const (
measurementImpact = 0.1 // The impact a single measurement has on a peer's final throughput value.
)
-// Hash and block fetchers belonging to eth/61 and below
-type relativeHashFetcherFn func(common.Hash) error
-type absoluteHashFetcherFn func(uint64, int) error
-type blockFetcherFn func([]common.Hash) error
-
// Block header and body fetchers belonging to eth/62 and above
type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error
type absoluteHeaderFetcherFn func(uint64, int, int, bool) error
@@ -79,10 +74,6 @@ type peer struct {
lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)
- getRelHashes relativeHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an origin hash
- getAbsHashes absoluteHashFetcherFn // [eth/61] Method to retrieve a batch of hashes from an absolute position
- getBlocks blockFetcherFn // [eth/61] Method to retrieve a batch of blocks
-
getRelHeaders relativeHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an origin hash
getAbsHeaders absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position
getBlockBodies blockBodyFetcherFn // [eth/62] Method to retrieve a batch of block bodies
@@ -97,7 +88,6 @@ type peer struct {
// newPeer creates a new downloader peer, with specific hash and block retrieval
// mechanisms.
func newPeer(id string, version int, head common.Hash,
- getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
getReceipts receiptFetcherFn, getNodeData stateFetcherFn) *peer {
return &peer{
@@ -105,10 +95,6 @@ func newPeer(id string, version int, head common.Hash,
head: head,
lacking: make(map[common.Hash]struct{}),
- getRelHashes: getRelHashes,
- getAbsHashes: getAbsHashes,
- getBlocks: getBlocks,
-
getRelHeaders: getRelHeaders,
getAbsHeaders: getAbsHeaders,
getBlockBodies: getBlockBodies,
@@ -138,28 +124,6 @@ func (p *peer) Reset() {
p.lacking = make(map[common.Hash]struct{})
}
-// Fetch61 sends a block retrieval request to the remote peer.
-func (p *peer) Fetch61(request *fetchRequest) error {
- // Sanity check the protocol version
- if p.version != 61 {
- panic(fmt.Sprintf("block fetch [eth/61] requested on eth/%d", p.version))
- }
- // Short circuit if the peer is already fetching
- if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
- return errAlreadyFetching
- }
- p.blockStarted = time.Now()
-
- // Convert the hash set to a retrievable slice
- hashes := make([]common.Hash, 0, len(request.Hashes))
- for hash, _ := range request.Hashes {
- hashes = append(hashes, hash)
- }
- go p.getBlocks(hashes)
-
- return nil
-}
-
// FetchHeaders sends a header retrieval request to the remote peer.
func (p *peer) FetchHeaders(from uint64, count int) error {
// Sanity check the protocol version
@@ -481,20 +445,6 @@ func (ps *peerSet) AllPeers() []*peer {
return list
}
-// BlockIdlePeers retrieves a flat list of all the currently idle peers within the
-// active peer set, ordered by their reputation.
-func (ps *peerSet) BlockIdlePeers() ([]*peer, int) {
- idle := func(p *peer) bool {
- return atomic.LoadInt32(&p.blockIdle) == 0
- }
- throughput := func(p *peer) float64 {
- p.lock.RLock()
- defer p.lock.RUnlock()
- return p.blockThroughput
- }
- return ps.idlePeers(61, 61, idle, throughput)
-}
-
// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
// within the active peer set, ordered by their reputation.
func (ps *peerSet) HeaderIdlePeers() ([]*peer, int) {
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 01897af6d..fd239f7e4 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -45,7 +45,6 @@ var (
var (
errNoFetchesPending = errors.New("no fetches pending")
- errStateSyncPending = errors.New("state trie sync already scheduled")
errStaleDelivery = errors.New("stale delivery")
)
@@ -74,10 +73,6 @@ type queue struct {
mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching
fastSyncPivot uint64 // Block number where the fast sync pivots into archive synchronisation mode
- hashPool map[common.Hash]int // [eth/61] Pending hashes, mapping to their insertion index (priority)
- hashQueue *prque.Prque // [eth/61] Priority queue of the block hashes to fetch
- hashCounter int // [eth/61] Counter indexing the added hashes to ensure retrieval order
-
headerHead common.Hash // [eth/62] Hash of the last queued header to verify order
// Headers are "special", they download in batches, supported by a skeleton chain
@@ -85,7 +80,6 @@ type queue struct {
headerTaskQueue *prque.Prque // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
headerPeerMiss map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
headerPendPool map[string]*fetchRequest // [eth/62] Currently pending header retrieval operations
- headerDonePool map[uint64]struct{} // [eth/62] Set of the completed header fetches
headerResults []*types.Header // [eth/62] Result cache accumulating the completed headers
headerProced int // [eth/62] Number of headers already processed from the results
headerOffset uint64 // [eth/62] Number of the first header in the result cache
@@ -124,8 +118,6 @@ type queue struct {
func newQueue(stateDb ethdb.Database) *queue {
lock := new(sync.Mutex)
return &queue{
- hashPool: make(map[common.Hash]int),
- hashQueue: prque.New(),
headerPendPool: make(map[string]*fetchRequest),
headerContCh: make(chan bool),
blockTaskPool: make(map[common.Hash]*types.Header),
@@ -158,10 +150,6 @@ func (q *queue) Reset() {
q.mode = FullSync
q.fastSyncPivot = 0
- q.hashPool = make(map[common.Hash]int)
- q.hashQueue.Reset()
- q.hashCounter = 0
-
q.headerHead = common.Hash{}
q.headerPendPool = make(map[string]*fetchRequest)
@@ -208,7 +196,7 @@ func (q *queue) PendingBlocks() int {
q.lock.Lock()
defer q.lock.Unlock()
- return q.hashQueue.Size() + q.blockTaskQueue.Size()
+ return q.blockTaskQueue.Size()
}
// PendingReceipts retrieves the number of block receipts pending for retrieval.
@@ -272,7 +260,7 @@ func (q *queue) Idle() bool {
q.lock.Lock()
defer q.lock.Unlock()
- queued := q.hashQueue.Size() + q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size()
+ queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size()
pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool)
cached := len(q.blockDonePool) + len(q.receiptDonePool)
@@ -323,34 +311,6 @@ func (q *queue) ShouldThrottleReceipts() bool {
return pending >= len(q.resultCache)-len(q.receiptDonePool)
}
-// Schedule61 adds a set of hashes for the download queue for scheduling, returning
-// the new hashes encountered.
-func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash {
- q.lock.Lock()
- defer q.lock.Unlock()
-
- // Insert all the hashes prioritised in the arrival order
- inserts := make([]common.Hash, 0, len(hashes))
- for _, hash := range hashes {
- // Skip anything we already have
- if old, ok := q.hashPool[hash]; ok {
- glog.V(logger.Warn).Infof("Hash %x already scheduled at index %v", hash, old)
- continue
- }
- // Update the counters and insert the hash
- q.hashCounter = q.hashCounter + 1
- inserts = append(inserts, hash)
-
- q.hashPool[hash] = q.hashCounter
- if fifo {
- q.hashQueue.Push(hash, -float32(q.hashCounter)) // Lowest gets schedules first
- } else {
- q.hashQueue.Push(hash, float32(q.hashCounter)) // Highest gets schedules first
- }
- }
- return inserts
-}
-
// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
@@ -550,15 +510,6 @@ func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
return request
}
-// ReserveBlocks reserves a set of block hashes for the given peer, skipping any
-// previously failed download.
-func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest {
- q.lock.Lock()
- defer q.lock.Unlock()
-
- return q.reserveHashes(p, count, q.hashQueue, nil, q.blockPendPool, len(q.resultCache)-len(q.blockDonePool))
-}
-
// ReserveNodeData reserves a set of node data hashes for the given peer, skipping
// any previously failed download.
func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest {
@@ -753,11 +704,6 @@ func (q *queue) CancelHeaders(request *fetchRequest) {
q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}
-// CancelBlocks aborts a fetch request, returning all pending hashes to the queue.
-func (q *queue) CancelBlocks(request *fetchRequest) {
- q.cancel(request, q.hashQueue, q.blockPendPool)
-}
-
// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
@@ -801,9 +747,6 @@ func (q *queue) Revoke(peerId string) {
defer q.lock.Unlock()
if request, ok := q.blockPendPool[peerId]; ok {
- for hash, index := range request.Hashes {
- q.hashQueue.Push(hash, float32(index))
- }
for _, header := range request.Headers {
q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
}
@@ -832,15 +775,6 @@ func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}
-// ExpireBlocks checks for in flight requests that exceeded a timeout allowance,
-// canceling them and returning the responsible peers for penalisation.
-func (q *queue) ExpireBlocks(timeout time.Duration) map[string]int {
- q.lock.Lock()
- defer q.lock.Unlock()
-
- return q.expire(timeout, q.blockPendPool, q.hashQueue, blockTimeoutMeter)
-}
-
// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
@@ -907,74 +841,6 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
return expiries
}
-// DeliverBlocks injects a block retrieval response into the download queue. The
-// method returns the number of blocks accepted from the delivery and also wakes
-// any threads waiting for data delivery.
-func (q *queue) DeliverBlocks(id string, blocks []*types.Block) (int, error) {
- q.lock.Lock()
- defer q.lock.Unlock()
-
- // Short circuit if the blocks were never requested
- request := q.blockPendPool[id]
- if request == nil {
- return 0, errNoFetchesPending
- }
- blockReqTimer.UpdateSince(request.Time)
- delete(q.blockPendPool, id)
-
- // If no blocks were retrieved, mark them as unavailable for the origin peer
- if len(blocks) == 0 {
- for hash, _ := range request.Hashes {
- request.Peer.MarkLacking(hash)
- }
- }
- // Iterate over the downloaded blocks and add each of them
- accepted, errs := 0, make([]error, 0)
- for _, block := range blocks {
- // Skip any blocks that were not requested
- hash := block.Hash()
- if _, ok := request.Hashes[hash]; !ok {
- errs = append(errs, fmt.Errorf("non-requested block %x", hash))
- continue
- }
- // Reconstruct the next result if contents match up
- index := int(block.Number().Int64() - int64(q.resultOffset))
- if index >= len(q.resultCache) || index < 0 {
- errs = []error{errInvalidChain}
- break
- }
- q.resultCache[index] = &fetchResult{
- Header: block.Header(),
- Transactions: block.Transactions(),
- Uncles: block.Uncles(),
- }
- q.blockDonePool[block.Hash()] = struct{}{}
-
- delete(request.Hashes, hash)
- delete(q.hashPool, hash)
- accepted++
- }
- // Return all failed or missing fetches to the queue
- for hash, index := range request.Hashes {
- q.hashQueue.Push(hash, float32(index))
- }
- // Wake up WaitResults
- if accepted > 0 {
- q.active.Signal()
- }
- // If none of the blocks were good, it's a stale delivery
- switch {
- case len(errs) == 0:
- return accepted, nil
- case len(errs) == 1 && (errs[0] == errInvalidChain || errs[0] == errInvalidBlock):
- return accepted, errs[0]
- case len(errs) == len(blocks):
- return accepted, errStaleDelivery
- default:
- return accepted, fmt.Errorf("multiple failures: %v", errs)
- }
-}
-
// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
diff --git a/eth/downloader/types.go b/eth/downloader/types.go
index b67fff1f8..e10510486 100644
--- a/eth/downloader/types.go
+++ b/eth/downloader/types.go
@@ -73,26 +73,6 @@ type dataPack interface {
Stats() string
}
-// hashPack is a batch of block hashes returned by a peer (eth/61).
-type hashPack struct {
- peerId string
- hashes []common.Hash
-}
-
-func (p *hashPack) PeerId() string { return p.peerId }
-func (p *hashPack) Items() int { return len(p.hashes) }
-func (p *hashPack) Stats() string { return fmt.Sprintf("%d", len(p.hashes)) }
-
-// blockPack is a batch of blocks returned by a peer (eth/61).
-type blockPack struct {
- peerId string
- blocks []*types.Block
-}
-
-func (p *blockPack) PeerId() string { return p.peerId }
-func (p *blockPack) Items() int { return len(p.blocks) }
-func (p *blockPack) Stats() string { return fmt.Sprintf("%d", len(p.blocks)) }
-
// headerPack is a batch of block headers returned by a peer.
type headerPack struct {
peerId string