path: root/eth/downloader/downloader.go
author     Péter Szilágyi <peterke@gmail.com>   2019-05-15 19:33:33 +0800
committer  Péter Szilágyi <peterke@gmail.com>   2019-05-16 15:39:35 +0800
commit     536b3b416c6ff53ea11a0d29dcc351a6d7919901 (patch)
tree       84f0a2c60601ce9248b1be19de86f82202b3c06a /eth/downloader/downloader.go
parent     37d280da411eb649ce22ab69827ac5aacd46534b (diff)
consensus, core, eth, params, trie: fixes + clique history cap
Diffstat (limited to 'eth/downloader/downloader.go')
-rw-r--r--  eth/downloader/downloader.go  |  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 5c350debe..495fa0e74 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -25,7 +25,7 @@ import (
"sync/atomic"
"time"
- ethereum "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
@@ -46,20 +46,20 @@ var (
MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
MaxStateFetch = 384 // Amount of node state values to allow fetching per request
- MaxForkAncestry = 3 * params.EpochDuration // Maximum chain reorganisation
- rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
- rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
- rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value
- ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
- ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
+ rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
+ rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
+ rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value
+ ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
+ ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
qosTuningPeers = 5 // Number of peers to tune based on (best peers)
qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence
qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value
- maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
- maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
- maxResultsProcess = 2048 // Number of content download results to import at once into the chain
+ maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
+ maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
+ maxResultsProcess = 2048 // Number of content download results to import at once into the chain
+ maxForkAncestry uint64 = params.ImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
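
The hunk above replaces the exported MaxForkAncestry constant with an unexported package-level variable, maxForkAncestry, initialised from params.ImmutabilityThreshold so that tests can temporarily lower it. A minimal sketch of that pattern follows; only the maxForkAncestry declaration and params.ImmutabilityThreshold come from the diff, and the test override shown in the comment is a hypothetical illustration.

// Sketch of the pattern introduced above: a package-level knob that tests can shrink.
package downloader

import "github.com/ethereum/go-ethereum/params"

// maxForkAncestry bounds how deep a chain reorganisation may reach. It is a
// variable rather than a constant so test code in the same package can assign
// a smaller value.
var maxForkAncestry uint64 = params.ImmutabilityThreshold

// Hypothetical override from a _test.go file in the same package:
//
//	func init() {
//		maxForkAncestry = 10000 // keep test chains short
//	}
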
@@ -439,7 +439,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
defer func(start time.Time) {
- log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
+ log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
}(time.Now())
// Look up the sync boundaries: the common ancestor and the target block
@@ -491,10 +491,10 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
// The peer would start to feed us valid blocks until head, resulting in all of
// the blocks being written into the ancient store. A following mini-reorg
// could cause issues.
- if d.checkpoint != 0 && d.checkpoint > MaxForkAncestry+1 {
+ if d.checkpoint != 0 && d.checkpoint > maxForkAncestry+1 {
d.ancientLimit = d.checkpoint
- } else if height > MaxForkAncestry+1 {
- d.ancientLimit = height - MaxForkAncestry - 1
+ } else if height > maxForkAncestry+1 {
+ d.ancientLimit = height - maxForkAncestry - 1
}
frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
// If a part of blockchain data has already been written into active store,
@@ -725,9 +725,9 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header)
p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
// Recap floor value for binary search
- if localHeight >= MaxForkAncestry {
+ if localHeight >= maxForkAncestry {
// We're above the max reorg threshold, find the earliest fork point
- floor = int64(localHeight - MaxForkAncestry)
+ floor = int64(localHeight - maxForkAncestry)
}
// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
// all headers before that point will be missing.
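
The ancestor-lookup change above keeps the localHeight >= maxForkAncestry check in front of the subtraction. Since both values are uint64, subtracting first would wrap around rather than go negative. Below is a small, self-contained sketch of that guarded floor computation; the helper name and the -1 "no floor" convention are my own illustration, not code from this commit.

// Sketch (not part of the diff): why the guard precedes the subtraction.
// localHeight and maxForkAncestry are uint64, so an unguarded subtraction
// would underflow and wrap to a huge number instead of going negative.
package main

import "fmt"

// ancestorFloor mirrors the guarded computation above (hypothetical helper).
// A return value of -1 means "no floor": the whole local chain may be searched.
func ancestorFloor(localHeight, maxForkAncestry uint64) int64 {
	floor := int64(-1)
	if localHeight >= maxForkAncestry {
		// Safe: the result cannot underflow here.
		floor = int64(localHeight - maxForkAncestry)
	}
	return floor
}

func main() {
	fmt.Println(ancestorFloor(100000, 90000)) // 10000
	fmt.Println(ancestorFloor(50000, 90000))  // -1: below the reorg threshold
}
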