author    Péter Szilágyi <peterke@gmail.com>  2016-10-31 19:55:12 +0800
committer Péter Szilágyi <peterke@gmail.com>  2016-10-31 20:19:14 +0800
commit    90b16a3e85d82241f4505a7db0f95c2f312642a6 (patch)
tree      9b8e11b1127bf96e19599a1f610684bb9d67d06f /eth/downloader/downloader.go
parent    b8dec948d4d7d8257e63aeecf982c25aeec9a180 (diff)
core/state, eth/downloader, trie: reset fast-failure on progress
Diffstat (limited to 'eth/downloader/downloader.go')
 eth/downloader/downloader.go | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)
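
The change makes the fast-sync failure tracking forgiving: fsPivotFails becomes a uint32 that is only touched through sync/atomic, the retry budget fsCriticalTrials grows from 10 to 32, and any node-data delivery that makes real database progress pulls the counter back down to 1. A node whose state download keeps advancing, however slowly, is therefore no longer pushed from fast sync into full sync merely for accumulating pivot retries.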
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 987be2b7a..1cf69ea71 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -64,12 +64,12 @@ var (
maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
maxResultsProcess = 2048 // Number of content download results to import at once into the chain
- fsHeaderCheckFrequency = 100  // Verification frequency of the downloaded headers during fast sync
- fsHeaderSafetyNet      = 2048 // Number of headers to discard in case a chain violation is detected
- fsHeaderForceVerify    = 24   // Number of headers to verify before and after the pivot to accept it
- fsPivotInterval        = 512  // Number of headers out of which to randomize the pivot point
- fsMinFullBlocks        = 1024 // Number of blocks to retrieve fully even in fast sync
- fsCriticalTrials       = 10   // Number of times to retry in the critical section before bailing
+ fsHeaderCheckFrequency = 100        // Verification frequency of the downloaded headers during fast sync
+ fsHeaderSafetyNet      = 2048       // Number of headers to discard in case a chain violation is detected
+ fsHeaderForceVerify    = 24         // Number of headers to verify before and after the pivot to accept it
+ fsPivotInterval        = 512        // Number of headers out of which to randomize the pivot point
+ fsMinFullBlocks        = 1024       // Number of blocks to retrieve fully even in fast sync
+ fsCriticalTrials       = uint32(32) // Number of times to retry in the critical section before bailing
)
var (
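
The retry budget is raised from 10 to 32 and given an explicit uint32 type so it matches the failure counter it is compared against; the neighbouring constants only appear in the hunk because gofmt realigns their comments around the longer value.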
@@ -105,7 +105,7 @@ type Downloader struct {
peers *peerSet // Set of active peers from which download can proceed
fsPivotLock *types.Header // Pivot header on critical section entry (cannot change between retries)
- fsPivotFails int // Number of fast sync failures in the critical section
+ fsPivotFails uint32 // Number of subsequent fast sync failures in the critical section
rttEstimate uint64 // Round trip time to target for download requests
rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)
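
fsPivotFails widens from int to uint32 so it can be read and updated through sync/atomic from the goroutines that touch it; the downgrade check that consumes it is sketched after the next hunk.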
@@ -361,7 +361,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
// Set the requested sync mode, unless it's forbidden
d.mode = mode
- if d.mode == FastSync && d.fsPivotFails >= fsCriticalTrials {
+ if d.mode == FastSync && atomic.LoadUint32(&d.fsPivotFails) >= fsCriticalTrials {
d.mode = FullSync
}
// Retrieve the origin peer and initiate the downloading process
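
Below is a minimal, runnable sketch of the downgrade check under stated assumptions: SyncMode, dl and setMode are simplified stand-ins for the real Downloader types, and only fsPivotFails and fsCriticalTrials mirror names from the diff.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // SyncMode is a simplified stand-in for the downloader's sync mode enum.
    type SyncMode int

    const (
        FullSync SyncMode = iota // sync by executing every block
        FastSync                 // sync by downloading state at a pivot block
    )

    // fsCriticalTrials mirrors the new typed constant from the diff.
    const fsCriticalTrials = uint32(32)

    // dl is a hypothetical, trimmed-down Downloader holding only what the check needs.
    type dl struct {
        fsPivotFails uint32   // consecutive critical-section failures, atomic access only
        mode         SyncMode // effective sync mode for the current run
    }

    // setMode applies the requested mode, downgrading FastSync to FullSync once
    // the critical section has failed fsCriticalTrials times in a row.
    func (d *dl) setMode(requested SyncMode) {
        d.mode = requested
        if d.mode == FastSync && atomic.LoadUint32(&d.fsPivotFails) >= fsCriticalTrials {
            d.mode = FullSync
        }
    }

    func main() {
        d := &dl{}
        atomic.StoreUint32(&d.fsPivotFails, fsCriticalTrials)
        d.setMode(FastSync)
        fmt.Println(d.mode == FullSync) // true: too many pivot failures force a full sync
    }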
@@ -926,7 +926,7 @@ func (d *Downloader) fetchNodeData() error {
var (
deliver = func(packet dataPack) (int, error) {
start := time.Now()
- return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(err error, delivered int) {
+ return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(delivered int, progressed bool, err error) {
// If the peer returned old-requested data, forgive
if err == trie.ErrNotRequested {
glog.V(logger.Info).Infof("peer %s: replied to stale state request, forgiving", packet.PeerId())
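
The callback's shape is the interesting part of this hunk: it now reports, besides the entry count and error, whether the delivery actually progressed the database. A minimal sketch of that shape follows; deliverFn, deliverNodeData and errNotRequested are illustrative stand-ins (errNotRequested plays the role of trie.ErrNotRequested), not the real queue implementation.

    package main

    import (
        "errors"
        "fmt"
    )

    // errNotRequested stands in for trie.ErrNotRequested from the real code.
    var errNotRequested = errors.New("not requested")

    // deliverFn mirrors the reworked callback signature: entries delivered,
    // whether real database progress was made, and any error.
    type deliverFn func(delivered int, progressed bool, err error)

    // deliverNodeData is a toy queue delivery: it "commits" the states and
    // reports progress whenever at least one entry was written.
    func deliverNodeData(states [][]byte, cb deliverFn) {
        cb(len(states), len(states) > 0, nil)
    }

    func main() {
        deliverNodeData([][]byte{{0x01}, {0x02}}, func(delivered int, progressed bool, err error) {
            if errors.Is(err, errNotRequested) {
                fmt.Println("stale state request, forgiving")
                return
            }
            fmt.Printf("delivered=%d progressed=%v err=%v\n", delivered, progressed, err)
        })
    }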
@@ -951,6 +951,11 @@ func (d *Downloader) fetchNodeData() error {
syncStatsStateDone := d.syncStatsStateDone // Thread safe copy for the log below
d.syncStatsLock.Unlock()
+ // If real database progress was made, reset any fast-sync pivot failure
+ if progressed && atomic.LoadUint32(&d.fsPivotFails) > 1 {
+ glog.V(logger.Debug).Infof("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails))
+ atomic.StoreUint32(&d.fsPivotFails, 1) // Don't ever reset to 0, as that will unlock the pivot block
+ }
// Log a message to the user and return
if delivered > 0 {
glog.V(logger.Info).Infof("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending)
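
The reset is the heart of the commit: real database progress forgives earlier pivot failures, but the counter is pulled back to 1 rather than 0, because 0 means "never failed" and would allow a later failure to lock the pivot to a different header. A small sketch of just that rule, with resetOnProgress as an illustrative helper:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // resetOnProgress forgives accumulated fast-sync failures once real progress
    // is made, but never drops the counter to 0, which would unlock the pivot.
    func resetOnProgress(fails *uint32, progressed bool) {
        if progressed && atomic.LoadUint32(fails) > 1 {
            atomic.StoreUint32(fails, 1)
        }
    }

    func main() {
        fails := uint32(7)
        resetOnProgress(&fails, true)
        fmt.Println(fails) // 1: failures forgiven, pivot lock retained
    }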
@@ -1177,7 +1182,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
// If we're already past the pivot point, this could be an attack, tread carefully
if rollback[len(rollback)-1].Number.Uint64() > pivot {
// If we didn't ever fail, lock in the pivot header (must! not! change!)
- if d.fsPivotFails == 0 {
+ if atomic.LoadUint32(&d.fsPivotFails) == 0 {
for _, header := range rollback {
if header.Number.Uint64() == pivot {
glog.V(logger.Warn).Infof("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4])
@@ -1185,7 +1190,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
}
}
}
- d.fsPivotFails++
+ atomic.AddUint32(&d.fsPivotFails, 1)
}
}
}()
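
For completeness, a sketch of the failure side that the reset above interacts with: the first time header processing has to roll back past the pivot, the pivot header is locked in (counter still 0), and every such failure then bumps the atomic counter read by the downgrade check. header, dl and pivotFailed are simplified stand-ins, not the real processHeaders logic.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // header is a hypothetical, trimmed-down block header.
    type header struct {
        number uint64
        hash   [4]byte
    }

    // dl keeps only the pivot-related downloader state used here.
    type dl struct {
        fsPivotLock  *header // pivot header locked on the first critical-section failure
        fsPivotFails uint32  // bumped on every failure, atomic access only
    }

    // pivotFailed handles a rollback that reaches past the pivot: lock the pivot
    // on the first failure, then count the failure atomically.
    func (d *dl) pivotFailed(rollback []*header, pivot uint64) {
        if len(rollback) == 0 || rollback[len(rollback)-1].number <= pivot {
            return
        }
        if atomic.LoadUint32(&d.fsPivotFails) == 0 {
            for _, h := range rollback {
                if h.number == pivot {
                    fmt.Printf("locking pivot to header #%d [%x…]\n", pivot, h.hash)
                    d.fsPivotLock = h
                }
            }
        }
        atomic.AddUint32(&d.fsPivotFails, 1)
    }

    func main() {
        d := &dl{}
        d.pivotFailed([]*header{{number: 512}, {number: 600}}, 512)
        fmt.Println(d.fsPivotLock.number, d.fsPivotFails) // 512 1
    }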