Diffstat (limited to 'dex/downloader/downloader.go')
-rw-r--r--  dex/downloader/downloader.go  178
1 file changed, 135 insertions(+), 43 deletions(-)
diff --git a/dex/downloader/downloader.go b/dex/downloader/downloader.go
index e3960ea30..9d609584b 100644
--- a/dex/downloader/downloader.go
+++ b/dex/downloader/downloader.go
@@ -103,6 +103,7 @@ type Downloader struct {
mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle)
mux *event.TypeMux // Event multiplexer to announce sync operation events
+ genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT)
queue *queue // Scheduler for selecting the hashes to download
peers *peerSet // Set of active peers from which download can proceed
stateDB ethdb.Database
@@ -192,6 +193,9 @@ type BlockChain interface {
// HasBlock verifies a block's presence in the local chain.
HasBlock(common.Hash, uint64) bool
+ // HasFastBlock verifies a fast block's presence in the local chain.
+ HasFastBlock(common.Hash, uint64) bool
+
// GetBlockByHash retrieves a block from the local chain.
GetBlockByHash(common.Hash) *types.Block
@@ -442,7 +446,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, number ui
}
height := latest.Number.Uint64()
- origin, err := d.findAncestor(p, height)
+ origin, err := d.findAncestor(p, latest)
if err != nil {
return err
}
@@ -691,41 +695,107 @@ func (d *Downloader) fetchGovState(p *peerConnection,
}
}
-// findAncestor tries to locate the common ancestor link of the local chain and
-// a remote peers blockchain. In the general case when our node was in sync and
-// on the correct chain, checking the top N links should already get us a match.
-// In the rare scenario when we ended up on a long reorganisation (i.e. none of
-// the head links match), we do a binary search to find the common ancestor.
-func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
- // Figure out the valid ancestor range to prevent rewrite attacks
- floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()
-
- if d.mode == FullSync {
- ceil = d.blockchain.CurrentBlock().NumberU64()
- } else if d.mode == FastSync {
- ceil = d.blockchain.CurrentFastBlock().NumberU64()
+// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
+// common ancestor.
+// It returns parameters to be used for peer.RequestHeadersByNumber:
+// from - starting block number
+// count - number of headers to request
+// skip - number of headers to skip
+// and also returns 'max', the last block which is expected to be returned by the remote peer,
+// given the (from,count,skip)
+func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
+ var (
+ from int
+ count int
+ MaxCount = MaxHeaderFetch / 16
+ )
+ // requestHead is the highest block that we will ask for. If requestHead is not offset,
+ // the highest block that we will get is 16 blocks back from head, which means we
+ // will fetch 14 or 15 blocks unnecessarily in the case the height difference
+ // between us and the peer is 1-2 blocks, which is most common
+ requestHead := int(remoteHeight) - 1
+ if requestHead < 0 {
+ requestHead = 0
+ }
+ // requestBottom is the lowest block we want included in the query
+ // Ideally, we want to include just below own head
+ requestBottom := int(localHeight - 1)
+ if requestBottom < 0 {
+ requestBottom = 0
}
- if ceil >= MaxForkAncestry {
- floor = int64(ceil - MaxForkAncestry)
+ totalSpan := requestHead - requestBottom
+ span := 1 + totalSpan/MaxCount
+ if span < 2 {
+ span = 2
+ }
+ if span > 16 {
+ span = 16
}
- p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
- // Request the topmost blocks to short circuit binary ancestor lookup
- head := ceil
- if head > height {
- head = height
+ count = 1 + totalSpan/span
+ if count > MaxCount {
+ count = MaxCount
+ }
+ if count < 2 {
+ count = 2
}
- from := int64(head) - int64(MaxHeaderFetch)
+ from = requestHead - (count-1)*span
if from < 0 {
from = 0
}
- // Span out with 15 block gaps into the future to catch bad head reports
- limit := 2 * MaxHeaderFetch / 16
- count := 1 + int((int64(ceil)-from)/16)
- if count > limit {
- count = limit
+ max := from + (count-1)*span
+ return int64(from), count, span - 1, uint64(max)
+}
+
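A minimal sketch (not part of the diff) of how calculateRequestSpan behaves in two common situations, written as a table-driven test that would sit in the downloader package, assuming MaxHeaderFetch keeps its upstream value of 192 so that MaxCount is 12 and the skip distance caps at 15:

func TestCalculateRequestSpanSketch(t *testing.T) {
	// Hypothetical test; assumes the standard "testing" import and package downloader.
	cases := []struct {
		remote, local uint64
		from          int64
		count, skip   int
		max           uint64
	}{
		// Peer is only one block ahead: ask for two headers spaced two apart, just below our own head.
		{remote: 101, local: 100, from: 98, count: 2, skip: 1, max: 100},
		// Peer is far ahead: 12 headers spaced 16 apart, ending one block below the remote head.
		{remote: 1500, local: 1000, from: 1323, count: 12, skip: 15, max: 1499},
	}
	for _, c := range cases {
		from, count, skip, max := calculateRequestSpan(c.remote, c.local)
		if from != c.from || count != c.count || skip != c.skip || max != c.max {
			t.Errorf("calculateRequestSpan(%d, %d) = (%d, %d, %d, %d), want (%d, %d, %d, %d)",
				c.remote, c.local, from, count, skip, max, c.from, c.count, c.skip, c.max)
		}
	}
}

Deriving the step from the height gap keeps the request small when the node is nearly in sync, instead of the fixed 16-block spacing used before.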
+// findAncestor tries to locate the common ancestor link of the local chain and
+// a remote peer's blockchain. In the general case when our node was in sync and
+// on the correct chain, checking the top N links should already get us a match.
+// In the rare scenario when we ended up on a long reorganisation (i.e. none of
+// the head links match), we do a binary search to find the common ancestor.
+func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
+ // Figure out the valid ancestor range to prevent rewrite attacks
+ var (
+ floor = int64(-1)
+ localHeight uint64
+ remoteHeight = remoteHeader.Number.Uint64()
+ )
+ switch d.mode {
+ case FullSync:
+ localHeight = d.blockchain.CurrentBlock().NumberU64()
+ case FastSync:
+ localHeight = d.blockchain.CurrentFastBlock().NumberU64()
+ default:
+ localHeight = d.lightchain.CurrentHeader().Number.Uint64()
+ }
+ p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
+ if localHeight >= MaxForkAncestry {
+ // We're above the max reorg threshold, find the earliest fork point
+ floor = int64(localHeight - MaxForkAncestry)
+
+ // If we're doing a light sync, ensure the floor doesn't go below the CHT, as
+ // all headers before that point will be missing.
+ if d.mode == LightSync {
+ // If we don't know the current CHT position, find it
+ if d.genesis == 0 {
+ header := d.lightchain.CurrentHeader()
+ for header != nil {
+ d.genesis = header.Number.Uint64()
+ if floor >= int64(d.genesis)-1 {
+ break
+ }
+ header = d.lightchain.GetHeaderByHash(header.ParentHash)
+ }
+ }
+ // We already know the "genesis" block number, cap floor to that
+ if floor < int64(d.genesis)-1 {
+ floor = int64(d.genesis) - 1
+ }
+ }
}
- go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false, false)
+ from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
+
+ p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
+ go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false, false)
// Wait for the remote response to the head fetch
number, hash := uint64(0), common.Hash{}
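The floor/CHT capping a few lines above boils down to a small piece of arithmetic. A standalone sketch with hypothetical names (lowestLocalHeader stands in for the d.genesis value the parent-hash walk discovers; none of this is part of the change):

// ancestorFloor mirrors the floor computation in findAncestor.
func ancestorFloor(localHeight, maxForkAncestry, lowestLocalHeader uint64, lightSync bool) int64 {
	// By default there is no floor: any ancestor depth is acceptable.
	floor := int64(-1)
	if localHeight >= maxForkAncestry {
		// Refuse ancestors beyond the reorg threshold to block long-range rewrite attacks.
		floor = int64(localHeight - maxForkAncestry)
		// A light client has no headers below its CHT point, so never let the floor drop under it.
		if lightSync && floor < int64(lowestLocalHeader)-1 {
			floor = int64(lowestLocalHeader) - 1
		}
	}
	return floor
}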
@@ -751,9 +821,10 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
return 0, errEmptyHeaderSet
}
// Make sure the peer's reply conforms to the request
- for i := 0; i < len(headers); i++ {
- if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
- p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
+ for i, header := range headers {
+ expectNumber := from + int64(i)*int64(skip+1)
+ if number := header.Number.Int64(); number != expectNumber {
+ p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
return 0, errInvalidChain
}
}
@@ -761,20 +832,24 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
finished = true
for i := len(headers) - 1; i >= 0; i-- {
// Skip any headers that underflow/overflow our requested set
- if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
+ if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
continue
}
// Otherwise check if we already know the header or not
h := headers[i].Hash()
n := headers[i].Number.Uint64()
- if (d.mode == FullSync && d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && d.lightchain.HasHeader(h, n)) {
- number, hash = n, h
- // If every header is known, even future ones, the peer straight out lied about its head
- if number > height && i == limit-1 {
- p.log.Warn("Lied about chain head", "reported", height, "found", number)
- return 0, errStallingPeer
- }
+ var known bool
+ switch d.mode {
+ case FullSync:
+ known = d.blockchain.HasBlock(h, n)
+ case FastSync:
+ known = d.blockchain.HasFastBlock(h, n)
+ default:
+ known = d.lightchain.HasHeader(h, n)
+ }
+ if known {
+ number, hash = n, h
break
}
}
@@ -798,10 +873,11 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
return number, nil
}
// Ancestor not found, we need to binary search over our chain
- start, end := uint64(0), head
+ start, end := uint64(0), remoteHeight
if floor > 0 {
start = uint64(floor)
}
+ p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)
for start+1 < end {
// Split our chain interval in two, and request the hash to cross check
check := (start + end) / 2
@@ -834,7 +910,16 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
// Modify the search interval based on the response
h := headers[0].Hash()
n := headers[0].Number.Uint64()
- if (d.mode == FullSync && !d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && !d.lightchain.HasHeader(h, n)) {
+ var known bool
+ switch d.mode {
+ case FullSync:
+ known = d.blockchain.HasBlock(h, n)
+ case FastSync:
+ known = d.blockchain.HasFastBlock(h, n)
+ default:
+ known = d.lightchain.HasHeader(h, n)
+ }
+ if !known {
end = check
break
}
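The same mode switch appears in both the span search and the binary search; a sketch of hoisting it into a single helper on the Downloader (hypothetical name knownAncestor, not part of this change):

// knownAncestor reports whether the local node already has the given header,
// using the notion of "known" that matches the active sync mode.
func (d *Downloader) knownAncestor(h common.Hash, n uint64) bool {
	switch d.mode {
	case FullSync:
		return d.blockchain.HasBlock(h, n)
	case FastSync:
		return d.blockchain.HasFastBlock(h, n)
	default: // LightSync
		return d.lightchain.HasHeader(h, n)
	}
}

Both call sites would then shrink to a single if d.knownAncestor(h, n) check.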
@@ -1511,8 +1596,15 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
}
if index, err := d.blockchain.InsertDexonChain(blocks); err != nil {
- log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
- return errInvalidChain
+ if index < len(results) {
+ log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
+ } else {
+ // The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
+ // when it needs to preprocess blocks to import a sidechain.
+ // The importer will put together a new list of blocks to import, which is a superset
+ // of the blocks delivered from the downloader, and the indexing will be off.
+ log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
+ }
}
return nil
}
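A hedged sketch of the same bounds-guarded logging factored into a helper (hypothetical name, not part of this diff), useful if other InsertDexonChain call sites need the identical guard:

// logImportFailure logs an import failure, tolerating the out-of-bounds index that
// the chain importer can report after preprocessing a sidechain superset of the batch.
func logImportFailure(results []*fetchResult, index int, err error) {
	if index < len(results) {
		log.Debug("Downloaded item processing failed",
			"number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		return
	}
	// The index points into the importer's internal block list, so only the raw
	// index and the error are meaningful to report.
	log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
}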