author    | Sonic <sonic@dexon.org>             | 2019-03-27 20:02:55 +0800
committer | Wei-Ning Huang <w@byzantine-lab.io> | 2019-06-13 18:11:44 +0800
commit    | c52c9e04a916fac3550b0a3c3d8cdf979ab70bb8 (patch)
tree      | aa9e20f32fa084fd9c5e2fbfcee295d5d63b1d48 /dex/downloader
parent    | 7b8b4fcb0e8fd411bf523d06492e966e20e1b613 (diff)
backport from v1.8.23 (#304)
* dex: backport f6193ad
* dex/downloader: backport accc0fa accc0fab 174083c3
* dex: backport 434dd5b
* dex: backport 42a914a 0983d02
* dex: backport 48b70ec 31b3334 and some modifications
* dex/downloader: backport 5f251a6
* dex/downloader: backport 81c3dc7
* dex, dex/downloader: fix typos
Diffstat (limited to 'dex/downloader')
-rw-r--r-- | dex/downloader/downloader.go      | 178
-rw-r--r-- | dex/downloader/downloader_test.go | 103
-rw-r--r-- | dex/downloader/queue.go           |   2
-rw-r--r-- | dex/downloader/statesync.go       |  11
4 files changed, 237 insertions, 57 deletions
diff --git a/dex/downloader/downloader.go b/dex/downloader/downloader.go
index e3960ea30..9d609584b 100644
--- a/dex/downloader/downloader.go
+++ b/dex/downloader/downloader.go
@@ -103,6 +103,7 @@ type Downloader struct {
 	mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
 	mux  *event.TypeMux // Event multiplexer to announce sync operation events
 
+	genesis uint64   // Genesis block number to limit sync to (e.g. light client CHT)
 	queue   *queue   // Scheduler for selecting the hashes to download
 	peers   *peerSet // Set of active peers from which download can proceed
 	stateDB ethdb.Database
@@ -192,6 +193,9 @@ type BlockChain interface {
 	// HasBlock verifies a block's presence in the local chain.
 	HasBlock(common.Hash, uint64) bool
 
+	// HasFastBlock verifies a fast block's presence in the local chain.
+	HasFastBlock(common.Hash, uint64) bool
+
 	// GetBlockByHash retrieves a block from the local chain.
 	GetBlockByHash(common.Hash) *types.Block
 
@@ -442,7 +446,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, number ui
 	}
 	height := latest.Number.Uint64()
 
-	origin, err := d.findAncestor(p, height)
+	origin, err := d.findAncestor(p, latest)
 	if err != nil {
 		return err
 	}
@@ -691,41 +695,107 @@ func (d *Downloader) fetchGovState(p *peerConnection,
 	}
 }
 
-// findAncestor tries to locate the common ancestor link of the local chain and
-// a remote peers blockchain. In the general case when our node was in sync and
-// on the correct chain, checking the top N links should already get us a match.
-// In the rare scenario when we ended up on a long reorganisation (i.e. none of
-// the head links match), we do a binary search to find the common ancestor.
-func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
-	// Figure out the valid ancestor range to prevent rewrite attacks
-	floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()
-
-	if d.mode == FullSync {
-		ceil = d.blockchain.CurrentBlock().NumberU64()
-	} else if d.mode == FastSync {
-		ceil = d.blockchain.CurrentFastBlock().NumberU64()
+// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
+// common ancestor.
+// It returns parameters to be used for peer.RequestHeadersByNumber:
+//  from  - starting block number
+//  count - number of headers to request
+//  skip  - number of headers to skip
+// and also returns 'max', the last block which is expected to be returned by the remote peers,
+// given the (from, count, skip)
+func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
+	var (
+		from     int
+		count    int
+		MaxCount = MaxHeaderFetch / 16
+	)
+	// requestHead is the highest block that we will ask for. If requestHead is not offset,
+	// the highest block that we will get is 16 blocks back from head, which means we
+	// will fetch 14 or 15 blocks unnecessarily in the case the height difference
+	// between us and the peer is 1-2 blocks, which is most common
+	requestHead := int(remoteHeight) - 1
+	if requestHead < 0 {
+		requestHead = 0
+	}
+	// requestBottom is the lowest block we want included in the query
+	// Ideally, we want to include just below own head
+	requestBottom := int(localHeight - 1)
+	if requestBottom < 0 {
+		requestBottom = 0
 	}
-	if ceil >= MaxForkAncestry {
-		floor = int64(ceil - MaxForkAncestry)
+	totalSpan := requestHead - requestBottom
+	span := 1 + totalSpan/MaxCount
+	if span < 2 {
+		span = 2
+	}
+	if span > 16 {
+		span = 16
 	}
-	p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
-	// Request the topmost blocks to short circuit binary ancestor lookup
-	head := ceil
-	if head > height {
-		head = height
+	count = 1 + totalSpan/span
+	if count > MaxCount {
+		count = MaxCount
+	}
+	if count < 2 {
+		count = 2
 	}
-	from := int64(head) - int64(MaxHeaderFetch)
+	from = requestHead - (count-1)*span
 	if from < 0 {
 		from = 0
 	}
-	// Span out with 15 block gaps into the future to catch bad head reports
-	limit := 2 * MaxHeaderFetch / 16
-	count := 1 + int((int64(ceil)-from)/16)
-	if count > limit {
-		count = limit
+	max := from + (count-1)*span
+	return int64(from), count, span - 1, uint64(max)
+}
+
+// findAncestor tries to locate the common ancestor link of the local chain and
+// a remote peers blockchain. In the general case when our node was in sync and
+// on the correct chain, checking the top N links should already get us a match.
+// In the rare scenario when we ended up on a long reorganisation (i.e. none of
+// the head links match), we do a binary search to find the common ancestor.
+func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
+	// Figure out the valid ancestor range to prevent rewrite attacks
+	var (
+		floor        = int64(-1)
+		localHeight  uint64
+		remoteHeight = remoteHeader.Number.Uint64()
+	)
+	switch d.mode {
+	case FullSync:
+		localHeight = d.blockchain.CurrentBlock().NumberU64()
+	case FastSync:
+		localHeight = d.blockchain.CurrentFastBlock().NumberU64()
+	default:
+		localHeight = d.lightchain.CurrentHeader().Number.Uint64()
+	}
+	p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
+	if localHeight >= MaxForkAncestry {
+		// We're above the max reorg threshold, find the earliest fork point
+		floor = int64(localHeight - MaxForkAncestry)
+
+		// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
+		// all headers before that point will be missing.
+		if d.mode == LightSync {
+			// If we dont know the current CHT position, find it
+			if d.genesis == 0 {
+				header := d.lightchain.CurrentHeader()
+				for header != nil {
+					d.genesis = header.Number.Uint64()
+					if floor >= int64(d.genesis)-1 {
+						break
+					}
+					header = d.lightchain.GetHeaderByHash(header.ParentHash)
+				}
+			}
+			// We already know the "genesis" block number, cap floor to that
+			if floor < int64(d.genesis)-1 {
+				floor = int64(d.genesis) - 1
+			}
+		}
 	}
-	go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false, false)
+	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
+
+	p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
+	go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false, false)
 
 	// Wait for the remote response to the head fetch
 	number, hash := uint64(0), common.Hash{}
@@ -751,9 +821,10 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 			return 0, errEmptyHeaderSet
 		}
 		// Make sure the peer's reply conforms to the request
-		for i := 0; i < len(headers); i++ {
-			if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
-				p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
+		for i, header := range headers {
+			expectNumber := from + int64(i)*int64((skip+1))
+			if number := header.Number.Int64(); number != expectNumber {
+				p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
 				return 0, errInvalidChain
 			}
 		}
@@ -761,20 +832,24 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 		finished = true
 		for i := len(headers) - 1; i >= 0; i-- {
 			// Skip any headers that underflow/overflow our requested set
-			if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
+			if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
 				continue
 			}
 			// Otherwise check if we already know the header or not
 			h := headers[i].Hash()
 			n := headers[i].Number.Uint64()
-			if (d.mode == FullSync && d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && d.lightchain.HasHeader(h, n)) {
-				number, hash = n, h
-				// If every header is known, even future ones, the peer straight out lied about its head
-				if number > height && i == limit-1 {
-					p.log.Warn("Lied about chain head", "reported", height, "found", number)
-					return 0, errStallingPeer
-				}
+			var known bool
+			switch d.mode {
+			case FullSync:
+				known = d.blockchain.HasBlock(h, n)
+			case FastSync:
+				known = d.blockchain.HasFastBlock(h, n)
+			default:
+				known = d.lightchain.HasHeader(h, n)
+			}
+			if known {
+				number, hash = n, h
 				break
 			}
 		}
@@ -798,10 +873,11 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 		return number, nil
 	}
 	// Ancestor not found, we need to binary search over our chain
-	start, end := uint64(0), head
+	start, end := uint64(0), remoteHeight
 	if floor > 0 {
 		start = uint64(floor)
 	}
+	p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)
 	for start+1 < end {
 		// Split our chain interval in two, and request the hash to cross check
 		check := (start + end) / 2
@@ -834,7 +910,16 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 			// Modify the search interval based on the response
 			h := headers[0].Hash()
 			n := headers[0].Number.Uint64()
-			if (d.mode == FullSync && !d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && !d.lightchain.HasHeader(h, n)) {
+			var known bool
+			switch d.mode {
+			case FullSync:
+				known = d.blockchain.HasBlock(h, n)
+			case FastSync:
+				known = d.blockchain.HasFastBlock(h, n)
+			default:
+				known = d.lightchain.HasHeader(h, n)
+			}
+			if !known {
 				end = check
 				break
 			}
@@ -1511,8 +1596,15 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
 		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
 	}
 	if index, err := d.blockchain.InsertDexonChain(blocks); err != nil {
-		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
-		return errInvalidChain
+		if index < len(results) {
+			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
+		} else {
+			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
+			// when it needs to preprocess blocks to import a sidechain.
+			// The importer will put together a new list of blocks to import, which is a superset
+			// of the blocks delivered from the downloader, and the indexing will be off.
+			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
+		}
 	}
 	return nil
 }
diff --git a/dex/downloader/downloader_test.go b/dex/downloader/downloader_test.go
index e8ec0056b..e01e0d96b 100644
--- a/dex/downloader/downloader_test.go
+++ b/dex/downloader/downloader_test.go
@@ -19,6 +19,7 @@ package downloader
 import (
 	"errors"
 	"fmt"
+	"strings"
 	"sync"
 	"sync/atomic"
 	"testing"
@@ -118,6 +119,15 @@ func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
 	return dl.GetBlockByHash(hash) != nil
 }
 
+// HasFastBlock checks if a block is present in the testers canonical chain.
+func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	_, ok := dl.ownReceipts[hash]
+	return ok
+}
+
 // GetHeader retrieves a header from the testers canonical chain.
 func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
 	dl.lock.RLock()
@@ -271,6 +281,7 @@ func (dl *downloadTester) InsertDexonChain(blocks types.Blocks) (i int, err erro
 			dl.ownHeaders[block.Hash()] = block.Header()
 		}
 		dl.ownBlocks[block.Hash()] = block
+		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
 		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
 	}
 	return len(blocks), nil
@@ -416,14 +427,20 @@ func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
 // assertOwnChain checks if the local chain contains the correct number of items
 // of the various chain components.
 func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
+	// Mark this method as a helper to report errors at callsite, not in here
+	t.Helper()
+
 	assertOwnForkedChain(t, tester, 1, []int{length})
 }
 
 // assertOwnForkedChain checks if the local forked chain contains the correct
 // number of items of the various chain components.
 func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
+	// Mark this method as a helper to report errors at callsite, not in here
+	t.Helper()
+
 	// Initialize the counters for the first fork
-	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-fsMinFullBlocks
+	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]
 
 	if receipts < 0 {
 		receipts = 1
@@ -432,12 +449,9 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng
 	for _, length := range lengths[1:] {
 		headers += length - common
 		blocks += length - common
-		receipts += length - common - fsMinFullBlocks
+		receipts += length - common
 	}
-	switch tester.downloader.mode {
-	case FullSync:
-		receipts = 1
-	case LightSync:
+	if tester.downloader.mode == LightSync {
 		blocks, receipts = 1, 1
 	}
 	if hs := len(tester.ownHeaders); hs != headers {
@@ -1105,7 +1119,9 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 }
 
 func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
+	// Mark this method as a helper to report errors at callsite, not in here
 	t.Helper()
+
 	p := d.Progress()
 	p.KnownStates, p.PulledStates = 0, 0
 	want.KnownStates, want.PulledStates = 0, 0
@@ -1363,3 +1379,78 @@ func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int
 	}
 	return nil
 }
+
+func TestRemoteHeaderRequestSpan(t *testing.T) {
+	testCases := []struct {
+		remoteHeight uint64
+		localHeight  uint64
+		expected     []int
+	}{
+		// Remote is way higher. We should ask for the remote head and go backwards
+		{1500, 1000,
+			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
+		},
+		{15000, 13006,
+			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
+		},
+		// Remote is pretty close to us. We don't have to fetch as many
+		{1200, 1150,
+			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
+		},
+		// Remote is equal to us (so on a fork with higher td)
+		// We should get the closest couple of ancestors
+		{1500, 1500,
+			[]int{1497, 1499},
+		},
+		// We're higher than the remote! Odd
+		{1000, 1500,
+			[]int{997, 999},
+		},
+		// Check some weird edgecases that it behaves somewhat rationally
+		{0, 1500,
+			[]int{0, 2},
+		},
+		{6000000, 0,
+			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
+		},
+		{0, 0,
+			[]int{0, 2},
+		},
+	}
+	reqs := func(from, count, span int) []int {
+		var r []int
+		num := from
+		for len(r) < count {
+			r = append(r, num)
+			num += span + 1
+		}
+		return r
+	}
+	for i, tt := range testCases {
+		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
+		data := reqs(int(from), count, span)
+
+		if max != uint64(data[len(data)-1]) {
+			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
+		}
+		failed := false
+		if len(data) != len(tt.expected) {
+			failed = true
+			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
+		} else {
+			for j, n := range data {
+				if n != tt.expected[j] {
+					failed = true
+					break
+				}
+			}
+		}
+		if failed {
+			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
+			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
+			fmt.Printf("got: %v\n", res)
+			fmt.Printf("exp: %v\n", exp)
+			t.Errorf("test %d: wrong values", i)
+		}
+	}
+}
diff --git a/dex/downloader/queue.go b/dex/downloader/queue.go
index f3a36ec3c..ea847913f 100644
--- a/dex/downloader/queue.go
+++ b/dex/downloader/queue.go
@@ -325,7 +325,7 @@ func (q *queue) Schedule(headers []*types.HeaderWithGovState, from uint64) []*ty
 		}
 		// Make sure no duplicate requests are executed
 		if _, ok := q.blockTaskPool[hash]; ok {
-			log.Warn("Header  already scheduled for block fetch", "number", header.Number, "hash", hash)
+			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
 			continue
 		}
 		if _, ok := q.receiptTaskPool[hash]; ok {
diff --git a/dex/downloader/statesync.go b/dex/downloader/statesync.go
index 49117abbb..1695ba19c 100644
--- a/dex/downloader/statesync.go
+++ b/dex/downloader/statesync.go
@@ -152,7 +152,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
 			finished = append(finished, req)
 			delete(active, pack.PeerId())
 
-		// Handle dropped peer connections: 
+		// Handle dropped peer connections:
 		case p := <-peerDrop:
 			// Skip if no request is currently pending
 			req := active[p.id]
@@ -398,9 +398,8 @@ func (s *stateSync) fillTasks(n int, req *stateReq) {
 
 // process iterates over a batch of delivered state data, injecting each item
 // into a running state sync, re-queuing any items that were requested but not
-// delivered.
-// Returns whether the peer actually managed to deliver anything of value,
-// and any error that occurred
+// delivered. Returns whether the peer actually managed to deliver anything of
+// value, and any error that occurred
 func (s *stateSync) process(req *stateReq) (int, error) {
 	// Collect processing stats and update progress if valid data was received
 	duplicate, unexpected, successful := 0, 0, 0
@@ -412,14 +411,12 @@ func (s *stateSync) process(req *stateReq) (int, error) {
 	}(time.Now())
 
 	// Iterate over all the delivered data and inject one-by-one into the trie
-	progress := false
 	for _, blob := range req.response {
-		prog, hash, err := s.processNodeData(blob)
+		_, hash, err := s.processNodeData(blob)
 		switch err {
 		case nil:
 			s.numUncommitted++
 			s.bytesUncommitted += len(blob)
-			progress = progress || prog
 			successful++
 		case trie.ErrNotRequested:
 			unexpected++
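For reference, the core of this backport is the span-based ancestor probe: instead of requesting headers at a fixed 16-block gap below the remote head, calculateRequestSpan sizes the probe window from both chain heights. The following is a minimal, hypothetical standalone sketch that copies calculateRequestSpan from the patch above and prints the request it would produce; the MaxHeaderFetch value of 192 is the upstream go-ethereum constant and is assumed here, not taken from this diff.

// span_demo.go: standalone sketch of the ancestor-probe request calculation.
package main

import "fmt"

// MaxHeaderFetch is assumed to be 192, the upstream go-ethereum value.
const MaxHeaderFetch = 192

// calculateRequestSpan is copied verbatim from the patch above.
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
	var (
		from     int
		count    int
		MaxCount = MaxHeaderFetch / 16
	)
	// Ask one block below the remote head so a 1-2 block height difference
	// does not force fetching ~15 unneeded headers.
	requestHead := int(remoteHeight) - 1
	if requestHead < 0 {
		requestHead = 0
	}
	// Cover the range down to just below our own head.
	requestBottom := int(localHeight - 1)
	if requestBottom < 0 {
		requestBottom = 0
	}
	totalSpan := requestHead - requestBottom
	span := 1 + totalSpan/MaxCount
	if span < 2 {
		span = 2
	}
	if span > 16 {
		span = 16
	}
	count = 1 + totalSpan/span
	if count > MaxCount {
		count = MaxCount
	}
	if count < 2 {
		count = 2
	}
	from = requestHead - (count-1)*span
	if from < 0 {
		from = 0
	}
	max := from + (count-1)*span
	return int64(from), count, span - 1, uint64(max)
}

func main() {
	// First case from TestRemoteHeaderRequestSpan: remote at 1500, local at 1000.
	from, count, skip, max := calculateRequestSpan(1500, 1000)
	fmt.Printf("from=%d count=%d skip=%d max=%d\n", from, count, skip, max)

	// The header numbers actually requested from the peer.
	for i := 0; i < count; i++ {
		fmt.Print(int(from)+i*(skip+1), " ") // 1323 1339 ... 1499
	}
	fmt.Println()
}

Run directly, this reports from=1323 count=12 skip=15 max=1499, i.e. twelve headers spaced 16 blocks apart ending one block below the remote head, matching the first case of TestRemoteHeaderRequestSpan and the expectNumber = from + i*(skip+1) ordering check in findAncestor.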