path: root/eth/downloader
Diffstat (limited to 'eth/downloader')
-rw-r--r--  eth/downloader/downloader.go       | 14
-rw-r--r--  eth/downloader/downloader_test.go  | 61
-rw-r--r--  eth/downloader/fakepeer.go         |  2
-rw-r--r--  eth/downloader/peer.go             |  2
4 files changed, 58 insertions(+), 21 deletions(-)
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 5782d4cf5..b338129e0 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -333,7 +333,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
}
// synchronise will select the peer and use it for synchronising. If an empty string is given
-// it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the
+// it will use the best peer possible and synchronize if its TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
// Mock out the synchronisation if testing
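The TD ("total difficulty") comparison this comment describes is, at its core, a big.Int check of the peer's advertised TD against the local head's. A minimal sketch of that gate, using hypothetical names rather than the downloader's real fields:

    package main

    import (
        "errors"
        "fmt"
        "math/big"
    )

    // errLowTD is illustrative; the real downloader defines its own errors.
    var errLowTD = errors.New("peer's total difficulty not higher than ours")

    // shouldSync reports whether a peer advertising peerTD promises a
    // better chain than our own chain weighing localTD.
    func shouldSync(localTD, peerTD *big.Int) error {
        if peerTD.Cmp(localTD) <= 0 {
            return errLowTD // nothing to gain from this peer
        }
        return nil
    }

    func main() {
        fmt.Println(shouldSync(big.NewInt(100), big.NewInt(90)))  // refused
        fmt.Println(shouldSync(big.NewInt(100), big.NewInt(101))) // <nil>: sync
    }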
@@ -708,7 +708,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
ttl := d.requestTTL()
timeout := time.After(ttl)
- go p.peer.RequestHeadersByNumber(uint64(check), 1, 0, false)
+ go p.peer.RequestHeadersByNumber(check, 1, 0, false)
// Wait until a reply arrives to this request
for arrived := false; !arrived; {
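The lines above show the downloader's usual shape for network calls: fire the request on a goroutine, then block on a select between the delivery channel and a TTL timer. A self-contained sketch of that pattern, with hypothetical channel and function names:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    var errTimeout = errors.New("request timed out")

    // fetchWithTTL issues request asynchronously and waits for a reply on
    // replies, giving up after ttl. All names here are illustrative.
    func fetchWithTTL(request func(), replies <-chan []byte, ttl time.Duration) ([]byte, error) {
        go request()
        timeout := time.After(ttl)
        select {
        case reply := <-replies:
            return reply, nil
        case <-timeout:
            return nil, errTimeout
        }
    }

    func main() {
        replies := make(chan []byte, 1)
        _, err := fetchWithTTL(func() { /* peer never answers */ }, replies, 50*time.Millisecond)
        fmt.Println(err) // request timed out
    }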
@@ -1003,8 +1003,8 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
return errCancel
case packet := <-deliveryCh:
- // If the peer was previously banned and failed to deliver it's pack
- // in a reasonable time frame, ignore it's message.
+ // If the peer was previously banned and failed to deliver its pack
+ // in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(packet.PeerId()); peer != nil {
// Deliver the received chunk of data and check chain validity
accepted, err := deliver(packet)
@@ -1205,8 +1205,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
case <-d.cancelCh:
}
}
- // If no headers were retrieved at all, the peer violated it's TD promise that it had a
- // better chain compared to ours. The only exception is if it's promised blocks were
+ // If no headers were retrieved at all, the peer violated its TD promise that it had a
+ // better chain compared to ours. The only exception is if its promised blocks were
// already imported by other means (e.g. fetcher):
//
// R <remote peer>, L <local node>: Both at block 10
@@ -1518,7 +1518,7 @@ func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, i
func (d *Downloader) qosTuner() {
for {
// Retrieve the current median RTT and integrate into the previous target RTT
- rtt := time.Duration(float64(1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
+ rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
// A new RTT cycle passed, increase our confidence in the estimated RTT
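The expression being cleaned up is an exponential moving average: the running estimate is blended with the freshly measured median RTT, weighted by qosTuningImpact (the float64 conversion around the constant factor was redundant, which is all the change removes). The same smoothing step in isolation, with an illustrative weight:

    package main

    import (
        "fmt"
        "time"
    )

    // tuningImpact plays the role of qosTuningImpact: how strongly a new
    // measurement pulls the estimate. The value here is made up.
    const tuningImpact = 0.25

    // tuneRTT folds a new median sample into the running estimate.
    func tuneRTT(estimate, median time.Duration) time.Duration {
        return time.Duration((1-tuningImpact)*float64(estimate) + tuningImpact*float64(median))
    }

    func main() {
        est := 500 * time.Millisecond
        for i := 0; i < 2; i++ {
            est = tuneRTT(est, 300*time.Millisecond)
            fmt.Println(est) // 450ms, then 412.5ms: converging on the median
        }
    }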
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 58f6e9a62..7d1cc8c34 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -704,6 +704,7 @@ func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
+ t.Parallel()
tester := newTester()
defer tester.terminate()
@@ -1166,6 +1167,8 @@ func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) }
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
+ t.Parallel()
+
tester := newTester()
defer tester.terminate()
@@ -1198,6 +1201,8 @@ func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
+ t.Parallel()
+
tester := newTester()
defer tester.terminate()
@@ -1310,6 +1315,8 @@ func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
+ t.Parallel()
+
// Define the disconnection requirement for individual hash fetch errors
tests := []struct {
result error
@@ -1665,12 +1672,26 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
-func TestDeliverHeadersHang62(t *testing.T) { testDeliverHeadersHang(t, 62, FullSync) }
-func TestDeliverHeadersHang63Full(t *testing.T) { testDeliverHeadersHang(t, 63, FullSync) }
-func TestDeliverHeadersHang63Fast(t *testing.T) { testDeliverHeadersHang(t, 63, FastSync) }
-func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) }
-func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) }
-func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
+// We use data-driven subtests to manage this so that it will be parallel on its own
+// and not with the other tests, avoiding intermittent failures.
+func TestDeliverHeadersHang(t *testing.T) {
+ testCases := []struct {
+ protocol int
+ syncMode SyncMode
+ }{
+ {62, FullSync},
+ {63, FullSync},
+ {63, FastSync},
+ {64, FullSync},
+ {64, FastSync},
+ {64, LightSync},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
+ testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
+ })
+ }
+}
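A side benefit of the t.Run refactor is that individual cases can now be selected from the command line; go test substitutes underscores for the spaces in a subtest's name when matching against -run, so a single protocol/mode combination can be exercised alone, for example:

    go test ./eth/downloader -run 'TestDeliverHeadersHang/protocol_64_mode_FastSync'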
type floodingTestPeer struct {
peer Peer
@@ -1703,7 +1724,7 @@ func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
// Deliver the actual requested headers.
go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
// None of the extra deliveries should block.
- timeout := time.After(15 * time.Second)
+ timeout := time.After(60 * time.Second)
for i := 0; i < cap(deliveriesDone); i++ {
select {
case <-deliveriesDone:
@@ -1732,7 +1753,6 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
tester.downloader.peers.peers["peer"].peer,
tester,
}
-
if err := tester.sync("peer", nil, mode); err != nil {
t.Errorf("sync failed: %v", err)
}
@@ -1742,12 +1762,28 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
// Tests that if fast sync aborts in the critical section, it can restart a few
// times before giving up.
-func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
-func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
-func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
-func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
+// We use data-driven subtests to manage this so that it will be parallel on its own
+// and not with the other tests, avoiding intermittent failures.
+func TestFastCriticalRestarts(t *testing.T) {
+ testCases := []struct {
+ protocol int
+ progress bool
+ }{
+ {63, false},
+ {64, false},
+ {63, true},
+ {64, true},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("protocol %d progress %v", tc.protocol, tc.progress), func(t *testing.T) {
+ testFastCriticalRestarts(t, tc.protocol, tc.progress)
+ })
+ }
+}
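The scheduling promised in the comment works because t.Parallel is called only inside testFastCriticalRestarts (below): each subtest parks at that call until the parent's loop has finished, then all of them resume together, while the parent itself, never having called t.Parallel, still runs serially among the package's top-level tests. A minimal demonstration of that barrier:

    package demo

    import (
        "testing"
        "time"
    )

    // TestGroup stays serial relative to other top-level tests, but its
    // subtests run concurrently with one another once the loop returns.
    func TestGroup(t *testing.T) {
        for _, name := range []string{"a", "b", "c"} {
            t.Run(name, func(t *testing.T) {
                t.Parallel() // park until the parent's function body returns
                time.Sleep(100 * time.Millisecond)
            })
        }
        // The three subtests now run together: ~100ms in total, not ~300ms,
        // and never interleaved with other top-level tests.
    }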
func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
+ t.Parallel()
+
tester := newTester()
defer tester.terminate()
@@ -1776,6 +1812,7 @@ func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
if i == 0 {
+ time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
if tester.downloader.fsPivotLock == nil {
time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
t.Fatalf("pivot block not locked in after critical section failure")
diff --git a/eth/downloader/fakepeer.go b/eth/downloader/fakepeer.go
index ebdb9c334..b45acff7d 100644
--- a/eth/downloader/fakepeer.go
+++ b/eth/downloader/fakepeer.go
@@ -62,7 +62,7 @@ func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
number := origin.Number.Uint64()
headers = append(headers, origin)
if reverse {
- for i := 0; i < int(skip)+1; i++ {
+ for i := 0; i <= skip; i++ {
if header := p.hc.GetHeader(hash, number); header != nil {
hash = header.ParentHash
number--
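The corrected bound is behaviour-preserving (skip is already an int, so i < int(skip)+1 and i <= skip count the same iterations); the loop's job is to hop backwards skip+1 times along ParentHash links. A stripped-down sketch of that walk over a hypothetical in-memory header store:

    package main

    import "fmt"

    // header is a simplified stand-in for types.Header.
    type header struct {
        number     uint64
        parentHash string
    }

    // walkBack follows parent links skip+1 times from start, mirroring the
    // i <= skip loop above; lookup is a hypothetical chain accessor.
    func walkBack(lookup func(hash string) *header, start string, skip int) *header {
        var h *header
        hash := start
        for i := 0; i <= skip; i++ {
            if h = lookup(hash); h == nil {
                return nil // ran off the end of the known chain
            }
            hash = h.parentHash
        }
        return h
    }

    func main() {
        chain := map[string]*header{
            "h2": {number: 2, parentHash: "h1"},
            "h1": {number: 1, parentHash: "h0"},
            "h0": {number: 0},
        }
        fmt.Println(walkBack(func(h string) *header { return chain[h] }, "h2", 1).number) // 1
    }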
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index e638744ea..a4aa86114 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -548,7 +548,7 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerC
return idle, total
}
-// medianRTT returns the median RTT of te peerset, considering only the tuning
+// medianRTT returns the median RTT of the peerset, considering only the tuning
// peers if there are more peers available.
func (ps *peerSet) medianRTT() time.Duration {
// Gather all the currently measured round trip times
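The function under the fixed comment collects one RTT sample per (tuning) peer and takes their median; the statistic itself is just the middle element of the sorted samples. A minimal sketch, not the peerset's actual code:

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    // medianRTT returns the middle sample of rtts, or zero if there are
    // none. Simplified for illustration.
    func medianRTT(rtts []time.Duration) time.Duration {
        if len(rtts) == 0 {
            return 0
        }
        sorted := append([]time.Duration(nil), rtts...)
        sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
        return sorted[len(sorted)/2]
    }

    func main() {
        samples := []time.Duration{90 * time.Millisecond, 30 * time.Millisecond, 60 * time.Millisecond}
        fmt.Println(medianRTT(samples)) // 60ms
    }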