Diffstat (limited to 'eth/downloader')
-rw-r--r--   eth/downloader/downloader.go        8
-rw-r--r--   eth/downloader/downloader_test.go   2
-rw-r--r--   eth/downloader/fakepeer.go          6
-rw-r--r--   eth/downloader/peer.go              2
-rw-r--r--   eth/downloader/queue.go             2
-rw-r--r--   eth/downloader/statesync.go        10
6 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 62842adbc..9e4949899 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -47,7 +47,7 @@ var (
MaxForkAncestry = 3 * params.EpochDuration // Maximum chain reorganisation
rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
- rttMaxEstimate = 20 * time.Second // Maximum rount-trip time to target for download requests
+ rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value
ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
@@ -884,7 +884,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64)
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
-// The method returs the entire filled skeleton and also the number of headers
+// The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
log.Debug("Filling up skeleton", "from", from)
@@ -1377,7 +1377,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
pivot = height - uint64(fsMinFullBlocks)
}
// To cater for moving pivot points, track the pivot block and subsequently
- // accumulated download results separatey.
+ // accumulated download results separately.
var (
oldPivot *fetchResult // Locked in pivot block, might change eventually
oldTail []*fetchResult // Downloaded content after the pivot
@@ -1615,7 +1615,7 @@ func (d *Downloader) qosReduceConfidence() {
//
// Note, the returned RTT is .9 of the actually estimated RTT. The reason is that
// the downloader tries to adapt queries to the RTT, so multiple RTT values can
-// be adapted to, but smaller ones are preffered (stabler download stream).
+// be adapted to, but smaller ones are preferred (stabler download stream).
func (d *Downloader) requestRTT() time.Duration {
return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
}
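As an aside on the constants touched in the first hunk: the RTT -> TTL conversion that ttlScaling and ttlLimit drive can be illustrated with a small sketch. This is a hypothetical helper, not necessarily the downloader's actual requestTTL implementation:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

const (
	ttlScaling = 3           // constant scaling factor for RTT -> TTL conversion
	ttlLimit   = time.Minute // maximum TTL allowance to prevent crazy timeouts
)

// requestTTL sketches how the constants could combine: scale the atomically
// stored RTT estimate up for headroom, then clamp it at the hard limit.
func requestTTL(rttEstimate *uint64) time.Duration {
	ttl := time.Duration(atomic.LoadUint64(rttEstimate)) * ttlScaling
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}

func main() {
	rtt := uint64(2 * time.Second) // the rttMinEstimate floor from the hunk
	fmt.Println(requestTTL(&rtt))  // 6s: three round trips of headroom
}

The clamp is what the ttlLimit comment is about: a badly skewed RTT estimate must not blow request timeouts out to minutes.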
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index cb671a7df..e85e234c0 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -159,7 +159,7 @@ func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentRec
// Create the common suffix
hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
- // Create the forks, making the second heavyer if non balanced forks were requested
+ // Create the forks, making the second heavier if non balanced forks were requested
hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
hashes1 = append(hashes1, hashes[1:]...)
diff --git a/eth/downloader/fakepeer.go b/eth/downloader/fakepeer.go
index b45acff7d..5248e7fb0 100644
--- a/eth/downloader/fakepeer.go
+++ b/eth/downloader/fakepeer.go
@@ -27,7 +27,7 @@ import (
// FakePeer is a mock downloader peer that operates on a local database instance
// instead of being an actual live node. It's useful for testing and to implement
-// sync commands from an xisting local database.
+// sync commands from an existing local database.
type FakePeer struct {
id string
db ethdb.Database
@@ -48,7 +48,7 @@ func (p *FakePeer) Head() (common.Hash, *big.Int) {
}
// RequestHeadersByHash implements downloader.Peer, returning a batch of headers
-// defined by the origin hash and the associaed query parameters.
+// defined by the origin hash and the associated query parameters.
func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
var (
headers []*types.Header
@@ -92,7 +92,7 @@ func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int,
}
// RequestHeadersByNumber implements downloader.Peer, returning a batch of headers
-// defined by the origin number and the associaed query parameters.
+// defined by the origin number and the associated query parameters.
func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error {
var (
headers []*types.Header
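The amount/skip/reverse parameters those comments refer to define how a header query walks the chain. A minimal sketch of the semantics, with chain access faked by a map as a simplified stand-in for FakePeer's database-backed lookup:

package main

import "fmt"

// headersByNumber sketches the amount/skip/reverse query semantics of the
// downloader.Peer interface: walk from origin, stepping skip+1 numbers per
// header, downwards when reverse is set.
func headersByNumber(chain map[uint64]string, origin uint64, amount, skip int, reverse bool) []string {
	var headers []string
	for len(headers) < amount {
		h, ok := chain[origin]
		if !ok {
			break // ran off the end of the known chain
		}
		headers = append(headers, h)
		step := uint64(skip + 1)
		if reverse {
			if origin < step {
				break // would underflow past the genesis block
			}
			origin -= step
		} else {
			origin += step
		}
	}
	return headers
}

func main() {
	chain := map[uint64]string{0: "h0", 1: "h1", 2: "h2", 3: "h3", 4: "h4"}
	fmt.Println(headersByNumber(chain, 4, 3, 1, true)) // [h4 h2 h0]
}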
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index a4aa86114..428a60f8a 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -551,7 +551,7 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerC
// medianRTT returns the median RTT of the peerset, considering only the tuning
// peers if there are more peers available.
func (ps *peerSet) medianRTT() time.Duration {
- // Gather all the currnetly measured round trip times
+ // Gather all the currently measured round trip times
ps.lock.RLock()
defer ps.lock.RUnlock()
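The median computation itself is straightforward. A simplified sketch, assuming the per-peer samples were already gathered under the read lock; per the comment above, the real method additionally limits itself to the tuning peers when enough are connected:

package main

import (
	"fmt"
	"sort"
	"time"
)

// medianOfRTTs sorts the gathered samples and picks the middle one. The
// empty-set fallback to 20s mirrors rttMaxEstimate, as an assumption rather
// than documented behaviour.
func medianOfRTTs(rtts []time.Duration) time.Duration {
	if len(rtts) == 0 {
		return 20 * time.Second
	}
	sorted := append([]time.Duration(nil), rtts...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted[len(sorted)/2]
}

func main() {
	samples := []time.Duration{300 * time.Millisecond, 2 * time.Second, 800 * time.Millisecond}
	fmt.Println(medianOfRTTs(samples)) // 800ms
}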
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 359cce54b..bbe0aed5d 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -275,7 +275,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
if q.headerResults != nil {
panic("skeleton assembly already in progress")
}
- // Shedule all the header retrieval tasks for the skeleton assembly
+ // Schedule all the header retrieval tasks for the skeleton assembly
q.headerTaskPool = make(map[uint64]*types.Header)
q.headerTaskQueue = prque.New()
q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
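For context, the scheduling step this hunk lands in pairs each skeleton header with the block number its batch starts at. A rough sketch with simplified types: a plain slice stands in for the real priority queue, and the batch size of 192 (geth's MaxHeaderFetch at the time) is an assumption carried over from elsewhere in the codebase:

package main

import "fmt"

// scheduleSkeleton keys each retrieval task by the starting block number of
// the batch that its skeleton header anchors.
func scheduleSkeleton(from uint64, skeleton []string, taskPool map[uint64]string, taskQueue *[]uint64) {
	for i, header := range skeleton {
		index := from + uint64(i*192)
		taskPool[index] = header
		*taskQueue = append(*taskQueue, index)
	}
}

func main() {
	pool := make(map[uint64]string)
	queue := []uint64{}
	scheduleSkeleton(1, []string{"skel-0", "skel-1"}, pool, &queue)
	fmt.Println(pool, queue) // map[1:skel-0 193:skel-1] [1 193]
}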
diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go
index ee6c7b491..4071d0ad9 100644
--- a/eth/downloader/statesync.go
+++ b/eth/downloader/statesync.go
@@ -31,7 +31,7 @@ import (
"github.com/ethereum/go-ethereum/trie"
)
-// stateReq represents a batch of state fetch requests groupped together into
+// stateReq represents a batch of state fetch requests grouped together into
// a single data retrieval network packet.
type stateReq struct {
items []common.Hash // Hashes of the state items to download
@@ -139,7 +139,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
// Handle incoming state packs:
case pack := <-d.stateCh:
- // Discard any data not requested (or previsouly timed out)
+ // Discard any data not requested (or previously timed out)
req := active[pack.PeerId()]
if req == nil {
log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
@@ -182,7 +182,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
case req := <-d.trackStateReq:
// If an active request already exists for this peer, we have a problem. In
// theory the trie node schedule must never assign two requests to the same
- // peer. In practive however, a peer might receive a request, disconnect and
+ // peer. In practice however, a peer might receive a request, disconnect and
// immediately reconnect before the previous times out. In this case the first
// request is never honored, alas we must not silently overwrite it, as that
// causes valid requests to go missing and sync to get stuck.
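That comment's invariant, never silently overwrite an active request, reduces to a guarded map insert. A self-contained sketch with hypothetical types; the real code instead fails the stale request so its hashes get rescheduled, which the callback stands in for here:

package main

import "fmt"

type request struct {
	peer  string
	items []string
}

// track registers a request for a peer. If a stale request is still active
// for that peer (the disconnect/reconnect race above), its items are handed
// back for rescheduling instead of being silently overwritten and lost.
func track(active map[string]*request, req *request, reschedule func([]string)) {
	if old, ok := active[req.peer]; ok {
		reschedule(old.items)
	}
	active[req.peer] = req
}

func main() {
	active := make(map[string]*request)
	requeue := func(items []string) { fmt.Println("requeue", items) }
	track(active, &request{peer: "p1", items: []string{"0xaa"}}, requeue)
	track(active, &request{peer: "p1", items: []string{"0xbb"}}, requeue) // prints: requeue [0xaa]
}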
@@ -228,7 +228,7 @@ type stateSync struct {
err error // Any error hit during sync (set before completion)
}
-// stateTask represents a single trie node download taks, containing a set of
+// stateTask represents a single trie node download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type stateTask struct {
attempts map[string]struct{}
@@ -333,7 +333,7 @@ func (s *stateSync) commit(force bool) error {
return nil
}
-// assignTasks attempts to assing new tasks to all idle peers, either from the
+// assignTasks attempts to assign new tasks to all idle peers, either from the
// batch currently being retried, or fetching new data from the trie sync itself.
func (s *stateSync) assignTasks() {
// Iterate over all idle peers and try to assign them state fetches
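Finally, the loop shape that assignTasks' comment describes: drain the idle peers, hand each a capped batch of hashes (retried tasks first, fresh trie-sync nodes after), and dispatch the fetches. A hedged sketch with hypothetical stand-ins for the downloader's peer-set and task bookkeeping:

package main

import "fmt"

// assignSketch gives every idle peer the next available batch, stopping as
// soon as there is no work left to retry or fetch fresh.
func assignSketch(idlePeers []string, nextBatch func(max int) []string, fetch func(peer string, items []string)) {
	for _, p := range idlePeers {
		items := nextBatch(384) // assumption: per-request cap on state entries
		if len(items) == 0 {
			return
		}
		fetch(p, items)
	}
}

func main() {
	batches := [][]string{{"0x01", "0x02"}, {"0x03"}}
	next := func(max int) []string {
		if len(batches) == 0 {
			return nil
		}
		b := batches[0]
		batches = batches[1:]
		return b
	}
	assignSketch([]string{"p1", "p2", "p3"}, next, func(peer string, items []string) {
		fmt.Println(peer, "fetches", items)
	})
}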