author | Péter Szilágyi <peterke@gmail.com> | 2017-02-27 23:06:40 +0800 |
---|---|---|
committer | Péter Szilágyi <peterke@gmail.com> | 2017-02-28 01:15:18 +0800 |
commit | 2f28a12cdbee3e5c48ca5f44b128e639c60f3685 | |
tree | edb2f3a023600bcff5ff668229fc9cf2c114e18c /eth | |
parent | fc97c7a38dc5193ef5e32de42235b6facf609c41 | |
common, eth/downloader, log: support terminal log formatting
Diffstat (limited to 'eth')
-rw-r--r-- | eth/downloader/downloader.go | 34 |
-rw-r--r-- | eth/downloader/queue.go | 18 |
2 files changed, 24 insertions, 28 deletions
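The diff below drops the hand-rolled `hash.Hex()[2:10]` abbreviation at every log call site and passes the `common.Hash` value itself, leaving the shortening to the terminal log formatting added to the `common` and `log` packages in this commit (those files are outside this eth-only diffstat). The sketch below only illustrates the idea; the `TerminalStringer` interface, the stand-in `Hash` type, and `formatValue` are illustrative assumptions, not the actual go-ethereum API.

```go
package main

import "fmt"

// TerminalStringer is an illustrative stand-in for the formatter hook this
// commit relies on: values that can render a short form for terminal logs.
type TerminalStringer interface {
	TerminalString() string
}

// Hash is a stand-in for common.Hash.
type Hash [32]byte

// TerminalString returns an abbreviated hex form, roughly what the old call
// sites produced by hand via hash.Hex()[2:10].
func (h Hash) TerminalString() string {
	return fmt.Sprintf("%x…", h[:4])
}

// formatValue mimics a terminal log formatter: it picks the short form when
// a value offers one, so call sites can simply pass the hash itself.
func formatValue(v interface{}) string {
	if s, ok := v.(TerminalStringer); ok {
		return s.TerminalString()
	}
	return fmt.Sprintf("%v", v)
}

func main() {
	h := Hash{0xd4, 0xe5, 0x67, 0x4b}
	fmt.Println("old call-site style:", fmt.Sprintf("%x", h)[:8]) // manual slicing at every call site
	fmt.Println("new call-site style:", formatValue(h))           // formatter decides how to abbreviate
}
```

This is the pattern visible throughout the diff: call sites log the raw value, and the formatter decides how to render it for the terminal.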
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 92e42f0b3..be3d8d177 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -383,7 +383,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
         return errTooOld
     }
 
-    log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash.Hex()[2:10], "td", td, "mode", d.mode)
+    log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
     defer func(start time.Time) {
         log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
     }(time.Now())
@@ -544,7 +544,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
                 return nil, errBadPeer
             }
             head := headers[0]
-            p.logger.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash().Hex()[2:10])
+            p.logger.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
             return head, nil
 
         case <-timeout:
@@ -657,10 +657,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
     // If the head fetch already found an ancestor, return
     if !common.EmptyHash(hash) {
         if int64(number) <= floor {
-            p.logger.Warn("Ancestor below allowance", "number", number, "hash", hash.Hex()[2:10], "allowance", floor)
+            p.logger.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
             return 0, errInvalidAncestor
         }
-        p.logger.Debug("Found common ancestor", "number", number, "hash", hash.Hex()[2:10])
+        p.logger.Debug("Found common ancestor", "number", number, "hash", hash)
         return number, nil
     }
     // Ancestor not found, we need to binary search over our chain
@@ -704,7 +704,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
             }
             header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
             if header.Number.Uint64() != check {
-                p.logger.Debug("Received non requested header", "number", header.Number, "hash", header.Hash().Hex()[2:10], "request", check)
+                p.logger.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
                 return 0, errBadPeer
             }
             start = check
@@ -722,10 +722,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
     }
     // Ensure valid ancestry and return
     if int64(start) <= floor {
-        p.logger.Warn("Ancestor below allowance", "number", start, "hash", hash.Hex()[2:10], "allowance", floor)
+        p.logger.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
         return 0, errInvalidAncestor
     }
-    p.logger.Debug("Found common ancestor", "number", start, "hash", hash.Hex()[2:10])
+    p.logger.Debug("Found common ancestor", "number", start, "hash", hash)
     return start, nil
 }
 
@@ -1208,7 +1208,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
             if atomic.LoadUint32(&d.fsPivotFails) == 0 {
                 for _, header := range rollback {
                     if header.Number.Uint64() == pivot {
-                        log.Warn("Fast-sync critical section failure, locked pivot to header", "number", pivot, "hash", header.Hash().Hex()[2:10])
+                        log.Warn("Fast-sync critical section failure, locked pivot to header", "number", pivot, "hash", header.Hash())
                         d.fsPivotLock = header
                     }
                 }
@@ -1304,7 +1304,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
                     if n > 0 {
                         rollback = append(rollback, chunk[:n]...)
                     }
-                    log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash().Hex()[2:10], "err", err)
+                    log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
                     return errInvalidChain
                 }
                 // All verifications passed, store newly found uncertain headers
@@ -1316,7 +1316,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
                 // If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
                 if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
                     if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
-                        log.Warn("Pivot doesn't match locked in one", "remoteNumber", pivot.Number, "remoteHash", pivot.Hash().Hex()[2:10], "localNumber", d.fsPivotLock.Number, "localHash", d.fsPivotLock.Hash().Hex()[2:10])
+                        log.Warn("Pivot doesn't match locked in one", "remoteNumber", pivot.Number, "remoteHash", pivot.Hash(), "localNumber", d.fsPivotLock.Number, "localHash", d.fsPivotLock.Hash())
                         return errInvalidChain
                     }
                 }
@@ -1366,13 +1366,9 @@ func (d *Downloader) processContent() error {
         // Actually import the blocks
         first, last := results[0].Header, results[len(results)-1].Header
         log.Debug("Inserting downloaded chain", "items", len(results),
-            "from", log.Lazy{Fn: func() string {
-                return fmt.Sprintf("#%d [%x…]", first.Number, first.Hash().Bytes()[:4])
-            }},
-            "till", log.Lazy{Fn: func() string {
-                return fmt.Sprintf("#%d [%x…]", last.Number, last.Hash().Bytes()[:4])
-            }})
-
+            "firstnum", first.Number, "firsthash", first.Hash(),
+            "lastnum", last.Number, "lasthash", last.Hash(),
+        )
         for len(results) != 0 {
             // Check for any termination requests
             select {
@@ -1406,14 +1402,14 @@ func (d *Downloader) processContent() error {
             case len(receipts) > 0:
                 index, err = d.insertReceipts(blocks, receipts)
                 if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
-                    log.Debug("Committing block as new head", "number", blocks[len(blocks)-1].Number(), "hash", blocks[len(blocks)-1].Hash().Hex()[2:10])
+                    log.Debug("Committing block as new head", "number", blocks[len(blocks)-1].Number(), "hash", blocks[len(blocks)-1].Hash())
                     index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
                 }
             default:
                 index, err = d.insertBlocks(blocks)
             }
             if err != nil {
-                log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash().Hex()[2:10], "err", err)
+                log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
                 return errInvalidChain
             }
             // Shift the results to the next batch
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 9530e15be..855097c45 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -364,20 +364,20 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
         // Make sure chain order is honoured and preserved throughout
         hash := header.Hash()
         if header.Number == nil || header.Number.Uint64() != from {
-            log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash.Hex()[2:10], "expected", from)
+            log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
             break
         }
         if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
-            log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash.Hex()[2:10])
+            log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
             break
         }
         // Make sure no duplicate requests are executed
         if _, ok := q.blockTaskPool[hash]; ok {
-            log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash.Hex()[2:10])
+            log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
             continue
         }
         if _, ok := q.receiptTaskPool[hash]; ok {
-            log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash.Hex()[2:10])
+            log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
             continue
         }
         // Queue the header for content retrieval
@@ -391,7 +391,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
         }
         if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
             // Pivoting point of the fast sync, switch the state retrieval to this
-            log.Debug("Switching state downloads to new block", "number", header.Number, "hash", hash.Hex()[2:10])
+            log.Debug("Switching state downloads to new block", "number", header.Number, "hash", hash)
             q.stateTaskIndex = 0
             q.stateTaskPool = make(map[common.Hash]int)
 
@@ -872,10 +872,10 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
     accepted := len(headers) == MaxHeaderFetch
     if accepted {
         if headers[0].Number.Uint64() != request.From {
-            log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash().Hex()[2:10], request.From)
+            log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), request.From)
             accepted = false
         } else if headers[len(headers)-1].Hash() != target {
-            log.Trace("Last header broke skeleton structure ", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash().Hex()[2:10], "expected", target.Hex()[2:10])
+            log.Trace("Last header broke skeleton structure ", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
             accepted = false
         }
     }
@@ -883,12 +883,12 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
         for i, header := range headers[1:] {
             hash := header.Hash()
             if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
-                log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash.Hex()[2:10], "expected", want)
+                log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
                 accepted = false
                 break
             }
             if headers[i].Hash() != header.ParentHash {
-                log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash.Hex()[2:10])
+                log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
                 accepted = false
                 break
             }
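For orientation, the queue.go context lines around these log calls perform two sanity checks: "Header broke chain ordering" fires when a header does not carry the expected sequential number, and "Header broke chain ancestry" fires when its parent hash does not match the previous header. A minimal sketch of that logic follows, using stand-in types (the `Header` struct and `checkChain` here are illustrative, not `types.Header` or the queue's real code):

```go
package main

import "fmt"

// Header is a minimal stand-in with just the fields the checks rely on.
type Header struct {
	Number     uint64
	Hash       [32]byte
	ParentHash [32]byte
}

// checkChain mirrors the two validations: each header must carry the
// expected number and link to its predecessor's hash.
func checkChain(headers []*Header, from uint64) error {
	for i, h := range headers {
		if h.Number != from+uint64(i) {
			return fmt.Errorf("broke chain ordering: number %d, expected %d", h.Number, from+uint64(i))
		}
		if i > 0 && h.ParentHash != headers[i-1].Hash {
			return fmt.Errorf("broke chain ancestry at number %d", h.Number)
		}
	}
	return nil
}

func main() {
	a := &Header{Number: 1, Hash: [32]byte{0x01}}
	b := &Header{Number: 2, Hash: [32]byte{0x02}, ParentHash: [32]byte{0x01}}
	fmt.Println(checkChain([]*Header{a, b}, 1)) // <nil>: numbers are sequential and b links to a
}
```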