author    Jeffrey Wilcke <geffobscura@gmail.com>    2015-06-30 08:22:19 +0800
committer Jeffrey Wilcke <geffobscura@gmail.com>    2015-06-30 08:22:19 +0800
commit    7625b07dd9a2a7b5c5a504c1276eea04596ac871
tree      ce2a757cd4e0591fc15815b2dfae528ae517d36e /eth/fetcher
parent    72e2613a9fe3205fa5a67b72b832e03b2357ee88
parent    8f504063f465e0ca10c6bb53ee914d10a3d45c86
Merge branch 'release/0.9.34'
Diffstat (limited to 'eth/fetcher')
-rw-r--r--  eth/fetcher/fetcher.go        23
-rw-r--r--  eth/fetcher/fetcher_test.go  123
-rw-r--r--  eth/fetcher/metrics.go        16
3 files changed, 78 insertions, 84 deletions
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go
index 90a202235..256b452e1 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/fetcher.go
@@ -8,6 +8,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
@@ -229,6 +230,8 @@ func (f *Fetcher) loop() {
case notification := <-f.notify:
// A block was announced, make sure the peer isn't DOSing us
+ announceMeter.Mark(1)
+
count := f.announces[notification.origin] + 1
if count > hashLimit {
glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit)
@@ -246,6 +249,7 @@ func (f *Fetcher) loop() {
case op := <-f.inject:
// A direct block insertion was requested, try and fill any pending gaps
+ broadcastMeter.Mark(1)
f.enqueue(op.origin, op.block)
case hash := <-f.done:
@@ -307,7 +311,7 @@ func (f *Fetcher) loop() {
hash := block.Hash()
// Filter explicitly requested blocks from hash announcements
- if _, ok := f.fetching[hash]; ok {
+ if f.fetching[hash] != nil && f.queued[hash] == nil {
// Discard if already imported by other means
if f.getBlock(hash) == nil {
explicit = append(explicit, block)
@@ -364,6 +368,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
// Discard any past or too distant blocks
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist)
+ discardMeter.Mark(1)
return
}
// Schedule the block for future importing
@@ -399,19 +404,29 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
return
}
// Quickly validate the header and propagate the block if it passes
- if err := f.validateBlock(block, parent); err != nil {
+ switch err := f.validateBlock(block, parent); err {
+ case nil:
+ // All ok, quickly propagate to our peers
+ broadcastTimer.UpdateSince(block.ReceivedAt)
+ go f.broadcastBlock(block, true)
+
+ case core.BlockFutureErr:
+ futureMeter.Mark(1)
+ // Weird future block, don't fail, but neither propagate
+
+ default:
+ // Something went very wrong, drop the peer
glog.V(logger.Debug).Infof("Peer %s: block #%d [%x] verification failed: %v", peer, block.NumberU64(), hash[:4], err)
f.dropPeer(peer)
return
}
- go f.broadcastBlock(block, true)
-
// Run the actual import and log any issues
if _, err := f.insertChain(types.Blocks{block}); err != nil {
glog.V(logger.Warn).Infof("Peer %s: block #%d [%x] import failed: %v", peer, block.NumberU64(), hash[:4], err)
return
}
// If import succeeded, broadcast the block
+ announceTimer.UpdateSince(block.ReceivedAt)
go f.broadcastBlock(block, false)
// Invoke the testing hook if needed
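
The fetcher.go change above replaces the single error check on validateBlock with a switch, so that a "future block" sentinel error is tolerated (the block is kept but not propagated) while any other verification error still drops the peer. Below is a minimal, self-contained sketch of that pattern; the names (errFutureBlock, validate, handle) are illustrative only and stand in for core.BlockFutureErr and the fetcher's own logic.

package main

import (
	"errors"
	"fmt"
)

// errFutureBlock is a hypothetical sentinel, standing in for core.BlockFutureErr.
var errFutureBlock = errors.New("block timestamp is in the future")

// validate mimics a header check that can return the sentinel error.
func validate(timestamp, now int64) error {
	if timestamp > now {
		return errFutureBlock
	}
	return nil
}

// handle shows the switch pattern: nil propagates, the sentinel is tolerated
// without propagation, and anything else is treated as a protocol violation.
func handle(timestamp, now int64) string {
	switch err := validate(timestamp, now); err {
	case nil:
		return "valid: propagate to peers, then import"
	case errFutureBlock:
		return "future block: keep it queued, but do not propagate"
	default:
		return "invalid (" + err.Error() + "): drop the peer"
	}
}

func main() {
	fmt.Println(handle(10, 20)) // valid
	fmt.Println(handle(30, 20)) // future block
}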
diff --git a/eth/fetcher/fetcher_test.go b/eth/fetcher/fetcher_test.go
index 80247d9d2..2c9c9bca3 100644
--- a/eth/fetcher/fetcher_test.go
+++ b/eth/fetcher/fetcher_test.go
@@ -1,7 +1,6 @@
package fetcher
import (
- "encoding/binary"
"errors"
"math/big"
"sync"
@@ -10,58 +9,32 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
)
var (
- knownHash = common.Hash{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
- unknownHash = common.Hash{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
- bannedHash = common.Hash{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}
-
- genesis = createBlock(1, common.Hash{}, knownHash)
+ testdb, _ = ethdb.NewMemDatabase()
+ genesis = core.GenesisBlockForTesting(testdb, common.Address{}, big.NewInt(0))
+ unknownBlock = types.NewBlock(&types.Header{}, nil, nil, nil)
)
-// idCounter is used by the createHashes method the generate deterministic but unique hashes
-var idCounter = int64(2) // #1 is the genesis block
-
-// createHashes generates a batch of hashes rooted at a specific point in the chain.
-func createHashes(amount int, root common.Hash) (hashes []common.Hash) {
- hashes = make([]common.Hash, amount+1)
- hashes[len(hashes)-1] = root
-
- for i := 0; i < len(hashes)-1; i++ {
- binary.BigEndian.PutUint64(hashes[i][:8], uint64(idCounter))
- idCounter++
- }
- return
-}
-
-// createBlock assembles a new block at the given chain height.
-func createBlock(i int, parent, hash common.Hash) *types.Block {
- header := &types.Header{Number: big.NewInt(int64(i))}
- block := types.NewBlockWithHeader(header)
- block.HeaderHash = hash
- block.ParentHeaderHash = parent
- return block
-}
-
-// copyBlock makes a deep copy of a block suitable for local modifications.
-func copyBlock(block *types.Block) *types.Block {
- return createBlock(int(block.Number().Int64()), block.ParentHeaderHash, block.HeaderHash)
-}
-
-// createBlocksFromHashes assembles a collection of blocks, each having a correct
-// place in the given hash chain.
-func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
- blocks := make(map[common.Hash]*types.Block)
- for i := 0; i < len(hashes); i++ {
- parent := knownHash
- if i < len(hashes)-1 {
- parent = hashes[i+1]
- }
- blocks[hashes[i]] = createBlock(len(hashes)-i, parent, hashes[i])
- }
- return blocks
+// makeChain creates a chain of n blocks starting at and including parent.
+// the returned hash chain is ordered head->parent.
+func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
+ blocks := core.GenerateChain(parent, testdb, n, func(i int, gen *core.BlockGen) {
+ gen.SetCoinbase(common.Address{seed})
+ })
+ hashes := make([]common.Hash, n+1)
+ hashes[len(hashes)-1] = parent.Hash()
+ blockm := make(map[common.Hash]*types.Block, n+1)
+ blockm[parent.Hash()] = parent
+ for i, b := range blocks {
+ hashes[len(hashes)-i-2] = b.Hash()
+ blockm[b.Hash()] = b
+ }
+ return hashes, blockm
}
// fetcherTester is a test simulator for mocking out local block chain.
@@ -77,8 +50,8 @@ type fetcherTester struct {
// newTester creates a new fetcher test mocker.
func newTester() *fetcherTester {
tester := &fetcherTester{
- hashes: []common.Hash{knownHash},
- blocks: map[common.Hash]*types.Block{knownHash: genesis},
+ hashes: []common.Hash{genesis.Hash()},
+ blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
}
tester.fetcher = New(tester.getBlock, tester.verifyBlock, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
tester.fetcher.Start()
@@ -138,10 +111,9 @@ func (f *fetcherTester) dropPeer(peer string) {
// makeFetcher retrieves a fetcher associated with a simulated peer.
func (f *fetcherTester) makeFetcher(blocks map[common.Hash]*types.Block) blockRequesterFn {
- // Copy all the blocks to ensure they are not tampered with
closure := make(map[common.Hash]*types.Block)
for hash, block := range blocks {
- closure[hash] = copyBlock(block)
+ closure[hash] = block
}
// Create a function that returns blocks from the closure
return func(hashes []common.Hash) error {
@@ -195,8 +167,7 @@ func verifyImportDone(t *testing.T, imported chan *types.Block) {
func TestSequentialAnnouncements(t *testing.T) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
- hashes := createHashes(targetBlocks, knownHash)
- blocks := createBlocksFromHashes(hashes)
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester()
fetcher := tester.makeFetcher(blocks)
@@ -217,8 +188,7 @@ func TestSequentialAnnouncements(t *testing.T) {
func TestConcurrentAnnouncements(t *testing.T) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
- hashes := createHashes(targetBlocks, knownHash)
- blocks := createBlocksFromHashes(hashes)
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
// Assemble a tester with a built in counter for the requests
tester := newTester()
@@ -253,8 +223,7 @@ func TestConcurrentAnnouncements(t *testing.T) {
func TestOverlappingAnnouncements(t *testing.T) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
- hashes := createHashes(targetBlocks, knownHash)
- blocks := createBlocksFromHashes(hashes)
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester()
fetcher := tester.makeFetcher(blocks)
@@ -280,8 +249,7 @@ func TestOverlappingAnnouncements(t *testing.T) {
// Tests that announces already being retrieved will not be duplicated.
func TestPendingDeduplication(t *testing.T) {
// Create a hash and corresponding block
- hashes := createHashes(1, knownHash)
- blocks := createBlocksFromHashes(hashes)
+ hashes, blocks := makeChain(1, 0, genesis)
// Assemble a tester with a built in counter and delayed fetcher
tester := newTester()
@@ -319,9 +287,9 @@ func TestPendingDeduplication(t *testing.T) {
// imported when all the gaps are filled in.
func TestRandomArrivalImport(t *testing.T) {
// Create a chain of blocks to import, and choose one to delay
- hashes := createHashes(maxQueueDist, knownHash)
- blocks := createBlocksFromHashes(hashes)
- skip := maxQueueDist / 2
+ targetBlocks := maxQueueDist
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
+ skip := targetBlocks / 2
tester := newTester()
fetcher := tester.makeFetcher(blocks)
@@ -345,9 +313,9 @@ func TestRandomArrivalImport(t *testing.T) {
// are correctly scheduled, filling and importing queue gaps.
func TestQueueGapFill(t *testing.T) {
// Create a chain of blocks to import, and choose one to not announce at all
- hashes := createHashes(maxQueueDist, knownHash)
- blocks := createBlocksFromHashes(hashes)
- skip := maxQueueDist / 2
+ targetBlocks := maxQueueDist
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
+ skip := targetBlocks / 2
tester := newTester()
fetcher := tester.makeFetcher(blocks)
@@ -371,8 +339,7 @@ func TestQueueGapFill(t *testing.T) {
// announces, etc) do not get scheduled for import multiple times.
func TestImportDeduplication(t *testing.T) {
// Create two blocks to import (one for duplication, the other for stalling)
- hashes := createHashes(2, knownHash)
- blocks := createBlocksFromHashes(hashes)
+ hashes, blocks := makeChain(2, 0, genesis)
// Create the tester and wrap the importer with a counter
tester := newTester()
@@ -410,9 +377,7 @@ func TestImportDeduplication(t *testing.T) {
// discarded to prevent wasting resources on useless blocks from faulty peers.
func TestDistantDiscarding(t *testing.T) {
// Create a long chain to import
- hashes := createHashes(3*maxQueueDist, knownHash)
- blocks := createBlocksFromHashes(hashes)
-
+ hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
head := hashes[len(hashes)/2]
// Create a tester and simulate a head block being the middle of the above chain
@@ -445,11 +410,11 @@ func TestHashMemoryExhaustionAttack(t *testing.T) {
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
// Create a valid chain and an infinite junk chain
- hashes := createHashes(hashLimit+2*maxQueueDist, knownHash)
- blocks := createBlocksFromHashes(hashes)
+ targetBlocks := hashLimit + 2*maxQueueDist
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
valid := tester.makeFetcher(blocks)
- attack := createHashes(hashLimit+2*maxQueueDist, unknownHash)
+ attack, _ := makeChain(targetBlocks, 0, unknownBlock)
attacker := tester.makeFetcher(nil)
// Feed the tester a huge hashset from the attacker, and a limited from the valid peer
@@ -484,13 +449,11 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
tester.fetcher.importedHook = func(block *types.Block) { imported <- block }
// Create a valid chain and a batch of dangling (but in range) blocks
- hashes := createHashes(blockLimit+2*maxQueueDist, knownHash)
- blocks := createBlocksFromHashes(hashes)
-
+ targetBlocks := hashLimit + 2*maxQueueDist
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
attack := make(map[common.Hash]*types.Block)
- for len(attack) < blockLimit+2*maxQueueDist {
- hashes := createHashes(maxQueueDist-1, unknownHash)
- blocks := createBlocksFromHashes(hashes)
+ for i := byte(0); len(attack) < blockLimit+2*maxQueueDist; i++ {
+ hashes, blocks := makeChain(maxQueueDist-1, i, unknownBlock)
for _, hash := range hashes[:maxQueueDist-2] {
attack[hash] = blocks[hash]
}
@@ -499,7 +462,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) {
for _, block := range attack {
tester.fetcher.Enqueue("attacker", block)
}
- time.Sleep(100 * time.Millisecond)
+ time.Sleep(200 * time.Millisecond)
if queued := tester.fetcher.queue.Size(); queued != blockLimit {
t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit)
}
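
The test rewrite above drops the hand-rolled createHashes/createBlocks helpers in favour of makeChain, which generates real blocks with core.GenerateChain and returns their hashes ordered head->parent. The dependency-free sketch below (plain Go, illustrative names only, no go-ethereum imports) demonstrates that ordering invariant the tests rely on: index 0 holds the newest block, the last element holds the parent that was passed in.

package main

import "fmt"

// makeHashChain mirrors the index arithmetic of makeChain above, using
// strings instead of real block hashes.
func makeHashChain(n int, parent string) []string {
	hashes := make([]string, n+1)
	hashes[len(hashes)-1] = parent // the parent sits at the tail
	for i := 0; i < n; i++ {
		// block i+1 (counted from the parent) lands at index n-1-i,
		// so the newest block ends up at index 0
		hashes[n-1-i] = fmt.Sprintf("block-%d", i+1)
	}
	return hashes
}

func main() {
	fmt.Println(makeHashChain(3, "genesis"))
	// prints: [block-3 block-2 block-1 genesis]
}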
diff --git a/eth/fetcher/metrics.go b/eth/fetcher/metrics.go
new file mode 100644
index 000000000..e46e3c0fb
--- /dev/null
+++ b/eth/fetcher/metrics.go
@@ -0,0 +1,16 @@
+// Contains the metrics collected by the fetcher.
+
+package fetcher
+
+import (
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+var (
+ announceMeter = metrics.NewMeter("eth/sync/RemoteAnnounces")
+ announceTimer = metrics.NewTimer("eth/sync/LocalAnnounces")
+ broadcastMeter = metrics.NewMeter("eth/sync/RemoteBroadcasts")
+ broadcastTimer = metrics.NewTimer("eth/sync/LocalBroadcasts")
+ discardMeter = metrics.NewMeter("eth/sync/DiscardedBlocks")
+ futureMeter = metrics.NewMeter("eth/sync/FutureBlocks")
+)
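
metrics.go only declares the instruments; the hooks added in fetcher.go feed them (Mark(1) on each remote announce or broadcast notification, UpdateSince(block.ReceivedAt) when a block is propagated or announced onward). As a rough, dependency-free sketch of what those two instrument kinds track, assuming nothing about the metrics package beyond the call shapes visible in the diff:

package main

import (
	"fmt"
	"time"
)

// meter counts discrete events, like announceMeter.Mark(1) in the fetcher.
type meter struct{ count int64 }

func (m *meter) Mark(n int64) { m.count += n }

// timer records elapsed time per event, like broadcastTimer.UpdateSince(block.ReceivedAt),
// reduced here to a running total and count so a mean latency can be reported.
type timer struct {
	total time.Duration
	n     int64
}

func (t *timer) UpdateSince(start time.Time) {
	t.total += time.Since(start)
	t.n++
}

func (t *timer) Mean() time.Duration {
	if t.n == 0 {
		return 0
	}
	return t.total / time.Duration(t.n)
}

func main() {
	var announces meter
	var broadcasts timer

	received := time.Now().Add(-150 * time.Millisecond) // pretend the block arrived 150ms ago
	announces.Mark(1)
	broadcasts.UpdateSince(received)

	fmt.Println("announces:", announces.count)
	fmt.Println("mean broadcast latency:", broadcasts.Mean())
}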