From eedb25b22ac466d8165153589b0e4a7c7de69128 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 29 May 2015 19:47:00 +0300 Subject: eth/downloader: clean up tests and unused variables --- eth/downloader/downloader.go | 10 +- eth/downloader/downloader_test.go | 189 +++++++++++++++----------------------- 2 files changed, 79 insertions(+), 120 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 9f7f34559..de6ee1734 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -22,15 +22,13 @@ const ( MaxHashFetch = 2048 // Amount of hashes to be fetched per retrieval request MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request - peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount - hashTTL = 5 * time.Second // Time it takes for a hash request to time out + hashTTL = 5 * time.Second // Time it takes for a hash request to time out ) var ( - blockSoftTTL = 3 * time.Second // Request completion threshold for increasing or decreasing a peer's bandwidth - blockHardTTL = 3 * blockSoftTTL // Maximum time allowance before a block request is considered expired - crossCheckCycle = time.Second // Period after which to check for expired cross checks - minDesiredPeerCount = 5 // Amount of peers desired to start syncing + blockSoftTTL = 3 * time.Second // Request completion threshold for increasing or decreasing a peer's bandwidth + blockHardTTL = 3 * blockSoftTTL // Maximum time allowance before a block request is considered expired + crossCheckCycle = time.Second // Period after which to check for expired cross checks ) var ( diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index ef94ddbab..434861c61 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -21,7 +21,7 @@ func createHashes(start, amount int) (hashes []common.Hash) { hashes[len(hashes)-1] = knownHash for i := range hashes[:len(hashes)-1] { - binary.BigEndian.PutUint64(hashes[i][:8], uint64(i+2)) + binary.BigEndian.PutUint64(hashes[i][:8], uint64(start+i+2)) } return } @@ -56,7 +56,6 @@ type downloadTester struct { maxHashFetch int // Overrides the maximum number of retrieved hashes t *testing.T - pcount int done chan bool activePeerId string } @@ -114,12 +113,6 @@ func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, e return took, err } -func (dl *downloadTester) insertBlocks(blocks types.Blocks) { - for _, block := range blocks { - dl.chain = append(dl.chain, block.Hash()) - } -} - func (dl *downloadTester) hasBlock(hash common.Hash) bool { for _, h := range dl.chain { if h == hash { @@ -175,157 +168,125 @@ func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error { } func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) { - dl.pcount++ - dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id)) } -func (dl *downloadTester) badBlocksPeer(id string, td *big.Int, hash common.Hash) { - dl.pcount++ - - // This bad peer never returns any blocks - dl.downloader.RegisterPeer(id, hash, dl.getHashes, func([]common.Hash) error { - return nil - }) -} - -func TestDownload(t *testing.T) { - minDesiredPeerCount = 4 - blockHardTTL = 1 * time.Second - - targetBlocks := 1000 +// Tests that simple synchronization, without throttling from a good peer works. 
+func TestSynchronisation(t *testing.T) { + // Create a small enough block chain to download and the tester + targetBlocks := blockCacheLimit - 15 hashes := createHashes(0, targetBlocks) blocks := createBlocksFromHashes(hashes) - tester := newTester(t, hashes, blocks) - - tester.newPeer("peer1", big.NewInt(10000), hashes[0]) - tester.newPeer("peer2", big.NewInt(0), common.Hash{}) - tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{}) - tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{}) - tester.activePeerId = "peer1" - err := tester.sync("peer1", hashes[0]) - if err != nil { - t.Error("download error", err) - } - - inqueue := len(tester.downloader.queue.blockCache) - if inqueue != targetBlocks { - t.Error("expected", targetBlocks, "have", inqueue) - } -} - -func TestMissing(t *testing.T) { - targetBlocks := 1000 - hashes := createHashes(0, 1000) - extraHashes := createHashes(1001, 1003) - blocks := createBlocksFromHashes(append(extraHashes, hashes...)) tester := newTester(t, hashes, blocks) + tester.newPeer("peer", big.NewInt(10000), hashes[0]) - tester.newPeer("peer1", big.NewInt(10000), hashes[len(hashes)-1]) - - hashes = append(extraHashes, hashes[:len(hashes)-1]...) - tester.newPeer("peer2", big.NewInt(0), common.Hash{}) - - err := tester.sync("peer1", hashes[0]) - if err != nil { - t.Error("download error", err) + // Synchronise with the peer and make sure all blocks were retrieved + if err := tester.sync("peer", hashes[0]); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) } - - inqueue := len(tester.downloader.queue.blockCache) - if inqueue != targetBlocks { - t.Error("expected", targetBlocks, "have", inqueue) + if queued := len(tester.downloader.queue.blockCache); queued != targetBlocks { + t.Fatalf("synchronised block mismatch: have %v, want %v", queued, targetBlocks) } } -func TestTaking(t *testing.T) { - minDesiredPeerCount = 4 - blockHardTTL = 1 * time.Second - - targetBlocks := 1000 +// Tests that the synchronized blocks can be correctly retrieved. +func TestBlockTaking(t *testing.T) { + // Create a small enough block chain to download and the tester + targetBlocks := blockCacheLimit - 15 hashes := createHashes(0, targetBlocks) blocks := createBlocksFromHashes(hashes) - tester := newTester(t, hashes, blocks) - tester.newPeer("peer1", big.NewInt(10000), hashes[0]) - tester.newPeer("peer2", big.NewInt(0), common.Hash{}) - tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{}) - tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{}) + tester := newTester(t, hashes, blocks) + tester.newPeer("peer", big.NewInt(10000), hashes[0]) - err := tester.sync("peer1", hashes[0]) - if err != nil { - t.Error("download error", err) + // Synchronise with the peer and test block retrieval + if err := tester.sync("peer", hashes[0]); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) } - bs := tester.downloader.TakeBlocks() - if len(bs) != targetBlocks { - t.Error("retrieved block mismatch: have %v, want %v", len(bs), targetBlocks) + if took := tester.downloader.TakeBlocks(); len(took) != targetBlocks { + t.Fatalf("took block mismatch: have %v, want %v", len(took), targetBlocks) } } +// Tests that an inactive downloader will not accept incoming hashes and blocks. 
func TestInactiveDownloader(t *testing.T) { - targetBlocks := 1000 + // Create a small enough block chain to download and the tester + targetBlocks := blockCacheLimit - 15 hashes := createHashes(0, targetBlocks) blocks := createBlocksFromHashSet(createHashSet(hashes)) - tester := newTester(t, hashes, nil) - err := tester.downloader.DeliverHashes("bad peer 001", hashes) - if err != errNoSyncActive { - t.Error("expected no sync error, got", err) - } + tester := newTester(t, nil, nil) - err = tester.downloader.DeliverBlocks("bad peer 001", blocks) - if err != errNoSyncActive { - t.Error("expected no sync error, got", err) + // Check that neither hashes nor blocks are accepted + if err := tester.downloader.DeliverHashes("bad peer", hashes); err != errNoSyncActive { + t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) + } + if err := tester.downloader.DeliverBlocks("bad peer", blocks); err != errNoSyncActive { + t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) } } +// Tests that a canceled download wipes all previously accumulated state. func TestCancel(t *testing.T) { - minDesiredPeerCount = 4 - blockHardTTL = 1 * time.Second - - targetBlocks := 1000 + // Create a small enough block chain to download and the tester + targetBlocks := blockCacheLimit - 15 hashes := createHashes(0, targetBlocks) blocks := createBlocksFromHashes(hashes) - tester := newTester(t, hashes, blocks) - tester.newPeer("peer1", big.NewInt(10000), hashes[0]) + tester := newTester(t, hashes, blocks) + tester.newPeer("peer", big.NewInt(10000), hashes[0]) - err := tester.sync("peer1", hashes[0]) - if err != nil { - t.Error("download error", err) + // Synchronise with the peer, but cancel afterwards + if err := tester.sync("peer", hashes[0]); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) } - if !tester.downloader.Cancel() { - t.Error("cancel operation unsuccessfull") + t.Fatalf("cancel operation failed") } - - hashSize, blockSize := tester.downloader.queue.Size() - if hashSize > 0 || blockSize > 0 { - t.Error("block (", blockSize, ") or hash (", hashSize, ") not 0") + // Make sure the queue reports empty and no blocks can be taken + hashCount, blockCount := tester.downloader.queue.Size() + if hashCount > 0 || blockCount > 0 { + t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount) + } + if took := tester.downloader.TakeBlocks(); len(took) != 0 { + t.Errorf("taken blocks mismatch: have %d, want %d", len(took), 0) } } +// Tests that if a large batch of blocks are being downloaded, it is throttled +// until the cached blocks are retrieved. 
func TestThrottling(t *testing.T) { - minDesiredPeerCount = 4 - blockHardTTL = 1 * time.Second - - targetBlocks := 16 * blockCacheLimit + // Create a long block chain to download and the tester + targetBlocks := 8 * blockCacheLimit hashes := createHashes(0, targetBlocks) blocks := createBlocksFromHashes(hashes) - tester := newTester(t, hashes, blocks) - tester.newPeer("peer1", big.NewInt(10000), hashes[0]) - tester.newPeer("peer2", big.NewInt(0), common.Hash{}) - tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{}) - tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{}) + tester := newTester(t, hashes, blocks) + tester.newPeer("peer", big.NewInt(10000), hashes[0]) - // Concurrently download and take the blocks - took, err := tester.syncTake("peer1", hashes[0]) - if err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) + // Start a synchronisation concurrently + errc := make(chan error) + go func() { + errc <- tester.sync("peer", hashes[0]) + }() + // Iteratively take some blocks, always checking the retrieval count + for total := 0; total < targetBlocks; { + // Sleep a bit for sync to complete + time.Sleep(250 * time.Millisecond) + + // Fetch the next batch of blocks + took := tester.downloader.TakeBlocks() + if len(took) != blockCacheLimit { + t.Fatalf("block count mismatch: have %v, want %v", len(took), blockCacheLimit) + } + total += len(took) + if total > targetBlocks { + t.Fatalf("target block count mismatch: have %v, want %v", total, targetBlocks) + } } - if len(took) != targetBlocks { - t.Fatalf("downloaded block mismatch: have %v, want %v", len(took), targetBlocks) + if err := <-errc; err != nil { + t.Fatalf("block synchronization failed: %v", err) } } -- cgit v1.2.3 From 84bc93d8cb3ca2beab96b567507873e53ce80056 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 29 May 2015 21:04:20 +0300 Subject: eth/downloader: accumulating hash bans for reconnecting attackers --- eth/downloader/downloader.go | 96 +++++++++++++++++++++++++++++++++++++-- eth/downloader/downloader_test.go | 35 ++++++++++++++ 2 files changed, 126 insertions(+), 5 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index de6ee1734..b0d55bc44 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -1,7 +1,9 @@ package downloader import ( + "bytes" "errors" + "fmt" "math/rand" "sync" "sync/atomic" @@ -289,9 +291,15 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set", active.id) return ErrEmptyHashSet } - for _, hash := range hashPack.hashes { + for index, hash := range hashPack.hashes { if d.banned.Has(hash) { glog.V(logger.Debug).Infof("Peer (%s) sent a known invalid chain", active.id) + + d.queue.Insert(hashPack.hashes[:index+1]) + if err := d.banBlocks(active.id, hash); err != nil { + fmt.Println("ban err", err) + glog.V(logger.Debug).Infof("Failed to ban batch of blocks: %v", err) + } return ErrInvalidChain } } @@ -399,8 +407,10 @@ func (d *Downloader) fetchBlocks() error { glog.V(logger.Debug).Infoln("Downloading", d.queue.Pending(), "block(s)") start := time.Now() - // default ticker for re-fetching blocks every now and then + // Start a ticker to continue throttled downloads and check for bad peers ticker := time.NewTicker(20 * time.Millisecond) + defer ticker.Stop() + out: for { select { @@ -413,7 +423,7 @@ out: block := blockPack.blocks[0] if _, ok := d.checks[block.Hash()]; ok { delete(d.checks, 
block.Hash()) - continue + break } } // If the peer was previously banned and failed to deliver it's pack @@ -488,7 +498,7 @@ out: if d.queue.Pending() > 0 { // Throttle the download if block cache is full and waiting processing if d.queue.Throttle() { - continue + break } // Send a download request to all idle peers, until throttled idlePeers := d.peers.IdlePeers() @@ -529,10 +539,86 @@ out: } } glog.V(logger.Detail).Infoln("Downloaded block(s) in", time.Since(start)) - return nil } +// banBlocks retrieves a batch of blocks from a peer feeding us invalid hashes, +// and bans the head of the retrieved batch. +// +// This method only fetches one single batch as the goal is not ban an entire +// (potentially long) invalid chain - wasting a lot of time in the meanwhile -, +// but rather to gradually build up a blacklist if the peer keeps reconnecting. +func (d *Downloader) banBlocks(peerId string, head common.Hash) error { + glog.V(logger.Debug).Infof("Banning a batch out of %d blocks from %s", d.queue.Pending(), peerId) + + // Ask the peer being banned for a batch of blocks from the banning point + peer := d.peers.Peer(peerId) + if peer == nil { + return nil + } + request := d.queue.Reserve(peer, MaxBlockFetch) + if request == nil { + return nil + } + if err := peer.Fetch(request); err != nil { + return err + } + // Wait a bit for the reply to arrive, and ban if done so + timeout := time.After(blockTTL) + for { + select { + case <-d.cancelCh: + return errCancelBlockFetch + + case <-timeout: + return ErrTimeout + + case blockPack := <-d.blockCh: + blocks := blockPack.blocks + + // Short circuit if it's a stale cross check + if len(blocks) == 1 { + block := blocks[0] + if _, ok := d.checks[block.Hash()]; ok { + delete(d.checks, block.Hash()) + break + } + } + // Short circuit if it's not from the peer being banned + if blockPack.peerId != peerId { + break + } + // Short circuit if no blocks were returned + if len(blocks) == 0 { + return errors.New("no blocks returned to ban") + } + // Got the batch of invalid blocks, reconstruct their chain order + for i := 0; i < len(blocks); i++ { + for j := i + 1; j < len(blocks); j++ { + if blocks[i].NumberU64() > blocks[j].NumberU64() { + blocks[i], blocks[j] = blocks[j], blocks[i] + } + } + } + // Ensure we're really banning the correct blocks + if bytes.Compare(blocks[0].Hash().Bytes(), head.Bytes()) != 0 { + return errors.New("head block not the banned one") + } + index := 0 + for _, block := range blocks[1:] { + if bytes.Compare(block.ParentHash().Bytes(), blocks[index].Hash().Bytes()) != 0 { + break + } + index++ + } + d.banned.Add(blocks[index].Hash()) + + glog.V(logger.Debug).Infof("Banned %d blocks from: %s\n", index+1, peerId) + return nil + } + } +} + // DeliverBlocks injects a new batch of blocks received from a remote node. // This is usually invoked through the BlocksMsg by the protocol handler. 
func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) error { diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 434861c61..288072164 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -14,6 +14,7 @@ import ( var ( knownHash = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} unknownHash = common.Hash{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9} + bannedHash = common.Hash{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5} ) func createHashes(start, amount int) (hashes []common.Hash) { @@ -520,3 +521,37 @@ func TestMadeupParentBlockChainAttack(t *testing.T) { t.Fatalf("failed to synchronise blocks: %v", err) } } + +// Tests that if one/multiple malicious peers try to feed a banned blockchain to +// the downloader, it will not keep refetching the same chain indefinitely, but +// gradually block pieces of it, until it's head is also blocked. +func TestBannedChainStarvationAttack(t *testing.T) { + // Construct a valid chain, but ban one of the hashes in it + hashes := createHashes(0, 8*blockCacheLimit) + hashes[len(hashes)/2+23] = bannedHash // weird index to have non multiple of ban chunk size + + blocks := createBlocksFromHashes(hashes) + + // Create the tester and ban the selected hash + tester := newTester(t, hashes, blocks) + tester.downloader.banned.Add(bannedHash) + + // Iteratively try to sync, and verify that the banned hash list grows until + // the head of the invalid chain is blocked too. + tester.newPeer("attack", big.NewInt(10000), hashes[0]) + for banned := tester.downloader.banned.Size(); ; { + // Try to sync with the attacker, check hash chain failure + if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain) + } + // Check that the ban list grew with at least 1 new item, or all banned + bans := tester.downloader.banned.Size() + if bans < banned+1 { + if tester.downloader.banned.Has(hashes[0]) { + break + } + t.Fatalf("ban count mismatch: have %v, want %v+", bans, banned+1) + } + banned = bans + } +} -- cgit v1.2.3 From abdfcda4dd223f2a2a932628da1e9388d2670856 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 29 May 2015 21:15:28 +0300 Subject: eth/downloader: short circuit sync if head hash is banned --- eth/downloader/downloader.go | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index b0d55bc44..af9b6b2b1 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -165,6 +165,10 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error { } defer atomic.StoreInt32(&d.synchronising, 0) + // If the head hash is banned, terminate immediately + if d.banned.Has(hash) { + return ErrInvalidChain + } // Post a user notification of the sync (only once per session) if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { glog.V(logger.Info).Infoln("Block synchronisation started") -- cgit v1.2.3 From 0275fcb3d3979d14e01b4e0b9980be6d0cafbc8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Sat, 30 May 2015 00:34:23 +0300 Subject: eth/downloader: clean up and simplify the code a bit --- eth/downloader/downloader.go | 16 +++------------- 1 file changed, 3 insertions(+), 13 
deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index af9b6b2b1..159a06bd4 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -3,20 +3,18 @@ package downloader import ( "bytes" "errors" - "fmt" "math/rand" "sync" "sync/atomic" "time" - "gopkg.in/fatih/set.v0" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" + "gopkg.in/fatih/set.v0" ) const ( @@ -301,7 +299,6 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { d.queue.Insert(hashPack.hashes[:index+1]) if err := d.banBlocks(active.id, hash); err != nil { - fmt.Println("ban err", err) glog.V(logger.Debug).Infof("Failed to ban batch of blocks: %v", err) } return ErrInvalidChain @@ -596,15 +593,8 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error { if len(blocks) == 0 { return errors.New("no blocks returned to ban") } - // Got the batch of invalid blocks, reconstruct their chain order - for i := 0; i < len(blocks); i++ { - for j := i + 1; j < len(blocks); j++ { - if blocks[i].NumberU64() > blocks[j].NumberU64() { - blocks[i], blocks[j] = blocks[j], blocks[i] - } - } - } - // Ensure we're really banning the correct blocks + // Reconstruct the original chain order and ensure we're banning the correct blocks + types.BlockBy(types.Number).Sort(blocks) if bytes.Compare(blocks[0].Hash().Bytes(), head.Bytes()) != 0 { return errors.New("head block not the banned one") } -- cgit v1.2.3 From 9da0232eef7e7abd9f036fccb231220e272e6049 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Sat, 30 May 2015 00:45:22 +0300 Subject: eth/downloader: update test for shitty travis --- eth/downloader/downloader_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'eth') diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 288072164..cab213499 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -274,7 +274,7 @@ func TestThrottling(t *testing.T) { // Iteratively take some blocks, always checking the retrieval count for total := 0; total < targetBlocks; { // Sleep a bit for sync to complete - time.Sleep(250 * time.Millisecond) + time.Sleep(500 * time.Millisecond) // Fetch the next batch of blocks took := tester.downloader.TakeBlocks() -- cgit v1.2.3 From 6d497f61c65b1c02c4cfb5f4e0673574a0cd17d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 2 Jun 2015 15:57:07 +0300 Subject: eth/downloader: don't block hash deliveries while pulling blocks --- eth/downloader/downloader.go | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 159a06bd4..4b837eed5 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -418,6 +418,9 @@ out: case <-d.cancelCh: return errCancelBlockFetch + case <-d.hashCh: + // Out of bounds hashes received, ignore them + case blockPack := <-d.blockCh: // Short circuit if it's a stale cross check if len(blockPack.blocks) == 1 { @@ -472,30 +475,21 @@ out: glog.V(logger.Detail).Infof("%s: delivery partially failed: %v", peer, err) } } + case <-ticker.C: - // Check for bad peers. 
Bad peers may indicate a peer not responding - // to a `getBlocks` message. A timeout of 5 seconds is set. Peers - // that badly or poorly behave are removed from the peer set (not banned). - // Bad peers are excluded from the available peer set and therefor won't be - // reused. XXX We could re-introduce peers after X time. + // Short circuit if we lost all our peers + if d.peers.Len() == 0 { + return errNoPeers + } + // Check for block request timeouts and demote the responsible peers badPeers := d.queue.Expire(blockHardTTL) for _, pid := range badPeers { - // XXX We could make use of a reputation system here ranking peers - // in their performance - // 1) Time for them to respond; - // 2) Measure their speed; - // 3) Amount and availability. if peer := d.peers.Peer(pid); peer != nil { peer.Demote() glog.V(logger.Detail).Infof("%s: block delivery timeout", peer) } } - // After removing bad peers make sure we actually have sufficient peer left to keep downloading - if d.peers.Len() == 0 { - return errNoPeers - } - // If there are unrequested hashes left start fetching - // from the available peers. + // If there are unrequested hashes left start fetching from the available peers if d.queue.Pending() > 0 { // Throttle the download if block cache is full and waiting processing if d.queue.Throttle() { @@ -565,7 +559,7 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error { return err } // Wait a bit for the reply to arrive, and ban if done so - timeout := time.After(blockTTL) + timeout := time.After(blockHardTTL) for { select { case <-d.cancelCh: @@ -574,6 +568,9 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error { case <-timeout: return ErrTimeout + case <-d.hashCh: + // Out of bounds hashes received, ignore them + case blockPack := <-d.blockCh: blocks := blockPack.blocks -- cgit v1.2.3 From 1d7bf3d39fbd6b1a53913bb309bc07500b220ded Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 3 Jun 2015 18:39:56 +0300 Subject: eth/downloader: fix merge compile error --- eth/downloader/downloader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 4b837eed5..d113aa33f 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -551,7 +551,7 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error { if peer == nil { return nil } - request := d.queue.Reserve(peer, MaxBlockFetch) + request := d.queue.Reserve(peer) if request == nil { return nil } -- cgit v1.2.3 From b40c796ff726d54efc8c7933e1586869c2a0985a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 3 Jun 2015 19:00:54 +0300 Subject: eth/downloader: preallocate the block cache --- eth/downloader/downloader.go | 8 ++++---- eth/downloader/downloader_test.go | 2 +- eth/downloader/queue.go | 25 +++++++++---------------- 3 files changed, 14 insertions(+), 21 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index d113aa33f..cd2fd81f1 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -341,12 +341,12 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error { active.getHashes(head) continue } - // We're done, allocate the download cache and proceed pulling the blocks + // We're done, prepare the download cache and proceed pulling the blocks offset := 0 if block := d.getBlock(head); block != nil { offset = int(block.NumberU64() + 
1) } - d.queue.Alloc(offset) + d.queue.Prepare(offset) finished = true case blockPack := <-d.blockCh: @@ -504,7 +504,7 @@ out: } // Get a possible chunk. If nil is returned no chunk // could be returned due to no hashes available. - request := d.queue.Reserve(peer) + request := d.queue.Reserve(peer, peer.Capacity()) if request == nil { continue } @@ -551,7 +551,7 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error { if peer == nil { return nil } - request := d.queue.Reserve(peer) + request := d.queue.Reserve(peer, MaxBlockFetch) if request == nil { return nil } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index cab213499..4e2d527b9 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -186,7 +186,7 @@ func TestSynchronisation(t *testing.T) { if err := tester.sync("peer", hashes[0]); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if queued := len(tester.downloader.queue.blockCache); queued != targetBlocks { + if queued := len(tester.downloader.queue.blockPool); queued != targetBlocks { t.Fatalf("synchronised block mismatch: have %v, want %v", queued, targetBlocks) } } diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 671ffe51b..79ddbb857 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -50,10 +50,11 @@ type queue struct { // newQueue creates a new download queue for scheduling block retrieval. func newQueue() *queue { return &queue{ - hashPool: make(map[common.Hash]int), - hashQueue: prque.New(), - pendPool: make(map[string]*fetchRequest), - blockPool: make(map[common.Hash]int), + hashPool: make(map[common.Hash]int), + hashQueue: prque.New(), + pendPool: make(map[string]*fetchRequest), + blockPool: make(map[common.Hash]int), + blockCache: make([]*Block, blockCacheLimit), } } @@ -70,7 +71,7 @@ func (q *queue) Reset() { q.blockPool = make(map[common.Hash]int) q.blockOffset = 0 - q.blockCache = nil + q.blockCache = make([]*Block, blockCacheLimit) } // Size retrieves the number of hashes in the queue, returning separately for @@ -208,7 +209,7 @@ func (q *queue) TakeBlocks() []*Block { // Reserve reserves a set of hashes for the given peer, skipping any previously // failed download. -func (q *queue) Reserve(p *peer) *fetchRequest { +func (q *queue) Reserve(p *peer, count int) *fetchRequest { q.lock.Lock() defer q.lock.Unlock() @@ -345,20 +346,12 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) { return nil } -// Alloc ensures that the block cache is the correct size, given a starting -// offset, and a memory cap. -func (q *queue) Alloc(offset int) { +// Prepare configures the block cache offset to allow accepting inbound blocks. +func (q *queue) Prepare(offset int) { q.lock.Lock() defer q.lock.Unlock() if q.blockOffset < offset { q.blockOffset = offset } - size := len(q.hashPool) - if size > blockCacheLimit { - size = blockCacheLimit - } - if len(q.blockCache) < size { - q.blockCache = append(q.blockCache, make([]*Block, size-len(q.blockCache))...) 
- } } -- cgit v1.2.3 From 2d627995cf22b9a1187e4b22d430f84541904d2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Sun, 7 Jun 2015 18:41:05 +0300 Subject: eth/downloader: fix another rebase error --- eth/downloader/queue.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'eth') diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 79ddbb857..3c99efb81 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -230,8 +230,7 @@ func (q *queue) Reserve(p *peer, count int) *fetchRequest { send := make(map[common.Hash]int) skip := make(map[common.Hash]int) - capacity := p.Capacity() - for proc := 0; proc < space && len(send) < capacity && !q.hashQueue.Empty(); proc++ { + for proc := 0; proc < space && len(send) < count && !q.hashQueue.Empty(); proc++ { hash, priority := q.hashQueue.Pop() if p.ignored.Has(hash) { skip[hash.(common.Hash)] = int(priority) -- cgit v1.2.3 From 4b2dd44711a04c639ecde68806455ccf7244acce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Sun, 7 Jun 2015 18:46:32 +0300 Subject: eth/downloader: fix throttling test to be less timing dependent --- eth/downloader/downloader_test.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 4e2d527b9..4219c2788 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -273,9 +273,13 @@ func TestThrottling(t *testing.T) { }() // Iteratively take some blocks, always checking the retrieval count for total := 0; total < targetBlocks; { - // Sleep a bit for sync to complete - time.Sleep(500 * time.Millisecond) - + // Wait a bit for sync to complete + for start := time.Now(); time.Since(start) < 3*time.Second; { + time.Sleep(25 * time.Millisecond) + if len(tester.downloader.queue.blockPool) == blockCacheLimit { + break + } + } // Fetch the next batch of blocks took := tester.downloader.TakeBlocks() if len(took) != blockCacheLimit { -- cgit v1.2.3 From 63c6cedb14cbd461733e33ffac016dc7d8e431ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 8 Jun 2015 14:06:36 +0300 Subject: eth/downloader: cap the hash ban set, add test for it --- eth/downloader/downloader.go | 23 ++++++++++++++------- eth/downloader/downloader_test.go | 43 +++++++++++++++++++++++++++++++++++++++ eth/downloader/peer.go | 2 +- eth/downloader/queue.go | 2 +- eth/handler.go | 4 ++-- eth/peer.go | 2 +- 6 files changed, 64 insertions(+), 12 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index cd2fd81f1..7eda49186 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -17,18 +17,17 @@ import ( "gopkg.in/fatih/set.v0" ) -const ( +var ( MinHashFetch = 512 // Minimum amount of hashes to not consider a peer stalling MaxHashFetch = 2048 // Amount of hashes to be fetched per retrieval request MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request - hashTTL = 5 * time.Second // Time it takes for a hash request to time out -) - -var ( + hashTTL = 5 * time.Second // Time it takes for a hash request to time out blockSoftTTL = 3 * time.Second // Request completion threshold for increasing or decreasing a peer's bandwidth blockHardTTL = 3 * blockSoftTTL // Maximum time allowance before a block request is considered expired crossCheckCycle = time.Second // Period after which to check for expired cross checks + + 
maxBannedHashes = 4096 // Number of bannable hashes before phasing old ones out ) var ( @@ -602,9 +601,19 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error { } index++ } + // Ban the head hash and phase out any excess d.banned.Add(blocks[index].Hash()) - - glog.V(logger.Debug).Infof("Banned %d blocks from: %s\n", index+1, peerId) + for d.banned.Size() > maxBannedHashes { + d.banned.Each(func(item interface{}) bool { + // Skip any hard coded bans + if core.BadHashes[item.(common.Hash)] { + return true + } + d.banned.Remove(item) + return false + }) + } + glog.V(logger.Debug).Infof("Banned %d blocks from: %s", index+1, peerId) return nil } } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 4219c2788..c9e84371c 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" ) @@ -559,3 +560,45 @@ func TestBannedChainStarvationAttack(t *testing.T) { banned = bans } } + +// Tests that if a peer sends excessively many/large invalid chains that are +// gradually banned, it will have an upper limit on the consumed memory and also +// the origin bad hashes will not be evacuated. +func TestBannedChainMemoryExhaustionAttack(t *testing.T) { + // Reduce the test size a bit + MaxBlockFetch = 4 + maxBannedHashes = 256 + + // Construct a banned chain with more chunks than the ban limit + hashes := createHashes(0, maxBannedHashes*MaxBlockFetch) + hashes[len(hashes)-1] = bannedHash // weird index to have non multiple of ban chunk size + + blocks := createBlocksFromHashes(hashes) + + // Create the tester and ban the selected hash + tester := newTester(t, hashes, blocks) + tester.downloader.banned.Add(bannedHash) + + // Iteratively try to sync, and verify that the banned hash list grows until + // the head of the invalid chain is blocked too. 
+ tester.newPeer("attack", big.NewInt(10000), hashes[0]) + for { + // Try to sync with the attacker, check hash chain failure + if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain) + } + // Short circuit if the entire chain was banned + if tester.downloader.banned.Has(hashes[0]) { + break + } + // Otherwise ensure we never exceed the memory allowance and the hard coded bans are untouched + if bans := tester.downloader.banned.Size(); bans > maxBannedHashes { + t.Fatalf("ban cap exceeded: have %v, want max %v", bans, maxBannedHashes) + } + for hash, _ := range core.BadHashes { + if !tester.downloader.banned.Has(hash) { + t.Fatalf("hard coded ban evacuated: %x", hash) + } + } + } +} diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 5fbc64648..9614a6951 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -94,7 +94,7 @@ func (p *peer) SetIdle() { for { // Calculate the new download bandwidth allowance prev := atomic.LoadInt32(&p.capacity) - next := int32(math.Max(1, math.Min(MaxBlockFetch, float64(prev)*scale))) + next := int32(math.Max(1, math.Min(float64(MaxBlockFetch), float64(prev)*scale))) // Try to update the old value if atomic.CompareAndSwapInt32(&p.capacity, prev, next) { diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 3c99efb81..7abbd42fd 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -16,7 +16,7 @@ import ( "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) -const ( +var ( blockCacheLimit = 8 * MaxBlockFetch // Maximum number of blocks to cache before throttling the download ) diff --git a/eth/handler.go b/eth/handler.go index aea33452c..37bbd3691 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -213,8 +213,8 @@ func (self *ProtocolManager) handleMsg(p *peer) error { return errResp(ErrDecode, "->msg %v: %v", msg, err) } - if request.Amount > downloader.MaxHashFetch { - request.Amount = downloader.MaxHashFetch + if request.Amount > uint64(downloader.MaxHashFetch) { + request.Amount = uint64(downloader.MaxHashFetch) } hashes := self.chainman.GetBlockHashesFromHash(request.Hash, request.Amount) diff --git a/eth/peer.go b/eth/peer.go index bb6a20349..b2fa20ebb 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -102,7 +102,7 @@ func (p *peer) sendTransaction(tx *types.Transaction) error { func (p *peer) requestHashes(from common.Hash) error { glog.V(logger.Debug).Infof("[%s] fetching hashes (%d) %x...\n", p.id, downloader.MaxHashFetch, from[:4]) - return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, downloader.MaxHashFetch}) + return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, uint64(downloader.MaxHashFetch)}) } func (p *peer) requestBlocks(hashes []common.Hash) error { -- cgit v1.2.3 From c4f224932f69099f595211755ebc0a9933616315 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 8 Jun 2015 14:46:31 +0300 Subject: eth/downloader: reject peer registration if head is banned --- eth/downloader/downloader.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 7eda49186..92cb1a650 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -36,6 +36,7 @@ var ( errUnknownPeer = errors.New("peer is unknown or unhealthy") ErrBadPeer = errors.New("action from bad peer ignored") ErrStallingPeer = errors.New("peer is 
stalling") + errBannedHead = errors.New("peer head hash already banned") errNoPeers = errors.New("no peers to keep download active") ErrPendingQueue = errors.New("pending items in queue") ErrTimeout = errors.New("timeout") @@ -72,11 +73,10 @@ type crossCheck struct { type Downloader struct { mux *event.TypeMux - mu sync.RWMutex queue *queue // Scheduler for selecting the hashes to download peers *peerSet // Set of active peers from which download can proceed checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain - banned *set.SetNonTS // Set of hashes we've received and banned + banned *set.Set // Set of hashes we've received and banned // Callbacks hasBlock hashCheckFn @@ -114,7 +114,7 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloa blockCh: make(chan blockPack, 1), } // Inject all the known bad hashes - downloader.banned = set.NewNonTS() + downloader.banned = set.New() for hash, _ := range core.BadHashes { downloader.banned.Add(hash) } @@ -133,6 +133,12 @@ func (d *Downloader) Synchronising() bool { // RegisterPeer injects a new download peer into the set of block source to be // used for fetching hashes and blocks from. func (d *Downloader) RegisterPeer(id string, head common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error { + // If the peer wants to send a banned hash, reject + if d.banned.Has(head) { + glog.V(logger.Debug).Infoln("Register rejected, head hash banned:", id) + return errBannedHead + } + // Otherwise try to construct and register the peer glog.V(logger.Detail).Infoln("Registering peer", id) if err := d.peers.Register(newPeer(id, head, getHashes, getBlocks)); err != nil { glog.V(logger.Error).Infoln("Register failed:", err) @@ -199,6 +205,8 @@ func (d *Downloader) TakeBlocks() []*Block { return d.queue.TakeBlocks() } +// Has checks if the downloader knows about a particular hash, meaning that its +// either already downloaded of pending retrieval. func (d *Downloader) Has(hash common.Hash) bool { return d.queue.Has(hash) } @@ -604,14 +612,17 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error { // Ban the head hash and phase out any excess d.banned.Add(blocks[index].Hash()) for d.banned.Size() > maxBannedHashes { + var evacuate common.Hash + d.banned.Each(func(item interface{}) bool { // Skip any hard coded bans if core.BadHashes[item.(common.Hash)] { return true } - d.banned.Remove(item) + evacuate = item.(common.Hash) return false }) + d.banned.Remove(evacuate) } glog.V(logger.Debug).Infof("Banned %d blocks from: %s", index+1, peerId) return nil -- cgit v1.2.3 From 4ed3509a02c7f5a09036e6e9cb615c6def6d25f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 8 Jun 2015 15:02:52 +0300 Subject: eth/downloader: test registration rejection on head ban --- eth/downloader/downloader_test.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index c9e84371c..5f10fb41f 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -169,8 +169,9 @@ func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error { } } -func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) { - dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id)) +// newPeer registers a new block download source into the syncer. 
+func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) error { + return dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id)) } // Tests that simple synchronization, without throttling from a good peer works. @@ -559,6 +560,13 @@ func TestBannedChainStarvationAttack(t *testing.T) { } banned = bans } + // Check that after banning an entire chain, bad peers get dropped + if err := tester.newPeer("new attacker", big.NewInt(10000), hashes[0]); err != errBannedHead { + t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead) + } + if peer := tester.downloader.peers.Peer("net attacker"); peer != nil { + t.Fatalf("banned attacker registered: %v", peer) + } } // Tests that if a peer sends excessively many/large invalid chains that are -- cgit v1.2.3
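
The capped ban-list mechanism introduced in the last two commits above keeps memory bounded while guaranteeing that the hard-coded bad hashes are never evicted. The following standalone Go sketch illustrates that idea only; it is a simplified stand-in, not the go-ethereum implementation: a plain map replaces the set.v0 set, and Hash, maxBannedHashes and hardCodedBans are assumed placeholders for common.Hash, the downloader's limit and core.BadHashes.

package main

import "fmt"

// Hash is a simplified stand-in for common.Hash in this sketch.
type Hash [32]byte

// maxBannedHashes mirrors the cap added in the patch above.
const maxBannedHashes = 4096

// hardCodedBans mimics core.BadHashes: entries that must never be evicted.
var hardCodedBans = map[Hash]bool{}

// banSet is a size-capped ban list.
type banSet struct {
	banned map[Hash]struct{}
}

// newBanSet seeds the set with the permanent bans, as the downloader does on construction.
func newBanSet() *banSet {
	b := &banSet{banned: make(map[Hash]struct{})}
	for h := range hardCodedBans {
		b.banned[h] = struct{}{}
	}
	return b
}

// Ban adds a hash and, while the cap is exceeded, evicts arbitrary
// non-hard-coded entries, so memory stays bounded but permanent bans survive.
func (b *banSet) Ban(h Hash) {
	b.banned[h] = struct{}{}
	for len(b.banned) > maxBannedHashes {
		evicted := false
		for old := range b.banned {
			if hardCodedBans[old] {
				continue // never drop the permanent bans
			}
			delete(b.banned, old)
			evicted = true
			break
		}
		if !evicted {
			break // everything left is a permanent ban; nothing to evict
		}
	}
}

// Has reports whether a hash is currently banned.
func (b *banSet) Has(h Hash) bool {
	_, ok := b.banned[h]
	return ok
}

func main() {
	b := newBanSet()
	b.Ban(Hash{5, 5, 5})
	fmt.Println(b.Has(Hash{5, 5, 5})) // true
}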