-rw-r--r--  README.md                          |   6
-rw-r--r--  core/chain_manager.go              |  13
-rw-r--r--  core/filter.go                     |  15
-rw-r--r--  eth/backend.go                     |   2
-rw-r--r--  eth/downloader/downloader.go       | 171
-rw-r--r--  eth/downloader/downloader_test.go  | 283
-rw-r--r--  eth/downloader/peer.go             |   2
-rw-r--r--  eth/downloader/queue.go            |  30
-rw-r--r--  eth/handler.go                     | 206
-rw-r--r--  eth/peer.go                        |   9
-rw-r--r--  eth/protocol.go                    |   2
-rw-r--r--  eth/sync.go                        | 132
12 files changed, 599 insertions(+), 272 deletions(-)
@@ -12,15 +12,17 @@
 master | [](http://waffle.io/ethereum/go-ethereum)
 [](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
 
-Automated (dev) builds
+Automated development builds
 ======================
 
+The following builds are built automatically by our build servers after each push to the [develop](https://github.com/ethereum/go-ethereum/tree/develop) branch.
+
 * [Docker](https://registry.hub.docker.com/u/ethereum/client-go/)
 * [OS X](http://build.ethdev.com/builds/OSX%20Go%20develop%20branch/Mist-OSX-latest.dmg)
 * Ubuntu [trusty](https://build.ethdev.com/builds/Linux%20Go%20develop%20deb%20i386-trusty/latest/) | [utopic](https://build.ethdev.com/builds/Linux%20Go%20develop%20deb%20i386-utopic/latest/)
-* [Windows 64-bit](https://build.ethdev.com/builds/Windows%20Go%20develop%20branch/Geth-Win64-latest.7z)
+* [Windows 64-bit](https://build.ethdev.com/builds/Windows%20Go%20develop%20branch/Geth-Win64-latest.zip)
 
 Building the source
 ===================
diff --git a/core/chain_manager.go b/core/chain_manager.go
index e87253304..a0ce20006 100644
--- a/core/chain_manager.go
+++ b/core/chain_manager.go
@@ -111,12 +111,13 @@ type ChainManager struct {
 
 func NewChainManager(genesis *types.Block, blockDb, stateDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
     bc := &ChainManager{
-        blockDb:  blockDb,
-        stateDb:  stateDb,
-        eventMux: mux,
-        quit:     make(chan struct{}),
-        cache:    NewBlockCache(blockCacheLimit),
-        pow:      pow,
+        blockDb:      blockDb,
+        stateDb:      stateDb,
+        genesisBlock: GenesisBlock(42, stateDb),
+        eventMux:     mux,
+        quit:         make(chan struct{}),
+        cache:        NewBlockCache(blockCacheLimit),
+        pow:          pow,
     }
 
     // Check the genesis block given to the chain manager. If the genesis block mismatches block number 0
diff --git a/core/filter.go b/core/filter.go
index 2ca57da65..fcdf68dd0 100644
--- a/core/filter.go
+++ b/core/filter.go
@@ -1,6 +1,7 @@
 package core
 
 import (
+    "fmt"
     "math"
 
     "github.com/ethereum/go-ethereum/common"
@@ -75,15 +76,19 @@ func (self *Filter) Find() state.Logs {
     var (
         logs  state.Logs
         block = self.eth.ChainManager().GetBlockByNumber(latestBlockNo)
-        quit  bool
     )
-    for i := 0; !quit && block != nil; i++ {
+
+done:
+    for i := 0; block != nil; i++ {
+        fmt.Println(block.NumberU64() == 0)
         // Quit on latest
         switch {
-        case block.NumberU64() == earliestBlockNo, block.NumberU64() == 0:
-            quit = true
+        case block.NumberU64() == 0:
+            break done
+        case block.NumberU64() == earliestBlockNo:
+            break done
         case self.max <= len(logs):
-            break
+            break done
         }
 
         // Use bloom filtering to see if this block is interesting given the
diff --git a/eth/backend.go b/eth/backend.go
index 06627416d..fcbea04a2 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -285,7 +285,7 @@ func New(config *Config) (*Ethereum, error) {
     }
 
     eth.pow = ethash.New()
-    genesis := core.GenesisBlock(uint64(config.GenesisNonce), blockDb)
+    genesis := core.GenesisBlock(uint64(config.GenesisNonce), stateDb)
     eth.chainManager, err = core.NewChainManager(genesis, blockDb, stateDb, eth.pow, eth.EventMux())
     if err != nil {
         return nil, err
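The core/filter.go hunk above works around a classic Go pitfall: inside a switch, a bare break only exits the switch statement, which is why the old `case self.max <= len(logs): break` never stopped the enclosing loop. The patch introduces a `done` label so every terminating case can break out of the loop itself. A minimal, self-contained sketch of the idiom (illustrative, not part of the patch):

    package main

    import "fmt"

    func main() {
        values := []int{1, 2, 3, 4, 5}

    done: // label the loop so the switch can break out of it
        for _, v := range values {
            switch {
            case v == 3:
                break done // exits the for loop, not just the switch
            default:
                fmt.Println("processing", v)
            }
        }
        fmt.Println("stopped by the labelled break")
    }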
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" + "gopkg.in/fatih/set.v0" ) -const ( +var ( MinHashFetch = 512 // Minimum amount of hashes to not consider a peer stalling MaxHashFetch = 2048 // Amount of hashes to be fetched per retrieval request MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request - peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount - hashTTL = 5 * time.Second // Time it takes for a hash request to time out -) + hashTTL = 5 * time.Second // Time it takes for a hash request to time out + blockSoftTTL = 3 * time.Second // Request completion threshold for increasing or decreasing a peer's bandwidth + blockHardTTL = 3 * blockSoftTTL // Maximum time allowance before a block request is considered expired + crossCheckCycle = time.Second // Period after which to check for expired cross checks -var ( - blockSoftTTL = 3 * time.Second // Request completion threshold for increasing or decreasing a peer's bandwidth - blockHardTTL = 3 * blockSoftTTL // Maximum time allowance before a block request is considered expired - crossCheckCycle = time.Second // Period after which to check for expired cross checks - minDesiredPeerCount = 5 // Amount of peers desired to start syncing + maxBannedHashes = 4096 // Number of bannable hashes before phasing old ones out ) var ( @@ -39,6 +36,7 @@ var ( errUnknownPeer = errors.New("peer is unknown or unhealthy") ErrBadPeer = errors.New("action from bad peer ignored") ErrStallingPeer = errors.New("peer is stalling") + errBannedHead = errors.New("peer head hash already banned") errNoPeers = errors.New("no peers to keep download active") ErrPendingQueue = errors.New("pending items in queue") ErrTimeout = errors.New("timeout") @@ -75,11 +73,10 @@ type crossCheck struct { type Downloader struct { mux *event.TypeMux - mu sync.RWMutex queue *queue // Scheduler for selecting the hashes to download peers *peerSet // Set of active peers from which download can proceed checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain - banned *set.SetNonTS // Set of hashes we've received and banned + banned *set.Set // Set of hashes we've received and banned // Callbacks hasBlock hashCheckFn @@ -117,7 +114,7 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloa blockCh: make(chan blockPack, 1), } // Inject all the known bad hashes - downloader.banned = set.NewNonTS() + downloader.banned = set.New() for hash, _ := range core.BadHashes { downloader.banned.Add(hash) } @@ -136,6 +133,12 @@ func (d *Downloader) Synchronising() bool { // RegisterPeer injects a new download peer into the set of block source to be // used for fetching hashes and blocks from. 
 func (d *Downloader) RegisterPeer(id string, head common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {
+    // If the peer wants to send a banned hash, reject
+    if d.banned.Has(head) {
+        glog.V(logger.Debug).Infoln("Register rejected, head hash banned:", id)
+        return errBannedHead
+    }
+    // Otherwise try to construct and register the peer
     glog.V(logger.Detail).Infoln("Registering peer", id)
     if err := d.peers.Register(newPeer(id, head, getHashes, getBlocks)); err != nil {
         glog.V(logger.Error).Infoln("Register failed:", err)
@@ -165,6 +168,10 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
     }
     defer atomic.StoreInt32(&d.synchronising, 0)
 
+    // If the head hash is banned, terminate immediately
+    if d.banned.Has(hash) {
+        return ErrInvalidChain
+    }
     // Post a user notification of the sync (only once per session)
     if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
         glog.V(logger.Info).Infoln("Block synchronisation started")
@@ -198,6 +205,8 @@ func (d *Downloader) TakeBlocks() []*Block {
     return d.queue.TakeBlocks()
 }
 
+// Has checks if the downloader knows about a particular hash, meaning that it's
+// either already downloaded or pending retrieval.
 func (d *Downloader) Has(hash common.Hash) bool {
     return d.queue.Has(hash)
 }
@@ -291,9 +300,14 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
             glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set", active.id)
             return ErrEmptyHashSet
         }
-        for _, hash := range hashPack.hashes {
+        for index, hash := range hashPack.hashes {
             if d.banned.Has(hash) {
                 glog.V(logger.Debug).Infof("Peer (%s) sent a known invalid chain", active.id)
+
+                d.queue.Insert(hashPack.hashes[:index+1])
+                if err := d.banBlocks(active.id, hash); err != nil {
+                    glog.V(logger.Debug).Infof("Failed to ban batch of blocks: %v", err)
+                }
                 return ErrInvalidChain
             }
         }
@@ -334,12 +348,12 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
             active.getHashes(head)
             continue
         }
-        // We're done, allocate the download cache and proceed pulling the blocks
+        // We're done, prepare the download cache and proceed pulling the blocks
         offset := 0
         if block := d.getBlock(head); block != nil {
             offset = int(block.NumberU64() + 1)
         }
-        d.queue.Alloc(offset)
+        d.queue.Prepare(offset)
         finished = true
 
     case blockPack := <-d.blockCh:
@@ -401,21 +415,26 @@ func (d *Downloader) fetchBlocks() error {
     glog.V(logger.Debug).Infoln("Downloading", d.queue.Pending(), "block(s)")
     start := time.Now()
 
-    // default ticker for re-fetching blocks every now and then
+    // Start a ticker to continue throttled downloads and check for bad peers
     ticker := time.NewTicker(20 * time.Millisecond)
+    defer ticker.Stop()
+
 out:
     for {
         select {
         case <-d.cancelCh:
             return errCancelBlockFetch
 
+        case <-d.hashCh:
+            // Out of bounds hashes received, ignore them
+
         case blockPack := <-d.blockCh:
             // Short circuit if it's a stale cross check
             if len(blockPack.blocks) == 1 {
                 block := blockPack.blocks[0]
                 if _, ok := d.checks[block.Hash()]; ok {
                     delete(d.checks, block.Hash())
-                    continue
+                    break
                 }
             }
             // If the peer was previously banned and failed to deliver it's pack
@@ -463,34 +482,25 @@ out:
                     glog.V(logger.Detail).Infof("%s: delivery partially failed: %v", peer, err)
                 }
             }
+
         case <-ticker.C:
-            // Check for bad peers. Bad peers may indicate a peer not responding
-            // to a `getBlocks` message. A timeout of 5 seconds is set. Peers
-            // that badly or poorly behave are removed from the peer set (not banned).
-            // Bad peers are excluded from the available peer set and therefor won't be
-            // reused. XXX We could re-introduce peers after X time.
+            // Short circuit if we lost all our peers
+            if d.peers.Len() == 0 {
+                return errNoPeers
+            }
+            // Check for block request timeouts and demote the responsible peers
             badPeers := d.queue.Expire(blockHardTTL)
             for _, pid := range badPeers {
-                // XXX We could make use of a reputation system here ranking peers
-                // in their performance
-                // 1) Time for them to respond;
-                // 2) Measure their speed;
-                // 3) Amount and availability.
                 if peer := d.peers.Peer(pid); peer != nil {
                     peer.Demote()
                     glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
                 }
             }
-            // After removing bad peers make sure we actually have sufficient peer left to keep downloading
-            if d.peers.Len() == 0 {
-                return errNoPeers
-            }
-            // If there are unrequested hashes left start fetching
-            // from the available peers.
+            // If there are unrequested hashes left start fetching from the available peers
             if d.queue.Pending() > 0 {
                 // Throttle the download if block cache is full and waiting processing
                 if d.queue.Throttle() {
-                    continue
+                    break
                 }
                 // Send a download request to all idle peers, until throttled
                 idlePeers := d.peers.IdlePeers()
@@ -501,7 +511,7 @@ out:
                 }
                 // Get a possible chunk. If nil is returned no chunk
                 // could be returned due to no hashes available.
-                request := d.queue.Reserve(peer)
+                request := d.queue.Reserve(peer, peer.Capacity())
                 if request == nil {
                     continue
                 }
@@ -531,10 +541,95 @@ out:
         }
     }
     glog.V(logger.Detail).Infoln("Downloaded block(s) in", time.Since(start))
-
     return nil
 }
 
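Note that the stale cross-check case above can now end with a bare break: inside a select, break only leaves the select statement while the labelled out: loop carries on, so the old continue was not strictly needed to keep the loop alive. A runnable sketch of these break semantics (illustrative only, not from the patch):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        tick := time.Tick(10 * time.Millisecond)
        stop := time.After(50 * time.Millisecond)

    out:
        for {
            select {
            case <-tick:
                fmt.Println("tick")
                break // leaves only the select; the for loop continues
            case <-stop:
                break out // the label is needed to leave the loop itself
            }
        }
        fmt.Println("loop exited")
    }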
+// banBlocks retrieves a batch of blocks from a peer feeding us invalid hashes,
+// and bans the head of the retrieved batch.
+//
+// This method only fetches a single batch as the goal is not to ban an entire
+// (potentially long) invalid chain - wasting a lot of time in the meanwhile -,
+// but rather to gradually build up a blacklist if the peer keeps reconnecting.
+func (d *Downloader) banBlocks(peerId string, head common.Hash) error {
+    glog.V(logger.Debug).Infof("Banning a batch out of %d blocks from %s", d.queue.Pending(), peerId)
+
+    // Ask the peer being banned for a batch of blocks from the banning point
+    peer := d.peers.Peer(peerId)
+    if peer == nil {
+        return nil
+    }
+    request := d.queue.Reserve(peer, MaxBlockFetch)
+    if request == nil {
+        return nil
+    }
+    if err := peer.Fetch(request); err != nil {
+        return err
+    }
+    // Wait a bit for the reply to arrive, and ban if it does so
+    timeout := time.After(blockHardTTL)
+    for {
+        select {
+        case <-d.cancelCh:
+            return errCancelBlockFetch
+
+        case <-timeout:
+            return ErrTimeout
+
+        case <-d.hashCh:
+            // Out of bounds hashes received, ignore them
+
+        case blockPack := <-d.blockCh:
+            blocks := blockPack.blocks
+
+            // Short circuit if it's a stale cross check
+            if len(blocks) == 1 {
+                block := blocks[0]
+                if _, ok := d.checks[block.Hash()]; ok {
+                    delete(d.checks, block.Hash())
+                    break
+                }
+            }
+            // Short circuit if it's not from the peer being banned
+            if blockPack.peerId != peerId {
+                break
+            }
+            // Short circuit if no blocks were returned
+            if len(blocks) == 0 {
+                return errors.New("no blocks returned to ban")
+            }
+            // Reconstruct the original chain order and ensure we're banning the correct blocks
+            types.BlockBy(types.Number).Sort(blocks)
+            if bytes.Compare(blocks[0].Hash().Bytes(), head.Bytes()) != 0 {
+                return errors.New("head block not the banned one")
+            }
+            index := 0
+            for _, block := range blocks[1:] {
+                if bytes.Compare(block.ParentHash().Bytes(), blocks[index].Hash().Bytes()) != 0 {
+                    break
+                }
+                index++
+            }
+            // Ban the head hash and phase out any excess
+            d.banned.Add(blocks[index].Hash())
+            for d.banned.Size() > maxBannedHashes {
+                var evacuate common.Hash
+
+                d.banned.Each(func(item interface{}) bool {
+                    // Skip any hard coded bans
+                    if core.BadHashes[item.(common.Hash)] {
+                        return true
+                    }
+                    evacuate = item.(common.Hash)
+                    return false
+                })
+                d.banned.Remove(evacuate)
+            }
+            glog.V(logger.Debug).Infof("Banned %d blocks from: %s", index+1, peerId)
+            return nil
+        }
+    }
+}
+
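The eviction loop in banBlocks leans on the ban set's Each callback, whose contract is easy to misread: returning false stops the iteration, so each pass evicts exactly one hash that is not a hard coded ban. A standalone sketch of that policy, using the same gopkg.in/fatih/set.v0 API but with hypothetical names and string keys for brevity:

    package main

    import (
        "fmt"

        "gopkg.in/fatih/set.v0"
    )

    // evictExcess drops arbitrary entries until the set fits the limit,
    // but never touches the protected (hard coded) ones.
    func evictExcess(banned *set.Set, protected map[string]bool, limit int) {
        for banned.Size() > limit {
            var evacuate string
            banned.Each(func(item interface{}) bool {
                if protected[item.(string)] {
                    return true // keep iterating past protected entries
                }
                evacuate = item.(string)
                return false // first candidate found, stop the walk
            })
            banned.Remove(evacuate)
        }
    }

    func main() {
        banned := set.New("seed", "a", "b", "c")
        evictExcess(banned, map[string]bool{"seed": true}, 2)
        fmt.Println(banned.Size(), banned.Has("seed")) // 2 true
    }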
 // DeliverBlocks injects a new batch of blocks received from a remote node.
 // This is usually invoked through the BlocksMsg by the protocol handler.
 func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) error {
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index ef94ddbab..5f10fb41f 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -7,6 +7,7 @@ import (
     "time"
 
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/event"
 )
@@ -14,6 +15,7 @@ import (
 var (
     knownHash   = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
     unknownHash = common.Hash{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
+    bannedHash  = common.Hash{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}
 )
 
 func createHashes(start, amount int) (hashes []common.Hash) {
@@ -21,7 +23,7 @@ func createHashes(start, amount int) (hashes []common.Hash) {
     hashes[len(hashes)-1] = knownHash
 
     for i := range hashes[:len(hashes)-1] {
-        binary.BigEndian.PutUint64(hashes[i][:8], uint64(i+2))
+        binary.BigEndian.PutUint64(hashes[i][:8], uint64(start+i+2))
     }
     return
 }
@@ -56,7 +58,6 @@ type downloadTester struct {
     maxHashFetch int // Overrides the maximum number of retrieved hashes
 
     t            *testing.T
-    pcount       int
     done         chan bool
     activePeerId string
 }
@@ -114,12 +115,6 @@ func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, e
     return took, err
 }
 
-func (dl *downloadTester) insertBlocks(blocks types.Blocks) {
-    for _, block := range blocks {
-        dl.chain = append(dl.chain, block.Hash())
-    }
-}
-
 func (dl *downloadTester) hasBlock(hash common.Hash) bool {
     for _, h := range dl.chain {
         if h == hash {
@@ -174,158 +169,131 @@ func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error {
     }
 }
 
-func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) {
-    dl.pcount++
-
-    dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id))
-}
-
-func (dl *downloadTester) badBlocksPeer(id string, td *big.Int, hash common.Hash) {
-    dl.pcount++
-
-    // This bad peer never returns any blocks
-    dl.downloader.RegisterPeer(id, hash, dl.getHashes, func([]common.Hash) error {
-        return nil
-    })
+// newPeer registers a new block download source into the syncer.
+func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) error {
+    return dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id))
 }
 
-func TestDownload(t *testing.T) {
-    minDesiredPeerCount = 4
-    blockHardTTL = 1 * time.Second
-
-    targetBlocks := 1000
+// Tests that simple synchronisation, without throttling, from a good peer works.
+func TestSynchronisation(t *testing.T) {
+    // Create a small enough block chain to download and the tester
+    targetBlocks := blockCacheLimit - 15
     hashes := createHashes(0, targetBlocks)
     blocks := createBlocksFromHashes(hashes)
 
-    tester := newTester(t, hashes, blocks)
-    tester.newPeer("peer1", big.NewInt(10000), hashes[0])
-    tester.newPeer("peer2", big.NewInt(0), common.Hash{})
-    tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
-    tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
-    tester.activePeerId = "peer1"
-
-    err := tester.sync("peer1", hashes[0])
-    if err != nil {
-        t.Error("download error", err)
-    }
-
-    inqueue := len(tester.downloader.queue.blockCache)
-    if inqueue != targetBlocks {
-        t.Error("expected", targetBlocks, "have", inqueue)
-    }
-}
-
-func TestMissing(t *testing.T) {
-    targetBlocks := 1000
-    hashes := createHashes(0, 1000)
-    extraHashes := createHashes(1001, 1003)
-    blocks := createBlocksFromHashes(append(extraHashes, hashes...))
     tester := newTester(t, hashes, blocks)
+    tester.newPeer("peer", big.NewInt(10000), hashes[0])
 
-    tester.newPeer("peer1", big.NewInt(10000), hashes[len(hashes)-1])
-
-    hashes = append(extraHashes, hashes[:len(hashes)-1]...)
-    tester.newPeer("peer2", big.NewInt(0), common.Hash{})
-
-    err := tester.sync("peer1", hashes[0])
-    if err != nil {
-        t.Error("download error", err)
+    // Synchronise with the peer and make sure all blocks were retrieved
+    if err := tester.sync("peer", hashes[0]); err != nil {
+        t.Fatalf("failed to synchronise blocks: %v", err)
     }
-
-    inqueue := len(tester.downloader.queue.blockCache)
-    if inqueue != targetBlocks {
-        t.Error("expected", targetBlocks, "have", inqueue)
+    if queued := len(tester.downloader.queue.blockPool); queued != targetBlocks {
+        t.Fatalf("synchronised block mismatch: have %v, want %v", queued, targetBlocks)
     }
 }
 
-func TestTaking(t *testing.T) {
-    minDesiredPeerCount = 4
-    blockHardTTL = 1 * time.Second
-
-    targetBlocks := 1000
+// Tests that the synchronized blocks can be correctly retrieved.
+func TestBlockTaking(t *testing.T) {
+    // Create a small enough block chain to download and the tester
+    targetBlocks := blockCacheLimit - 15
     hashes := createHashes(0, targetBlocks)
     blocks := createBlocksFromHashes(hashes)
-    tester := newTester(t, hashes, blocks)
-    tester.newPeer("peer1", big.NewInt(10000), hashes[0])
-    tester.newPeer("peer2", big.NewInt(0), common.Hash{})
-    tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
-    tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
 
+    tester := newTester(t, hashes, blocks)
+    tester.newPeer("peer", big.NewInt(10000), hashes[0])
 
-    err := tester.sync("peer1", hashes[0])
-    if err != nil {
-        t.Error("download error", err)
+    // Synchronise with the peer and test block retrieval
+    if err := tester.sync("peer", hashes[0]); err != nil {
+        t.Fatalf("failed to synchronise blocks: %v", err)
     }
 
-    bs := tester.downloader.TakeBlocks()
-    if len(bs) != targetBlocks {
-        t.Error("retrieved block mismatch: have %v, want %v", len(bs), targetBlocks)
+    if took := tester.downloader.TakeBlocks(); len(took) != targetBlocks {
+        t.Fatalf("took block mismatch: have %v, want %v", len(took), targetBlocks)
     }
 }
 
+// Tests that an inactive downloader will not accept incoming hashes and blocks.
 func TestInactiveDownloader(t *testing.T) {
-    targetBlocks := 1000
+    // Create a small enough block chain to download and the tester
+    targetBlocks := blockCacheLimit - 15
     hashes := createHashes(0, targetBlocks)
     blocks := createBlocksFromHashSet(createHashSet(hashes))
-    tester := newTester(t, hashes, nil)
 
-    err := tester.downloader.DeliverHashes("bad peer 001", hashes)
-    if err != errNoSyncActive {
-        t.Error("expected no sync error, got", err)
-    }
+    tester := newTester(t, nil, nil)
 
-    err = tester.downloader.DeliverBlocks("bad peer 001", blocks)
-    if err != errNoSyncActive {
-        t.Error("expected no sync error, got", err)
+    // Check that neither hashes nor blocks are accepted
+    if err := tester.downloader.DeliverHashes("bad peer", hashes); err != errNoSyncActive {
+        t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
+    }
+    if err := tester.downloader.DeliverBlocks("bad peer", blocks); err != errNoSyncActive {
+        t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
     }
 }
 
+// Tests that a canceled download wipes all previously accumulated state.
 func TestCancel(t *testing.T) {
-    minDesiredPeerCount = 4
-    blockHardTTL = 1 * time.Second
-
-    targetBlocks := 1000
+    // Create a small enough block chain to download and the tester
+    targetBlocks := blockCacheLimit - 15
     hashes := createHashes(0, targetBlocks)
     blocks := createBlocksFromHashes(hashes)
-    tester := newTester(t, hashes, blocks)
-    tester.newPeer("peer1", big.NewInt(10000), hashes[0])
 
+    tester := newTester(t, hashes, blocks)
+    tester.newPeer("peer", big.NewInt(10000), hashes[0])
 
-    err := tester.sync("peer1", hashes[0])
-    if err != nil {
-        t.Error("download error", err)
+    // Synchronise with the peer, but cancel afterwards
+    if err := tester.sync("peer", hashes[0]); err != nil {
+        t.Fatalf("failed to synchronise blocks: %v", err)
     }
-
     if !tester.downloader.Cancel() {
-        t.Error("cancel operation unsuccessfull")
+        t.Fatalf("cancel operation failed")
     }
-
-    hashSize, blockSize := tester.downloader.queue.Size()
-    if hashSize > 0 || blockSize > 0 {
-        t.Error("block (", blockSize, ") or hash (", hashSize, ") not 0")
+    // Make sure the queue reports empty and no blocks can be taken
+    hashCount, blockCount := tester.downloader.queue.Size()
+    if hashCount > 0 || blockCount > 0 {
+        t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
+    }
+    if took := tester.downloader.TakeBlocks(); len(took) != 0 {
+        t.Errorf("taken blocks mismatch: have %d, want %d", len(took), 0)
     }
 }
 
+// Tests that if a large batch of blocks is being downloaded, it is throttled
+// until the cached blocks are retrieved.
 func TestThrottling(t *testing.T) {
-    minDesiredPeerCount = 4
-    blockHardTTL = 1 * time.Second
-
-    targetBlocks := 16 * blockCacheLimit
+    // Create a long block chain to download and the tester
+    targetBlocks := 8 * blockCacheLimit
     hashes := createHashes(0, targetBlocks)
     blocks := createBlocksFromHashes(hashes)
-    tester := newTester(t, hashes, blocks)
-    tester.newPeer("peer1", big.NewInt(10000), hashes[0])
-    tester.newPeer("peer2", big.NewInt(0), common.Hash{})
-    tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
-    tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
 
+    tester := newTester(t, hashes, blocks)
+    tester.newPeer("peer", big.NewInt(10000), hashes[0])
 
-    // Concurrently download and take the blocks
-    took, err := tester.syncTake("peer1", hashes[0])
-    if err != nil {
-        t.Fatalf("failed to synchronise blocks: %v", err)
+    // Start a synchronisation concurrently
+    errc := make(chan error)
+    go func() {
+        errc <- tester.sync("peer", hashes[0])
+    }()
+    // Iteratively take some blocks, always checking the retrieval count
+    for total := 0; total < targetBlocks; {
+        // Wait a bit for sync to complete
+        for start := time.Now(); time.Since(start) < 3*time.Second; {
+            time.Sleep(25 * time.Millisecond)
+            if len(tester.downloader.queue.blockPool) == blockCacheLimit {
+                break
+            }
+        }
+        // Fetch the next batch of blocks
+        took := tester.downloader.TakeBlocks()
+        if len(took) != blockCacheLimit {
+            t.Fatalf("block count mismatch: have %v, want %v", len(took), blockCacheLimit)
+        }
+        total += len(took)
+        if total > targetBlocks {
+            t.Fatalf("target block count mismatch: have %v, want %v", total, targetBlocks)
+        }
     }
-    if len(took) != targetBlocks {
-        t.Fatalf("downloaded block mismatch: have %v, want %v", len(took), targetBlocks)
+    if err := <-errc; err != nil {
+        t.Fatalf("block synchronization failed: %v", err)
     }
 }
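TestThrottling's inner loop is a hand-rolled poll: sleep in small steps until the block cache fills or a deadline passes. The same pattern, extracted into a hypothetical helper (not part of the patch):

    package main

    import (
        "fmt"
        "time"
    )

    // waitFor polls pred every 25ms until it holds or the timeout elapses,
    // mirroring the wait loop the throttling test uses above.
    func waitFor(pred func() bool, timeout time.Duration) bool {
        for start := time.Now(); time.Since(start) < timeout; {
            if pred() {
                return true
            }
            time.Sleep(25 * time.Millisecond)
        }
        return pred()
    }

    func main() {
        deadline := time.Now().Add(60 * time.Millisecond)
        ok := waitFor(func() bool { return time.Now().After(deadline) }, time.Second)
        fmt.Println("condition met:", ok)
    }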
+ tester.newPeer("attack", big.NewInt(10000), hashes[0]) + for banned := tester.downloader.banned.Size(); ; { + // Try to sync with the attacker, check hash chain failure + if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain) + } + // Check that the ban list grew with at least 1 new item, or all banned + bans := tester.downloader.banned.Size() + if bans < banned+1 { + if tester.downloader.banned.Has(hashes[0]) { + break + } + t.Fatalf("ban count mismatch: have %v, want %v+", bans, banned+1) + } + banned = bans + } + // Check that after banning an entire chain, bad peers get dropped + if err := tester.newPeer("new attacker", big.NewInt(10000), hashes[0]); err != errBannedHead { + t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead) + } + if peer := tester.downloader.peers.Peer("net attacker"); peer != nil { + t.Fatalf("banned attacker registered: %v", peer) + } +} + +// Tests that if a peer sends excessively many/large invalid chains that are +// gradually banned, it will have an upper limit on the consumed memory and also +// the origin bad hashes will not be evacuated. +func TestBannedChainMemoryExhaustionAttack(t *testing.T) { + // Reduce the test size a bit + MaxBlockFetch = 4 + maxBannedHashes = 256 + + // Construct a banned chain with more chunks than the ban limit + hashes := createHashes(0, maxBannedHashes*MaxBlockFetch) + hashes[len(hashes)-1] = bannedHash // weird index to have non multiple of ban chunk size + + blocks := createBlocksFromHashes(hashes) + + // Create the tester and ban the selected hash + tester := newTester(t, hashes, blocks) + tester.downloader.banned.Add(bannedHash) + + // Iteratively try to sync, and verify that the banned hash list grows until + // the head of the invalid chain is blocked too. 
+ tester.newPeer("attack", big.NewInt(10000), hashes[0]) + for { + // Try to sync with the attacker, check hash chain failure + if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain { + t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain) + } + // Short circuit if the entire chain was banned + if tester.downloader.banned.Has(hashes[0]) { + break + } + // Otherwise ensure we never exceed the memory allowance and the hard coded bans are untouched + if bans := tester.downloader.banned.Size(); bans > maxBannedHashes { + t.Fatalf("ban cap exceeded: have %v, want max %v", bans, maxBannedHashes) + } + for hash, _ := range core.BadHashes { + if !tester.downloader.banned.Has(hash) { + t.Fatalf("hard coded ban evacuated: %x", hash) + } + } + } +} diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 5fbc64648..9614a6951 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -94,7 +94,7 @@ func (p *peer) SetIdle() { for { // Calculate the new download bandwidth allowance prev := atomic.LoadInt32(&p.capacity) - next := int32(math.Max(1, math.Min(MaxBlockFetch, float64(prev)*scale))) + next := int32(math.Max(1, math.Min(float64(MaxBlockFetch), float64(prev)*scale))) // Try to update the old value if atomic.CompareAndSwapInt32(&p.capacity, prev, next) { diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 671ffe51b..7abbd42fd 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -16,7 +16,7 @@ import ( "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) -const ( +var ( blockCacheLimit = 8 * MaxBlockFetch // Maximum number of blocks to cache before throttling the download ) @@ -50,10 +50,11 @@ type queue struct { // newQueue creates a new download queue for scheduling block retrieval. func newQueue() *queue { return &queue{ - hashPool: make(map[common.Hash]int), - hashQueue: prque.New(), - pendPool: make(map[string]*fetchRequest), - blockPool: make(map[common.Hash]int), + hashPool: make(map[common.Hash]int), + hashQueue: prque.New(), + pendPool: make(map[string]*fetchRequest), + blockPool: make(map[common.Hash]int), + blockCache: make([]*Block, blockCacheLimit), } } @@ -70,7 +71,7 @@ func (q *queue) Reset() { q.blockPool = make(map[common.Hash]int) q.blockOffset = 0 - q.blockCache = nil + q.blockCache = make([]*Block, blockCacheLimit) } // Size retrieves the number of hashes in the queue, returning separately for @@ -208,7 +209,7 @@ func (q *queue) TakeBlocks() []*Block { // Reserve reserves a set of hashes for the given peer, skipping any previously // failed download. -func (q *queue) Reserve(p *peer) *fetchRequest { +func (q *queue) Reserve(p *peer, count int) *fetchRequest { q.lock.Lock() defer q.lock.Unlock() @@ -229,8 +230,7 @@ func (q *queue) Reserve(p *peer) *fetchRequest { send := make(map[common.Hash]int) skip := make(map[common.Hash]int) - capacity := p.Capacity() - for proc := 0; proc < space && len(send) < capacity && !q.hashQueue.Empty(); proc++ { + for proc := 0; proc < space && len(send) < count && !q.hashQueue.Empty(); proc++ { hash, priority := q.hashQueue.Pop() if p.ignored.Has(hash) { skip[hash.(common.Hash)] = int(priority) @@ -345,20 +345,12 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) { return nil } -// Alloc ensures that the block cache is the correct size, given a starting -// offset, and a memory cap. -func (q *queue) Alloc(offset int) { +// Prepare configures the block cache offset to allow accepting inbound blocks. 
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 671ffe51b..7abbd42fd 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -16,7 +16,7 @@ import (
     "gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )
 
-const (
+var (
     blockCacheLimit = 8 * MaxBlockFetch // Maximum number of blocks to cache before throttling the download
 )
 
@@ -50,10 +50,11 @@ type queue struct {
 // newQueue creates a new download queue for scheduling block retrieval.
 func newQueue() *queue {
     return &queue{
-        hashPool:  make(map[common.Hash]int),
-        hashQueue: prque.New(),
-        pendPool:  make(map[string]*fetchRequest),
-        blockPool: make(map[common.Hash]int),
+        hashPool:   make(map[common.Hash]int),
+        hashQueue:  prque.New(),
+        pendPool:   make(map[string]*fetchRequest),
+        blockPool:  make(map[common.Hash]int),
+        blockCache: make([]*Block, blockCacheLimit),
     }
 }
 
@@ -70,7 +71,7 @@ func (q *queue) Reset() {
     q.blockPool = make(map[common.Hash]int)
 
     q.blockOffset = 0
-    q.blockCache = nil
+    q.blockCache = make([]*Block, blockCacheLimit)
 }
 
 // Size retrieves the number of hashes in the queue, returning separately for
@@ -208,7 +209,7 @@ func (q *queue) TakeBlocks() []*Block {
 
 // Reserve reserves a set of hashes for the given peer, skipping any previously
 // failed download.
-func (q *queue) Reserve(p *peer) *fetchRequest {
+func (q *queue) Reserve(p *peer, count int) *fetchRequest {
     q.lock.Lock()
     defer q.lock.Unlock()
 
@@ -229,8 +230,7 @@ func (q *queue) Reserve(p *peer) *fetchRequest {
     send := make(map[common.Hash]int)
     skip := make(map[common.Hash]int)
 
-    capacity := p.Capacity()
-    for proc := 0; proc < space && len(send) < capacity && !q.hashQueue.Empty(); proc++ {
+    for proc := 0; proc < space && len(send) < count && !q.hashQueue.Empty(); proc++ {
         hash, priority := q.hashQueue.Pop()
         if p.ignored.Has(hash) {
             skip[hash.(common.Hash)] = int(priority)
@@ -345,20 +345,12 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
     return nil
 }
 
-// Alloc ensures that the block cache is the correct size, given a starting
-// offset, and a memory cap.
-func (q *queue) Alloc(offset int) {
+// Prepare configures the block cache offset to allow accepting inbound blocks.
+func (q *queue) Prepare(offset int) {
     q.lock.Lock()
     defer q.lock.Unlock()
 
     if q.blockOffset < offset {
         q.blockOffset = offset
     }
-    size := len(q.hashPool)
-    if size > blockCacheLimit {
-        size = blockCacheLimit
-    }
-    if len(q.blockCache) < size {
-        q.blockCache = append(q.blockCache, make([]*Block, size-len(q.blockCache))...)
-    }
 }
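With queue.go now allocating blockCache up front and Prepare only moving the offset, the cache acts as a fixed window over the chain: a block's slot is its number minus the offset, and anything past the window must wait until TakeBlocks drains it, which is what Throttle reacts to. A toy model of that indexing (assumed logic, not the actual queue internals):

    package main

    import "fmt"

    const blockCacheLimit = 8

    type cache struct {
        slots  [blockCacheLimit]*int
        offset int // number of the first block the cache will accept
    }

    // insert places a block into its slot relative to the offset, refusing
    // anything outside the fixed window.
    func (c *cache) insert(number int, b *int) bool {
        idx := number - c.offset
        if idx < 0 || idx >= len(c.slots) {
            return false // outside the window: caller must throttle
        }
        c.slots[idx] = b
        return true
    }

    func main() {
        c := &cache{offset: 100}
        v := 42
        fmt.Println(c.insert(103, &v)) // true: inside the window
        fmt.Println(c.insert(120, &v)) // false: beyond the cache, throttle
    }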
diff --git a/eth/handler.go b/eth/handler.go
index aea33452c..64f89b273 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -2,6 +2,7 @@ package eth
 
 import (
     "fmt"
+    "math"
     "math/big"
     "sync"
     "time"
@@ -17,13 +18,6 @@ import (
     "github.com/ethereum/go-ethereum/rlp"
 )
 
-const (
-    forceSyncCycle      = 10 * time.Second       // Time interval to force syncs, even if few peers are available
-    blockProcCycle      = 500 * time.Millisecond // Time interval to check for new blocks to process
-    minDesiredPeerCount = 5                      // Amount of peers desired to start syncing
-    blockProcAmount     = 256
-)
-
 func errResp(code errCode, format string, v ...interface{}) error {
     return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
 }
@@ -54,8 +48,11 @@ type ProtocolManager struct {
     txSub         event.Subscription
     minedBlockSub event.Subscription
 
-    newPeerCh chan *peer
-    quitSync  chan struct{}
+    newPeerCh  chan *peer
+    newHashCh  chan []*blockAnnounce
+    newBlockCh chan chan []*types.Block
+    quitSync   chan struct{}
+
     // wait group is used for graceful shutdowns during downloading
     // and processing
     wg sync.WaitGroup
@@ -72,6 +69,8 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
         downloader: downloader,
         peers:      newPeerSet(),
         newPeerCh:  make(chan *peer, 1),
+        newHashCh:  make(chan []*blockAnnounce, 1),
+        newBlockCh: make(chan chan []*types.Block),
         quitSync:   make(chan struct{}),
     }
 
@@ -119,7 +118,8 @@ func (pm *ProtocolManager) Start() {
     pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
     go pm.minedBroadcastLoop()
 
-    go pm.update()
+    go pm.syncer()
+    go pm.fetcher()
 }
 
 func (pm *ProtocolManager) Stop() {
@@ -186,7 +186,6 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
     defer msg.Discard()
 
     switch msg.Code {
-    case GetTxMsg: // ignore
     case StatusMsg:
         return errResp(ErrExtraStatusMsg, "uncontrolled status message")
 
@@ -213,8 +212,8 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
             return errResp(ErrDecode, "->msg %v: %v", msg, err)
         }
 
-        if request.Amount > downloader.MaxHashFetch {
-            request.Amount = downloader.MaxHashFetch
+        if request.Amount > uint64(downloader.MaxHashFetch) {
+            request.Amount = uint64(downloader.MaxHashFetch)
         }
 
         hashes := self.chainman.GetBlockHashesFromHash(request.Hash, request.Amount)
@@ -227,6 +226,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
         // returns either requested hashes or nothing (i.e. not found)
         return p.sendBlockHashes(hashes)
+
     case BlockHashesMsg:
         msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
 
@@ -266,15 +266,66 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
             }
         }
         return p.sendBlocks(blocks)
-    case BlocksMsg:
-        var blocks []*types.Block
 
+    case BlocksMsg:
+        // Decode the arrived block message
         msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
+
+        var blocks []*types.Block
         if err := msgStream.Decode(&blocks); err != nil {
             glog.V(logger.Detail).Infoln("Decode error", err)
             blocks = nil
         }
-        self.downloader.DeliverBlocks(p.id, blocks)
+        // Filter out any explicitly requested blocks (cascading select to get blocking back to peer)
+        filter := make(chan []*types.Block)
+        select {
+        case <-self.quitSync:
+        case self.newBlockCh <- filter:
+            select {
+            case <-self.quitSync:
+            case filter <- blocks:
+                select {
+                case <-self.quitSync:
+                case blocks := <-filter:
+                    self.downloader.DeliverBlocks(p.id, blocks)
+                }
+            }
+        }
+
+    case NewBlockHashesMsg:
+        // Retrieve and deserialize the remote new block hashes notification
+        msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
+
+        var hashes []common.Hash
+        if err := msgStream.Decode(&hashes); err != nil {
+            break
+        }
+        // Mark the hashes as present at the remote node
+        for _, hash := range hashes {
+            p.blockHashes.Add(hash)
+            p.recentHash = hash
+        }
+        // Schedule all the unknown hashes for retrieval
+        unknown := make([]common.Hash, 0, len(hashes))
+        for _, hash := range hashes {
+            if !self.chainman.HasBlock(hash) {
+                unknown = append(unknown, hash)
+            }
+        }
+        announces := make([]*blockAnnounce, len(unknown))
+        for i, hash := range unknown {
+            announces[i] = &blockAnnounce{
+                hash: hash,
+                peer: p,
+                time: time.Now(),
+            }
+        }
+        if len(announces) > 0 {
+            select {
+            case self.newHashCh <- announces:
+            case <-self.quitSync:
+            }
+        }
 
     case NewBlockMsg:
         var request newBlockMsgData
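The cascading selects in the BlocksMsg case implement a small handshake: the handler offers a fresh filter channel to the fetcher, pushes the raw blocks through it, and reads back whatever the fetcher did not claim, guarding every step with quitSync so shutdown cannot deadlock either side. A self-contained sketch of the same shape (toy types, illustrative only):

    package main

    import "fmt"

    type block struct{ id int }

    func main() {
        quit := make(chan struct{})
        newBlockCh := make(chan chan []*block)

        // Fetcher side: claim explicitly requested blocks, return the rest.
        go func() {
            for {
                select {
                case filter := <-newBlockCh:
                    blocks := <-filter // raw batch from the handler
                    var download []*block
                    for _, b := range blocks {
                        if b.id%2 == 0 {
                            continue // pretend even ids were explicitly fetched
                        }
                        download = append(download, b)
                    }
                    filter <- download // hand the remainder back
                case <-quit:
                    return
                }
            }
        }()

        // Handler side: the cascading select, simplified.
        blocks := []*block{{1}, {2}, {3}}
        filter := make(chan []*block)
        select {
        case <-quit:
        case newBlockCh <- filter:
            select {
            case <-quit:
            case filter <- blocks:
                select {
                case <-quit:
                case rest := <-filter:
                    fmt.Println("deliver to downloader:", len(rest))
                }
            }
        }
    }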
@@ -286,83 +337,86 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
         }
         request.Block.ReceivedAt = msg.ReceivedAt
 
-        hash := request.Block.Hash()
-
-        // Add the block hash as a known hash to the peer. This will later be used to determine
-        // who should receive this.
-        p.blockHashes.Add(hash)
-        // update the peer info
-        p.recentHash = hash
-        p.td = request.TD
-
-        _, chainHead, _ := self.chainman.Status()
-
-        jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{
-            BlockHash:     hash.Hex(),
-            BlockNumber:   request.Block.Number(), // this surely must be zero
-            ChainHeadHash: chainHead.Hex(),
-            BlockPrevHash: request.Block.ParentHash().Hex(),
-            RemoteId:      p.ID().String(),
-        })
-
-        // Make sure the block isn't already known. If this is the case simply drop
-        // the message and move on. If the TD is < currentTd; drop it as well. If this
-        // chain at some point becomes canonical, the downloader will fetch it.
-        if self.chainman.HasBlock(hash) {
-            break
-        }
-        if self.chainman.Td().Cmp(request.TD) > 0 && new(big.Int).Add(request.Block.Number(), big.NewInt(7)).Cmp(self.chainman.CurrentBlock().Number()) < 0 {
-            glog.V(logger.Debug).Infof("[%s] dropped block %v due to low TD %v\n", p.id, request.Block.Number(), request.TD)
-            break
+        if err := self.importBlock(p, request.Block, request.TD); err != nil {
+            return err
         }
-        // Attempt to insert the newly received by checking if the parent exists.
-        // if the parent exists we process the block and propagate to our peers
-        // otherwise synchronize with the peer
-        if self.chainman.HasBlock(request.Block.ParentHash()) {
-            if _, err := self.chainman.InsertChain(types.Blocks{request.Block}); err != nil {
-                glog.V(logger.Error).Infoln("removed peer (", p.id, ") due to block error")
-
-                self.removePeer(p.id)
-
-                return nil
-            }
-
-            if err := self.verifyTd(p, request); err != nil {
-                glog.V(logger.Error).Infoln(err)
-                // XXX for now return nil so it won't disconnect (we should in the future)
-                return nil
-            }
-            self.BroadcastBlock(hash, request.Block)
-        } else {
-            go self.synchronise(p)
-        }
 
     default:
         return errResp(ErrInvalidMsgCode, "%v", msg.Code)
     }
     return nil
 }
 
-func (pm *ProtocolManager) verifyTd(peer *peer, request newBlockMsgData) error {
-    if request.Block.Td.Cmp(request.TD) != 0 {
-        glog.V(logger.Detail).Infoln(peer)
+// importBlock injects a new block retrieved from the given peer into the chain
+// manager.
+func (pm *ProtocolManager) importBlock(p *peer, block *types.Block, td *big.Int) error {
+    hash := block.Hash()
 
-        return fmt.Errorf("invalid TD on block(%v) from peer(%s): block.td=%v, request.td=%v", request.Block.Number(), peer.id, request.Block.Td, request.TD)
+    // Mark the block as present at the remote node (don't duplicate already held data)
+    p.blockHashes.Add(hash)
+    p.recentHash = hash
+    if td != nil {
+        p.td = td
+    }
+    // Log the block's arrival
+    _, chainHead, _ := pm.chainman.Status()
+    jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{
+        BlockHash:     hash.Hex(),
+        BlockNumber:   block.Number(),
+        ChainHeadHash: chainHead.Hex(),
+        BlockPrevHash: block.ParentHash().Hex(),
+        RemoteId:      p.ID().String(),
+    })
+    // If the block's already known or its difficulty is lower than ours, drop
+    if pm.chainman.HasBlock(hash) {
+        p.td = pm.chainman.GetBlock(hash).Td // update the peer's TD to the real value
+        return nil
+    }
+    if td != nil && pm.chainman.Td().Cmp(td) > 0 && new(big.Int).Add(block.Number(), big.NewInt(7)).Cmp(pm.chainman.CurrentBlock().Number()) < 0 {
+        glog.V(logger.Debug).Infof("[%s] dropped block %v due to low TD %v\n", p.id, block.Number(), td)
+        return nil
+    }
+    // Attempt to insert the newly received block and propagate to our peers
+    if pm.chainman.HasBlock(block.ParentHash()) {
+        if _, err := pm.chainman.InsertChain(types.Blocks{block}); err != nil {
+            glog.V(logger.Error).Infoln("removed peer (", p.id, ") due to block error", err)
+            return err
+        }
+        if td != nil && block.Td.Cmp(td) != 0 {
+            err := fmt.Errorf("invalid TD on block(%v) from peer(%s): block.td=%v, request.td=%v", block.Number(), p.id, block.Td, td)
+            glog.V(logger.Error).Infoln(err)
+            return err
+        }
+        pm.BroadcastBlock(hash, block)
+        return nil
+    }
+    // Parent of the block is unknown, try to sync with this peer if it seems to be good
+    if td != nil {
+        go pm.synchronise(p)
     }
     return nil
 }
 
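BroadcastBlock below splits propagation: the full block body is sent to only sqrt(n) of the peers lacking it, while the rest merely receive a hash announcement and can fetch on demand, cutting duplicate block traffic roughly by the square root of the peer count. A tiny illustration of the split:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // With 25 peers, 5 get the block itself, 20 only hear the hash.
        peers := make([]int, 25)
        split := int(math.Sqrt(float64(len(peers))))
        transfer, notify := peers[:split], peers[split:]
        fmt.Printf("send block to %d peers, announce hash to %d\n", len(transfer), len(notify))
    }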
-// BroadcastBlock will propagate the block to its connected peers. It will sort
-// out which peers do not contain the block in their block set and will do a
-// sqrt(peers) to determine the amount of peers we broadcast to.
+// BroadcastBlock will propagate the block to a subset of its connected peers,
+// only notifying the rest of the block's appearance.
 func (pm *ProtocolManager) BroadcastBlock(hash common.Hash, block *types.Block) {
-    // Broadcast block to a batch of peers not knowing about it
+    // Retrieve all the target peers and split between full broadcast or only notification
     peers := pm.peers.PeersWithoutBlock(hash)
-    //peers = peers[:int(math.Sqrt(float64(len(peers))))]
-    for _, peer := range peers {
+    split := int(math.Sqrt(float64(len(peers))))
+
+    transfer := peers[:split]
+    notify := peers[split:]
+
+    // Send out the data transfers and the notifications
+    for _, peer := range notify {
+        peer.sendNewBlockHashes([]common.Hash{hash})
+    }
+    glog.V(logger.Detail).Infoln("broadcast hash to", len(notify), "peers.")
+
+    for _, peer := range transfer {
         peer.sendNewBlock(block)
     }
-    glog.V(logger.Detail).Infoln("broadcast block to", len(peers), "peers. Total processing time:", time.Since(block.ReceivedAt))
+    glog.V(logger.Detail).Infoln("broadcast block to", len(transfer), "peers. Total processing time:", time.Since(block.ReceivedAt))
 }
 
 // BroadcastTx will propagate the block to its connected peers. It will sort
diff --git a/eth/peer.go b/eth/peer.go
index bb6a20349..cf2c58ec9 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -88,6 +88,13 @@ func (p *peer) sendBlocks(blocks []*types.Block) error {
     return p2p.Send(p.rw, BlocksMsg, blocks)
 }
 
+func (p *peer) sendNewBlockHashes(hashes []common.Hash) error {
+    for _, hash := range hashes {
+        p.blockHashes.Add(hash)
+    }
+    return p2p.Send(p.rw, NewBlockHashesMsg, hashes)
+}
+
 func (p *peer) sendNewBlock(block *types.Block) error {
     p.blockHashes.Add(block.Hash())
 
@@ -102,7 +109,7 @@ func (p *peer) sendTransaction(tx *types.Transaction) error {
 
 func (p *peer) requestHashes(from common.Hash) error {
     glog.V(logger.Debug).Infof("[%s] fetching hashes (%d) %x...\n", p.id, downloader.MaxHashFetch, from[:4])
-    return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, downloader.MaxHashFetch})
+    return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, uint64(downloader.MaxHashFetch)})
 }
 
 func (p *peer) requestBlocks(hashes []common.Hash) error {
diff --git a/eth/protocol.go b/eth/protocol.go
index 948051ed1..9ccf2cb60 100644
--- a/eth/protocol.go
+++ b/eth/protocol.go
@@ -17,7 +17,7 @@ const (
 // eth protocol message codes
 const (
     StatusMsg = iota
-    GetTxMsg  // unused
+    NewBlockHashesMsg
     TxMsg
     GetBlockHashesMsg
     BlockHashesMsg
diff --git a/eth/sync.go b/eth/sync.go
index 56084f2f0..dd7414da8 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -5,15 +5,135 @@ import (
     "sync/atomic"
     "time"
 
+    "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/eth/downloader"
     "github.com/ethereum/go-ethereum/logger"
     "github.com/ethereum/go-ethereum/logger/glog"
 )
 
-// update periodically tries to synchronise with the network, both downloading
-// hashes and blocks as well as retrieving cached ones.
-func (pm *ProtocolManager) update() {
+const (
+    forceSyncCycle      = 10 * time.Second       // Time interval to force syncs, even if few peers are available
+    blockProcCycle      = 500 * time.Millisecond // Time interval to check for new blocks to process
+    notifyCheckCycle    = 100 * time.Millisecond // Time interval to allow hash notifies to fulfill before hard fetching
+    notifyArriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
+    notifyFetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block
+    minDesiredPeerCount = 5                      // Amount of peers desired to start syncing
+    blockProcAmount     = 256
+)
+
+// blockAnnounce is the hash notification of the availability of a new block in
+// the network.
+type blockAnnounce struct {
+    hash common.Hash
+    peer *peer
+    time time.Time
+}
+
+// fetcher is responsible for collecting hash notifications, and periodically
+// checking all unknown ones and individually fetching them.
+func (pm *ProtocolManager) fetcher() {
+    announces := make(map[common.Hash]*blockAnnounce)
+    request := make(map[*peer][]common.Hash)
+    pending := make(map[common.Hash]*blockAnnounce)
+    cycle := time.Tick(notifyCheckCycle)
+
+    // Iterate the block fetching until a quit is requested
+    for {
+        select {
+        case notifications := <-pm.newHashCh:
+            // A batch of hashes was announced, schedule them for retrieval
+            glog.V(logger.Debug).Infof("Scheduling %d hash announcements from %s", len(notifications), notifications[0].peer.id)
+            for _, announce := range notifications {
+                announces[announce.hash] = announce
+            }
+
+        case <-cycle:
+            // Clean up any expired block fetches
+            for hash, announce := range pending {
+                if time.Since(announce.time) > notifyFetchTimeout {
+                    delete(pending, hash)
+                }
+            }
+            // Check if any notified blocks failed to arrive
+            for hash, announce := range announces {
+                if time.Since(announce.time) > notifyArriveTimeout {
+                    if !pm.chainman.HasBlock(hash) {
+                        request[announce.peer] = append(request[announce.peer], hash)
+                        pending[hash] = announce
+                    }
+                    delete(announces, hash)
+                }
+            }
+            if len(request) == 0 {
+                break
+            }
+            // Send out all block requests
+            for peer, hashes := range request {
+                glog.V(logger.Debug).Infof("Explicitly fetching %d blocks from %s", len(hashes), peer.id)
+                peer.requestBlocks(hashes)
+            }
+            request = make(map[*peer][]common.Hash)
+
+        case filter := <-pm.newBlockCh:
+            // Blocks arrived, extract any explicit fetches, return all else
+            var blocks types.Blocks
+            select {
+            case blocks = <-filter:
+            case <-pm.quitSync:
+                return
+            }
+
+            explicit, download := []*types.Block{}, []*types.Block{}
+            for _, block := range blocks {
+                hash := block.Hash()
+
+                // Filter explicitly requested blocks from hash announcements
+                if _, ok := pending[hash]; ok {
+                    // Discard if already imported by other means
+                    if !pm.chainman.HasBlock(hash) {
+                        explicit = append(explicit, block)
+                    } else {
+                        delete(pending, hash)
+                    }
+                } else {
+                    download = append(download, block)
+                }
+            }
+
+            select {
+            case filter <- download:
+            case <-pm.quitSync:
+                return
+            }
+            // If any explicit fetches were replied to, import them
+            if count := len(explicit); count > 0 {
+                glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", count)
+                go func() {
+                    for _, block := range explicit {
+                        hash := block.Hash()
+
+                        // Make sure there's still something pending to import
+                        if announce := pending[hash]; announce != nil {
+                            delete(pending, hash)
+                            if err := pm.importBlock(announce.peer, block, nil); err != nil {
+                                glog.V(logger.Detail).Infof("Failed to import explicitly fetched block: %v", err)
+                                return
+                            }
+                        }
+                    }
+                }()
+            }
+
+        case <-pm.quitSync:
+            return
+        }
+    }
+}
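The fetcher above is a small state machine: announcements wait in one map, turn into per-peer batched requests once notifyArriveTimeout passes, and then sit in a pending map until the block arrives or notifyFetchTimeout expires. A stripped-down model of the overdue-announcement sweep (assumed toy types, not the real wiring):

    package main

    import (
        "fmt"
        "time"
    )

    type announce struct {
        peer string
        time time.Time
    }

    func main() {
        const arriveTimeout = 30 * time.Millisecond

        announces := map[string]announce{
            "0x01": {"peerA", time.Now()},
        }
        time.Sleep(2 * arriveTimeout)

        // Batch every overdue announcement into a per-peer request map.
        request := make(map[string][]string)
        for hash, a := range announces {
            if time.Since(a.time) > arriveTimeout {
                request[a.peer] = append(request[a.peer], hash)
                delete(announces, hash)
            }
        }
        for peer, hashes := range request {
            fmt.Printf("explicitly fetching %d block(s) from %s\n", len(hashes), peer)
        }
    }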
+
+// syncer is responsible for periodically synchronising with the network, both
+// downloading hashes and blocks as well as retrieving cached ones.
+func (pm *ProtocolManager) syncer() {
     forceSync := time.Tick(forceSyncCycle)
     blockProc := time.Tick(blockProcCycle)
     blockProcPend := int32(0)
@@ -99,15 +219,15 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
         return
     }
     // Get the hashes from the peer (synchronously)
-    glog.V(logger.Debug).Infof("Attempting synchronisation: %v, 0x%x", peer.id, peer.recentHash)
+    glog.V(logger.Detail).Infof("Attempting synchronisation: %v, 0x%x", peer.id, peer.recentHash)
 
     err := pm.downloader.Synchronise(peer.id, peer.recentHash)
     switch err {
     case nil:
-        glog.V(logger.Debug).Infof("Synchronisation completed")
+        glog.V(logger.Detail).Infof("Synchronisation completed")
 
     case downloader.ErrBusy:
-        glog.V(logger.Debug).Infof("Synchronisation already in progress")
+        glog.V(logger.Detail).Infof("Synchronisation already in progress")
 
     case downloader.ErrTimeout, downloader.ErrBadPeer, downloader.ErrEmptyHashSet, downloader.ErrInvalidChain, downloader.ErrCrossCheckFailed:
         glog.V(logger.Debug).Infof("Removing peer %v: %v", peer.id, err)
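The renamed syncer keeps the old driver shape: a forceSync ticker guarantees a synchronisation attempt every cycle even when fewer than minDesiredPeerCount peers ever connect. A rough, self-contained model of that select loop (the real wiring is elided by the hunk above, so names and flow here are assumptions based on the constants' comments):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const minDesiredPeerCount = 5

        forceSync := time.Tick(50 * time.Millisecond)
        newPeer := make(chan int)

        go func() {
            for n := 1; n <= 3; n++ {
                newPeer <- n
                time.Sleep(20 * time.Millisecond)
            }
        }()

        for i := 0; i < 5; i++ {
            select {
            case n := <-newPeer:
                if n < minDesiredPeerCount {
                    fmt.Println("too few peers, waiting:", n)
                    continue
                }
                fmt.Println("synchronising on new peer")
            case <-forceSync:
                // Fires even when the peer threshold was never reached.
                fmt.Println("forced sync cycle")
            }
        }
    }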