author     obscuren <geffobscura@gmail.com>  2015-05-01 06:23:51 +0800
committer  obscuren <geffobscura@gmail.com>  2015-05-01 21:58:44 +0800
commit     016f152b36106130fa42514ef6cfacc09dfc3142 (patch)
tree       a38fa42c59a8a4e0c18b68fc8e5dcb6bd533719b /eth/downloader
parent     8595198c1b56364bb9589a912d2a9797b93a3357 (diff)
eth, eth/downloader: Moved block processing & graceful shutdown
The downloader is no longer responsible for processing blocks; the
eth protocol handler now takes care of this instead.
Added graceful shutdown during block processing. Closes #846
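For context, here is a minimal sketch of how the handler side might now drain the downloader using the TakeBlocks/Done API this commit introduces. It is an illustration only: protocolManager, chainInserter, and the quit channel are assumed names, not the actual handler code.

package eth

import (
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/eth/downloader"
)

// chainInserter is a stand-in for the chain manager; only InsertChain is needed here.
type chainInserter interface {
    InsertChain(types.Blocks) (int, error)
}

// protocolManager is a hypothetical owner of the downloader, mirroring the role
// the eth protocol handler takes over in this commit.
type protocolManager struct {
    downloader *downloader.Downloader
    chainman   chainInserter
    quit       chan struct{} // closed on shutdown
}

// processBlocks drains the download queue in bounded batches, checking the
// quit channel between batches so a shutdown can interrupt processing
// gracefully instead of blocking until the whole queue is inserted.
func (pm *protocolManager) processBlocks() error {
    // Take whatever contiguous, parent-known blocks the downloader has queued.
    blocks := pm.downloader.TakeBlocks()
    if len(blocks) == 0 {
        return nil
    }
    // Tell the downloader the taken blocks are handled; once its queue is
    // empty it resets for the next batch of hashes and blocks.
    defer pm.downloader.Done()

    for len(blocks) != 0 {
        max := 256 // insert in bounded batches
        if len(blocks) < max {
            max = len(blocks)
        }
        if _, err := pm.chainman.InsertChain(blocks[:max]); err != nil {
            return err
        }
        blocks = blocks[max:]

        select {
        case <-pm.quit: // graceful shutdown requested; stop between batches
            return nil
        default:
        }
    }
    return nil
}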
Diffstat (limited to 'eth/downloader')
-rw-r--r--  eth/downloader/downloader.go       | 281
-rw-r--r--  eth/downloader/downloader_test.go  |  55
-rw-r--r--  eth/downloader/queue.go            |  58
3 files changed, 221 insertions(+), 173 deletions(-)
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 016f290fc..c1dc9d3a8 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -3,14 +3,11 @@ package downloader
 import (
     "errors"
     "fmt"
-    "math"
-    "math/big"
     "sync"
     "sync/atomic"
     "time"

     "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/logger"
     "github.com/ethereum/go-ethereum/logger/glog"
@@ -27,16 +24,21 @@ var (
     minDesiredPeerCount = 5                // Amount of peers desired to start syncing
     blockTtl            = 20 * time.Second // The amount of time it takes for a block request to time out

-    errLowTd            = errors.New("peer's TD is too low")
-    errBusy             = errors.New("busy")
-    errUnknownPeer      = errors.New("peer's unknown or unhealthy")
-    ErrBadPeer          = errors.New("action from bad peer ignored")
-    errTimeout          = errors.New("timeout")
-    errEmptyHashSet     = errors.New("empty hash set by peer")
-    errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
+    errLowTd               = errors.New("peer's TD is too low")
+    errBusy                = errors.New("busy")
+    errUnknownPeer         = errors.New("peer's unknown or unhealthy")
+    ErrBadPeer             = errors.New("action from bad peer ignored")
+    errNoPeers             = errors.New("no peers to keep download active")
+    errPendingQueue        = errors.New("pending items in queue")
+    errTimeout             = errors.New("timeout")
+    errEmptyHashSet        = errors.New("empty hash set by peer")
+    errPeersUnavailable    = errors.New("no peers available or all peers tried for block download process")
+    errAlreadyInPool       = errors.New("hash already in pool")
+    errBlockNumberOverflow = errors.New("received block which overflows")
 )

 type hashCheckFn func(common.Hash) bool
+type getBlockFn func(common.Hash) *types.Block
 type chainInsertFn func(types.Blocks) (int, error)
 type hashIterFn func() (common.Hash, error)

@@ -58,13 +60,12 @@ type Downloader struct {
     activePeer string

     // Callbacks
-    hasBlock    hashCheckFn
-    insertChain chainInsertFn
+    hasBlock hashCheckFn
+    getBlock getBlockFn

     // Status
     fetchingHashes    int32
     downloadingBlocks int32
-    processingBlocks  int32

     // Channels
     newPeerCh chan *peer
@@ -72,15 +73,15 @@ type Downloader struct {
     blockCh   chan blockPack
 }

-func New(hasBlock hashCheckFn, insertChain chainInsertFn) *Downloader {
+func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
     downloader := &Downloader{
-        queue:       newqueue(),
-        peers:       make(peers),
-        hasBlock:    hasBlock,
-        insertChain: insertChain,
-        newPeerCh:   make(chan *peer, 1),
-        hashCh:      make(chan []common.Hash, 1),
-        blockCh:     make(chan blockPack, 1),
+        queue:     newqueue(),
+        peers:     make(peers),
+        hasBlock:  hasBlock,
+        getBlock:  getBlock,
+        newPeerCh: make(chan *peer, 1),
+        hashCh:    make(chan []common.Hash, 1),
+        blockCh:   make(chan blockPack, 1),
     }

     return downloader
@@ -126,6 +127,12 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
         return errBusy
     }

+    // When a synchronisation attempt is made while the queue still
+    // contains items we abort the sync attempt
+    if d.queue.size() > 0 {
+        return errPendingQueue
+    }
+
     // Fetch the peer using the id or throw an error if the peer couldn't be found
     p := d.peers[id]
     if p == nil {
@@ -138,30 +145,87 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
         return err
     }

-    return d.process(p)
+    return nil
+}
+
+// Done lets the downloader know that whatever previous hashes were taken
+// are processed. If the block count reaches zero and done is called
+// we reset the queue for the next batch of incoming hashes and blocks.
+func (d *Downloader) Done() {
+    d.queue.mu.Lock()
+    defer d.queue.mu.Unlock()
+
+    if len(d.queue.blocks) == 0 {
+        d.queue.resetNoTS()
+    }
+}
+
+// TakeBlocks takes blocks from the queue and yields them to the blockTaker handler;
+// it's possible it yields no blocks
+func (d *Downloader) TakeBlocks() types.Blocks {
+    d.queue.mu.Lock()
+    defer d.queue.mu.Unlock()
+
+    var blocks types.Blocks
+    if len(d.queue.blocks) > 0 {
+        // Make sure the parent hash is known
+        if d.queue.blocks[0] != nil && !d.hasBlock(d.queue.blocks[0].ParentHash()) {
+            return nil
+        }
+
+        for _, block := range d.queue.blocks {
+            if block == nil {
+                break
+            }

+            blocks = append(blocks, block)
+        }
+        d.queue.blockOffset += len(blocks)
+        // delete the blocks from the slice and let them be garbage collected
+        // without this slice trick the blocks would stay in memory until nil
+        // would be assigned to d.queue.blocks
+        copy(d.queue.blocks, d.queue.blocks[len(blocks):])
+        for k, n := len(d.queue.blocks)-len(blocks), len(d.queue.blocks); k < n; k++ {
+            d.queue.blocks[k] = nil
+        }
+        d.queue.blocks = d.queue.blocks[:len(d.queue.blocks)-len(blocks)]

+        //d.queue.blocks = d.queue.blocks[len(blocks):]
+        if len(d.queue.blocks) == 0 {
+            d.queue.blocks = nil
+        }
+
+    }
+
+    return blocks
 }

-func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error {
+func (d *Downloader) Has(hash common.Hash) bool {
+    return d.queue.has(hash)
+}
+
+func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) {
     d.activePeer = p.id
+    defer func() {
+        // reset on error
+        if err != nil {
+            d.queue.reset()
+        }
+    }()

     glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
     // Start the fetcher. This will block the update entirely;
     // interrupts need to be sent to the appropriate channels
     // respectively.
-    if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
-        // handle error
-        glog.V(logger.Debug).Infoln("Error fetching hashes:", err)
-        // XXX Reset
+    if err = d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
         return err
     }

     // Start fetching blocks in parallel. The strategy is simple:
     // take any available peers, reserve a chunk for each peer available,
     // let the peer deliver the chunk and periodically check if a peer
-    // has timed out. When done downloading, process blocks.
-    if err := d.startFetchingBlocks(p); err != nil {
-        glog.V(logger.Debug).Infoln("Error downloading blocks:", err)
-        // XXX reset
+    // has timed out.
+    if err = d.startFetchingBlocks(p); err != nil {
         return err
     }

@@ -175,6 +239,10 @@ func (d *Downloader) startFetchingHashes(p *peer, hash common.Hash, ignoreInitia
     atomic.StoreInt32(&d.fetchingHashes, 1)
     defer atomic.StoreInt32(&d.fetchingHashes, 0)

+    if d.queue.has(hash) {
+        return errAlreadyInPool
+    }
+
     glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", hash.Bytes()[:4], p.id)

     start := time.Now()
@@ -196,10 +264,13 @@ out:
         case hashes := <-d.hashCh:
             failureResponseTimer.Reset(hashTtl)

-            var done bool // determines whether we're done fetching hashes (i.e. common hash found)
+            var (
+                done bool        // determines whether we're done fetching hashes (i.e. common hash found)
+                hash common.Hash // current and common hash
+            )
             hashSet := set.New()
-            for _, hash := range hashes {
-                if d.hasBlock(hash) {
+            for _, hash = range hashes {
+                if d.hasBlock(hash) || d.queue.has(hash) {
                     glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4])

                     done = true
@@ -220,6 +291,14 @@ out:
                 // Get the next set of hashes
                 p.getHashes(hashes[len(hashes)-1])
             } else { // we're done
+                // The offset of the queue is determined by the highest known block
+                var offset int
+                if block := d.getBlock(hash); block != nil {
+                    offset = int(block.NumberU64() + 1)
+                }
+                // allocate proper size for the queue
+                d.queue.alloc(offset, d.queue.hashPool.Size())
+
                 break out
             }
         case <-failureResponseTimer.C:
@@ -257,11 +336,27 @@ out:
             // If the peer was previously banned and failed to deliver its pack
             // in a reasonable time frame, ignore its message.
             if d.peers[blockPack.peerId] != nil {
+                err := d.queue.deliver(blockPack.peerId, blockPack.blocks)
+                if err != nil {
+                    glog.V(logger.Debug).Infof("deliver failed for peer %s: %v\n", blockPack.peerId, err)
+                    // FIXME d.UnregisterPeer(blockPack.peerId)
+                    break
+                }
+
+                if glog.V(logger.Debug) {
+                    glog.Infof("adding %d blocks from: %s\n", len(blockPack.blocks), blockPack.peerId)
+                }
                 d.peers[blockPack.peerId].promote()
-                d.queue.deliver(blockPack.peerId, blockPack.blocks)
                 d.peers.setState(blockPack.peerId, idleState)
             }
         case <-ticker.C:
+            // after removing bad peers make sure we actually have sufficient peers left to keep downloading
+            if len(d.peers) == 0 {
+                d.queue.reset()
+
+                return errNoPeers
+            }
+
             // If there are unrequested hashes left start fetching
             // from the available peers.
             if d.queue.hashPool.Size() > 0 {
@@ -310,7 +405,7 @@ out:
                     if time.Since(chunk.itime) > blockTtl {
                         badPeers = append(badPeers, pid)
                         // remove peer as good peer from peer list
-                        //d.UnregisterPeer(pid)
+                        // FIXME d.UnregisterPeer(pid)
                     }
                 }
                 d.queue.mu.Unlock()
@@ -364,114 +459,6 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error {
     return nil
 }

-// Add an (unrequested) block to the downloader. This is usually done through the
-// NewBlockMsg by the protocol handler.
-// Adding blocks is done synchronously. If there are missing blocks, blocks will be
-// fetched first. If the downloader is busy or if some other process failed an error
-// will be returned.
-func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) error {
-    hash := block.Hash()
-
-    if d.hasBlock(hash) {
-        return fmt.Errorf("known block %x", hash.Bytes()[:4])
-    }
-
-    peer := d.peers.getPeer(id)
-    // if the peer is in our healthy list of peers, update the td
-    // and add the block. Otherwise just ignore it
-    if peer == nil {
-        glog.V(logger.Detail).Infof("Ignored block from bad peer %s\n", id)
-        return ErrBadPeer
-    }
-
-    peer.mu.Lock()
-    peer.recentHash = block.Hash()
-    peer.mu.Unlock()
-    peer.promote()
-
-    glog.V(logger.Detail).Infoln("Inserting new block from:", id)
-    d.queue.addBlock(id, block)
-
-    // if not busy, go ahead and process
-    if d.isBusy() {
-        return errBusy
-    }
-
-    // Check if the parent of the received block is known.
-    // If the block is not known, request it.
-    phash := block.ParentHash()
-    if !d.hasBlock(phash) {
-        glog.V(logger.Detail).Infof("Missing parent %x, requires fetching\n", phash.Bytes()[:4])
-
-        // Get the missing hashes from the peer (synchronously)
-        err := d.getFromPeer(peer, peer.recentHash, true)
-        if err != nil {
-            return err
-        }
-    }
-
-    return d.process(peer)
-}
-
-func (d *Downloader) process(peer *peer) error {
-    atomic.StoreInt32(&d.processingBlocks, 1)
-    defer atomic.StoreInt32(&d.processingBlocks, 0)
-
-    // XXX this will move when optimised
-    // Sort the blocks by number. This bit needs much improvement. Right now
-    // it assumes full honesty from peers (i.e. it's not checked when the blocks
-    // link). We should at least check which queues match. This code could move
-    // to a separate goroutine where it periodically checks for linked pieces.
-    types.BlockBy(types.Number).Sort(d.queue.blocks)
-    if len(d.queue.blocks) == 0 {
-        return nil
-    }
-
-    var blocks = d.queue.blocks
-    glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].Number(), blocks[len(blocks)-1].Number())
-
-    // Loop until we're out of blocks
-    for len(blocks) != 0 {
-        max := int(math.Min(float64(len(blocks)), 256))
-        // TODO check for parent error. When there's a parent error we should stop
-        // processing and start requesting the `block.hash` so that its parent and
-        // grandparents can be requested and queued.
-        var i int
-        i, err := d.insertChain(blocks[:max])
-        if err != nil && core.IsParentErr(err) {
-            // Ignore the missing blocks. Handler should take care of anything that's missing.
-            glog.V(logger.Debug).Infof("Ignored block with missing parent (%d)\n", i)
-            blocks = blocks[i+1:]
-
-            continue
-        } else if err != nil {
-            // immediately unregister the false peer but do not disconnect
-            d.UnregisterPeer(d.activePeer)
-            // Reset chain completely. This needs much, much improvement;
-            // instead: check all blocks leading down to this false block and remove it
-            d.queue.blocks = nil
-
-            return ErrBadPeer
-        }
-
-        // delete the blocks from the slice and let them be garbage collected
-        // without this slice trick the blocks would stay in memory until nil
-        // would be assigned to d.queue.blocks
-        copy(blocks, blocks[max:])
-        for k, n := len(blocks)-max, len(blocks); k < n; k++ {
-            blocks[k] = nil
-        }
-        blocks = blocks[:len(blocks)-max]
-    }
-
-    if len(blocks) == 0 {
-        d.queue.blocks = nil
-    } else {
-        d.queue.blocks = blocks
-    }
-    return nil
-}
-
 func (d *Downloader) isFetchingHashes() bool {
     return atomic.LoadInt32(&d.fetchingHashes) == 1
 }
@@ -480,12 +467,8 @@ func (d *Downloader) isDownloadingBlocks() bool {
     return atomic.LoadInt32(&d.downloadingBlocks) == 1
 }

-func (d *Downloader) isProcessing() bool {
-    return atomic.LoadInt32(&d.processingBlocks) == 1
-}
-
 func (d *Downloader) isBusy() bool {
-    return d.isFetchingHashes() || d.isDownloadingBlocks() || d.isProcessing()
+    return d.isFetchingHashes() || d.isDownloadingBlocks()
 }

 func (d *Downloader) IsBusy() bool {
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 5518163ca..d13818b37 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -8,8 +8,6 @@ import (

     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/logger"
-    "github.com/ethereum/go-ethereum/logger/glog"
 )

 var knownHash = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
@@ -28,7 +26,7 @@ func createHashes(start, amount int) (hashes []common.Hash) {
 func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
     blocks := make(map[common.Hash]*types.Block)
     for i, hash := range hashes {
-        header := &types.Header{Number: big.NewInt(int64(i))}
+        header := &types.Header{Number: big.NewInt(int64(len(hashes) - i))}
         blocks[hash] = types.NewBlockWithHeader(header)
         blocks[hash].HeaderHash = hash
     }
@@ -43,13 +41,11 @@ type downloadTester struct {
     t      *testing.T
     pcount int
     done   chan bool
-
-    insertedBlocks int
 }

 func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester {
     tester := &downloadTester{t: t, hashes: hashes, blocks: blocks, done: make(chan bool)}
-    downloader := New(tester.hasBlock, tester.insertChain)
+    downloader := New(tester.hasBlock, tester.getBlock)
     tester.downloader = downloader

     return tester
@@ -62,10 +58,8 @@ func (dl *downloadTester) hasBlock(hash common.Hash) bool {
     return false
 }

-func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) {
-    dl.insertedBlocks += len(blocks)
-
-    return 0, nil
+func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
+    return dl.blocks[knownHash]
 }

 func (dl *downloadTester) getHashes(hash common.Hash) error {
@@ -102,9 +96,6 @@ func (dl *downloadTester) badBlocksPeer(id string, td *big.Int, hash common.Hash
 }

 func TestDownload(t *testing.T) {
-    glog.SetV(logger.Detail)
-    glog.SetToStderr(true)
-
     minDesiredPeerCount = 4
     blockTtl = 1 * time.Second

@@ -123,15 +114,13 @@ func TestDownload(t *testing.T) {
         t.Error("download error", err)
     }

-    if tester.insertedBlocks != targetBlocks {
-        t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
+    inqueue := len(tester.downloader.queue.blocks)
+    if inqueue != targetBlocks {
+        t.Error("expected", targetBlocks, "have", inqueue)
     }
 }

 func TestMissing(t *testing.T) {
-    glog.SetV(logger.Detail)
-    glog.SetToStderr(true)
-
     targetBlocks := 1000
     hashes := createHashes(0, 1000)
     extraHashes := createHashes(1001, 1003)
@@ -148,7 +137,33 @@ func TestMissing(t *testing.T) {
         t.Error("download error", err)
     }

-    if tester.insertedBlocks != targetBlocks {
-        t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
+    inqueue := len(tester.downloader.queue.blocks)
+    if inqueue != targetBlocks {
+        t.Error("expected", targetBlocks, "have", inqueue)
+    }
+}
+
+func TestTaking(t *testing.T) {
+    minDesiredPeerCount = 4
+    blockTtl = 1 * time.Second
+
+    targetBlocks := 1000
+    hashes := createHashes(0, targetBlocks)
+    blocks := createBlocksFromHashes(hashes)
+    tester := newTester(t, hashes, blocks)
+
+    tester.newPeer("peer1", big.NewInt(10000), hashes[0])
+    tester.newPeer("peer2", big.NewInt(0), common.Hash{})
+    tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
+    tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
+
+    err := tester.downloader.Synchronise("peer1", hashes[0])
+    if err != nil {
+        t.Error("download error", err)
+    }
+
+    bs1 := tester.downloader.TakeBlocks()
+    if len(bs1) != 1000 {
+        t.Error("expected to take 1000, got", len(bs1))
+    }
 }
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index a21a44706..13768229f 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -18,7 +18,9 @@ type queue struct {

     mu       sync.Mutex
     fetching map[string]*chunk
-    blocks   []*types.Block
+
+    blockOffset int
+    blocks      []*types.Block
 }

 func newqueue() *queue {
@@ -34,6 +36,10 @@ func (c *queue) reset() {
     c.mu.Lock()
     defer c.mu.Unlock()

+    c.resetNoTS()
+}
+func (c *queue) resetNoTS() {
+    c.blockOffset = 0
     c.hashPool.Clear()
     c.fetchPool.Clear()
     c.blockHashes.Clear()
@@ -41,6 +47,10 @@ func (c *queue) reset() {
     c.fetching = make(map[string]*chunk)
 }

+func (c *queue) size() int {
+    return c.hashPool.Size() + c.blockHashes.Size() + c.fetchPool.Size()
+}
+
 // reserve a `max` set of hashes for `p` peer.
 func (c *queue) get(p *peer, max int) *chunk {
     c.mu.Lock()
@@ -89,7 +99,7 @@ func (c *queue) get(p *peer, max int) *chunk {
 }

 func (c *queue) has(hash common.Hash) bool {
-    return c.hashPool.Has(hash) || c.fetchPool.Has(hash)
+    return c.hashPool.Has(hash) || c.fetchPool.Has(hash) || c.blockHashes.Has(hash)
 }

 func (c *queue) addBlock(id string, block *types.Block) {
@@ -103,8 +113,24 @@ func (c *queue) addBlock(id string, block *types.Block) {
     }
 }

+func (c *queue) getBlock(hash common.Hash) *types.Block {
+    c.mu.Lock()
+    defer c.mu.Unlock()
+
+    if !c.blockHashes.Has(hash) {
+        return nil
+    }
+
+    for _, block := range c.blocks {
+        if block.Hash() == hash {
+            return block
+        }
+    }
+    return nil
+}
+
 // deliver delivers a chunk to the queue that was requested of the peer
-func (c *queue) deliver(id string, blocks []*types.Block) {
+func (c *queue) deliver(id string, blocks []*types.Block) error {
     c.mu.Lock()
     defer c.mu.Unlock()

@@ -124,11 +150,35 @@ func (c *queue) deliver(id string, blocks []*types.Block) {
         // merge block hashes
         c.blockHashes.Merge(blockHashes)
         // Add the blocks
-        c.blocks = append(c.blocks, blocks...)
+        for _, block := range blocks {
+            // See (1) for future limitation
+            n := int(block.NumberU64()) - c.blockOffset
+            if n > len(c.blocks) || n < 0 {
+                return errBlockNumberOverflow
+            }
+            c.blocks[n] = block
+        }
         // Add back whatever couldn't be delivered
         c.hashPool.Merge(chunk.hashes)
         c.fetchPool.Separate(chunk.hashes)
     }
+
+    return nil
+}
+
+func (c *queue) alloc(offset, size int) {
+    c.mu.Lock()
+    defer c.mu.Unlock()
+
+    if c.blockOffset < offset {
+        c.blockOffset = offset
+    }
+
+    // (1) XXX at some point we could limit allocation to memory and use the disk
+    // to store future blocks.
+    if len(c.blocks) < size {
+        c.blocks = append(c.blocks, make([]*types.Block, size)...)
+    }
 }

 // puts puts sets of hashes on to the queue for fetching
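The structural change in queue.go worth calling out is that deliver no longer appends blocks: each block is slotted at index number - blockOffset in a window pre-allocated by alloc, so out-of-order deliveries from multiple peers still leave TakeBlocks with an ordered, contiguous prefix to drain. A toy model of that idea follows; it is an illustration only, and it tightens the bound to n >= len(slots), since the committed check n > len(c.blocks) still admits n == len(c.blocks).

package main

import "fmt"

// window is a toy model of the queue's offset-indexed block slice:
// a block numbered N is stored at slot N-offset rather than appended.
type window struct {
    offset int
    slots  []int // stands in for []*types.Block
}

// deliver places a value at its number-derived slot, rejecting anything
// outside the allocated range (the errBlockNumberOverflow case).
func (w *window) deliver(number, v int) error {
    n := number - w.offset
    if n < 0 || n >= len(w.slots) {
        return fmt.Errorf("block %d overflows window [%d, %d)", number, w.offset, w.offset+len(w.slots))
    }
    w.slots[n] = v
    return nil
}

func main() {
    w := &window{offset: 101, slots: make([]int, 8)}
    w.deliver(105, 105) // out-of-order delivery is fine
    w.deliver(101, 101)
    fmt.Println(w.slots)             // [101 0 0 0 105 0 0 0]
    fmt.Println(w.deliver(200, 200)) // block 200 overflows window [101, 109)
}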