Diffstat (limited to 'eth/handler.go')
-rw-r--r--  eth/handler.go  81
1 file changed, 72 insertions(+), 9 deletions(-)
diff --git a/eth/handler.go b/eth/handler.go
index d466dbfee..d00d00f23 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -39,6 +39,7 @@ import (
"math"
"math/big"
"sync"
+ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
@@ -51,6 +52,11 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
+const (
+ peerCountTimeout = 12 * time.Second // Time after which the sync handler stops waiting for minDesiredPeerCount peers
+ minDesiredPeerCount = 5 // Minimum number of connected peers required before syncing starts
+)
+
func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}
@@ -82,6 +88,9 @@ type ProtocolManager struct {
eventMux *event.TypeMux
txSub event.Subscription
minedBlockSub event.Subscription
+
+ newPeerCh chan *peer
+ quitSync chan struct{}
}
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
@@ -93,6 +102,8 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
chainman: chainman,
downloader: downloader,
peers: make(map[string]*peer),
+ newPeerCh: make(chan *peer, 1),
+ quitSync: make(chan struct{}),
}
manager.SubProtocol = p2p.Protocol{
@@ -101,16 +112,67 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
Length: ProtocolLength,
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(protocolVersion, networkId, p, rw)
- err := manager.handle(peer)
- //glog.V(logger.Detail).Infof("[%s]: %v\n", peer.id, err)
- return err
+ manager.newPeerCh <- peer
+
+ return manager.handle(peer)
},
}
return manager
}
+func (pm *ProtocolManager) syncHandler() {
+ // itimer is used to determine when to start ignoring `minDesiredPeerCount`
+ itimer := time.NewTimer(peerCountTimeout)
+out:
+ for {
+ select {
+ case <-pm.newPeerCh:
+ // Wait until we have at least `minDesiredPeerCount` peers before selecting the best one
+ if len(pm.peers) < minDesiredPeerCount {
+ break
+ }
+
+ // Find the best peer; abort the attempt if none could be selected
+ peer := getBestPeer(pm.peers)
+ if peer == nil {
+ glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available")
+ break
+ }
+ itimer.Stop()
+ go pm.synchronise(peer)
+ case <-itimer.C:
+ // The timer makes sure the downloader keeps an active state,
+ // periodically checking the network for the peer with the highest TD.
+ // Either select that peer, or restart the timer if no peer could
+ // be selected.
+ if peer := getBestPeer(pm.peers); peer != nil {
+ go pm.synchronise(peer)
+ } else {
+ itimer.Reset(5 * time.Second)
+ }
+ case <-pm.quitSync:
+ break out
+ }
+ }
+}
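
[Editorial note] getBestPeer is called above but is not part of this diff. As a rough sketch of what it plausibly does (an assumption, not code from this commit), it would scan the peer map and return the peer advertising the highest total difficulty, consistent with how peer.td is compared in synchronise below:

// Hypothetical sketch, not from this commit: return the peer with the
// highest advertised total difficulty, or nil if no peers are known.
func getBestPeer(peers map[string]*peer) *peer {
	var best *peer
	for _, p := range peers {
		if best == nil || p.td.Cmp(best.td) > 0 {
			best = p
		}
	}
	return best
}
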
+
+func (pm *ProtocolManager) synchronise(peer *peer) {
+ // Make sure the peer's TD is higher than our own; otherwise there is nothing to sync from, so bail out.
+ if peer.td.Cmp(pm.chainman.Td()) <= 0 {
+ return
+ }
+
+ glog.V(logger.Info).Infof("Synchronisation attempt using %s TD=%v\n", peer.id, peer.td)
+ // Get the hashes from the peer (synchronously)
+ err := pm.downloader.Synchronise(peer.id, peer.recentHash)
+ if err != nil {
+ // Sync failed: log it and wait for the next sync trigger
+ glog.V(logger.Debug).Infoln("error downloading:", err)
+ }
+}
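
[Editorial note] The guard at the top of synchronise relies on big.Int.Cmp, which returns -1, 0, or +1; any peer whose TD is at or below our own is skipped. A self-contained illustration with made-up difficulty values:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	ourTd := big.NewInt(1200)  // hypothetical local total difficulty
	peerTd := big.NewInt(1000) // hypothetical peer total difficulty
	// Mirrors the check in synchronise: peers not strictly ahead are ignored.
	if peerTd.Cmp(ourTd) <= 0 {
		fmt.Println("peer's TD is not higher than ours; skipping sync")
	}
}
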
+
func (pm *ProtocolManager) Start() {
// broadcast transactions
pm.txSub = pm.eventMux.Subscribe(core.TxPreEvent{})
@@ -119,11 +181,15 @@ func (pm *ProtocolManager) Start() {
// broadcast mined blocks
pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
go pm.minedBroadcastLoop()
+
+ // sync handler
+ go pm.syncHandler()
}
func (pm *ProtocolManager) Stop() {
pm.txSub.Unsubscribe() // quits txBroadcastLoop
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
+ close(pm.quitSync) // quits the sync handler
}
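
[Editorial note] Stop uses close rather than a send because a receive on a closed channel returns immediately, so the select in syncHandler is guaranteed to wake up and take the quitSync branch. A standalone sketch of the pattern:

package main

import "fmt"

func main() {
	quitSync := make(chan struct{})
	done := make(chan struct{})
	go func() {
		<-quitSync // unblocks as soon as quitSync is closed
		fmt.Println("sync handler exited")
		close(done)
	}()
	close(quitSync) // what Stop does
	<-done
}
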
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
@@ -141,7 +207,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
pm.peers[p.id] = p
pm.pmu.Unlock()
- pm.downloader.RegisterPeer(p.id, p.td, p.currentHash, p.requestHashes, p.requestBlocks)
+ pm.downloader.RegisterPeer(p.id, p.recentHash, p.requestHashes, p.requestBlocks)
defer func() {
pm.pmu.Lock()
defer pm.pmu.Unlock()
@@ -276,7 +342,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "block validation %v: %v", msg, err)
}
hash := request.Block.Hash()
- // Add the block hash as a known hash to the peer. This will later be used to detirmine
+ // Add the block hash as a known hash to the peer. This will later be used to determine
// who should receive this.
p.blockHashes.Add(hash)
@@ -296,7 +362,6 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
if self.chainman.HasBlock(hash) {
break
}
- /* XXX unsure about this */
if self.chainman.Td().Cmp(request.TD) > 0 && new(big.Int).Add(request.Block.Number(), big.NewInt(7)).Cmp(self.chainman.CurrentBlock().Number()) < 0 {
glog.V(logger.Debug).Infof("[%s] dropped block %v due to low TD %v\n", p.id, request.Block.Number(), request.TD)
break
@@ -305,24 +370,22 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
// Attempt to insert the newly received block by checking if its parent exists.
// If the parent exists we process the block and propagate it to our peers;
// if the parent does not exist we delegate to the downloader.
- // NOTE we can reduce chatter by dropping blocks with Td < currentTd
if self.chainman.HasBlock(request.Block.ParentHash()) {
if err := self.chainman.InsertChain(types.Blocks{request.Block}); err != nil {
// handle error
return nil
}
self.BroadcastBlock(hash, request.Block)
- //fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD)
} else {
// adding blocks is synchronous, so run it in its own goroutine
go func() {
+ // TODO check parent error
err := self.downloader.AddBlock(p.id, request.Block, request.TD)
if err != nil {
glog.V(logger.Detail).Infoln("downloader err:", err)
return
}
self.BroadcastBlock(hash, request.Block)
- //fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD)
}()
}
default: