Diffstat (limited to 'eth')
-rw-r--r--  eth/backend.go                     | 113
-rw-r--r--  eth/downloader/downloader.go       | 132
-rw-r--r--  eth/downloader/downloader_test.go  |  42
-rw-r--r--  eth/downloader/peer.go             |  15
-rw-r--r--  eth/downloader/queue.go            |   3
-rw-r--r--  eth/downloader/synchronous.go      |  79
-rw-r--r--  eth/handler.go                     | 147
-rw-r--r--  eth/peer.go                        |  28
8 files changed, 291 insertions, 268 deletions
diff --git a/eth/backend.go b/eth/backend.go
index 982317314..fa8349116 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -3,9 +3,9 @@ package eth
import (
"crypto/ecdsa"
"fmt"
- "math"
"path"
"strings"
+ "time"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts"
@@ -30,8 +30,9 @@ var (
jsonlogger = logger.NewJsonLogger()
defaultBootNodes = []*discover.Node{
- // ETH/DEV cmd/bootnode
- discover.MustParseNode("enode://09fbeec0d047e9a37e63f60f8618aa9df0e49271f3fadb2c070dc09e2099b95827b63a8b837c6fd01d0802d457dd83e3bd48bd3e6509f8209ed90dabbc30e3d3@52.16.188.185:30303"),
+ // ETH/DEV Go Bootnodes
+ discover.MustParseNode("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"),
+ discover.MustParseNode("enode://7f25d3eab333a6b98a8b5ed68d962bb22c876ffcd5561fca54e3c2ef27f754df6f7fd7c9b74cc919067abac154fb8e1f8385505954f161ae440abc355855e034@54.207.93.166:30303"),
// ETH/DEV cpp-ethereum (poc-9.ethdev.com)
discover.MustParseNode("enode://487611428e6c99a11a9795a6abe7b529e81315ca6aad66e2a2fc76e3adf263faba0d35466c2f8f68d561dbefa8878d4df5f1f2ddb1fbeab7f42ffb8cd328bd4a@5.1.83.226:30303"),
}
@@ -124,6 +125,8 @@ type Ethereum struct {
blockDb common.Database // Block chain database
stateDb common.Database // State changes database
extraDb common.Database // Extra database (txs, etc)
+ // Closed when databases are flushed and closed
+ databasesClosed chan bool
//*** SERVICES ***
// State manager for processing new blocks and managing the over all states
@@ -136,11 +139,10 @@ type Ethereum struct {
protocolManager *ProtocolManager
downloader *downloader.Downloader
- net *p2p.Server
- eventMux *event.TypeMux
- txSub event.Subscription
- minedBlockSub event.Subscription
- miner *miner.Miner
+ net *p2p.Server
+ eventMux *event.TypeMux
+ txSub event.Subscription
+ miner *miner.Miner
// logger logger.LogSystem
@@ -199,30 +201,31 @@ func New(config *Config) (*Ethereum, error) {
glog.V(logger.Info).Infof("Blockchain DB Version: %d", config.BlockChainVersion)
eth := &Ethereum{
- shutdownChan: make(chan bool),
- blockDb: blockDb,
- stateDb: stateDb,
- extraDb: extraDb,
- eventMux: &event.TypeMux{},
- accountManager: config.AccountManager,
- DataDir: config.DataDir,
- etherbase: common.HexToAddress(config.Etherbase),
- clientVersion: config.Name, // TODO should separate from Name
- ethVersionId: config.ProtocolVersion,
- netVersionId: config.NetworkId,
- NatSpec: config.NatSpec,
+ shutdownChan: make(chan bool),
+ databasesClosed: make(chan bool),
+ blockDb: blockDb,
+ stateDb: stateDb,
+ extraDb: extraDb,
+ eventMux: &event.TypeMux{},
+ accountManager: config.AccountManager,
+ DataDir: config.DataDir,
+ etherbase: common.HexToAddress(config.Etherbase),
+ clientVersion: config.Name, // TODO should separate from Name
+ ethVersionId: config.ProtocolVersion,
+ netVersionId: config.NetworkId,
+ NatSpec: config.NatSpec,
}
eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.EventMux())
- eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.InsertChain, eth.chainManager.Td)
+ eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.InsertChain)
eth.pow = ethash.New(eth.chainManager)
- eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State)
+ eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit)
eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux())
eth.chainManager.SetProcessor(eth.blockProcessor)
eth.whisper = whisper.New()
eth.shhVersionId = int(eth.whisper.Version())
eth.miner = miner.New(eth, eth.pow, config.MinerThreads)
- eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.txPool, eth.chainManager, eth.downloader)
+ eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.eventMux, eth.txPool, eth.chainManager, eth.downloader)
netprv, err := config.nodeKey()
if err != nil {
@@ -319,10 +322,9 @@ func (s *Ethereum) StartMining() error {
err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
glog.V(logger.Error).Infoln(err)
return err
-
}
- s.miner.Start(eb)
+ go s.miner.Start(eb)
return nil
}
@@ -379,8 +381,12 @@ func (s *Ethereum) Start() error {
}
}
+ // periodically flush databases
+ go s.syncDatabases()
+
// Start services
- s.txPool.Start()
+ go s.txPool.Start()
+ s.protocolManager.Start()
if s.whisper != nil {
s.whisper.Start()
@@ -390,14 +396,38 @@ func (s *Ethereum) Start() error {
s.txSub = s.eventMux.Subscribe(core.TxPreEvent{})
go s.txBroadcastLoop()
- // broadcast mined blocks
- s.minedBlockSub = s.eventMux.Subscribe(core.NewMinedBlockEvent{})
- go s.minedBroadcastLoop()
-
glog.V(logger.Info).Infoln("Server started")
return nil
}
+func (s *Ethereum) syncDatabases() {
+ ticker := time.NewTicker(1 * time.Minute)
+done:
+ for {
+ select {
+ case <-ticker.C:
+ // don't change the order of database flushes
+ if err := s.extraDb.Flush(); err != nil {
+ glog.V(logger.Error).Infof("error: flush extraDb: %v\n", err)
+ }
+ if err := s.stateDb.Flush(); err != nil {
+ glog.V(logger.Error).Infof("error: flush stateDb: %v\n", err)
+ }
+ if err := s.blockDb.Flush(); err != nil {
+ glog.V(logger.Error).Infof("error: flush blockDb: %v\n", err)
+ }
+ case <-s.shutdownChan:
+ break done
+ }
+ }
+
+ s.blockDb.Close()
+ s.stateDb.Close()
+ s.extraDb.Close()
+
+ close(s.databasesClosed)
+}
+
func (s *Ethereum) StartForTest() {
jsonlogger.LogJson(&logger.LogStarting{
ClientString: s.net.Name,
@@ -418,14 +448,9 @@ func (self *Ethereum) SuggestPeer(nodeURL string) error {
}
func (s *Ethereum) Stop() {
- // Close the database
- defer s.blockDb.Close()
- defer s.stateDb.Close()
- defer s.extraDb.Close()
-
- s.txSub.Unsubscribe() // quits txBroadcastLoop
- s.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
+ s.txSub.Unsubscribe() // quits txBroadcastLoop
+ s.protocolManager.Stop()
s.txPool.Stop()
s.eventMux.Stop()
if s.whisper != nil {
@@ -438,16 +463,14 @@ func (s *Ethereum) Stop() {
// This function will wait for a shutdown and resumes main thread execution
func (s *Ethereum) WaitForShutdown() {
+ <-s.databasesClosed
<-s.shutdownChan
}
-// now tx broadcasting is taken out of txPool
-// handled here via subscription, efficiency?
func (self *Ethereum) txBroadcastLoop() {
// automatically stops if unsubscribe
for obj := range self.txSub.Chan() {
event := obj.(core.TxPreEvent)
- self.net.BroadcastLimited("eth", TxMsg, math.Sqrt, []*types.Transaction{event.Tx})
self.syncAccounts(event.Tx)
}
}
@@ -466,16 +489,6 @@ func (self *Ethereum) syncAccounts(tx *types.Transaction) {
}
}
-func (self *Ethereum) minedBroadcastLoop() {
- // automatically stops if unsubscribe
- for obj := range self.minedBlockSub.Chan() {
- switch ev := obj.(type) {
- case core.NewMinedBlockEvent:
- self.protocolManager.BroadcastBlock(ev.Block.Hash(), ev.Block)
- }
- }
-}
-
func saveProtocolVersion(db common.Database, protov int) {
d, _ := db.Get([]byte("ProtocolVersion"))
protocolVersion := common.NewValue(d).Uint()
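The backend.go change above replaces the deferred database Close calls in Stop with a background syncDatabases loop: flush every database once a minute and, on shutdown, close them and signal WaitForShutdown through the new databasesClosed channel. A minimal standalone sketch of that pattern (stand-in store interface and names, not the committed code):

package ethsketch

import (
	"log"
	"time"
)

// store stands in for the Flush/Close surface of common.Database assumed here.
type store interface {
	Flush() error
	Close()
}

// syncLoop flushes the stores once a minute and, when shutdown is signalled,
// closes them in order and announces completion by closing `closed`.
func syncLoop(stores []store, shutdown <-chan bool, closed chan<- struct{}) {
	ticker := time.NewTicker(1 * time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// don't change the order of flushes (extra, state, block in the real code)
			for _, s := range stores {
				if err := s.Flush(); err != nil {
					log.Printf("flush error: %v", err)
				}
			}
		case <-shutdown:
			for _, s := range stores {
				s.Close()
			}
			close(closed) // WaitForShutdown waits on this before shutdownChan
			return
		}
	}
}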
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index cfc494b2f..60d908758 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -39,7 +39,6 @@ var (
type hashCheckFn func(common.Hash) bool
type chainInsertFn func(types.Blocks) error
type hashIterFn func() (common.Hash, error)
-type currentTdFn func() *big.Int
type blockPack struct {
peerId string
@@ -61,7 +60,6 @@ type Downloader struct {
// Callbacks
hasBlock hashCheckFn
insertChain chainInsertFn
- currentTd currentTdFn
// Status
fetchingHashes int32
@@ -70,27 +68,20 @@ type Downloader struct {
// Channels
newPeerCh chan *peer
- syncCh chan syncPack
hashCh chan []common.Hash
blockCh chan blockPack
- quit chan struct{}
}
-func New(hasBlock hashCheckFn, insertChain chainInsertFn, currentTd currentTdFn) *Downloader {
+func New(hasBlock hashCheckFn, insertChain chainInsertFn) *Downloader {
downloader := &Downloader{
queue: newqueue(),
peers: make(peers),
hasBlock: hasBlock,
insertChain: insertChain,
- currentTd: currentTd,
newPeerCh: make(chan *peer, 1),
- syncCh: make(chan syncPack, 1),
hashCh: make(chan []common.Hash, 1),
blockCh: make(chan blockPack, 1),
- quit: make(chan struct{}),
}
- go downloader.peerHandler()
- go downloader.update()
return downloader
}
@@ -99,18 +90,17 @@ func (d *Downloader) Stats() (current int, max int) {
return d.queue.blockHashes.Size(), d.queue.fetchPool.Size() + d.queue.hashPool.Size()
}
-func (d *Downloader) RegisterPeer(id string, td *big.Int, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {
+func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {
d.mu.Lock()
defer d.mu.Unlock()
- glog.V(logger.Detail).Infoln("Register peer", id, "TD =", td)
+ glog.V(logger.Detail).Infoln("Register peer", id)
// Create a new peer and add it to the list of known peers
- peer := newPeer(id, td, hash, getHashes, getBlocks)
+ peer := newPeer(id, hash, getHashes, getBlocks)
// add peer to our peer set
d.peers[id] = peer
// broadcast new peer
- d.newPeerCh <- peer
return nil
}
@@ -125,72 +115,59 @@ func (d *Downloader) UnregisterPeer(id string) {
delete(d.peers, id)
}
-func (d *Downloader) peerHandler() {
- // itimer is used to determine when to start ignoring `minDesiredPeerCount`
- itimer := time.NewTimer(peerCountTimeout)
-out:
- for {
- select {
- case <-d.newPeerCh:
- // Meet the `minDesiredPeerCount` before we select our best peer
- if len(d.peers) < minDesiredPeerCount {
- break
- }
- itimer.Stop()
-
- d.selectPeer(d.peers.bestPeer())
- case <-itimer.C:
- // The timer will make sure that the downloader keeps an active state
- // in which it attempts to always check the network for highest td peers
- // Either select the peer or restart the timer if no peers could
- // be selected.
- if peer := d.peers.bestPeer(); peer != nil {
- d.selectPeer(d.peers.bestPeer())
- } else {
- itimer.Reset(5 * time.Second)
- }
- case <-d.quit:
- break out
- }
- }
-}
-
-func (d *Downloader) selectPeer(p *peer) {
+// Synchronise will select the peer with the given id and use it for
+// synchronising the local chain. If the peer cannot be found, or the
+// downloader is already busy, an error will be returned. This method is
+// synchronous.
+func (d *Downloader) Synchronise(id string, hash common.Hash) error {
// Make sure it's doing neither. Once done we can restart the
// downloading process if the TD is higher. For now just get on
// with whatever is going on. This prevents unecessary switching.
if d.isBusy() {
- return
+ return errBusy
}
- // selected peer must be better than our own
- // XXX we also check the peer's recent hash to make sure we
- // don't have it. Some peers report (i think) incorrect TD.
- if p.td.Cmp(d.currentTd()) <= 0 || d.hasBlock(p.recentHash) {
- return
+
+ // Fetch the peer using the id or throw an error if the peer couldn't be found
+ p := d.peers[id]
+ if p == nil {
+ return errUnknownPeer
}
- glog.V(logger.Detail).Infoln("New peer with highest TD =", p.td)
- d.syncCh <- syncPack{p, p.recentHash, false}
+ // Get the hash from the peer and initiate the downloading process.
+ err := d.getFromPeer(p, hash, false)
+ if err != nil {
+ return err
+ }
+ return d.process(p)
}
-func (d *Downloader) update() {
-out:
- for {
- select {
- case sync := <-d.syncCh:
- var peer *peer = sync.peer
- err := d.getFromPeer(peer, sync.hash, sync.ignoreInitial)
- if err != nil {
- glog.V(logger.Detail).Infoln(err)
- break
- }
+func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error {
+ d.activePeer = p.id
+
+ glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
+ // Start the fetcher. This blocks the caller entirely;
+ // interrupts need to be sent to the appropriate channels
+ // respectively.
+ if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
+ // handle error
+ glog.V(logger.Debug).Infoln("Error fetching hashes:", err)
+ // XXX Reset
+ return err
+ }
- d.process()
- case <-d.quit:
- break out
- }
+ // Start fetching blocks in parallel. The strategy is simple:
+ // take any available peers, reserve a chunk for each peer available,
+ // let the peer deliver the chunk and periodically check if a peer
+ // has timed out. When done downloading, process blocks.
+ if err := d.startFetchingBlocks(p); err != nil {
+ glog.V(logger.Debug).Infoln("Error downloading blocks:", err)
+ // XXX reset
+ return err
}
+
+ glog.V(logger.Detail).Infoln("Sync completed")
+
+ return nil
}
// XXX Make synchronous
@@ -403,13 +380,12 @@ func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) error
}
peer.mu.Lock()
- peer.td = td
peer.recentHash = block.Hash()
peer.mu.Unlock()
peer.promote()
glog.V(logger.Detail).Infoln("Inserting new block from:", id)
- d.queue.addBlock(id, block, td)
+ d.queue.addBlock(id, block)
// if neither go ahead to process
if d.isBusy() {
@@ -429,10 +405,10 @@ func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) error
}
}
- return d.process()
+ return d.process(peer)
}
-func (d *Downloader) process() error {
+func (d *Downloader) process(peer *peer) error {
atomic.StoreInt32(&d.processingBlocks, 1)
defer atomic.StoreInt32(&d.processingBlocks, 0)
@@ -458,18 +434,8 @@ func (d *Downloader) process() error {
// grandparents can be requested and queued.
err = d.insertChain(blocks[:max])
if err != nil && core.IsParentErr(err) {
- glog.V(logger.Debug).Infoln("Aborting process due to missing parent. Fetching hashes")
-
- // TODO change this. This shite
- for i, block := range blocks[:max] {
- if !d.hasBlock(block.ParentHash()) {
- d.syncCh <- syncPack{d.peers.bestPeer(), block.Hash(), true}
- // remove processed blocks
- blocks = blocks[i:]
+ glog.V(logger.Debug).Infoln("Aborting process due to missing parent.")
- break
- }
- }
break
} else if err != nil {
// immediatly unregister the false peer but do not disconnect
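With peerHandler, update and the syncCh/quit channels removed, the downloader no longer drives itself; a caller registers a peer and then calls Synchronise, which blocks until hashes and blocks have been fetched and processed, or returns an error such as errBusy or errUnknownPeer. A rough sketch of that calling pattern; the callback signatures are inferred from this diff and downloader_test.go, not verified against the full tree:

package ethsketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/downloader"
)

// syncWithPeer registers a peer with the (now TD-less) downloader and runs a
// blocking synchronisation against the peer's advertised head hash.
func syncWithPeer(d *downloader.Downloader, id string, head common.Hash,
	getHashes func(common.Hash) error, getBlocks func([]common.Hash) error) error {

	if err := d.RegisterPeer(id, head, getHashes, getBlocks); err != nil {
		return err
	}
	// Synchronise fetches hashes, then blocks, then processes them, and only
	// returns once done or on error (errBusy, errUnknownPeer, ...).
	return d.Synchronise(id, head)
}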
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 1d449cfba..8843ca0c7 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -49,7 +49,7 @@ type downloadTester struct {
func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester {
tester := &downloadTester{t: t, hashes: hashes, blocks: blocks, done: make(chan bool)}
- downloader := New(tester.hasBlock, tester.insertChain, func() *big.Int { return new(big.Int) })
+ downloader := New(tester.hasBlock, tester.insertChain)
tester.downloader = downloader
return tester
@@ -65,10 +65,6 @@ func (dl *downloadTester) hasBlock(hash common.Hash) bool {
func (dl *downloadTester) insertChain(blocks types.Blocks) error {
dl.insertedBlocks += len(blocks)
- if len(dl.blocks)-1 <= dl.insertedBlocks {
- dl.done <- true
- }
-
return nil
}
@@ -93,14 +89,14 @@ func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error {
func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) {
dl.pcount++
- dl.downloader.RegisterPeer(id, td, hash, dl.getHashes, dl.getBlocks(id))
+ dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id))
}
func (dl *downloadTester) badBlocksPeer(id string, td *big.Int, hash common.Hash) {
dl.pcount++
// This bad peer never returns any blocks
- dl.downloader.RegisterPeer(id, td, hash, dl.getHashes, func([]common.Hash) error {
+ dl.downloader.RegisterPeer(id, hash, dl.getHashes, func([]common.Hash) error {
return nil
})
}
@@ -112,7 +108,8 @@ func TestDownload(t *testing.T) {
minDesiredPeerCount = 4
blockTtl = 1 * time.Second
- hashes := createHashes(0, 1000)
+ targetBlocks := 1000
+ hashes := createHashes(0, targetBlocks)
blocks := createBlocksFromHashes(hashes)
tester := newTester(t, hashes, blocks)
@@ -121,21 +118,21 @@ func TestDownload(t *testing.T) {
tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
-success:
- select {
- case <-tester.done:
- break success
- case <-time.After(10 * time.Second): // XXX this could actually fail on a slow computer
- t.Error("timeout")
+ err := tester.downloader.Synchronise("peer1", hashes[0])
+ if err != nil {
+ t.Error("download error", err)
+ }
+
+ if tester.insertedBlocks != targetBlocks {
+ t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
}
}
func TestMissing(t *testing.T) {
- t.Skip()
-
glog.SetV(logger.Detail)
glog.SetToStderr(true)
+ targetBlocks := 1000
hashes := createHashes(0, 1000)
extraHashes := createHashes(1001, 1003)
blocks := createBlocksFromHashes(append(extraHashes, hashes...))
@@ -146,13 +143,12 @@ func TestMissing(t *testing.T) {
hashes = append(extraHashes, hashes[:len(hashes)-1]...)
tester.newPeer("peer2", big.NewInt(0), common.Hash{})
-success1:
- select {
- case <-tester.done:
- break success1
- case <-time.After(10 * time.Second): // XXX this could actually fail on a slow computer
- t.Error("timout")
+ err := tester.downloader.Synchronise("peer1", hashes[0])
+ if err != nil {
+ t.Error("download error", err)
}
- tester.downloader.AddBlock("peer2", blocks[hashes[len(hashes)-1]], big.NewInt(10001))
+ if tester.insertedBlocks != targetBlocks {
+ t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
+ }
}
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index bcb8ad43a..91977f592 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -2,7 +2,6 @@ package downloader
import (
"errors"
- "math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
@@ -51,16 +50,6 @@ func (p peers) getPeer(id string) *peer {
return p[id]
}
-func (p peers) bestPeer() *peer {
- var peer *peer
- for _, cp := range p {
- if peer == nil || cp.td.Cmp(peer.td) > 0 {
- peer = cp
- }
- }
- return peer
-}
-
// peer represents an active peer
type peer struct {
state int // Peer state (working, idle)
@@ -68,7 +57,6 @@ type peer struct {
mu sync.RWMutex
id string
- td *big.Int
recentHash common.Hash
ignored *set.Set
@@ -78,10 +66,9 @@ type peer struct {
}
// create a new peer
-func newPeer(id string, td *big.Int, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) *peer {
+func newPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) *peer {
return &peer{
id: id,
- td: td,
recentHash: hash,
getHashes: getHashes,
getBlocks: getBlocks,
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index adbc2a0d0..a21a44706 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -2,7 +2,6 @@ package downloader
import (
"math"
- "math/big"
"sync"
"time"
@@ -93,7 +92,7 @@ func (c *queue) has(hash common.Hash) bool {
return c.hashPool.Has(hash) || c.fetchPool.Has(hash)
}
-func (c *queue) addBlock(id string, block *types.Block, td *big.Int) {
+func (c *queue) addBlock(id string, block *types.Block) {
c.mu.Lock()
defer c.mu.Unlock()
diff --git a/eth/downloader/synchronous.go b/eth/downloader/synchronous.go
deleted file mode 100644
index 7bb49d24e..000000000
--- a/eth/downloader/synchronous.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package downloader
-
-import (
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/logger"
- "github.com/ethereum/go-ethereum/logger/glog"
-)
-
-// THIS IS PENDING AND TO DO CHANGES FOR MAKING THE DOWNLOADER SYNCHRONOUS
-
-// SynchroniseWithPeer will select the peer and use it for synchronising. If an empty string is given
-// it will use the best peer possible and synchronise if it's TD is higher than our own. If any of the
-// checks fail an error will be returned. This method is synchronous
-func (d *Downloader) SynchroniseWithPeer(id string) (types.Blocks, error) {
- // Check if we're busy
- if d.isBusy() {
- return nil, errBusy
- }
-
- // Attempt to select a peer. This can either be nothing, which returns, best peer
- // or selected peer. If no peer could be found an error will be returned
- var p *peer
- if len(id) == 0 {
- p = d.peers[id]
- if p == nil {
- return nil, errUnknownPeer
- }
- } else {
- p = d.peers.bestPeer()
- }
-
- // Make sure our td is lower than the peer's td
- if p.td.Cmp(d.currentTd()) <= 0 || d.hasBlock(p.recentHash) {
- return nil, errLowTd
- }
-
- // Get the hash from the peer and initiate the downloading progress.
- err := d.getFromPeer(p, p.recentHash, false)
- if err != nil {
- return nil, err
- }
-
- return d.queue.blocks, nil
-}
-
-// Synchronise will synchronise using the best peer.
-func (d *Downloader) Synchronise() (types.Blocks, error) {
- return d.SynchroniseWithPeer("")
-}
-
-func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error {
- d.activePeer = p.id
-
- glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
- // Start the fetcher. This will block the update entirely
- // interupts need to be send to the appropriate channels
- // respectively.
- if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
- // handle error
- glog.V(logger.Debug).Infoln("Error fetching hashes:", err)
- // XXX Reset
- return err
- }
-
- // Start fetching blocks in paralel. The strategy is simple
- // take any available peers, seserve a chunk for each peer available,
- // let the peer deliver the chunkn and periodically check if a peer
- // has timedout. When done downloading, process blocks.
- if err := d.startFetchingBlocks(p); err != nil {
- glog.V(logger.Debug).Infoln("Error downloading blocks:", err)
- // XXX reset
- return err
- }
-
- glog.V(logger.Detail).Infoln("Sync completed")
-
- return nil
-}
diff --git a/eth/handler.go b/eth/handler.go
index 622f22132..d00d00f23 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -39,17 +39,24 @@ import (
"math"
"math/big"
"sync"
+ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
)
+const (
+ peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
+ minDesiredPeerCount = 5 // Amount of peers desired to start syncing
+)
+
func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}
@@ -77,16 +84,26 @@ type ProtocolManager struct {
peers map[string]*peer
SubProtocol p2p.Protocol
+
+ eventMux *event.TypeMux
+ txSub event.Subscription
+ minedBlockSub event.Subscription
+
+ newPeerCh chan *peer
+ quitSync chan struct{}
}
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the ethereum network.
-func NewProtocolManager(protocolVersion, networkId int, txpool txPool, chainman *core.ChainManager, downloader *downloader.Downloader) *ProtocolManager {
+func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpool txPool, chainman *core.ChainManager, downloader *downloader.Downloader) *ProtocolManager {
manager := &ProtocolManager{
+ eventMux: mux,
txpool: txpool,
chainman: chainman,
downloader: downloader,
peers: make(map[string]*peer),
+ newPeerCh: make(chan *peer, 1),
+ quitSync: make(chan struct{}),
}
manager.SubProtocol = p2p.Protocol{
@@ -95,16 +112,86 @@ func NewProtocolManager(protocolVersion, networkId int, txpool txPool, chainman
Length: ProtocolLength,
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(protocolVersion, networkId, p, rw)
- err := manager.handle(peer)
- //glog.V(logger.Detail).Infof("[%s]: %v\n", peer.id, err)
- return err
+ manager.newPeerCh <- peer
+
+ return manager.handle(peer)
},
}
return manager
}
+func (pm *ProtocolManager) syncHandler() {
+ // itimer is used to determine when to start ignoring `minDesiredPeerCount`
+ itimer := time.NewTimer(peerCountTimeout)
+out:
+ for {
+ select {
+ case <-pm.newPeerCh:
+ // Meet the `minDesiredPeerCount` before we select our best peer
+ if len(pm.peers) < minDesiredPeerCount {
+ break
+ }
+
+ // Find the best peer
+ peer := getBestPeer(pm.peers)
+ if peer == nil {
+ glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available")
+ }
+
+ itimer.Stop()
+ go pm.synchronise(peer)
+ case <-itimer.C:
+ // The timer will make sure that the downloader keeps an active state
+ // in which it always attempts to check the network for the highest TD peers.
+ // Either select the peer or restart the timer if no peers could
+ // be selected.
+ if peer := getBestPeer(pm.peers); peer != nil {
+ go pm.synchronise(peer)
+ } else {
+ itimer.Reset(5 * time.Second)
+ }
+ case <-pm.quitSync:
+ break out
+ }
+ }
+}
+
+func (pm *ProtocolManager) synchronise(peer *peer) {
+ // Make sure the peer's TD is higher than our own. If not drop.
+ if peer.td.Cmp(pm.chainman.Td()) <= 0 {
+ return
+ }
+
+ glog.V(logger.Info).Infof("Synchronisation attempt using %s TD=%v\n", peer.id, peer.td)
+ // Get the hashes from the peer (synchronously)
+ err := pm.downloader.Synchronise(peer.id, peer.recentHash)
+ if err != nil {
+ // handle error
+ glog.V(logger.Debug).Infoln("error downloading:", err)
+ }
+}
+
+func (pm *ProtocolManager) Start() {
+ // broadcast transactions
+ pm.txSub = pm.eventMux.Subscribe(core.TxPreEvent{})
+ go pm.txBroadcastLoop()
+
+ // broadcast mined blocks
+ pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
+ go pm.minedBroadcastLoop()
+
+ // sync handler
+ go pm.syncHandler()
+}
+
+func (pm *ProtocolManager) Stop() {
+ pm.txSub.Unsubscribe() // quits txBroadcastLoop
+ pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
+ close(pm.quitSync) // quits the sync handler
+}
+
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
td, current, genesis := pm.chainman.Status()
@@ -120,7 +207,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
pm.peers[p.id] = p
pm.pmu.Unlock()
- pm.downloader.RegisterPeer(p.id, p.td, p.currentHash, p.requestHashes, p.requestBlocks)
+ pm.downloader.RegisterPeer(p.id, p.recentHash, p.requestHashes, p.requestBlocks)
defer func() {
pm.pmu.Lock()
defer pm.pmu.Unlock()
@@ -255,7 +342,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "block validation %v: %v", msg, err)
}
hash := request.Block.Hash()
- // Add the block hash as a known hash to the peer. This will later be used to detirmine
+ // Add the block hash as a known hash to the peer. This will later be used to determine
// who should receive this.
p.blockHashes.Add(hash)
@@ -275,7 +362,6 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
if self.chainman.HasBlock(hash) {
break
}
- /* XXX unsure about this */
if self.chainman.Td().Cmp(request.TD) > 0 && new(big.Int).Add(request.Block.Number(), big.NewInt(7)).Cmp(self.chainman.CurrentBlock().Number()) < 0 {
glog.V(logger.Debug).Infof("[%s] dropped block %v due to low TD %v\n", p.id, request.Block.Number(), request.TD)
break
@@ -284,24 +370,22 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
// Attempt to insert the newly received by checking if the parent exists.
// if the parent exists we process the block and propagate to our peers
// if the parent does not exists we delegate to the downloader.
- // NOTE we can reduce chatter by dropping blocks with Td < currentTd
if self.chainman.HasBlock(request.Block.ParentHash()) {
if err := self.chainman.InsertChain(types.Blocks{request.Block}); err != nil {
// handle error
return nil
}
self.BroadcastBlock(hash, request.Block)
- //fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD)
} else {
// adding blocks is synchronous
go func() {
+ // TODO check parent error
err := self.downloader.AddBlock(p.id, request.Block, request.TD)
if err != nil {
glog.V(logger.Detail).Infoln("downloader err:", err)
return
}
self.BroadcastBlock(hash, request.Block)
- //fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD)
}()
}
default:
@@ -326,10 +410,51 @@ func (pm *ProtocolManager) BroadcastBlock(hash common.Hash, block *types.Block)
}
}
// Broadcast block to peer set
- // XXX due to the current shit state of the network disable the limit
peers = peers[:int(math.Sqrt(float64(len(peers))))]
for _, peer := range peers {
peer.sendNewBlock(block)
}
glog.V(logger.Detail).Infoln("broadcast block to", len(peers), "peers")
}
+
+// BroadcastTx will propagate the transaction to its connected peers. It will
+// sort out which peers do not contain the transaction in their set and will
+// do a sqrt(peers) to determine the amount of peers we broadcast to.
+func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) {
+ pm.pmu.Lock()
+ defer pm.pmu.Unlock()
+
+ // Find peers who don't know anything about the given hash. Peers that
+ // don't know about the hash will be candidates for the broadcast loop
+ var peers []*peer
+ for _, peer := range pm.peers {
+ if !peer.txHashes.Has(hash) {
+ peers = append(peers, peer)
+ }
+ }
+ // Broadcast transaction to peer set
+ peers = peers[:int(math.Sqrt(float64(len(peers))))]
+ for _, peer := range peers {
+ peer.sendTransaction(tx)
+ }
+ glog.V(logger.Detail).Infoln("broadcast tx to", len(peers), "peers")
+}
+
+// Mined broadcast loop
+func (self *ProtocolManager) minedBroadcastLoop() {
+ // automatically stops if unsubscribe
+ for obj := range self.minedBlockSub.Chan() {
+ switch ev := obj.(type) {
+ case core.NewMinedBlockEvent:
+ self.BroadcastBlock(ev.Block.Hash(), ev.Block)
+ }
+ }
+}
+
+func (self *ProtocolManager) txBroadcastLoop() {
+ // automatically stops if unsubscribe
+ for obj := range self.txSub.Chan() {
+ event := obj.(core.TxPreEvent)
+ self.BroadcastTx(event.Tx.Hash(), event.Tx)
+ }
+}
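Both BroadcastBlock and the new BroadcastTx limit propagation to sqrt(n) of the peers that have not yet seen the hash (the earlier "disable the limit" escape hatch is gone). A small self-contained sketch of that selection, using illustrative stand-in types rather than the eth peer set:

package ethsketch

import "math"

// peerInfo stands in for *peer; `seen` mirrors the txHashes/blockHashes sets.
type peerInfo struct {
	id   string
	seen map[string]bool
}

// broadcastSet keeps only peers unaware of the hash and then cuts the list
// down to sqrt(n) receivers, e.g. 16 unaware peers -> 4 receivers.
func broadcastSet(peers map[string]*peerInfo, hash string) []*peerInfo {
	var candidates []*peerInfo
	for _, p := range peers {
		if !p.seen[hash] {
			candidates = append(candidates, p)
		}
	}
	return candidates[:int(math.Sqrt(float64(len(candidates))))]
}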
diff --git a/eth/peer.go b/eth/peer.go
index 972880845..861efaaec 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -25,6 +25,16 @@ type getBlockHashesMsgData struct {
Amount uint64
}
+func getBestPeer(peers map[string]*peer) *peer {
+ var peer *peer
+ for _, cp := range peers {
+ if peer == nil || cp.td.Cmp(peer.td) > 0 {
+ peer = cp
+ }
+ }
+ return peer
+}
+
type peer struct {
*p2p.Peer
@@ -32,9 +42,9 @@ type peer struct {
protv, netid int
- currentHash common.Hash
- id string
- td *big.Int
+ recentHash common.Hash
+ id string
+ td *big.Int
genesis, ourHash common.Hash
ourTd *big.Int
@@ -43,14 +53,14 @@ type peer struct {
blockHashes *set.Set
}
-func newPeer(protv, netid int, genesis, currentHash common.Hash, td *big.Int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
+func newPeer(protv, netid int, genesis, recentHash common.Hash, td *big.Int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID()
return &peer{
Peer: p,
rw: rw,
genesis: genesis,
- ourHash: currentHash,
+ ourHash: recentHash,
ourTd: td,
protv: protv,
netid: netid,
@@ -86,6 +96,12 @@ func (p *peer) sendNewBlock(block *types.Block) error {
return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, block.Td})
}
+func (p *peer) sendTransaction(tx *types.Transaction) error {
+ p.txHashes.Add(tx.Hash())
+
+ return p2p.Send(p.rw, TxMsg, []*types.Transaction{tx})
+}
+
func (p *peer) requestHashes(from common.Hash) error {
glog.V(logger.Debug).Infof("[%s] fetching hashes (%d) %x...\n", p.id, maxHashes, from[:4])
return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, maxHashes})
@@ -139,7 +155,7 @@ func (p *peer) handleStatus() error {
// Set the total difficulty of the peer
p.td = status.TD
// set the best hash of the peer
- p.currentHash = status.CurrentBlock
+ p.recentHash = status.CurrentBlock
return <-errc
}