From 47f62a67aa8a033d8a81dc16104018369325897d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 2 Oct 2015 13:20:41 +0300 Subject: eth/downloader: match capabilities when querying idle peers --- eth/downloader/downloader.go | 4 ++-- eth/downloader/downloader_test.go | 49 ++++++++++++++++++++++++++++++++++++--- eth/downloader/peer.go | 8 ++++--- 3 files changed, 53 insertions(+), 8 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index d1a716c5f..64fb1b57b 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -816,7 +816,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { } // Send a download request to all idle peers, until throttled throttled := false - for _, peer := range d.peers.IdlePeers() { + for _, peer := range d.peers.IdlePeers(eth61) { // Short circuit if throttling activated if d.queue.Throttle() { throttled = true @@ -1255,7 +1255,7 @@ func (d *Downloader) fetchBodies(from uint64) error { } // Send a download request to all idle peers, until throttled queuedEmptyBlocks, throttled := false, false - for _, peer := range d.peers.IdlePeers() { + for _, peer := range d.peers.IdlePeers(eth62) { // Short circuit if throttling activated if d.queue.Throttle() { throttled = true diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 885fab8bd..96096527e 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -205,9 +205,17 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha dl.lock.Lock() defer dl.lock.Unlock() - err := dl.downloader.RegisterPeer(id, version, hashes[0], - dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay), - dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay)) + var err error + switch version { + case 61: + err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay), nil, nil, nil) + case 62: + err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay)) + case 63: + err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay)) + case 64: + err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay)) + } if err == nil { // Assign the owned hashes and blocks to the peer (deep copy) dl.peerHashes[id] = make([]common.Hash, len(hashes)) @@ -618,6 +626,41 @@ func testMultiSynchronisation(t *testing.T, protocol int) { } } +// Tests that synchronisations behave well in multi-version protocol environments +// and not wreak havok on other nodes in the network. 
+func TestMultiProtocolSynchronisation61(t *testing.T) { testMultiProtocolSynchronisation(t, 61) } +func TestMultiProtocolSynchronisation62(t *testing.T) { testMultiProtocolSynchronisation(t, 62) } +func TestMultiProtocolSynchronisation63(t *testing.T) { testMultiProtocolSynchronisation(t, 63) } +func TestMultiProtocolSynchronisation64(t *testing.T) { testMultiProtocolSynchronisation(t, 64) } + +func testMultiProtocolSynchronisation(t *testing.T, protocol int) { + // Create a small enough block chain to download + targetBlocks := blockCacheLimit - 15 + hashes, blocks := makeChain(targetBlocks, 0, genesis) + + // Create peers of every type + tester := newTester() + tester.newPeer("peer 61", 61, hashes, blocks) + tester.newPeer("peer 62", 62, hashes, blocks) + tester.newPeer("peer 63", 63, hashes, blocks) + tester.newPeer("peer 64", 64, hashes, blocks) + + // Synchronise with the requestd peer and make sure all blocks were retrieved + if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) + } + if imported := len(tester.ownBlocks); imported != targetBlocks+1 { + t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1) + } + // Check that no peers have been dropped off + for _, version := range []int{61, 62, 63, 64} { + peer := fmt.Sprintf("peer %d", version) + if _, ok := tester.peerHashes[peer]; !ok { + t.Errorf("%s dropped", peer) + } + } +} + // Tests that if a block is empty (i.e. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) } diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 8fd1f9a99..c1d20ac61 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -312,14 +312,16 @@ func (ps *peerSet) AllPeers() []*peer { // IdlePeers retrieves a flat list of all the currently idle peers within the // active peer set, ordered by their reputation. -func (ps *peerSet) IdlePeers() []*peer { +func (ps *peerSet) IdlePeers(version int) []*peer { ps.lock.RLock() defer ps.lock.RUnlock() list := make([]*peer, 0, len(ps.peers)) for _, p := range ps.peers { - if atomic.LoadInt32(&p.idle) == 0 { - list = append(list, p) + if (version == eth61 && p.version == eth61) || (version >= eth62 && p.version >= eth62) { + if atomic.LoadInt32(&p.idle) == 0 { + list = append(list, p) + } } } for i := 0; i < len(list); i++ { -- cgit v1.2.3 From f7a71996fbbe9cea4445600ffa3c232a6cf42803 Mon Sep 17 00:00:00 2001 From: Jeffrey Wilcke Date: Sun, 30 Aug 2015 10:04:59 +0200 Subject: core, event/filter, xeth: refactored filter system Moved the filtering system from `event` to `eth/filters` package and removed the `core.Filter` object. The `filters.Filter` object now requires a `common.Database` rather than a `eth.Backend` and invokes the `core.GetBlockByX` directly rather than thru a "manager". --- eth/filters/filter.go | 211 +++++++++++++++++++++++++++++++++++++++++++ eth/filters/filter_system.go | 133 +++++++++++++++++++++++++++ 2 files changed, 344 insertions(+) create mode 100644 eth/filters/filter.go create mode 100644 eth/filters/filter_system.go (limited to 'eth') diff --git a/eth/filters/filter.go b/eth/filters/filter.go new file mode 100644 index 000000000..b7f795607 --- /dev/null +++ b/eth/filters/filter.go @@ -0,0 +1,211 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package filters + +import ( + "math" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" +) + +type AccountChange struct { + Address, StateAddress []byte +} + +// Filtering interface +type Filter struct { + db common.Database + earliest int64 + latest int64 + skip int + address []common.Address + max int + topics [][]common.Hash + + BlockCallback func(*types.Block, state.Logs) + TransactionCallback func(*types.Transaction) + LogsCallback func(state.Logs) +} + +// Create a new filter which uses a bloom filter on blocks to figure out whether a particular block +// is interesting or not. +func New(db common.Database) *Filter { + return &Filter{db: db} +} + +// Set the earliest and latest block for filtering. +// -1 = latest block (i.e., the current block) +// hash = particular hash from-to +func (self *Filter) SetEarliestBlock(earliest int64) { + self.earliest = earliest +} + +func (self *Filter) SetLatestBlock(latest int64) { + self.latest = latest +} + +func (self *Filter) SetAddress(addr []common.Address) { + self.address = addr +} + +func (self *Filter) SetTopics(topics [][]common.Hash) { + self.topics = topics +} + +func (self *Filter) SetMax(max int) { + self.max = max +} + +func (self *Filter) SetSkip(skip int) { + self.skip = skip +} + +// Run filters logs with the current parameters set +func (self *Filter) Find() state.Logs { + earliestBlock := core.GetCurrentBlock(self.db) + var earliestBlockNo uint64 = uint64(self.earliest) + if self.earliest == -1 { + earliestBlockNo = earliestBlock.NumberU64() + } + var latestBlockNo uint64 = uint64(self.latest) + if self.latest == -1 { + latestBlockNo = earliestBlock.NumberU64() + } + + var ( + logs state.Logs + block = core.GetBlockByNumber(self.db, latestBlockNo) + ) + +done: + for i := 0; block != nil; i++ { + // Quit on latest + switch { + case block.NumberU64() == 0: + break done + case block.NumberU64() < earliestBlockNo: + break done + case self.max <= len(logs): + break done + } + + // Use bloom filtering to see if this block is interesting given the + // current parameters + if self.bloomFilter(block) { + // Get the logs of the block + var ( + receipts = core.GetBlockReceipts(self.db, block.Hash()) + unfiltered state.Logs + ) + for _, receipt := range receipts { + unfiltered = append(unfiltered, receipt.Logs()...) + } + logs = append(logs, self.FilterLogs(unfiltered)...) 
+ } + + block = core.GetBlockByHash(self.db, block.ParentHash()) + } + + skip := int(math.Min(float64(len(logs)), float64(self.skip))) + + return logs[skip:] +} + +func includes(addresses []common.Address, a common.Address) bool { + for _, addr := range addresses { + if addr == a { + return true + } + } + + return false +} + +func (self *Filter) FilterLogs(logs state.Logs) state.Logs { + var ret state.Logs + + // Filter the logs for interesting stuff +Logs: + for _, log := range logs { + if len(self.address) > 0 && !includes(self.address, log.Address) { + continue + } + + logTopics := make([]common.Hash, len(self.topics)) + copy(logTopics, log.Topics) + + // If the to filtered topics is greater than the amount of topics in + // logs, skip. + if len(self.topics) > len(log.Topics) { + continue Logs + } + + for i, topics := range self.topics { + var match bool + for _, topic := range topics { + // common.Hash{} is a match all (wildcard) + if (topic == common.Hash{}) || log.Topics[i] == topic { + match = true + break + } + } + + if !match { + continue Logs + } + + } + + ret = append(ret, log) + } + + return ret +} + +func (self *Filter) bloomFilter(block *types.Block) bool { + if len(self.address) > 0 { + var included bool + for _, addr := range self.address { + if types.BloomLookup(block.Bloom(), addr) { + included = true + break + } + } + + if !included { + return false + } + } + + for _, sub := range self.topics { + var included bool + for _, topic := range sub { + if (topic == common.Hash{}) || types.BloomLookup(block.Bloom(), topic) { + included = true + break + } + } + if !included { + return false + } + } + + return true +} diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go new file mode 100644 index 000000000..9ad73a896 --- /dev/null +++ b/eth/filters/filter_system.go @@ -0,0 +1,133 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// package filters implements an ethereum filtering system for block, +// transactions and log events. +package filters + +import ( + "sync" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/event" +) + +// FilterSystem manages filters that filter specific events such as +// block, transaction and log events. The Filtering system can be used to listen +// for specific LOG events fires by the EVM (Ethereum Virtual Machine). 
+type FilterSystem struct { + eventMux *event.TypeMux + + filterMu sync.RWMutex + filterId int + filters map[int]*Filter + + quit chan struct{} +} + +// NewFilterSystem returns a newly allocated filter manager +func NewFilterSystem(mux *event.TypeMux) *FilterSystem { + fs := &FilterSystem{ + eventMux: mux, + filters: make(map[int]*Filter), + } + go fs.filterLoop() + return fs +} + +// Stop quits the filter loop required for polling events +func (fs *FilterSystem) Stop() { + close(fs.quit) +} + +// Add adds a filter to the filter manager +func (fs *FilterSystem) Add(filter *Filter) (id int) { + fs.filterMu.Lock() + defer fs.filterMu.Unlock() + id = fs.filterId + fs.filters[id] = filter + fs.filterId++ + + return id +} + +// Remove removes a filter by filter id +func (fs *FilterSystem) Remove(id int) { + fs.filterMu.Lock() + defer fs.filterMu.Unlock() + if _, ok := fs.filters[id]; ok { + delete(fs.filters, id) + } +} + +// Get retrieves a filter installed using Add The filter may not be modified. +func (fs *FilterSystem) Get(id int) *Filter { + fs.filterMu.RLock() + defer fs.filterMu.RUnlock() + return fs.filters[id] +} + +// filterLoop waits for specific events from ethereum and fires their handlers +// when the filter matches the requirements. +func (fs *FilterSystem) filterLoop() { + // Subscribe to events + events := fs.eventMux.Subscribe( + //core.PendingBlockEvent{}, + core.ChainEvent{}, + core.TxPreEvent{}, + state.Logs(nil)) + +out: + for { + select { + case <-fs.quit: + break out + case event := <-events.Chan(): + switch event := event.(type) { + case core.ChainEvent: + fs.filterMu.RLock() + for _, filter := range fs.filters { + if filter.BlockCallback != nil { + filter.BlockCallback(event.Block, event.Logs) + } + } + fs.filterMu.RUnlock() + + case core.TxPreEvent: + fs.filterMu.RLock() + for _, filter := range fs.filters { + if filter.TransactionCallback != nil { + filter.TransactionCallback(event.Tx) + } + } + fs.filterMu.RUnlock() + + case state.Logs: + fs.filterMu.RLock() + for _, filter := range fs.filters { + if filter.LogsCallback != nil { + msgs := filter.FilterLogs(event) + if len(msgs) > 0 { + filter.LogsCallback(msgs) + } + } + } + fs.filterMu.RUnlock() + } + } + } +} -- cgit v1.2.3 From 361082ec4b942aea7c01fcb1be1782cb68b6fe3a Mon Sep 17 00:00:00 2001 From: Jeffrey Wilcke Date: Sun, 30 Aug 2015 10:19:10 +0200 Subject: cmd/evm, core/vm, test: refactored VM and core * Moved `vm.Transfer` to `core` package and changed execution to call `env.Transfer` instead of `core.Transfer` directly. * core/vm: byte code VM moved to jump table instead of switch * Moved `vm.Transfer` to `core` package and changed execution to call `env.Transfer` instead of `core.Transfer` directly. * Byte code VM now shares the same code as the JITVM * Renamed Context to Contract * Changed initialiser of state transition & unexported methods * Removed the Execution object and refactor `Call`, `CallCode` & `Create` in to their own functions instead of being methods. * Removed the hard dep on the state for the VM. The VM now depends on a Database interface returned by the environment. In the process the core now depends less on the statedb by usage of the env * Moved `Log` from package `core/state` to package `core/vm`. 
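To make the jump-table point above concrete, here is a minimal, self-contained sketch of table-based opcode dispatch. It is illustrative only: the operation/jumpTable names, the opcode values and the uint64 stack are hypothetical and are not the actual core/vm types.

package main

import "fmt"

// operation describes how one opcode executes. Instead of a large switch,
// the interpreter indexes a fixed-size table of these by opcode byte.
type operation struct {
	name    string
	execute func(stack []uint64) []uint64
}

// opAdd pops two values and pushes their sum.
func opAdd(stack []uint64) []uint64 {
	a, b := stack[len(stack)-1], stack[len(stack)-2]
	return append(stack[:len(stack)-2], a+b)
}

// opMul pops two values and pushes their product.
func opMul(stack []uint64) []uint64 {
	a, b := stack[len(stack)-1], stack[len(stack)-2]
	return append(stack[:len(stack)-2], a*b)
}

// jumpTable maps opcode bytes to operations; dispatch is one index lookup,
// which a plain interpreter and a JIT front-end can share.
var jumpTable = [256]*operation{
	0x01: {name: "ADD", execute: opAdd},
	0x02: {name: "MUL", execute: opMul},
}

func main() {
	stack := []uint64{2, 3}
	for _, code := range []byte{0x01} { // run the one-opcode program "ADD"
		if op := jumpTable[code]; op != nil {
			stack = op.execute(stack)
		}
	}
	fmt.Println(stack) // [5]
}

Compared with a single growing switch, the table keeps per-opcode behaviour in one place, which is what lets the byte code VM and the JITVM share the same dispatch data.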
--- eth/filters/filter.go | 16 ++++++++-------- eth/filters/filter_system.go | 6 +++--- 2 files changed, 11 insertions(+), 11 deletions(-) (limited to 'eth') diff --git a/eth/filters/filter.go b/eth/filters/filter.go index b7f795607..0b4911629 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -21,8 +21,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" ) type AccountChange struct { @@ -39,9 +39,9 @@ type Filter struct { max int topics [][]common.Hash - BlockCallback func(*types.Block, state.Logs) + BlockCallback func(*types.Block, vm.Logs) TransactionCallback func(*types.Transaction) - LogsCallback func(state.Logs) + LogsCallback func(vm.Logs) } // Create a new filter which uses a bloom filter on blocks to figure out whether a particular block @@ -78,7 +78,7 @@ func (self *Filter) SetSkip(skip int) { } // Run filters logs with the current parameters set -func (self *Filter) Find() state.Logs { +func (self *Filter) Find() vm.Logs { earliestBlock := core.GetCurrentBlock(self.db) var earliestBlockNo uint64 = uint64(self.earliest) if self.earliest == -1 { @@ -90,7 +90,7 @@ func (self *Filter) Find() state.Logs { } var ( - logs state.Logs + logs vm.Logs block = core.GetBlockByNumber(self.db, latestBlockNo) ) @@ -112,7 +112,7 @@ done: // Get the logs of the block var ( receipts = core.GetBlockReceipts(self.db, block.Hash()) - unfiltered state.Logs + unfiltered vm.Logs ) for _, receipt := range receipts { unfiltered = append(unfiltered, receipt.Logs()...) @@ -138,8 +138,8 @@ func includes(addresses []common.Address, a common.Address) bool { return false } -func (self *Filter) FilterLogs(logs state.Logs) state.Logs { - var ret state.Logs +func (self *Filter) FilterLogs(logs vm.Logs) vm.Logs { + var ret vm.Logs // Filter the logs for interesting stuff Logs: diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 9ad73a896..1c27c7be4 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -22,7 +22,7 @@ import ( "sync" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/event" ) @@ -89,7 +89,7 @@ func (fs *FilterSystem) filterLoop() { //core.PendingBlockEvent{}, core.ChainEvent{}, core.TxPreEvent{}, - state.Logs(nil)) + vm.Logs(nil)) out: for { @@ -116,7 +116,7 @@ out: } fs.filterMu.RUnlock() - case state.Logs: + case vm.Logs: fs.filterMu.RLock() for _, filter := range fs.filters { if filter.LogsCallback != nil { -- cgit v1.2.3 From 7c7692933c21b77328a94eed714f66c276776197 Mon Sep 17 00:00:00 2001 From: Jeffrey Wilcke Date: Mon, 31 Aug 2015 17:09:50 +0200 Subject: cmd/geth, cmd/utils, core, rpc: renamed to blockchain * Renamed ChainManager to BlockChain * Checkpointing is no longer required and never really properly worked when the state was corrupted. 
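For downstream code the change is essentially the accessor rename; a minimal usage sketch under that assumption follows. The package and function names here are hypothetical — only eth.Ethereum, its BlockChain accessor and the CurrentBlock/Td/NumberU64 methods come from the diff below.

package chainhead

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth"
)

// LogHead prints the head block and total difficulty through the renamed
// BlockChain accessor (formerly ChainManager); node setup is omitted.
func LogHead(e *eth.Ethereum) {
	head := e.BlockChain().CurrentBlock()
	fmt.Printf("head block #%d (td %v)\n", head.NumberU64(), e.BlockChain().Td())
}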
--- eth/backend.go | 22 +++--- eth/filters/filter.go | 17 +++-- eth/filters/filter_system.go | 2 +- eth/gasprice.go | 4 +- eth/handler.go | 66 +++++++++--------- eth/handler_test.go | 158 +++++++++++++++++++++---------------------- eth/helper_test.go | 20 +++--- eth/protocol_test.go | 2 +- eth/sync.go | 2 +- 9 files changed, 149 insertions(+), 144 deletions(-) (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index 349dfa613..a480b4931 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -217,7 +217,7 @@ type Ethereum struct { // State manager for processing new blocks and managing the over all states blockProcessor *core.BlockProcessor txPool *core.TxPool - chainManager *core.ChainManager + blockchain *core.BlockChain accountManager *accounts.Manager whisper *whisper.Whisper pow *ethash.Ethash @@ -365,7 +365,7 @@ func New(config *Config) (*Ethereum, error) { eth.pow = ethash.New() } //genesis := core.GenesisBlock(uint64(config.GenesisNonce), stateDb) - eth.chainManager, err = core.NewChainManager(chainDb, eth.pow, eth.EventMux()) + eth.blockchain, err = core.NewBlockChain(chainDb, eth.pow, eth.EventMux()) if err != nil { if err == core.ErrNoGenesis { return nil, fmt.Errorf(`Genesis block not found. Please supply a genesis block with the "--genesis /path/to/file" argument`) @@ -373,11 +373,11 @@ func New(config *Config) (*Ethereum, error) { return nil, err } - eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit) + eth.txPool = core.NewTxPool(eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit) - eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.chainManager, eth.EventMux()) - eth.chainManager.SetProcessor(eth.blockProcessor) - eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.chainManager, chainDb) + eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.blockchain, eth.EventMux()) + eth.blockchain.SetProcessor(eth.blockProcessor) + eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb) eth.miner = miner.New(eth, eth.EventMux(), eth.pow) eth.miner.SetGasPrice(config.GasPrice) @@ -441,7 +441,7 @@ func (s *Ethereum) NodeInfo() *NodeInfo { DiscPort: int(node.UDP), TCPPort: int(node.TCP), ListenAddr: s.net.ListenAddr, - Td: s.ChainManager().Td().String(), + Td: s.BlockChain().Td().String(), } } @@ -478,7 +478,7 @@ func (s *Ethereum) PeersInfo() (peersinfo []*PeerInfo) { } func (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) { - s.chainManager.ResetWithGenesisBlock(gb) + s.blockchain.ResetWithGenesisBlock(gb) } func (s *Ethereum) StartMining(threads int) error { @@ -518,7 +518,7 @@ func (s *Ethereum) Miner() *miner.Miner { return s.miner } // func (s *Ethereum) Logger() logger.LogSystem { return s.logger } func (s *Ethereum) Name() string { return s.net.Name } func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManager } -func (s *Ethereum) ChainManager() *core.ChainManager { return s.chainManager } +func (s *Ethereum) BlockChain() *core.BlockChain { return s.blockchain } func (s *Ethereum) BlockProcessor() *core.BlockProcessor { return s.blockProcessor } func (s *Ethereum) TxPool() *core.TxPool { return s.txPool } func (s *Ethereum) Whisper() *whisper.Whisper { return s.whisper } @@ -581,7 +581,7 @@ func (self *Ethereum) AddPeer(nodeURL string) error { func (s *Ethereum) Stop() { s.net.Stop() - s.chainManager.Stop() + s.blockchain.Stop() s.protocolManager.Stop() 
s.txPool.Stop() s.eventMux.Stop() @@ -622,7 +622,7 @@ func (self *Ethereum) StartAutoDAG() { select { case <-timer: glog.V(logger.Info).Infof("checking DAG (ethash dir: %s)", ethash.DefaultDir) - currentBlock := self.ChainManager().CurrentBlock().NumberU64() + currentBlock := self.BlockChain().CurrentBlock().NumberU64() thisEpoch := currentBlock / epochLength if nextEpoch <= thisEpoch { if currentBlock%epochLength > autoDAGepochHeight { diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 0b4911629..2bcf20d0c 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -1,4 +1,4 @@ -// Copyright 2014 The go-ethereum Authors +// Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/ethdb" ) type AccountChange struct { @@ -31,7 +32,7 @@ type AccountChange struct { // Filtering interface type Filter struct { - db common.Database + db ethdb.Database earliest int64 latest int64 skip int @@ -46,7 +47,7 @@ type Filter struct { // Create a new filter which uses a bloom filter on blocks to figure out whether a particular block // is interesting or not. -func New(db common.Database) *Filter { +func New(db ethdb.Database) *Filter { return &Filter{db: db} } @@ -79,7 +80,7 @@ func (self *Filter) SetSkip(skip int) { // Run filters logs with the current parameters set func (self *Filter) Find() vm.Logs { - earliestBlock := core.GetCurrentBlock(self.db) + earliestBlock := core.GetBlock(self.db, core.GetHeadBlockHash(self.db)) var earliestBlockNo uint64 = uint64(self.earliest) if self.earliest == -1 { earliestBlockNo = earliestBlock.NumberU64() @@ -91,8 +92,12 @@ func (self *Filter) Find() vm.Logs { var ( logs vm.Logs - block = core.GetBlockByNumber(self.db, latestBlockNo) + block *types.Block ) + hash := core.GetCanonicalHash(self.db, latestBlockNo) + if hash != (common.Hash{}) { + block = core.GetBlock(self.db, hash) + } done: for i := 0; block != nil; i++ { @@ -120,7 +125,7 @@ done: logs = append(logs, self.FilterLogs(unfiltered)...) } - block = core.GetBlockByHash(self.db, block.ParentHash()) + block = core.GetBlock(self.db, block.ParentHash()) } skip := int(math.Min(float64(len(logs)), float64(self.skip))) diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 1c27c7be4..4972dcd59 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -28,7 +28,7 @@ import ( // FilterSystem manages filters that filter specific events such as // block, transaction and log events. The Filtering system can be used to listen -// for specific LOG events fires by the EVM (Ethereum Virtual Machine). +// for specific LOG events fired by the EVM (Ethereum Virtual Machine). 
type FilterSystem struct { eventMux *event.TypeMux diff --git a/eth/gasprice.go b/eth/gasprice.go index 3caad73c6..c08b96129 100644 --- a/eth/gasprice.go +++ b/eth/gasprice.go @@ -36,7 +36,7 @@ type blockPriceInfo struct { type GasPriceOracle struct { eth *Ethereum - chain *core.ChainManager + chain *core.BlockChain events event.Subscription blocks map[uint64]*blockPriceInfo firstProcessed, lastProcessed uint64 @@ -48,7 +48,7 @@ func NewGasPriceOracle(eth *Ethereum) (self *GasPriceOracle) { self = &GasPriceOracle{} self.blocks = make(map[uint64]*blockPriceInfo) self.eth = eth - self.chain = eth.chainManager + self.chain = eth.blockchain self.events = eth.EventMux().Subscribe( core.ChainEvent{}, core.ChainSplitEvent{}, diff --git a/eth/handler.go b/eth/handler.go index 52c9c4151..fc92338b4 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -60,9 +60,9 @@ func (ep extProt) GetHashes(hash common.Hash) error { return ep.getHashes(has func (ep extProt) GetBlock(hashes []common.Hash) error { return ep.getBlocks(hashes) } type ProtocolManager struct { - txpool txPool - chainman *core.ChainManager - chaindb ethdb.Database + txpool txPool + blockchain *core.BlockChain + chaindb ethdb.Database downloader *downloader.Downloader fetcher *fetcher.Fetcher @@ -87,17 +87,17 @@ type ProtocolManager struct { // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // with the ethereum network. -func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, chainman *core.ChainManager, chaindb ethdb.Database) *ProtocolManager { +func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) *ProtocolManager { // Create the protocol manager with the base fields manager := &ProtocolManager{ - eventMux: mux, - txpool: txpool, - chainman: chainman, - chaindb: chaindb, - peers: newPeerSet(), - newPeerCh: make(chan *peer, 1), - txsyncCh: make(chan *txsync), - quitSync: make(chan struct{}), + eventMux: mux, + txpool: txpool, + blockchain: blockchain, + chaindb: chaindb, + peers: newPeerSet(), + newPeerCh: make(chan *peer, 1), + txsyncCh: make(chan *txsync), + quitSync: make(chan struct{}), } // Initiate a sub-protocol for every implemented version we can handle manager.SubProtocols = make([]p2p.Protocol, len(ProtocolVersions)) @@ -116,15 +116,15 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po } } // Construct the different synchronisation mechanisms - manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.GetTd, manager.chainman.InsertChain, manager.removePeer) + manager.downloader = downloader.New(manager.eventMux, manager.blockchain.HasBlock, manager.blockchain.GetBlock, manager.blockchain.CurrentBlock, manager.blockchain.GetTd, manager.blockchain.InsertChain, manager.removePeer) validator := func(block *types.Block, parent *types.Block) error { return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false) } heighter := func() uint64 { - return manager.chainman.CurrentBlock().NumberU64() + return manager.blockchain.CurrentBlock().NumberU64() } - manager.fetcher = fetcher.New(manager.chainman.GetBlock, validator, manager.BroadcastBlock, heighter, manager.chainman.InsertChain, manager.removePeer) + manager.fetcher = fetcher.New(manager.blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, 
manager.blockchain.InsertChain, manager.removePeer) return manager } @@ -187,7 +187,7 @@ func (pm *ProtocolManager) handle(p *peer) error { glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name()) // Execute the Ethereum handshake - td, head, genesis := pm.chainman.Status() + td, head, genesis := pm.blockchain.Status() if err := p.Handshake(td, head, genesis); err != nil { glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err) return err @@ -252,7 +252,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { request.Amount = uint64(downloader.MaxHashFetch) } // Retrieve the hashes from the block chain and return them - hashes := pm.chainman.GetBlockHashesFromHash(request.Hash, request.Amount) + hashes := pm.blockchain.GetBlockHashesFromHash(request.Hash, request.Amount) if len(hashes) == 0 { glog.V(logger.Debug).Infof("invalid block hash %x", request.Hash.Bytes()[:4]) } @@ -268,9 +268,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { request.Amount = uint64(downloader.MaxHashFetch) } // Calculate the last block that should be retrieved, and short circuit if unavailable - last := pm.chainman.GetBlockByNumber(request.Number + request.Amount - 1) + last := pm.blockchain.GetBlockByNumber(request.Number + request.Amount - 1) if last == nil { - last = pm.chainman.CurrentBlock() + last = pm.blockchain.CurrentBlock() request.Amount = last.NumberU64() - request.Number + 1 } if last.NumberU64() < request.Number { @@ -278,7 +278,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } // Retrieve the hashes from the last block backwards, reverse and return hashes := []common.Hash{last.Hash()} - hashes = append(hashes, pm.chainman.GetBlockHashesFromHash(last.Hash(), request.Amount-1)...) + hashes = append(hashes, pm.blockchain.GetBlockHashesFromHash(last.Hash(), request.Amount-1)...) 
for i := 0; i < len(hashes)/2; i++ { hashes[i], hashes[len(hashes)-1-i] = hashes[len(hashes)-1-i], hashes[i] @@ -318,7 +318,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { return errResp(ErrDecode, "msg %v: %v", msg, err) } // Retrieve the requested block, stopping if enough was found - if block := pm.chainman.GetBlock(hash); block != nil { + if block := pm.blockchain.GetBlock(hash); block != nil { blocks = append(blocks, block) bytes += block.Size() } @@ -358,9 +358,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Retrieve the next header satisfying the query var origin *types.Header if query.Origin.Hash != (common.Hash{}) { - origin = pm.chainman.GetHeader(query.Origin.Hash) + origin = pm.blockchain.GetHeader(query.Origin.Hash) } else { - origin = pm.chainman.GetHeaderByNumber(query.Origin.Number) + origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number) } if origin == nil { break @@ -373,7 +373,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { case query.Origin.Hash != (common.Hash{}) && query.Reverse: // Hash based traversal towards the genesis block for i := 0; i < int(query.Skip)+1; i++ { - if header := pm.chainman.GetHeader(query.Origin.Hash); header != nil { + if header := pm.blockchain.GetHeader(query.Origin.Hash); header != nil { query.Origin.Hash = header.ParentHash } else { unknown = true @@ -382,8 +382,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } case query.Origin.Hash != (common.Hash{}) && !query.Reverse: // Hash based traversal towards the leaf block - if header := pm.chainman.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil { - if pm.chainman.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { + if header := pm.blockchain.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil { + if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { query.Origin.Hash = header.Hash() } else { unknown = true @@ -466,7 +466,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { return errResp(ErrDecode, "msg %v: %v", msg, err) } // Retrieve the requested block body, stopping if enough was found - if data := pm.chainman.GetBodyRLP(hash); len(data) != 0 { + if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 { bodies = append(bodies, data) bytes += len(data) } @@ -562,7 +562,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Schedule all the unknown hashes for retrieval unknown := make([]announce, 0, len(announces)) for _, block := range announces { - if !pm.chainman.HasBlock(block.Hash) { + if !pm.blockchain.HasBlock(block.Hash) { unknown = append(unknown, block) } } @@ -586,7 +586,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { request.Block.ReceivedAt = msg.ReceivedAt // Mark the block's arrival for whatever reason - _, chainHead, _ := pm.chainman.Status() + _, chainHead, _ := pm.blockchain.Status() jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{ BlockHash: request.Block.Hash().Hex(), BlockNumber: request.Block.Number(), @@ -603,7 +603,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Update the peers total difficulty if needed, schedule a download if gapped if request.TD.Cmp(p.Td()) > 0 { p.SetTd(request.TD) - if request.TD.Cmp(new(big.Int).Add(pm.chainman.Td(), request.Block.Difficulty())) > 0 { + if request.TD.Cmp(new(big.Int).Add(pm.blockchain.Td(), request.Block.Difficulty())) > 0 { go pm.synchronise(p) } } @@ -645,8 +645,8 @@ func (pm *ProtocolManager) 
BroadcastBlock(block *types.Block, propagate bool) { if propagate { // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) var td *big.Int - if parent := pm.chainman.GetBlock(block.ParentHash()); parent != nil { - td = new(big.Int).Add(block.Difficulty(), pm.chainman.GetTd(block.ParentHash())) + if parent := pm.blockchain.GetBlock(block.ParentHash()); parent != nil { + td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash())) } else { glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4]) return @@ -659,7 +659,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt)) } // Otherwise if the block is indeed in out own chain, announce it - if pm.chainman.HasBlock(hash) { + if pm.blockchain.HasBlock(hash) { for _, peer := range peers { if peer.version < eth62 { peer.SendNewBlockHashes61([]common.Hash{hash}) diff --git a/eth/handler_test.go b/eth/handler_test.go index 6400d4e78..2b8c6168a 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -33,23 +33,23 @@ func testGetBlockHashes(t *testing.T, protocol int) { number int result int }{ - {common.Hash{}, 1, 0}, // Make sure non existent hashes don't return results - {pm.chainman.Genesis().Hash(), 1, 0}, // There are no hashes to retrieve up from the genesis - {pm.chainman.GetBlockByNumber(5).Hash(), 5, 5}, // All the hashes including the genesis requested - {pm.chainman.GetBlockByNumber(5).Hash(), 10, 5}, // More hashes than available till the genesis requested - {pm.chainman.GetBlockByNumber(100).Hash(), 10, 10}, // All hashes available from the middle of the chain - {pm.chainman.CurrentBlock().Hash(), 10, 10}, // All hashes available from the head of the chain - {pm.chainman.CurrentBlock().Hash(), limit, limit}, // Request the maximum allowed hash count - {pm.chainman.CurrentBlock().Hash(), limit + 1, limit}, // Request more than the maximum allowed hash count + {common.Hash{}, 1, 0}, // Make sure non existent hashes don't return results + {pm.blockchain.Genesis().Hash(), 1, 0}, // There are no hashes to retrieve up from the genesis + {pm.blockchain.GetBlockByNumber(5).Hash(), 5, 5}, // All the hashes including the genesis requested + {pm.blockchain.GetBlockByNumber(5).Hash(), 10, 5}, // More hashes than available till the genesis requested + {pm.blockchain.GetBlockByNumber(100).Hash(), 10, 10}, // All hashes available from the middle of the chain + {pm.blockchain.CurrentBlock().Hash(), 10, 10}, // All hashes available from the head of the chain + {pm.blockchain.CurrentBlock().Hash(), limit, limit}, // Request the maximum allowed hash count + {pm.blockchain.CurrentBlock().Hash(), limit + 1, limit}, // Request more than the maximum allowed hash count } // Run each of the tests and verify the results against the chain for i, tt := range tests { // Assemble the hash response we would like to receive resp := make([]common.Hash, tt.result) if len(resp) > 0 { - from := pm.chainman.GetBlock(tt.origin).NumberU64() - 1 + from := pm.blockchain.GetBlock(tt.origin).NumberU64() - 1 for j := 0; j < len(resp); j++ { - resp[j] = pm.chainman.GetBlockByNumber(uint64(int(from) - j)).Hash() + resp[j] = pm.blockchain.GetBlockByNumber(uint64(int(from) - j)).Hash() } } // Send the hash request and verify the response @@ -76,11 +76,11 @@ func testGetBlockHashesFromNumber(t *testing.T, protocol int) { number int result 
int }{ - {pm.chainman.CurrentBlock().NumberU64() + 1, 1, 0}, // Out of bounds requests should return empty - {pm.chainman.CurrentBlock().NumberU64(), 1, 1}, // Make sure the head hash can be retrieved - {pm.chainman.CurrentBlock().NumberU64() - 4, 5, 5}, // All hashes, including the head hash requested - {pm.chainman.CurrentBlock().NumberU64() - 4, 10, 5}, // More hashes requested than available till the head - {pm.chainman.CurrentBlock().NumberU64() - 100, 10, 10}, // All hashes available from the middle of the chain + {pm.blockchain.CurrentBlock().NumberU64() + 1, 1, 0}, // Out of bounds requests should return empty + {pm.blockchain.CurrentBlock().NumberU64(), 1, 1}, // Make sure the head hash can be retrieved + {pm.blockchain.CurrentBlock().NumberU64() - 4, 5, 5}, // All hashes, including the head hash requested + {pm.blockchain.CurrentBlock().NumberU64() - 4, 10, 5}, // More hashes requested than available till the head + {pm.blockchain.CurrentBlock().NumberU64() - 100, 10, 10}, // All hashes available from the middle of the chain {0, 10, 10}, // All hashes available from the root of the chain {0, limit, limit}, // Request the maximum allowed hash count {0, limit + 1, limit}, // Request more than the maximum allowed hash count @@ -91,7 +91,7 @@ func testGetBlockHashesFromNumber(t *testing.T, protocol int) { // Assemble the hash response we would like to receive resp := make([]common.Hash, tt.result) for j := 0; j < len(resp); j++ { - resp[j] = pm.chainman.GetBlockByNumber(tt.origin + uint64(j)).Hash() + resp[j] = pm.blockchain.GetBlockByNumber(tt.origin + uint64(j)).Hash() } // Send the hash request and verify the response p2p.Send(peer.app, 0x08, getBlockHashesFromNumberData{tt.origin, uint64(tt.number)}) @@ -117,22 +117,22 @@ func testGetBlocks(t *testing.T, protocol int) { available []bool // Availability of explicitly requested blocks expected int // Total number of existing blocks to expect }{ - {1, nil, nil, 1}, // A single random block should be retrievable - {10, nil, nil, 10}, // Multiple random blocks should be retrievable - {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable - {limit + 1, nil, nil, limit}, // No more that the possible block count should be returned - {0, []common.Hash{pm.chainman.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable - {0, []common.Hash{pm.chainman.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable - {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned + {1, nil, nil, 1}, // A single random block should be retrievable + {10, nil, nil, 10}, // Multiple random blocks should be retrievable + {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable + {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned + {0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable + {0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable + {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned // Existing and non-existing blocks interleaved should not cause problems {0, []common.Hash{ common.Hash{}, - pm.chainman.GetBlockByNumber(1).Hash(), + pm.blockchain.GetBlockByNumber(1).Hash(), common.Hash{}, - pm.chainman.GetBlockByNumber(10).Hash(), + pm.blockchain.GetBlockByNumber(10).Hash(), common.Hash{}, - 
pm.chainman.GetBlockByNumber(100).Hash(), + pm.blockchain.GetBlockByNumber(100).Hash(), common.Hash{}, }, []bool{false, true, false, true, false, true, false}, 3}, } @@ -144,11 +144,11 @@ func testGetBlocks(t *testing.T, protocol int) { for j := 0; j < tt.random; j++ { for { - num := rand.Int63n(int64(pm.chainman.CurrentBlock().NumberU64())) + num := rand.Int63n(int64(pm.blockchain.CurrentBlock().NumberU64())) if !seen[num] { seen[num] = true - block := pm.chainman.GetBlockByNumber(uint64(num)) + block := pm.blockchain.GetBlockByNumber(uint64(num)) hashes = append(hashes, block.Hash()) if len(blocks) < tt.expected { blocks = append(blocks, block) @@ -160,7 +160,7 @@ func testGetBlocks(t *testing.T, protocol int) { for j, hash := range tt.explicit { hashes = append(hashes, hash) if tt.available[j] && len(blocks) < tt.expected { - blocks = append(blocks, pm.chainman.GetBlock(hash)) + blocks = append(blocks, pm.blockchain.GetBlock(hash)) } } // Send the hash request and verify the response @@ -194,83 +194,83 @@ func testGetBlockHeaders(t *testing.T, protocol int) { }{ // A single random block should be retrievable by hash and number too { - &getBlockHeadersData{Origin: hashOrNumber{Hash: pm.chainman.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, - []common.Hash{pm.chainman.GetBlockByNumber(limit / 2).Hash()}, + &getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, + []common.Hash{pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, }, { &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1}, - []common.Hash{pm.chainman.GetBlockByNumber(limit / 2).Hash()}, + []common.Hash{pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, }, // Multiple headers should be retrievable in both directions { &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3}, []common.Hash{ - pm.chainman.GetBlockByNumber(limit / 2).Hash(), - pm.chainman.GetBlockByNumber(limit/2 + 1).Hash(), - pm.chainman.GetBlockByNumber(limit/2 + 2).Hash(), + pm.blockchain.GetBlockByNumber(limit / 2).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 + 1).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 + 2).Hash(), }, }, { &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, []common.Hash{ - pm.chainman.GetBlockByNumber(limit / 2).Hash(), - pm.chainman.GetBlockByNumber(limit/2 - 1).Hash(), - pm.chainman.GetBlockByNumber(limit/2 - 2).Hash(), + pm.blockchain.GetBlockByNumber(limit / 2).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 - 1).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 - 2).Hash(), }, }, // Multiple headers with skip lists should be retrievable { &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, []common.Hash{ - pm.chainman.GetBlockByNumber(limit / 2).Hash(), - pm.chainman.GetBlockByNumber(limit/2 + 4).Hash(), - pm.chainman.GetBlockByNumber(limit/2 + 8).Hash(), + pm.blockchain.GetBlockByNumber(limit / 2).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 + 4).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 + 8).Hash(), }, }, { &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ - pm.chainman.GetBlockByNumber(limit / 2).Hash(), - pm.chainman.GetBlockByNumber(limit/2 - 4).Hash(), - pm.chainman.GetBlockByNumber(limit/2 - 8).Hash(), + pm.blockchain.GetBlockByNumber(limit / 2).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 - 4).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 - 8).Hash(), }, }, // The chain 
endpoints should be retrievable { &getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1}, - []common.Hash{pm.chainman.GetBlockByNumber(0).Hash()}, + []common.Hash{pm.blockchain.GetBlockByNumber(0).Hash()}, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64()}, Amount: 1}, - []common.Hash{pm.chainman.CurrentBlock().Hash()}, + &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64()}, Amount: 1}, + []common.Hash{pm.blockchain.CurrentBlock().Hash()}, }, // Ensure protocol limits are honored { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, - pm.chainman.GetBlockHashesFromHash(pm.chainman.CurrentBlock().Hash(), limit), + &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, + pm.blockchain.GetBlockHashesFromHash(pm.blockchain.CurrentBlock().Hash(), limit), }, // Check that requesting more than available is handled gracefully { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, + &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, []common.Hash{ - pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 4).Hash(), - pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64()).Hash(), + pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 4).Hash(), + pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64()).Hash(), }, }, { &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ - pm.chainman.GetBlockByNumber(4).Hash(), - pm.chainman.GetBlockByNumber(0).Hash(), + pm.blockchain.GetBlockByNumber(4).Hash(), + pm.blockchain.GetBlockByNumber(0).Hash(), }, }, // Check that requesting more than available is handled gracefully, even if mid skip { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, + &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, []common.Hash{ - pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 4).Hash(), - pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 1).Hash(), + pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 4).Hash(), + pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 1).Hash(), }, }, { &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, []common.Hash{ - pm.chainman.GetBlockByNumber(4).Hash(), - pm.chainman.GetBlockByNumber(1).Hash(), + pm.blockchain.GetBlockByNumber(4).Hash(), + pm.blockchain.GetBlockByNumber(1).Hash(), }, }, // Check that non existing headers aren't returned @@ -278,7 +278,7 @@ func testGetBlockHeaders(t *testing.T, protocol int) { &getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1}, []common.Hash{}, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() + 1}, Amount: 1}, + &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() + 1}, Amount: 1}, []common.Hash{}, }, } @@ -287,7 +287,7 @@ func testGetBlockHeaders(t *testing.T, protocol int) { // Collect the headers to expect in the response headers := []*types.Header{} for _, 
hash := range tt.expect { - headers = append(headers, pm.chainman.GetBlock(hash).Header()) + headers = append(headers, pm.blockchain.GetBlock(hash).Header()) } // Send the hash request and verify the response p2p.Send(peer.app, 0x03, tt.query) @@ -315,22 +315,22 @@ func testGetBlockBodies(t *testing.T, protocol int) { available []bool // Availability of explicitly requested blocks expected int // Total number of existing blocks to expect }{ - {1, nil, nil, 1}, // A single random block should be retrievable - {10, nil, nil, 10}, // Multiple random blocks should be retrievable - {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable - {limit + 1, nil, nil, limit}, // No more that the possible block count should be returned - {0, []common.Hash{pm.chainman.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable - {0, []common.Hash{pm.chainman.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable - {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned + {1, nil, nil, 1}, // A single random block should be retrievable + {10, nil, nil, 10}, // Multiple random blocks should be retrievable + {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable + {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned + {0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable + {0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable + {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned // Existing and non-existing blocks interleaved should not cause problems {0, []common.Hash{ common.Hash{}, - pm.chainman.GetBlockByNumber(1).Hash(), + pm.blockchain.GetBlockByNumber(1).Hash(), common.Hash{}, - pm.chainman.GetBlockByNumber(10).Hash(), + pm.blockchain.GetBlockByNumber(10).Hash(), common.Hash{}, - pm.chainman.GetBlockByNumber(100).Hash(), + pm.blockchain.GetBlockByNumber(100).Hash(), common.Hash{}, }, []bool{false, true, false, true, false, true, false}, 3}, } @@ -342,11 +342,11 @@ func testGetBlockBodies(t *testing.T, protocol int) { for j := 0; j < tt.random; j++ { for { - num := rand.Int63n(int64(pm.chainman.CurrentBlock().NumberU64())) + num := rand.Int63n(int64(pm.blockchain.CurrentBlock().NumberU64())) if !seen[num] { seen[num] = true - block := pm.chainman.GetBlockByNumber(uint64(num)) + block := pm.blockchain.GetBlockByNumber(uint64(num)) hashes = append(hashes, block.Hash()) if len(bodies) < tt.expected { bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) @@ -358,7 +358,7 @@ func testGetBlockBodies(t *testing.T, protocol int) { for j, hash := range tt.explicit { hashes = append(hashes, hash) if tt.available[j] && len(bodies) < tt.expected { - block := pm.chainman.GetBlock(hash) + block := pm.blockchain.GetBlock(hash) bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) } } @@ -442,11 +442,11 @@ func testGetNodeData(t *testing.T, protocol int) { statedb.Put(hashes[i].Bytes(), data[i]) } accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr} - for i := uint64(0); i <= pm.chainman.CurrentBlock().NumberU64(); i++ { - trie := state.New(pm.chainman.GetBlockByNumber(i).Root(), statedb) + for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { + trie := 
state.New(pm.blockchain.GetBlockByNumber(i).Root(), statedb) for j, acc := range accounts { - bw := pm.chainman.State().GetBalance(acc) + bw := pm.blockchain.State().GetBalance(acc) bh := trie.GetBalance(acc) if (bw != nil && bh == nil) || (bw == nil && bh != nil) { @@ -505,8 +505,8 @@ func testGetReceipt(t *testing.T, protocol int) { // Collect the hashes to request, and the response to expect hashes := []common.Hash{} - for i := uint64(0); i <= pm.chainman.CurrentBlock().NumberU64(); i++ { - for _, tx := range pm.chainman.GetBlockByNumber(i).Transactions() { + for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { + for _, tx := range pm.blockchain.GetBlockByNumber(i).Transactions() { hashes = append(hashes, tx.Hash()) } } diff --git a/eth/helper_test.go b/eth/helper_test.go index 034751f7f..e42fa1f82 100644 --- a/eth/helper_test.go +++ b/eth/helper_test.go @@ -30,18 +30,18 @@ var ( // channels for different events. func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager { var ( - evmux = new(event.TypeMux) - pow = new(core.FakePow) - db, _ = ethdb.NewMemDatabase() - genesis = core.WriteGenesisBlockForTesting(db, core.GenesisAccount{testBankAddress, testBankFunds}) - chainman, _ = core.NewChainManager(db, pow, evmux) - blockproc = core.NewBlockProcessor(db, pow, chainman, evmux) + evmux = new(event.TypeMux) + pow = new(core.FakePow) + db, _ = ethdb.NewMemDatabase() + genesis = core.WriteGenesisBlockForTesting(db, core.GenesisAccount{testBankAddress, testBankFunds}) + blockchain, _ = core.NewBlockChain(db, pow, evmux) + blockproc = core.NewBlockProcessor(db, pow, blockchain, evmux) ) - chainman.SetProcessor(blockproc) - if _, err := chainman.InsertChain(core.GenerateChain(genesis, db, blocks, generator)); err != nil { + blockchain.SetProcessor(blockproc) + if _, err := blockchain.InsertChain(core.GenerateChain(genesis, db, blocks, generator)); err != nil { panic(err) } - pm := NewProtocolManager(NetworkId, evmux, &testTxPool{added: newtx}, pow, chainman, db) + pm := NewProtocolManager(NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db) pm.Start() return pm } @@ -116,7 +116,7 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te } // Execute any implicitly requested handshakes and return if shake { - td, head, genesis := pm.chainman.Status() + td, head, genesis := pm.blockchain.Status() tp.handshake(nil, td, head, genesis) } return tp, errc diff --git a/eth/protocol_test.go b/eth/protocol_test.go index bc3b5acfc..523e6c1eb 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -45,7 +45,7 @@ func TestStatusMsgErrors64(t *testing.T) { testStatusMsgErrors(t, 64) } func testStatusMsgErrors(t *testing.T, protocol int) { pm := newTestProtocolManager(0, nil, nil) - td, currentBlock, genesis := pm.chainman.Status() + td, currentBlock, genesis := pm.blockchain.Status() defer pm.Stop() tests := []struct { diff --git a/eth/sync.go b/eth/sync.go index b4dea4b0f..5a2031c68 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -160,7 +160,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) { return } // Make sure the peer's TD is higher than our own. If not drop. 
- if peer.Td().Cmp(pm.chainman.Td()) <= 0 { + if peer.Td().Cmp(pm.blockchain.Td()) <= 0 { return } // Otherwise try to sync with the downloader -- cgit v1.2.3 From ec6a548ee3555813d83f86f82bd25694bfd9c303 Mon Sep 17 00:00:00 2001 From: Gustav Simonsson Date: Fri, 12 Jun 2015 07:45:23 +0200 Subject: all: Add GPU mining, disabled by default --- eth/backend.go | 12 ------- eth/cpu_mining.go | 54 ++++++++++++++++++++++++++++ eth/gpu_mining.go | 103 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 157 insertions(+), 12 deletions(-) create mode 100644 eth/cpu_mining.go create mode 100644 eth/gpu_mining.go (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index 349dfa613..8862e1670 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -481,18 +481,6 @@ func (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) { s.chainManager.ResetWithGenesisBlock(gb) } -func (s *Ethereum) StartMining(threads int) error { - eb, err := s.Etherbase() - if err != nil { - err = fmt.Errorf("Cannot start mining without etherbase address: %v", err) - glog.V(logger.Error).Infoln(err) - return err - } - - go s.miner.Start(eb, threads) - return nil -} - func (s *Ethereum) Etherbase() (eb common.Address, err error) { eb = s.etherbase if (eb == common.Address{}) { diff --git a/eth/cpu_mining.go b/eth/cpu_mining.go new file mode 100644 index 000000000..f8795fd0c --- /dev/null +++ b/eth/cpu_mining.go @@ -0,0 +1,54 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build !opencl + +package eth + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" +) + +const disabledInfo = "Set GO_OPENCL and re-build to enable." + +func (s *Ethereum) StartMining(threads int, gpus string) error { + eb, err := s.Etherbase() + if err != nil { + err = fmt.Errorf("Cannot start mining without etherbase address: %v", err) + glog.V(logger.Error).Infoln(err) + return err + } + + if gpus != "" { + return errors.New("GPU mining disabled. " + disabledInfo) + } + + // CPU mining + go s.miner.Start(eb, threads) + return nil +} + +func GPUBench(gpuid uint64) { + fmt.Println("GPU mining disabled. " + disabledInfo) +} + +func PrintOpenCLDevices() { + fmt.Println("OpenCL disabled. " + disabledInfo) +} diff --git a/eth/gpu_mining.go b/eth/gpu_mining.go new file mode 100644 index 000000000..c351c2bdd --- /dev/null +++ b/eth/gpu_mining.go @@ -0,0 +1,103 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build opencl + +package eth + +import ( + "fmt" + "math/big" + "strconv" + "strings" + "time" + + "github.com/ethereum/ethash" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/miner" +) + +func (s *Ethereum) StartMining(threads int, gpus string) error { + eb, err := s.Etherbase() + if err != nil { + err = fmt.Errorf("Cannot start mining without etherbase address: %v", err) + glog.V(logger.Error).Infoln(err) + return err + } + + // GPU mining + if gpus != "" { + var ids []int + for _, s := range strings.Split(gpus, ",") { + i, err := strconv.Atoi(s) + if err != nil { + return fmt.Errorf("Invalid GPU id(s): %v", err) + } + if i < 0 { + return fmt.Errorf("Invalid GPU id: %v", i) + } + ids = append(ids, i) + } + + // TODO: re-creating miner is a bit ugly + cl := ethash.NewCL(ids) + s.miner = miner.New(s, s.EventMux(), cl) + go s.miner.Start(eb, len(ids)) + return nil + } + + // CPU mining + go s.miner.Start(eb, threads) + return nil +} + +func GPUBench(gpuid uint64) { + e := ethash.NewCL([]int{int(gpuid)}) + + var h common.Hash + bogoHeader := &types.Header{ + ParentHash: h, + Number: big.NewInt(int64(42)), + Difficulty: big.NewInt(int64(999999999999999)), + } + bogoBlock := types.NewBlock(bogoHeader, nil, nil, nil) + + err := ethash.InitCL(bogoBlock.NumberU64(), e) + if err != nil { + fmt.Println("OpenCL init error: ", err) + return + } + + stopChan := make(chan struct{}) + reportHashRate := func() { + for { + time.Sleep(3 * time.Second) + fmt.Printf("hashes/s : %v\n", e.GetHashrate()) + } + } + fmt.Printf("Starting benchmark (%v seconds)\n", 60) + go reportHashRate() + go e.Search(bogoBlock, stopChan, 0) + time.Sleep(60 * time.Second) + fmt.Println("OK.") +} + +func PrintOpenCLDevices() { + ethash.PrintDevices() +} -- cgit v1.2.3 From 1de796f10134bb4aa245591e0d8802e320892efb Mon Sep 17 00:00:00 2001 From: Jeffrey Wilcke Date: Mon, 5 Oct 2015 13:01:34 +0200 Subject: cmd, core, eth: added official testnet --- eth/backend.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index a480b4931..83eefca5b 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/compiler" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -69,12 +70,17 @@ var ( discover.MustParseNode("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303"), } + defaultTestNetBootNodes = []*discover.Node{ + discover.MustParseNode("enode://5374c1bff8df923d3706357eeb4983cd29a63be40a269aaa2296ee5f3b2119a8978c0ed68b8f6fc84aad0df18790417daadf91a4bfbb786a16c9b0a199fa254a@92.51.165.126:30303"), + } + staticNodes = 
"static-nodes.json" // Path within to search for the static node list trustedNodes = "trusted-nodes.json" // Path within to search for the trusted node list ) type Config struct { DevMode bool + TestNet bool Name string NetworkId int @@ -133,6 +139,10 @@ type Config struct { func (cfg *Config) parseBootNodes() []*discover.Node { if cfg.BootNodes == "" { + if cfg.TestNet { + return defaultTestNetBootNodes + } + return defaultBootNodes } var ns []*discover.Node @@ -309,7 +319,13 @@ func New(config *Config) (*Ethereum, error) { glog.V(logger.Error).Infoln("Starting Olympic network") fallthrough case config.DevMode: - _, err := core.WriteTestNetGenesisBlock(chainDb, 42) + _, err := core.WriteOlympicGenesisBlock(chainDb, 42) + if err != nil { + return nil, err + } + case config.TestNet: + state.StartingNonce = 1048576 // (2**20) + _, err := core.WriteTestNetGenesisBlock(chainDb, 0x6d6f7264656e) if err != nil { return nil, err } -- cgit v1.2.3 From 402fd6e8c6a2e379351e0aae10a833fae6bcae6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 12 Oct 2015 15:04:38 +0300 Subject: core, eth, event, miner, xeth: fix event post / subscription race --- eth/filters/filter_system.go | 44 ++++++++++++++++++++++++++++---------------- eth/gasprice.go | 15 ++++++--------- eth/handler.go | 4 ++-- 3 files changed, 36 insertions(+), 27 deletions(-) (limited to 'eth') diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 4972dcd59..ae6093525 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -20,6 +20,7 @@ package filters import ( "sync" + "time" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/vm" @@ -35,6 +36,7 @@ type FilterSystem struct { filterMu sync.RWMutex filterId int filters map[int]*Filter + created map[int]time.Time quit chan struct{} } @@ -44,6 +46,7 @@ func NewFilterSystem(mux *event.TypeMux) *FilterSystem { fs := &FilterSystem{ eventMux: mux, filters: make(map[int]*Filter), + created: make(map[int]time.Time), } go fs.filterLoop() return fs @@ -60,6 +63,7 @@ func (fs *FilterSystem) Add(filter *Filter) (id int) { defer fs.filterMu.Unlock() id = fs.filterId fs.filters[id] = filter + fs.created[id] = time.Now() fs.filterId++ return id @@ -69,15 +73,16 @@ func (fs *FilterSystem) Add(filter *Filter) (id int) { func (fs *FilterSystem) Remove(id int) { fs.filterMu.Lock() defer fs.filterMu.Unlock() - if _, ok := fs.filters[id]; ok { - delete(fs.filters, id) - } + + delete(fs.filters, id) + delete(fs.created, id) } // Get retrieves a filter installed using Add The filter may not be modified. func (fs *FilterSystem) Get(id int) *Filter { fs.filterMu.RLock() defer fs.filterMu.RUnlock() + return fs.filters[id] } @@ -85,42 +90,49 @@ func (fs *FilterSystem) Get(id int) *Filter { // when the filter matches the requirements. 
func (fs *FilterSystem) filterLoop() { // Subscribe to events - events := fs.eventMux.Subscribe( + eventCh := fs.eventMux.Subscribe( //core.PendingBlockEvent{}, core.ChainEvent{}, core.TxPreEvent{}, - vm.Logs(nil)) + vm.Logs(nil), + ).Chan() out: for { select { case <-fs.quit: break out - case event := <-events.Chan(): - switch event := event.(type) { + case event, ok := <-eventCh: + if !ok { + // Event subscription closed, set the channel to nil to stop spinning + eventCh = nil + continue + } + // A real event arrived, notify the registered filters + switch ev := event.Data.(type) { case core.ChainEvent: fs.filterMu.RLock() - for _, filter := range fs.filters { - if filter.BlockCallback != nil { - filter.BlockCallback(event.Block, event.Logs) + for id, filter := range fs.filters { + if filter.BlockCallback != nil && fs.created[id].Before(event.Time) { + filter.BlockCallback(ev.Block, ev.Logs) } } fs.filterMu.RUnlock() case core.TxPreEvent: fs.filterMu.RLock() - for _, filter := range fs.filters { - if filter.TransactionCallback != nil { - filter.TransactionCallback(event.Tx) + for id, filter := range fs.filters { + if filter.TransactionCallback != nil && fs.created[id].Before(event.Time) { + filter.TransactionCallback(ev.Tx) } } fs.filterMu.RUnlock() case vm.Logs: fs.filterMu.RLock() - for _, filter := range fs.filters { - if filter.LogsCallback != nil { - msgs := filter.FilterLogs(event) + for id, filter := range fs.filters { + if filter.LogsCallback != nil && fs.created[id].Before(event.Time) { + msgs := filter.FilterLogs(ev) if len(msgs) > 0 { filter.LogsCallback(msgs) } diff --git a/eth/gasprice.go b/eth/gasprice.go index c08b96129..b4409f346 100644 --- a/eth/gasprice.go +++ b/eth/gasprice.go @@ -84,19 +84,16 @@ func (self *GasPriceOracle) processPastBlocks() { } func (self *GasPriceOracle) listenLoop() { - for { - ev, isopen := <-self.events.Chan() - if !isopen { - break - } - switch ev := ev.(type) { + defer self.events.Unsubscribe() + + for event := range self.events.Chan() { + switch event := event.Data.(type) { case core.ChainEvent: - self.processBlock(ev.Block) + self.processBlock(event.Block) case core.ChainSplitEvent: - self.processBlock(ev.Block) + self.processBlock(event.Block) } } - self.events.Unsubscribe() } func (self *GasPriceOracle) processBlock(block *types.Block) { diff --git a/eth/handler.go b/eth/handler.go index fc92338b4..3fc909672 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -687,7 +687,7 @@ func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) func (self *ProtocolManager) minedBroadcastLoop() { // automatically stops if unsubscribe for obj := range self.minedBlockSub.Chan() { - switch ev := obj.(type) { + switch ev := obj.Data.(type) { case core.NewMinedBlockEvent: self.BroadcastBlock(ev.Block, true) // First propagate block to peers self.BroadcastBlock(ev.Block, false) // Only then announce to the rest @@ -698,7 +698,7 @@ func (self *ProtocolManager) minedBroadcastLoop() { func (self *ProtocolManager) txBroadcastLoop() { // automatically stops if unsubscribe for obj := range self.txSub.Chan() { - event := obj.(core.TxPreEvent) + event := obj.Data.(core.TxPreEvent) self.BroadcastTx(event.Tx.Hash(), event.Tx) } } -- cgit v1.2.3 From 30f057aaf9891fb37f82d94c24b8aa35d388e07b Mon Sep 17 00:00:00 2001 From: Jeffrey Wilcke Date: Mon, 12 Oct 2015 17:54:59 +0200 Subject: eth/filters: added benchmark --- eth/filters/filter.go | 8 +--- eth/filters/filter_test.go | 95 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 96 
insertions(+), 7 deletions(-) create mode 100644 eth/filters/filter_test.go (limited to 'eth') diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 2bcf20d0c..d3d430775 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -17,8 +17,6 @@ package filters import ( - "math" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" @@ -107,8 +105,6 @@ done: break done case block.NumberU64() < earliestBlockNo: break done - case self.max <= len(logs): - break done } // Use bloom filtering to see if this block is interesting given the @@ -128,9 +124,7 @@ done: block = core.GetBlock(self.db, block.ParentHash()) } - skip := int(math.Min(float64(len(logs)), float64(self.skip))) - - return logs[skip:] + return logs } func includes(addresses []common.Address, a common.Address) bool { diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go new file mode 100644 index 000000000..950a84579 --- /dev/null +++ b/eth/filters/filter_test.go @@ -0,0 +1,95 @@ +package filters + +import ( + "math/big" + "os" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" +) + +func makeReceipt(addr common.Address) *types.Receipt { + receipt := types.NewReceipt(nil, new(big.Int)) + receipt.SetLogs(vm.Logs{ + &vm.Log{Address: addr}, + }) + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + return receipt +} + +func BenchmarkMipmaps(b *testing.B) { + const dbname = "/tmp/mipmap" + var ( + db, _ = ethdb.NewLDBDatabase(dbname, 16) + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = common.BytesToAddress([]byte("jeff")) + addr3 = common.BytesToAddress([]byte("ethereum")) + addr4 = common.BytesToAddress([]byte("random addresses please")) + ) + defer func() { + db.Close() + os.Remove(dbname) + }() + + genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr1, big.NewInt(1000000)}) + chain := core.GenerateChain(genesis, db, 100000, func(i int, gen *core.BlockGen) { + var receipts types.Receipts + switch i { + case 2403: + receipt := makeReceipt(addr1) + receipts = types.Receipts{receipt} + gen.AddReceipt(receipt) + case 10340: + receipt := makeReceipt(addr2) + receipts = types.Receipts{receipt} + gen.AddReceipt(receipt) + case 34: + receipt := makeReceipt(addr3) + receipts = types.Receipts{receipt} + gen.AddReceipt(receipt) + case 99999: + receipt := makeReceipt(addr4) + receipts = types.Receipts{receipt} + gen.AddReceipt(receipt) + + } + + // store the receipts + err := core.PutReceipts(db, receipts) + if err != nil { + b.Fatal(err) + } + }) + for _, block := range chain { + core.WriteBlock(db, block) + if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { + b.Fatalf("failed to insert block number: %v", err) + } + if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { + b.Fatalf("failed to insert block number: %v", err) + } + if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil { + b.Fatal("error writing block receipts:", err) + } + } + + b.ResetTimer() + + filter := New(db) + filter.SetAddress([]common.Address{addr1, addr2, addr3, addr4}) + filter.SetEarliestBlock(0) + filter.SetLatestBlock(-1) + + for i := 0; i < b.N; 
i++ { + logs := filter.Find() + if len(logs) != 4 { + b.Fatal("expected 4 log, got", len(logs)) + } + } +} -- cgit v1.2.3 From 1b1f293082044c43d8d1c5df9ac40aab8fdb2ae8 Mon Sep 17 00:00:00 2001 From: Gustav Simonsson Date: Tue, 6 Oct 2015 16:35:55 +0200 Subject: core/state, core, miner: handle missing root error from state.New --- eth/backend.go | 3 ++- eth/handler_test.go | 5 +++-- eth/helper_test.go | 3 ++- 3 files changed, 7 insertions(+), 4 deletions(-) (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index 83eefca5b..18900c91e 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -389,7 +389,8 @@ func New(config *Config) (*Ethereum, error) { return nil, err } - eth.txPool = core.NewTxPool(eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit) + newPool := core.NewTxPool(eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit) + eth.txPool = newPool eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.blockchain, eth.EventMux()) eth.blockchain.SetProcessor(eth.blockProcessor) diff --git a/eth/handler_test.go b/eth/handler_test.go index 2b8c6168a..dde2ecbd5 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -443,10 +443,11 @@ func testGetNodeData(t *testing.T, protocol int) { } accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr} for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { - trie := state.New(pm.blockchain.GetBlockByNumber(i).Root(), statedb) + trie, _ := state.New(pm.blockchain.GetBlockByNumber(i).Root(), statedb) for j, acc := range accounts { - bw := pm.blockchain.State().GetBalance(acc) + state, _ := pm.blockchain.State() + bw := state.GetBalance(acc) bh := trie.GetBalance(acc) if (bw != nil && bh == nil) || (bw == nil && bh != nil) { diff --git a/eth/helper_test.go b/eth/helper_test.go index e42fa1f82..9314884ef 100644 --- a/eth/helper_test.go +++ b/eth/helper_test.go @@ -38,7 +38,8 @@ func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), new blockproc = core.NewBlockProcessor(db, pow, blockchain, evmux) ) blockchain.SetProcessor(blockproc) - if _, err := blockchain.InsertChain(core.GenerateChain(genesis, db, blocks, generator)); err != nil { + chain := core.GenerateChain(genesis, db, blocks, generator) + if _, err := blockchain.InsertChain(chain); err != nil { panic(err) } pm := NewProtocolManager(NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db) -- cgit v1.2.3 From 6dc14788a238f3e0ec786c6c04d476a3b957e645 Mon Sep 17 00:00:00 2001 From: Jeffrey Wilcke Date: Mon, 12 Oct 2015 17:58:51 +0200 Subject: core, eth/filters, miner, xeth: Optimised log filtering Log filtering now uses a MIPmap-like approach where the addresses of logs are added to a mapped bloom bin. The current MIP levels cover ranges of 1,000,000, 500,000, 100,000, 50,000 and 1,000 blocks. Logs are therefore filtered in batches of 1,000.
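To make the bin arithmetic concrete, here is a minimal, self-contained Go sketch of how a block number is normalised into a bloom bin at each level. The level values mirror the ranges described above, but the program and its helper names are illustrative only and are not part of this patch:

package main

import "fmt"

// Illustrative MIP levels, from the coarsest range to the finest.
var mipLevels = []uint64{1000000, 500000, 100000, 50000, 1000}

// binStart normalises a block number to the first block of its bin at the
// given level; every block in the same bin contributes to one shared bloom.
func binStart(blockNum, level uint64) uint64 {
	return blockNum / level * level
}

func main() {
	// Example: block 1,034 falls into the bin starting at 0 for the four
	// coarser levels and into the bin [1000, 1999] at the finest level.
	for _, level := range mipLevels {
		start := binStart(1034, level)
		fmt.Printf("level %7d: block 1034 -> bin [%d, %d]\n", level, start, start+level-1)
	}
}

A filter only descends from a coarse bin into its finer sub-bins (and ultimately into individual blocks) when that bin's bloom filter matches one of the queried addresses, which is what keeps scans over large block ranges cheap.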
--- eth/backend.go | 46 +++++++++++ eth/backend_test.go | 67 +++++++++++++++ eth/filters/filter.go | 116 +++++++++++++++----------- eth/filters/filter_test.go | 202 +++++++++++++++++++++++++++++++++++++++++---- 4 files changed, 368 insertions(+), 63 deletions(-) create mode 100644 eth/backend_test.go (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index 83eefca5b..f703b4ac0 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -47,6 +47,7 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/nat" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/whisper" ) @@ -288,6 +289,9 @@ func New(config *Config) (*Ethereum, error) { if err := upgradeChainDatabase(chainDb); err != nil { return nil, err } + if err := addMipmapBloomBins(chainDb); err != nil { + return nil, err + } dappDb, err := newdb(filepath.Join(config.DataDir, "dapp")) if err != nil { @@ -769,3 +773,45 @@ func upgradeChainDatabase(db ethdb.Database) error { } return nil } + +func addMipmapBloomBins(db ethdb.Database) (err error) { + const mipmapVersion uint = 2 + + // check if the version is set. We ignore data for now since there's + // only one version so we can easily ignore it for now + var data []byte + data, _ = db.Get([]byte("setting-mipmap-version")) + if len(data) > 0 { + var version uint + if err := rlp.DecodeBytes(data, &version); err == nil && version == mipmapVersion { + return nil + } + } + + defer func() { + if err == nil { + var val []byte + val, err = rlp.EncodeToBytes(mipmapVersion) + if err == nil { + err = db.Put([]byte("setting-mipmap-version"), val) + } + return + } + }() + latestBlock := core.GetBlock(db, core.GetHeadBlockHash(db)) + if latestBlock == nil { // clean database + return + } + + tstart := time.Now() + glog.V(logger.Info).Infoln("upgrading db log bloom bins") + for i := uint64(0); i <= latestBlock.NumberU64(); i++ { + hash := core.GetCanonicalHash(db, i) + if (hash == common.Hash{}) { + return fmt.Errorf("chain db corrupted. 
Could not find block %d.", i) + } + core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash)) + } + glog.V(logger.Info).Infoln("upgrade completed in", time.Since(tstart)) + return nil +} diff --git a/eth/backend_test.go b/eth/backend_test.go new file mode 100644 index 000000000..220426c17 --- /dev/null +++ b/eth/backend_test.go @@ -0,0 +1,67 @@ +package eth + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/ethdb" +) + +func TestMipmapUpgrade(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + addr := common.BytesToAddress([]byte("jeff")) + genesis := core.WriteGenesisBlockForTesting(db) + + chain := core.GenerateChain(genesis, db, 10, func(i int, gen *core.BlockGen) { + var receipts types.Receipts + switch i { + case 1: + receipt := types.NewReceipt(nil, new(big.Int)) + receipt.SetLogs(vm.Logs{&vm.Log{Address: addr}}) + gen.AddUncheckedReceipt(receipt) + receipts = types.Receipts{receipt} + case 2: + receipt := types.NewReceipt(nil, new(big.Int)) + receipt.SetLogs(vm.Logs{&vm.Log{Address: addr}}) + gen.AddUncheckedReceipt(receipt) + receipts = types.Receipts{receipt} + } + + // store the receipts + err := core.PutReceipts(db, receipts) + if err != nil { + t.Fatal(err) + } + }) + for _, block := range chain { + core.WriteBlock(db, block) + if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { + t.Fatalf("failed to insert block number: %v", err) + } + if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { + t.Fatalf("failed to insert block number: %v", err) + } + if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil { + t.Fatal("error writing block receipts:", err) + } + } + + err := addMipmapBloomBins(db) + if err != nil { + t.Fatal(err) + } + + bloom := core.GetMipmapBloom(db, 1, core.MIPMapLevels[0]) + if (bloom == types.Bloom{}) { + t.Error("got empty bloom filter") + } + + data, _ := db.Get([]byte("setting-mipmap-version")) + if len(data) == 0 { + t.Error("setting-mipmap-version not written to database") + } +} diff --git a/eth/filters/filter.go b/eth/filters/filter.go index d3d430775..2e81ea177 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -17,6 +17,8 @@ package filters import ( + "math" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" @@ -30,13 +32,10 @@ type AccountChange struct { // Filtering interface type Filter struct { - db ethdb.Database - earliest int64 - latest int64 - skip int - address []common.Address - max int - topics [][]common.Hash + db ethdb.Database + begin, end int64 + addresses []common.Address + topics [][]common.Hash BlockCallback func(*types.Block, vm.Logs) TransactionCallback func(*types.Transaction) @@ -52,59 +51,82 @@ func New(db ethdb.Database) *Filter { // Set the earliest and latest block for filtering. 
// -1 = latest block (i.e., the current block) // hash = particular hash from-to -func (self *Filter) SetEarliestBlock(earliest int64) { - self.earliest = earliest +func (self *Filter) SetBeginBlock(begin int64) { + self.begin = begin } -func (self *Filter) SetLatestBlock(latest int64) { - self.latest = latest +func (self *Filter) SetEndBlock(end int64) { + self.end = end } -func (self *Filter) SetAddress(addr []common.Address) { - self.address = addr +func (self *Filter) SetAddresses(addr []common.Address) { + self.addresses = addr } func (self *Filter) SetTopics(topics [][]common.Hash) { self.topics = topics } -func (self *Filter) SetMax(max int) { - self.max = max -} - -func (self *Filter) SetSkip(skip int) { - self.skip = skip -} - // Run filters logs with the current parameters set func (self *Filter) Find() vm.Logs { - earliestBlock := core.GetBlock(self.db, core.GetHeadBlockHash(self.db)) - var earliestBlockNo uint64 = uint64(self.earliest) - if self.earliest == -1 { - earliestBlockNo = earliestBlock.NumberU64() + latestBlock := core.GetBlock(self.db, core.GetHeadBlockHash(self.db)) + var beginBlockNo uint64 = uint64(self.begin) + if self.begin == -1 { + beginBlockNo = latestBlock.NumberU64() } - var latestBlockNo uint64 = uint64(self.latest) - if self.latest == -1 { - latestBlockNo = earliestBlock.NumberU64() + var endBlockNo uint64 = uint64(self.end) + if self.end == -1 { + endBlockNo = latestBlock.NumberU64() } - var ( - logs vm.Logs - block *types.Block - ) - hash := core.GetCanonicalHash(self.db, latestBlockNo) - if hash != (common.Hash{}) { - block = core.GetBlock(self.db, hash) + // if no addresses are present we can't make use of fast search which + // uses the mipmap bloom filters to check for fast inclusion and uses + // higher range probability in order to ensure at least a false positive + if len(self.addresses) == 0 { + return self.getLogs(beginBlockNo, endBlockNo) } + return self.mipFind(beginBlockNo, endBlockNo, 0) +} -done: - for i := 0; block != nil; i++ { - // Quit on latest - switch { - case block.NumberU64() == 0: - break done - case block.NumberU64() < earliestBlockNo: - break done +func (self *Filter) mipFind(start, end uint64, depth int) (logs vm.Logs) { + level := core.MIPMapLevels[depth] + // normalise numerator so we can work in level specific batches and + // work with the proper range checks + for num := start / level * level; num <= end; num += level { + // find addresses in bloom filters + bloom := core.GetMipmapBloom(self.db, num, level) + for _, addr := range self.addresses { + if bloom.TestBytes(addr[:]) { + // range check normalised values and make sure that + // we're resolving the correct range instead of the + // normalised values. + start := uint64(math.Max(float64(num), float64(start))) + end := uint64(math.Min(float64(num+level-1), float64(end))) + if depth+1 == len(core.MIPMapLevels) { + logs = append(logs, self.getLogs(start, end)...) + } else { + logs = append(logs, self.mipFind(start, end, depth+1)...) + } + // break so we don't check the same range for each + // possible address. Checks on multiple addresses + // are handled further down the stack. 
+ break + } + } + } + + return logs +} + +func (self *Filter) getLogs(start, end uint64) (logs vm.Logs) { + var block *types.Block + + for i := start; i <= end; i++ { + hash := core.GetCanonicalHash(self.db, i) + if hash != (common.Hash{}) { + block = core.GetBlock(self.db, hash) + } else { // block not found + return logs } // Use bloom filtering to see if this block is interesting given the @@ -120,8 +142,6 @@ done: } logs = append(logs, self.FilterLogs(unfiltered)...) } - - block = core.GetBlock(self.db, block.ParentHash()) } return logs @@ -143,7 +163,7 @@ func (self *Filter) FilterLogs(logs vm.Logs) vm.Logs { // Filter the logs for interesting stuff Logs: for _, log := range logs { - if len(self.address) > 0 && !includes(self.address, log.Address) { + if len(self.addresses) > 0 && !includes(self.addresses, log.Address) { continue } @@ -179,9 +199,9 @@ Logs: } func (self *Filter) bloomFilter(block *types.Block) bool { - if len(self.address) > 0 { + if len(self.addresses) > 0 { var included bool - for _, addr := range self.address { + for _, addr := range self.addresses { if types.BloomLookup(block.Bloom(), addr) { included = true break diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index 950a84579..9e7538fac 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -1,6 +1,7 @@ package filters import ( + "io/ioutil" "math/big" "os" "testing" @@ -23,40 +24,42 @@ func makeReceipt(addr common.Address) *types.Receipt { } func BenchmarkMipmaps(b *testing.B) { - const dbname = "/tmp/mipmap" + dir, err := ioutil.TempDir("", "mipmap") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(dir) + var ( - db, _ = ethdb.NewLDBDatabase(dbname, 16) + db, _ = ethdb.NewLDBDatabase(dir, 16) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = common.BytesToAddress([]byte("jeff")) addr3 = common.BytesToAddress([]byte("ethereum")) addr4 = common.BytesToAddress([]byte("random addresses please")) ) - defer func() { - db.Close() - os.Remove(dbname) - }() + defer db.Close() genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr1, big.NewInt(1000000)}) - chain := core.GenerateChain(genesis, db, 100000, func(i int, gen *core.BlockGen) { + chain := core.GenerateChain(genesis, db, 100010, func(i int, gen *core.BlockGen) { var receipts types.Receipts switch i { case 2403: receipt := makeReceipt(addr1) receipts = types.Receipts{receipt} - gen.AddReceipt(receipt) - case 10340: + gen.AddUncheckedReceipt(receipt) + case 1034: receipt := makeReceipt(addr2) receipts = types.Receipts{receipt} - gen.AddReceipt(receipt) + gen.AddUncheckedReceipt(receipt) case 34: receipt := makeReceipt(addr3) receipts = types.Receipts{receipt} - gen.AddReceipt(receipt) + gen.AddUncheckedReceipt(receipt) case 99999: receipt := makeReceipt(addr4) receipts = types.Receipts{receipt} - gen.AddReceipt(receipt) + gen.AddUncheckedReceipt(receipt) } @@ -65,6 +68,7 @@ func BenchmarkMipmaps(b *testing.B) { if err != nil { b.Fatal(err) } + core.WriteMipmapBloom(db, uint64(i+1), receipts) }) for _, block := range chain { core.WriteBlock(db, block) @@ -82,9 +86,9 @@ func BenchmarkMipmaps(b *testing.B) { b.ResetTimer() filter := New(db) - filter.SetAddress([]common.Address{addr1, addr2, addr3, addr4}) - filter.SetEarliestBlock(0) - filter.SetLatestBlock(-1) + filter.SetAddresses([]common.Address{addr1, addr2, addr3, addr4}) + filter.SetBeginBlock(0) + filter.SetEndBlock(-1) for i := 0; i < b.N; 
i++ { logs := filter.Find() @@ -93,3 +97,171 @@ func BenchmarkMipmaps(b *testing.B) { } } } + +func TestFilters(t *testing.T) { + dir, err := ioutil.TempDir("", "mipmap") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + var ( + db, _ = ethdb.NewLDBDatabase(dir, 16) + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key1.PublicKey) + + hash1 = common.BytesToHash([]byte("topic1")) + hash2 = common.BytesToHash([]byte("topic2")) + hash3 = common.BytesToHash([]byte("topic3")) + hash4 = common.BytesToHash([]byte("topic4")) + ) + defer db.Close() + + genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr, big.NewInt(1000000)}) + chain := core.GenerateChain(genesis, db, 1000, func(i int, gen *core.BlockGen) { + var receipts types.Receipts + switch i { + case 1: + receipt := types.NewReceipt(nil, new(big.Int)) + receipt.SetLogs(vm.Logs{ + &vm.Log{ + Address: addr, + Topics: []common.Hash{hash1}, + }, + }) + gen.AddUncheckedReceipt(receipt) + receipts = types.Receipts{receipt} + case 2: + receipt := types.NewReceipt(nil, new(big.Int)) + receipt.SetLogs(vm.Logs{ + &vm.Log{ + Address: addr, + Topics: []common.Hash{hash2}, + }, + }) + gen.AddUncheckedReceipt(receipt) + receipts = types.Receipts{receipt} + case 998: + receipt := types.NewReceipt(nil, new(big.Int)) + receipt.SetLogs(vm.Logs{ + &vm.Log{ + Address: addr, + Topics: []common.Hash{hash3}, + }, + }) + gen.AddUncheckedReceipt(receipt) + receipts = types.Receipts{receipt} + case 999: + receipt := types.NewReceipt(nil, new(big.Int)) + receipt.SetLogs(vm.Logs{ + &vm.Log{ + Address: addr, + Topics: []common.Hash{hash4}, + }, + }) + gen.AddUncheckedReceipt(receipt) + receipts = types.Receipts{receipt} + } + + // store the receipts + err := core.PutReceipts(db, receipts) + if err != nil { + t.Fatal(err) + } + // i is used as block number for the writes but since the i + // starts at 0 and block 0 (genesis) is already present increment + // by one + core.WriteMipmapBloom(db, uint64(i+1), receipts) + }) + for _, block := range chain { + core.WriteBlock(db, block) + if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { + t.Fatalf("failed to insert block number: %v", err) + } + if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { + t.Fatalf("failed to insert block number: %v", err) + } + if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil { + t.Fatal("error writing block receipts:", err) + } + } + + filter := New(db) + filter.SetAddresses([]common.Address{addr}) + filter.SetTopics([][]common.Hash{[]common.Hash{hash1, hash2, hash3, hash4}}) + filter.SetBeginBlock(0) + filter.SetEndBlock(-1) + + logs := filter.Find() + if len(logs) != 4 { + t.Error("expected 4 log, got", len(logs)) + } + + filter = New(db) + filter.SetAddresses([]common.Address{addr}) + filter.SetTopics([][]common.Hash{[]common.Hash{hash3}}) + filter.SetBeginBlock(900) + filter.SetEndBlock(999) + logs = filter.Find() + if len(logs) != 1 { + t.Error("expected 1 log, got", len(logs)) + } + if len(logs) > 0 && logs[0].Topics[0] != hash3 { + t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) + } + + filter = New(db) + filter.SetAddresses([]common.Address{addr}) + filter.SetTopics([][]common.Hash{[]common.Hash{hash3}}) + filter.SetBeginBlock(990) + filter.SetEndBlock(-1) + logs = filter.Find() + if len(logs) != 1 { + t.Error("expected 1 log, got", len(logs)) + } + if len(logs) > 0 && 
logs[0].Topics[0] != hash3 { + t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) + } + + filter = New(db) + filter.SetTopics([][]common.Hash{[]common.Hash{hash1, hash2}}) + filter.SetBeginBlock(1) + filter.SetEndBlock(10) + + logs = filter.Find() + if len(logs) != 2 { + t.Error("expected 2 log, got", len(logs)) + } + + failHash := common.BytesToHash([]byte("fail")) + filter = New(db) + filter.SetTopics([][]common.Hash{[]common.Hash{failHash}}) + filter.SetBeginBlock(0) + filter.SetEndBlock(-1) + + logs = filter.Find() + if len(logs) != 0 { + t.Error("expected 0 log, got", len(logs)) + } + + failAddr := common.BytesToAddress([]byte("failmenow")) + filter = New(db) + filter.SetAddresses([]common.Address{failAddr}) + filter.SetBeginBlock(0) + filter.SetEndBlock(-1) + + logs = filter.Find() + if len(logs) != 0 { + t.Error("expected 0 log, got", len(logs)) + } + + filter = New(db) + filter.SetTopics([][]common.Hash{[]common.Hash{failHash}, []common.Hash{hash1}}) + filter.SetBeginBlock(0) + filter.SetEndBlock(-1) + + logs = filter.Find() + if len(logs) != 0 { + t.Error("expected 0 log, got", len(logs)) + } +} -- cgit v1.2.3 From 92f9a3e5fa29e0f05c81b348b87cab4f7a94f0c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 1 Sep 2015 17:35:14 +0300 Subject: cmd, eth: support switching client modes of operation --- eth/backend.go | 6 ++++-- eth/handler.go | 40 ++++++++++++++++++++++------------------ eth/handler_test.go | 44 +++++++++++++++++++++++++++++++++++++------- eth/helper_test.go | 19 +++++++++++++++++-- eth/protocol.go | 17 +++++++++++++++++ eth/protocol_test.go | 6 +++--- 6 files changed, 100 insertions(+), 32 deletions(-) (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index 9ec3c1440..04dd3767a 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -89,6 +89,7 @@ type Config struct { GenesisFile string GenesisBlock *types.Block // used by block tests Olympic bool + Mode Mode BlockChainVersion int SkipBcVersionCheck bool // e.g. blockchain export @@ -398,8 +399,9 @@ func New(config *Config) (*Ethereum, error) { eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.blockchain, eth.EventMux()) eth.blockchain.SetProcessor(eth.blockProcessor) - eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb) - + if eth.protocolManager, err = NewProtocolManager(config.Mode, config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil { + return nil, err + } eth.miner = miner.New(eth, eth.EventMux(), eth.pow) eth.miner.SetGasPrice(config.GasPrice) eth.miner.SetExtra(config.ExtraData) diff --git a/eth/handler.go b/eth/handler.go index 3fc909672..5716350af 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -17,6 +17,7 @@ package eth import ( + "errors" "fmt" "math" "math/big" @@ -42,6 +43,10 @@ const ( estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header ) +// errIncompatibleConfig is returned if the requested protocols and configs are +// not compatible (low protocol version restrictions and high requirements). 
+var errIncompatibleConfig = errors.New("incompatible configuration") + func errResp(code errCode, format string, v ...interface{}) error { return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...)) } @@ -49,17 +54,8 @@ func errResp(code errCode, format string, v ...interface{}) error { type hashFetcherFn func(common.Hash) error type blockFetcherFn func([]common.Hash) error -// extProt is an interface which is passed around so we can expose GetHashes and GetBlock without exposing it to the rest of the protocol -// extProt is passed around to peers which require to GetHashes and GetBlocks -type extProt struct { - getHashes hashFetcherFn - getBlocks blockFetcherFn -} - -func (ep extProt) GetHashes(hash common.Hash) error { return ep.getHashes(hash) } -func (ep extProt) GetBlock(hashes []common.Hash) error { return ep.getBlocks(hashes) } - type ProtocolManager struct { + mode Mode txpool txPool blockchain *core.BlockChain chaindb ethdb.Database @@ -87,9 +83,10 @@ type ProtocolManager struct { // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // with the ethereum network. -func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) *ProtocolManager { +func NewProtocolManager(mode Mode, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) { // Create the protocol manager with the base fields manager := &ProtocolManager{ + mode: mode, eventMux: mux, txpool: txpool, blockchain: blockchain, @@ -100,11 +97,15 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po quitSync: make(chan struct{}), } // Initiate a sub-protocol for every implemented version we can handle - manager.SubProtocols = make([]p2p.Protocol, len(ProtocolVersions)) - for i := 0; i < len(manager.SubProtocols); i++ { - version := ProtocolVersions[i] - - manager.SubProtocols[i] = p2p.Protocol{ + manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions)) + for i, version := range ProtocolVersions { + // Skip protocol version if incompatible with the mode of operation + if minimumProtocolVersion[mode] > version { + continue + } + // Compatible, initialize the sub-protocol + version := version // Closure for the run + manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{ Name: "eth", Version: version, Length: ProtocolLengths[i], @@ -113,7 +114,10 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po manager.newPeerCh <- peer return manager.handle(peer) }, - } + }) + } + if len(manager.SubProtocols) == 0 { + return nil, errIncompatibleConfig } // Construct the different synchronisation mechanisms manager.downloader = downloader.New(manager.eventMux, manager.blockchain.HasBlock, manager.blockchain.GetBlock, manager.blockchain.CurrentBlock, manager.blockchain.GetTd, manager.blockchain.InsertChain, manager.removePeer) @@ -126,7 +130,7 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po } manager.fetcher = fetcher.New(manager.blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, manager.blockchain.InsertChain, manager.removePeer) - return manager + return manager, nil } func (pm *ProtocolManager) removePeer(id string) { diff --git a/eth/handler_test.go b/eth/handler_test.go index dde2ecbd5..8ab5c1aad 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -17,12 +17,42 @@ import 
( "github.com/ethereum/go-ethereum/params" ) +// Tests that protocol versions and modes of operations are matched up properly. +func TestProtocolCompatibility(t *testing.T) { + // Define the compatibility chart + tests := []struct { + version uint + mode Mode + compatible bool + }{ + {61, ArchiveMode, true}, {62, ArchiveMode, true}, {63, ArchiveMode, true}, {64, ArchiveMode, true}, + {61, FullMode, false}, {62, FullMode, false}, {63, FullMode, true}, {64, FullMode, true}, + {61, LightMode, false}, {62, LightMode, false}, {63, LightMode, false}, {64, LightMode, true}, + } + // Make sure anything we screw up is restored + backup := ProtocolVersions + defer func() { ProtocolVersions = backup }() + + // Try all available compatibility configs and check for errors + for i, tt := range tests { + ProtocolVersions = []uint{tt.version} + + pm, err := newTestProtocolManager(tt.mode, 0, nil, nil) + if pm != nil { + defer pm.Stop() + } + if (err == nil && !tt.compatible) || (err != nil && tt.compatible) { + t.Errorf("test %d: compatibility mismatch: have error %v, want compatibility %v", i, err, tt.compatible) + } + } +} + // Tests that hashes can be retrieved from a remote chain by hashes in reverse // order. func TestGetBlockHashes61(t *testing.T) { testGetBlockHashes(t, 61) } func testGetBlockHashes(t *testing.T, protocol int) { - pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, ArchiveMode, downloader.MaxHashFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -65,7 +95,7 @@ func testGetBlockHashes(t *testing.T, protocol int) { func TestGetBlockHashesFromNumber61(t *testing.T) { testGetBlockHashesFromNumber(t, 61) } func testGetBlockHashesFromNumber(t *testing.T, protocol int) { - pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, ArchiveMode, downloader.MaxHashFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -105,7 +135,7 @@ func testGetBlockHashesFromNumber(t *testing.T, protocol int) { func TestGetBlocks61(t *testing.T) { testGetBlocks(t, 61) } func testGetBlocks(t *testing.T, protocol int) { - pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, ArchiveMode, downloader.MaxHashFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -177,7 +207,7 @@ func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) } func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) } func testGetBlockHeaders(t *testing.T, protocol int) { - pm := newTestProtocolManager(downloader.MaxHashFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, ArchiveMode, downloader.MaxHashFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -303,7 +333,7 @@ func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) } func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) } func testGetBlockBodies(t *testing.T, protocol int) { - pm := newTestProtocolManager(downloader.MaxBlockFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, ArchiveMode, downloader.MaxBlockFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -410,7 +440,7 @@ func testGetNodeData(t *testing.T, protocol int) { } } // Assemble the test environment - pm := newTestProtocolManager(4, generator, nil) + pm := newTestProtocolManagerMust(t, ArchiveMode, 4, 
generator, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -500,7 +530,7 @@ func testGetReceipt(t *testing.T, protocol int) { } } // Assemble the test environment - pm := newTestProtocolManager(4, generator, nil) + pm := newTestProtocolManagerMust(t, ArchiveMode, 4, generator, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() diff --git a/eth/helper_test.go b/eth/helper_test.go index 9314884ef..bd65b49f8 100644 --- a/eth/helper_test.go +++ b/eth/helper_test.go @@ -28,7 +28,7 @@ var ( // newTestProtocolManager creates a new protocol manager for testing purposes, // with the given number of blocks already known, and potential notification // channels for different events. -func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager { +func newTestProtocolManager(mode Mode, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) { var ( evmux = new(event.TypeMux) pow = new(core.FakePow) @@ -42,8 +42,23 @@ func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), new if _, err := blockchain.InsertChain(chain); err != nil { panic(err) } - pm := NewProtocolManager(NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db) + pm, err := NewProtocolManager(mode, NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db) + if err != nil { + return nil, err + } pm.Start() + return pm, nil +} + +// newTestProtocolManagerMust creates a new protocol manager for testing purposes, +// with the given number of blocks already known, and potential notification +// channels for different events. In case of an error, the constructor force- +// fails the test. +func newTestProtocolManagerMust(t *testing.T, mode Mode, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager { + pm, err := newTestProtocolManager(mode, blocks, generator, newtx) + if err != nil { + t.Fatalf("Failed to create protocol manager: %v", err) + } return pm } diff --git a/eth/protocol.go b/eth/protocol.go index 49f096a3b..0d2b5128d 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -26,6 +26,15 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) +// Mode represents the mode of operation of the eth client. +type Mode int + +const ( + ArchiveMode Mode = iota // Maintain the entire blockchain history + FullMode // Maintain only a recent view of the blockchain + LightMode // Don't maintain any history, rather fetch on demand +) + // Constants to match up protocol versions and messages const ( eth61 = 61 @@ -34,6 +43,14 @@ const ( eth64 = 64 ) +// minimumProtocolVersion is the minimum version of the protocol eth must run to +// support the desired mode of operation. +var minimumProtocolVersion = map[Mode]uint{ + ArchiveMode: eth61, + FullMode: eth63, + LightMode: eth64, +} + // Supported versions of the eth protocol (first is primary). 
var ProtocolVersions = []uint{eth64, eth63, eth62, eth61} diff --git a/eth/protocol_test.go b/eth/protocol_test.go index 523e6c1eb..bac519ae3 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -44,7 +44,7 @@ func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) } func TestStatusMsgErrors64(t *testing.T) { testStatusMsgErrors(t, 64) } func testStatusMsgErrors(t *testing.T, protocol int) { - pm := newTestProtocolManager(0, nil, nil) + pm := newTestProtocolManagerMust(t, ArchiveMode, 0, nil, nil) td, currentBlock, genesis := pm.blockchain.Status() defer pm.Stop() @@ -99,7 +99,7 @@ func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) } func testRecvTransactions(t *testing.T, protocol int) { txAdded := make(chan []*types.Transaction) - pm := newTestProtocolManager(0, nil, txAdded) + pm := newTestProtocolManagerMust(t, ArchiveMode, 0, nil, txAdded) p, _ := newTestPeer("peer", protocol, pm, true) defer pm.Stop() defer p.close() @@ -127,7 +127,7 @@ func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) } func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) } func testSendTransactions(t *testing.T, protocol int) { - pm := newTestProtocolManager(0, nil, nil) + pm := newTestProtocolManagerMust(t, ArchiveMode, 0, nil, nil) defer pm.Stop() // Fill the pool with big transactions. -- cgit v1.2.3 From c33cc382b3561ca91871111933f81653bfd8532f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 21 Sep 2015 15:36:29 +0300 Subject: core: support inserting pure header chains --- eth/backend.go | 2 +- eth/handler.go | 18 ++---------------- eth/sync.go | 3 ++- 3 files changed, 5 insertions(+), 18 deletions(-) (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index 04dd3767a..f4acc76cb 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -464,7 +464,7 @@ func (s *Ethereum) NodeInfo() *NodeInfo { DiscPort: int(node.UDP), TCPPort: int(node.TCP), ListenAddr: s.net.ListenAddr, - Td: s.BlockChain().Td().String(), + Td: s.BlockChain().GetTd(s.BlockChain().CurrentBlock().Hash()).String(), } } diff --git a/eth/handler.go b/eth/handler.go index 5716350af..021be1024 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -589,15 +589,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } request.Block.ReceivedAt = msg.ReceivedAt - // Mark the block's arrival for whatever reason - _, chainHead, _ := pm.blockchain.Status() - jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{ - BlockHash: request.Block.Hash().Hex(), - BlockNumber: request.Block.Number(), - ChainHeadHash: chainHead.Hex(), - BlockPrevHash: request.Block.ParentHash().Hex(), - RemoteId: p.ID().String(), - }) // Mark the peer as owning the block and schedule it for import p.MarkBlock(request.Block.Hash()) p.SetHead(request.Block.Hash()) @@ -607,7 +598,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Update the peers total difficulty if needed, schedule a download if gapped if request.TD.Cmp(p.Td()) > 0 { p.SetTd(request.TD) - if request.TD.Cmp(new(big.Int).Add(pm.blockchain.Td(), request.Block.Difficulty())) > 0 { + td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash()) + if request.TD.Cmp(new(big.Int).Add(td, request.Block.Difficulty())) > 0 { go pm.synchronise(p) } } @@ -624,12 +616,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { return errResp(ErrDecode, "transaction %d is nil", i) } p.MarkTransaction(tx.Hash()) - - // Log it's arrival for later analysis - jsonlogger.LogJson(&logger.EthTxReceived{ - 
TxHash: tx.Hash().Hex(), - RemoteId: p.ID().String(), - }) } pm.txpool.AddTransactions(txs) diff --git a/eth/sync.go b/eth/sync.go index 5a2031c68..6295083e2 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -160,7 +160,8 @@ func (pm *ProtocolManager) synchronise(peer *peer) { return } // Make sure the peer's TD is higher than our own. If not drop. - if peer.Td().Cmp(pm.blockchain.Td()) <= 0 { + td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash()) + if peer.Td().Cmp(td) <= 0 { return } // Otherwise try to sync with the downloader -- cgit v1.2.3 From f186b390182da7af368e7a5a1e9eff8d690b7414 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 28 Sep 2015 19:27:31 +0300 Subject: eth/downloader: add fast and light sync strategies --- eth/downloader/downloader.go | 667 ++++++++++++++++++++++------------ eth/downloader/downloader_test.go | 739 +++++++++++++++++++++++--------------- eth/downloader/metrics.go | 5 + eth/downloader/modes.go | 26 ++ eth/downloader/peer.go | 192 ++++++---- eth/downloader/queue.go | 534 +++++++++++++++++---------- eth/handler.go | 43 ++- eth/handler_test.go | 13 +- eth/peer.go | 6 +- 9 files changed, 1427 insertions(+), 798 deletions(-) create mode 100644 eth/downloader/modes.go (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 64fb1b57b..7ae7aa221 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -19,8 +19,10 @@ package downloader import ( "errors" + "fmt" "math" "math/big" + "strings" "sync" "sync/atomic" "time" @@ -32,70 +34,96 @@ import ( "github.com/ethereum/go-ethereum/logger/glog" ) -const ( - eth61 = 61 // Constant to check for old protocol support - eth62 = 62 // Constant to check for new protocol support -) - var ( - MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request - MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request - MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request - MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request - MaxStateFetch = 384 // Amount of node state values to allow fetching per request - MaxReceiptsFetch = 384 // Amount of transaction receipts to allow fetching per request - - hashTTL = 5 * time.Second // [eth/61] Time it takes for a hash request to time out - blockSoftTTL = 3 * time.Second // [eth/61] Request completion threshold for increasing or decreasing a peer's bandwidth - blockHardTTL = 3 * blockSoftTTL // [eth/61] Maximum time allowance before a block request is considered expired - headerTTL = 5 * time.Second // [eth/62] Time it takes for a header request to time out - bodySoftTTL = 3 * time.Second // [eth/62] Request completion threshold for increasing or decreasing a peer's bandwidth - bodyHardTTL = 3 * bodySoftTTL // [eth/62] Maximum time allowance before a block body request is considered expired - - maxQueuedHashes = 256 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection) - maxQueuedHeaders = 256 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) - maxBlockProcess = 256 // Number of blocks to import at once into the chain + MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request + MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request + MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request + MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request + 
MaxStateFetch = 384 // Amount of node state values to allow fetching per request + MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request + + hashTTL = 5 * time.Second // [eth/61] Time it takes for a hash request to time out + blockSoftTTL = 3 * time.Second // [eth/61] Request completion threshold for increasing or decreasing a peer's bandwidth + blockHardTTL = 3 * blockSoftTTL // [eth/61] Maximum time allowance before a block request is considered expired + headerTTL = 5 * time.Second // [eth/62] Time it takes for a header request to time out + bodySoftTTL = 3 * time.Second // [eth/62] Request completion threshold for increasing or decreasing a peer's bandwidth + bodyHardTTL = 3 * bodySoftTTL // [eth/62] Maximum time allowance before a block body request is considered expired + receiptSoftTTL = 3 * time.Second // [eth/63] Request completion threshold for increasing or decreasing a peer's bandwidth + receiptHardTTL = 3 * receiptSoftTTL // [eth/63] Maximum time allowance before a block body request is considered expired + + maxQueuedHashes = 256 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection) + maxQueuedHeaders = 256 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) + maxResultsProcess = 256 // Number of download results to import at once into the chain + + headerCheckFrequency = 64 // Verification frequency of the downloaded headers during fast sync + minCheckedHeaders = 1024 // Number of headers to verify fully when approaching the chain head + minFullBlocks = 1024 // Number of blocks to retrieve fully even in fast sync ) var ( - errBusy = errors.New("busy") - errUnknownPeer = errors.New("peer is unknown or unhealthy") - errBadPeer = errors.New("action from bad peer ignored") - errStallingPeer = errors.New("peer is stalling") - errNoPeers = errors.New("no peers to keep download active") - errPendingQueue = errors.New("pending items in queue") - errTimeout = errors.New("timeout") - errEmptyHashSet = errors.New("empty hash set by peer") - errEmptyHeaderSet = errors.New("empty header set by peer") - errPeersUnavailable = errors.New("no peers available or all peers tried for block download process") - errAlreadyInPool = errors.New("hash already in pool") - errInvalidChain = errors.New("retrieved hash chain is invalid") - errInvalidBody = errors.New("retrieved block body is invalid") - errCancelHashFetch = errors.New("hash fetching canceled (requested)") - errCancelBlockFetch = errors.New("block downloading canceled (requested)") - errCancelHeaderFetch = errors.New("block header fetching canceled (requested)") - errCancelBodyFetch = errors.New("block body downloading canceled (requested)") - errNoSyncActive = errors.New("no sync active") + errBusy = errors.New("busy") + errUnknownPeer = errors.New("peer is unknown or unhealthy") + errBadPeer = errors.New("action from bad peer ignored") + errStallingPeer = errors.New("peer is stalling") + errNoPeers = errors.New("no peers to keep download active") + errPendingQueue = errors.New("pending items in queue") + errTimeout = errors.New("timeout") + errEmptyHashSet = errors.New("empty hash set by peer") + errEmptyHeaderSet = errors.New("empty header set by peer") + errPeersUnavailable = errors.New("no peers available or all tried for download") + errAlreadyInPool = errors.New("hash already in pool") + errInvalidChain = errors.New("retrieved hash chain is invalid") + errInvalidBlock = errors.New("retrieved block is invalid") + errInvalidBody = 
errors.New("retrieved block body is invalid") + errInvalidReceipt = errors.New("retrieved receipt is invalid") + errCancelHashFetch = errors.New("hash download canceled (requested)") + errCancelBlockFetch = errors.New("block download canceled (requested)") + errCancelHeaderFetch = errors.New("block header download canceled (requested)") + errCancelBodyFetch = errors.New("block body download canceled (requested)") + errCancelReceiptFetch = errors.New("receipt download canceled (requested)") + errNoSyncActive = errors.New("no sync active") ) -// hashCheckFn is a callback type for verifying a hash's presence in the local chain. -type hashCheckFn func(common.Hash) bool +// headerCheckFn is a callback type for verifying a header's presence in the local chain. +type headerCheckFn func(common.Hash) bool + +// blockCheckFn is a callback type for verifying a block's presence in the local chain. +type blockCheckFn func(common.Hash) bool + +// headerRetrievalFn is a callback type for retrieving a header from the local chain. +type headerRetrievalFn func(common.Hash) *types.Header // blockRetrievalFn is a callback type for retrieving a block from the local chain. type blockRetrievalFn func(common.Hash) *types.Block -// headRetrievalFn is a callback type for retrieving the head block from the local chain. -type headRetrievalFn func() *types.Block +// headHeaderRetrievalFn is a callback type for retrieving the head header from the local chain. +type headHeaderRetrievalFn func() *types.Header + +// headBlockRetrievalFn is a callback type for retrieving the head block from the local chain. +type headBlockRetrievalFn func() *types.Block // tdRetrievalFn is a callback type for retrieving the total difficulty of a local block. type tdRetrievalFn func(common.Hash) *big.Int -// chainInsertFn is a callback type to insert a batch of blocks into the local chain. -type chainInsertFn func(types.Blocks) (int, error) +// headerChainInsertFn is a callback type to insert a batch of headers into the local chain. +type headerChainInsertFn func([]*types.Header, bool) (int, error) + +// blockChainInsertFn is a callback type to insert a batch of blocks into the local chain. +type blockChainInsertFn func(types.Blocks) (int, error) + +// receiptChainInsertFn is a callback type to insert a batch of receipts into the local chain. +type receiptChainInsertFn func(types.Blocks, []types.Receipts) (int, error) // peerDropFn is a callback type for dropping a peer detected as malicious. type peerDropFn func(id string) +// dataPack is a data message returned by a peer for some query. +type dataPack interface { + PeerId() string + Empty() bool + Stats() string +} + // hashPack is a batch of block hashes returned by a peer (eth/61). type hashPack struct { peerId string @@ -121,8 +149,33 @@ type bodyPack struct { uncles [][]*types.Header } +// PeerId retrieves the origin peer who sent this block body packet. +func (p *bodyPack) PeerId() string { return p.peerId } + +// Empty returns whether the no block bodies were delivered. +func (p *bodyPack) Empty() bool { return len(p.transactions) == 0 || len(p.uncles) == 0 } + +// Stats creates a textual stats report for logging purposes. +func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) } + +// receiptPack is a batch of receipts returned by a peer. +type receiptPack struct { + peerId string + receipts [][]*types.Receipt +} + +// PeerId retrieves the origin peer who sent this receipt packet. 
+func (p *receiptPack) PeerId() string { return p.peerId } + +// Empty returns whether the no receipts were delivered. +func (p *receiptPack) Empty() bool { return len(p.receipts) == 0 } + +// Stats creates a textual stats report for logging purposes. +func (p *receiptPack) Stats() string { return fmt.Sprintf("%d", len(p.receipts)) } + type Downloader struct { - mux *event.TypeMux + mode SyncMode // Synchronisation mode defining the strategies used + mux *event.TypeMux // Event multiplexer to announce sync operation events queue *queue // Scheduler for selecting the hashes to download peers *peerSet // Set of active peers from which download can proceed @@ -135,12 +188,17 @@ type Downloader struct { syncStatsLock sync.RWMutex // Lock protecting the sync stats fields // Callbacks - hasBlock hashCheckFn // Checks if a block is present in the chain - getBlock blockRetrievalFn // Retrieves a block from the chain - headBlock headRetrievalFn // Retrieves the head block from the chain - getTd tdRetrievalFn // Retrieves the TD of a block from the chain - insertChain chainInsertFn // Injects a batch of blocks into the chain - dropPeer peerDropFn // Drops a peer for misbehaving + hasHeader headerCheckFn // Checks if a header is present in the chain + hasBlock blockCheckFn // Checks if a block is present in the chain + getHeader headerRetrievalFn // Retrieves a header from the chain + getBlock blockRetrievalFn // Retrieves a block from the chain + headHeader headHeaderRetrievalFn // Retrieves the head header from the chain + headBlock headBlockRetrievalFn // Retrieves the head block from the chain + getTd tdRetrievalFn // Retrieves the TD of a block from the chain + insertHeaders headerChainInsertFn // Injects a batch of headers into the chain + insertBlocks blockChainInsertFn // Injects a batch of blocks into the chain + insertReceipts receiptChainInsertFn // Injects a batch of blocks and their receipts into the chain + dropPeer peerDropFn // Drops a peer for misbehaving // Status synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing @@ -149,46 +207,56 @@ type Downloader struct { notified int32 // Channels - newPeerCh chan *peer - hashCh chan hashPack // [eth/61] Channel receiving inbound hashes - blockCh chan blockPack // [eth/61] Channel receiving inbound blocks - headerCh chan headerPack // [eth/62] Channel receiving inbound block headers - bodyCh chan bodyPack // [eth/62] Channel receiving inbound block bodies - wakeCh chan bool // Channel to signal the block/body fetcher of new tasks + newPeerCh chan *peer + hashCh chan hashPack // [eth/61] Channel receiving inbound hashes + blockCh chan blockPack // [eth/61] Channel receiving inbound blocks + headerCh chan headerPack // [eth/62] Channel receiving inbound block headers + bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies + receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts + blockWakeCh chan bool // [eth/61] Channel to signal the block fetcher of new tasks + bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks + receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks cancelCh chan struct{} // Channel to cancel mid-flight syncs cancelLock sync.RWMutex // Lock to protect the cancel channel in delivers // Testing hooks - syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run - bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch - 
chainInsertHook func([]*Block) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) -} - -// Block is an origin-tagged blockchain block. -type Block struct { - RawBlock *types.Block - OriginPeer string + syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run + bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch + receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch + chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) } // New creates a new downloader to fetch hashes and blocks from remote peers. -func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, headBlock headRetrievalFn, getTd tdRetrievalFn, insertChain chainInsertFn, dropPeer peerDropFn) *Downloader { +func New(mode SyncMode, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock blockCheckFn, getHeader headerRetrievalFn, getBlock blockRetrievalFn, + headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, + insertReceipts receiptChainInsertFn, dropPeer peerDropFn) *Downloader { + return &Downloader{ - mux: mux, - queue: newQueue(), - peers: newPeerSet(), - hasBlock: hasBlock, - getBlock: getBlock, - headBlock: headBlock, - getTd: getTd, - insertChain: insertChain, - dropPeer: dropPeer, - newPeerCh: make(chan *peer, 1), - hashCh: make(chan hashPack, 1), - blockCh: make(chan blockPack, 1), - headerCh: make(chan headerPack, 1), - bodyCh: make(chan bodyPack, 1), - wakeCh: make(chan bool, 1), + mode: mode, + mux: mux, + queue: newQueue(), + peers: newPeerSet(), + hasHeader: hasHeader, + hasBlock: hasBlock, + getHeader: getHeader, + getBlock: getBlock, + headHeader: headHeader, + headBlock: headBlock, + getTd: getTd, + insertHeaders: insertHeaders, + insertBlocks: insertBlocks, + insertReceipts: insertReceipts, + dropPeer: dropPeer, + newPeerCh: make(chan *peer, 1), + hashCh: make(chan hashPack, 1), + blockCh: make(chan blockPack, 1), + headerCh: make(chan headerPack, 1), + bodyCh: make(chan dataPack, 1), + receiptCh: make(chan dataPack, 1), + blockWakeCh: make(chan bool, 1), + bodyWakeCh: make(chan bool, 1), + receiptWakeCh: make(chan bool, 1), } } @@ -211,10 +279,10 @@ func (d *Downloader) Synchronising() bool { // used for fetching hashes and blocks from. 
func (d *Downloader) RegisterPeer(id string, version int, head common.Hash, getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading - getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn) error { + getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn, getReceipts receiptFetcherFn) error { glog.V(logger.Detail).Infoln("Registering peer", id) - if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks, getRelHeaders, getAbsHeaders, getBlockBodies)); err != nil { + if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts)); err != nil { glog.V(logger.Error).Infoln("Register failed:", err) return err } @@ -222,13 +290,15 @@ func (d *Downloader) RegisterPeer(id string, version int, head common.Hash, } // UnregisterPeer remove a peer from the known list, preventing any action from -// the specified peer. +// the specified peer. An effort is also made to return any pending fetches into +// the queue. func (d *Downloader) UnregisterPeer(id string) error { glog.V(logger.Detail).Infoln("Unregistering peer", id) if err := d.peers.Unregister(id); err != nil { glog.V(logger.Error).Infoln("Unregister failed:", err) return err } + d.queue.Revoke(id) return nil } @@ -275,16 +345,18 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int) error glog.V(logger.Info).Infoln("Block synchronisation started") } // Abort if the queue still contains some leftover data - if _, cached := d.queue.Size(); cached > 0 && d.queue.GetHeadBlock() != nil { + if d.queue.GetHeadResult() != nil { return errPendingQueue } - // Reset the queue and peer set to clean any internal leftover state + // Reset the queue, peer set and wake channels to clean any internal leftover state d.queue.Reset() d.peers.Reset() - select { - case <-d.wakeCh: - default: + for _, ch := range []chan bool{d.blockWakeCh, d.bodyWakeCh, d.receiptWakeCh} { + select { + case <-ch: + default: + } } // Create cancel channel for aborting mid-flight d.cancelLock.Lock() @@ -299,12 +371,13 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int) error return d.syncWithPeer(p, hash, td) } +/* // Has checks if the downloader knows about a particular hash, meaning that its // either already downloaded of pending retrieval. func (d *Downloader) Has(hash common.Hash) bool { return d.queue.Has(hash) } - +*/ // syncWithPeer starts a block synchronization based on the hash chain from the // specified peer and head hash. 
func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err error) { @@ -323,7 +396,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e defer glog.V(logger.Debug).Infof("Synchronisation terminated") switch { - case p.version == eth61: + case p.version == 61: // Look up the sync boundaries: the common ancestor and the target block latest, err := d.fetchHeight61(p) if err != nil { @@ -344,6 +417,8 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e if d.syncInitHook != nil { d.syncInitHook(origin, latest) } + d.queue.Prepare(origin+1, 1) + errc := make(chan error, 2) go func() { errc <- d.fetchHashes61(p, td, origin+1) }() go func() { errc <- d.fetchBlocks61(origin + 1) }() @@ -356,7 +431,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e } return <-errc - case p.version >= eth62: + case p.version >= 62: // Look up the sync boundaries: the common ancestor and the target block latest, err := d.fetchHeight(p) if err != nil { @@ -373,21 +448,32 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e d.syncStatsHeight = latest d.syncStatsLock.Unlock() - // Initiate the sync using a concurrent hash and block retrieval algorithm + // Initiate the sync using a concurrent header and content retrieval algorithm + parts := 1 + if d.mode == FastSync { + parts = 2 // receipts are fetched too + } + d.queue.Prepare(origin+1, parts) + if d.syncInitHook != nil { d.syncInitHook(origin, latest) } - errc := make(chan error, 2) - go func() { errc <- d.fetchHeaders(p, td, origin+1) }() - go func() { errc <- d.fetchBodies(origin + 1) }() - - // If any fetcher fails, cancel the other - if err := <-errc; err != nil { - d.cancel() - <-errc - return err + errc := make(chan error, 3) + go func() { errc <- d.fetchHeaders(p, td, origin+1) }() // Headers are always retrieved + go func() { errc <- d.fetchReceipts(origin + 1) }() // Receipts are retrieved during fast sync + go func() { errc <- d.fetchBodies(origin + 1) }() // Bodies are retrieved during normal sync + + // If any fetcher fails, cancel the others + var fail error + for i := 0; i < cap(errc); i++ { + if err := <-errc; err != nil { + if fail == nil { + fail = err + d.cancel() + } + } } - return <-errc + return fail default: // Something very wrong, stop right here @@ -637,7 +723,7 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error { glog.V(logger.Debug).Infof("%v: no available hashes", p) select { - case d.wakeCh <- false: + case d.blockWakeCh <- false: case <-d.cancelCh: } // If no hashes were retrieved at all, the peer violated it's TD promise that it had a @@ -660,24 +746,24 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error { gotHashes = true // Otherwise insert all the new hashes, aborting in case of junk - glog.V(logger.Detail).Infof("%v: inserting %d hashes from #%d", p, len(hashPack.hashes), from) + glog.V(logger.Detail).Infof("%v: scheduling %d hashes from #%d", p, len(hashPack.hashes), from) - inserts := d.queue.Insert61(hashPack.hashes, true) + inserts := d.queue.Schedule61(hashPack.hashes, true) if len(inserts) != len(hashPack.hashes) { glog.V(logger.Debug).Infof("%v: stale hashes", p) return errBadPeer } // Notify the block fetcher of new hashes, but stop if queue is full - if d.queue.Pending() < maxQueuedHashes { + if d.queue.PendingBlocks() < maxQueuedHashes { // We still have hashes to fetch, send continuation wake signal (potential) select { - 
case d.wakeCh <- true: + case d.blockWakeCh <- true: default: } } else { // Hash limit reached, send a termination wake signal (enforced) select { - case d.wakeCh <- false: + case d.blockWakeCh <- false: case <-d.cancelCh: } return nil @@ -707,10 +793,8 @@ func (d *Downloader) fetchBlocks61(from uint64) error { update := make(chan struct{}, 1) - // Prepare the queue and fetch blocks until the hash fetcher's done - d.queue.Prepare(from) + // Fetch blocks until the hash fetcher's done finished := false - for { select { case <-d.cancelCh: @@ -733,13 +817,13 @@ func (d *Downloader) fetchBlocks61(from uint64) error { // If no blocks were delivered, demote the peer (need the delivery above) if len(blockPack.blocks) == 0 { peer.Demote() - peer.SetIdle61() + peer.SetBlocksIdle() glog.V(logger.Detail).Infof("%s: no blocks delivered", peer) break } // All was successful, promote the peer and potentially start processing peer.Promote() - peer.SetIdle61() + peer.SetBlocksIdle() glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks)) go d.process() @@ -751,7 +835,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { // Peer probably timed out with its delivery but came through // in the end, demote, but allow to to pull from this peer. peer.Demote() - peer.SetIdle61() + peer.SetBlocksIdle() glog.V(logger.Detail).Infof("%s: out of bound delivery", peer) case errStaleDelivery: @@ -765,7 +849,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { default: // Peer did something semi-useful, demote but keep it around peer.Demote() - peer.SetIdle61() + peer.SetBlocksIdle() glog.V(logger.Detail).Infof("%s: delivery partially failed: %v", peer, err) go d.process() } @@ -776,7 +860,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { default: } - case cont := <-d.wakeCh: + case cont := <-d.blockWakeCh: // The hash fetcher sent a continuation flag, check if it's done if !cont { finished = true @@ -800,14 +884,14 @@ func (d *Downloader) fetchBlocks61(from uint64) error { return errNoPeers } // Check for block request timeouts and demote the responsible peers - for _, pid := range d.queue.Expire(blockHardTTL) { + for _, pid := range d.queue.Expire61(blockHardTTL) { if peer := d.peers.Peer(pid); peer != nil { peer.Demote() glog.V(logger.Detail).Infof("%s: block delivery timeout", peer) } } - // If there's noting more to fetch, wait or terminate - if d.queue.Pending() == 0 { + // If there's nothing more to fetch, wait or terminate + if d.queue.PendingBlocks() == 0 { if d.queue.InFlight() == 0 && finished { glog.V(logger.Debug).Infof("Block fetching completed") return nil @@ -816,16 +900,18 @@ func (d *Downloader) fetchBlocks61(from uint64) error { } // Send a download request to all idle peers, until throttled throttled := false - for _, peer := range d.peers.IdlePeers(eth61) { + idles, total := d.peers.BlockIdlePeers(61) + + for _, peer := range idles { // Short circuit if throttling activated - if d.queue.Throttle() { + if d.queue.ThrottleBlocks() { throttled = true break } // Reserve a chunk of hashes for a peer. A nil can mean either that // no more hashes are available, or that the peer is known not to // have them. 
- request := d.queue.Reserve61(peer, peer.Capacity()) + request := d.queue.Reserve61(peer, peer.BlockCapacity()) if request == nil { continue } @@ -835,12 +921,12 @@ func (d *Downloader) fetchBlocks61(from uint64) error { // Fetch the chunk and make sure any errors return the hashes to the queue if err := peer.Fetch61(request); err != nil { glog.V(logger.Error).Infof("%v: fetch failed, rescheduling", peer) - d.queue.Cancel(request) + d.queue.Cancel61(request) } } // Make sure that we have peers available for fetching. If all peers have been tried // and all failed throw an error - if !throttled && d.queue.InFlight() == 0 { + if !throttled && d.queue.InFlight() == 0 && len(idles) == total { return errPeersUnavailable } } @@ -891,16 +977,19 @@ func (d *Downloader) fetchHeight(p *peer) (uint64, error) { } } -// findAncestor tries to locate the common ancestor block of the local chain and +// findAncestor tries to locate the common ancestor link of the local chain and // a remote peers blockchain. In the general case when our node was in sync and -// on the correct chain, checking the top N blocks should already get us a match. +// on the correct chain, checking the top N links should already get us a match. // In the rare scenario when we ended up on a long reorganization (i.e. none of -// the head blocks match), we do a binary search to find the common ancestor. +// the head links match), we do a binary search to find the common ancestor. func (d *Downloader) findAncestor(p *peer) (uint64, error) { glog.V(logger.Debug).Infof("%v: looking for common ancestor", p) - // Request our head blocks to short circuit ancestor location - head := d.headBlock().NumberU64() + // Request our head headers to short circuit ancestor location + head := d.headHeader().Number.Uint64() + if d.mode == FullSync { + head = d.headBlock().NumberU64() + } from := int64(head) - int64(MaxHeaderFetch) + 1 if from < 0 { from = 0 @@ -931,7 +1020,7 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) { // Check if a common ancestor was found finished = true for i := len(headers) - 1; i >= 0; i-- { - if d.hasBlock(headers[i].Hash()) { + if (d.mode == FullSync && d.hasBlock(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) { number, hash = headers[i].Number.Uint64(), headers[i].Hash() break } @@ -986,13 +1075,13 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) { arrived = true // Modify the search interval based on the response - block := d.getBlock(headers[0].Hash()) - if block == nil { + if (d.mode == FullSync && !d.hasBlock(headers[0].Hash())) || (d.mode != FullSync && !d.hasHeader(headers[0].Hash())) { end = check break } - if block.NumberU64() != check { - glog.V(logger.Debug).Infof("%v: non requested header #%d [%x…], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check) + header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists + if header.Number.Uint64() != check { + glog.V(logger.Debug).Infof("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check) return 0, errBadPeer } start = check @@ -1017,6 +1106,9 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) { // fetchHeaders keeps retrieving headers from the requested number, until no more // are returned, potentially throttling on the way. +// +// The queue parameter can be used to switch between queuing headers for block +// body download too, or directly import as pure header chains. 
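The note above about the queue parameter is the crux of the mode split that the new fetchHeaders implements: in full and fast sync the retrieved headers are scheduled into the queue so bodies (and receipts) can be fetched for them, while in light sync they are imported directly as a bare header chain. A minimal, standalone sketch of that branching; SyncMode, Header and the two callbacks are illustrative stand-ins, not the downloader's real API.

package main

import "fmt"

// SyncMode mirrors the patch's idea of full, fast and light synchronisation.
type SyncMode int

const (
	FullSync  SyncMode = iota // fetch headers and bodies, execute everything
	FastSync                  // fetch headers, bodies and receipts
	LightSync                 // fetch and import headers only
)

// Header is a stand-in for types.Header; only the number matters here.
type Header struct{ Number uint64 }

// handleHeaders either schedules headers for content retrieval or imports
// them directly, depending on the sync mode (hypothetical helpers).
func handleHeaders(mode SyncMode, headers []*Header,
	schedule func([]*Header, bool), importHeaders func([]*Header) error) error {

	switch mode {
	case FullSync, FastSync:
		// Queue the headers so the body/receipt fetchers can pick them up.
		schedule(headers, mode == FastSync)
		return nil
	default: // LightSync
		// No content to fetch: import the bare header chain immediately.
		return importHeaders(headers)
	}
}

func main() {
	hs := []*Header{{Number: 1}, {Number: 2}}
	_ = handleHeaders(LightSync, hs,
		func(h []*Header, receipts bool) { fmt.Println("scheduled", len(h), "headers, receipts:", receipts) },
		func(h []*Header) error { fmt.Println("imported", len(h), "headers"); return nil })
}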
func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { glog.V(logger.Debug).Infof("%v: downloading headers from #%d", p, from) defer glog.V(logger.Debug).Infof("%v: header download terminated", p) @@ -1058,13 +1150,15 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { headerReqTimer.UpdateSince(request) timeout.Stop() - // If no more headers are inbound, notify the body fetcher and return + // If no more headers are inbound, notify the content fetchers and return if len(headerPack.headers) == 0 { glog.V(logger.Debug).Infof("%v: no available headers", p) - select { - case d.wakeCh <- false: - case <-d.cancelCh: + for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + select { + case ch <- false: + case <-d.cancelCh: + } } // If no headers were retrieved at all, the peer violated it's TD promise that it had a // better chain compared to ours. The only exception is if it's promised blocks were @@ -1086,27 +1180,37 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { gotHeaders = true // Otherwise insert all the new headers, aborting in case of junk - glog.V(logger.Detail).Infof("%v: inserting %d headers from #%d", p, len(headerPack.headers), from) + glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headerPack.headers), from) - inserts := d.queue.Insert(headerPack.headers, from) - if len(inserts) != len(headerPack.headers) { - glog.V(logger.Debug).Infof("%v: stale headers", p) - return errBadPeer - } - // Notify the block fetcher of new headers, but stop if queue is full - if d.queue.Pending() < maxQueuedHeaders { - // We still have headers to fetch, send continuation wake signal (potential) - select { - case d.wakeCh <- true: - default: + if d.mode == FullSync || d.mode == FastSync { + inserts := d.queue.Schedule(headerPack.headers, from, d.mode == FastSync) + if len(inserts) != len(headerPack.headers) { + glog.V(logger.Debug).Infof("%v: stale headers", p) + return errBadPeer } } else { - // Header limit reached, send a termination wake signal (enforced) - select { - case d.wakeCh <- false: - case <-d.cancelCh: + if n, err := d.insertHeaders(headerPack.headers, true); err != nil { + glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headerPack.headers[n].Number, headerPack.headers[n].Hash().Bytes()[:4], err) + return errInvalidChain + } + } + // Notify the content fetchers of new headers, but stop if queue is full + cont := d.queue.PendingBlocks() < maxQueuedHeaders || d.queue.PendingReceipts() < maxQueuedHeaders + for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + if cont { + // We still have headers to fetch, send continuation wake signal (potential) + select { + case ch <- true: + default: + } + } else { + // Header limit reached, send a termination wake signal (enforced) + select { + case ch <- false: + case <-d.cancelCh: + } + return nil } - return nil } // Queue not yet full, fetch the next batch from += uint64(len(headerPack.headers)) @@ -1119,9 +1223,11 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { d.dropPeer(p.id) // Finish the sync gracefully instead of dumping the gathered data though - select { - case d.wakeCh <- false: - case <-d.cancelCh: + for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + select { + case ch <- false: + case <-d.cancelCh: + } } return nil } @@ -1133,22 +1239,69 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { // and also periodically checking 
for timeouts. func (d *Downloader) fetchBodies(from uint64) error { glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from) - defer glog.V(logger.Debug).Infof("Block body download terminated") - // Create a timeout timer for scheduling expiration tasks + var ( + deliver = func(packet interface{}) error { + pack := packet.(*bodyPack) + return d.queue.DeliverBlocks(pack.peerId, pack.transactions, pack.uncles) + } + expire = func() []string { return d.queue.ExpireBlocks(bodyHardTTL) } + fetch = func(p *peer, req *fetchRequest) error { return p.FetchBodies(req) } + capacity = func(p *peer) int { return p.BlockCapacity() } + getIdles = func() ([]*peer, int) { return d.peers.BlockIdlePeers(62) } + setIdle = func(p *peer) { p.SetBlocksIdle() } + ) + err := d.fetchParts(from, errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, + d.queue.PendingBlocks, d.queue.ThrottleBlocks, d.queue.ReserveBlocks, d.bodyFetchHook, + fetch, d.queue.CancelBlocks, capacity, getIdles, setIdle, "Body") + + glog.V(logger.Debug).Infof("Block body download terminated: %v", err) + return err +} + +// fetchReceipts iteratively downloads the scheduled block receipts, taking any +// available peers, reserving a chunk of receipts for each, waiting for delivery +// and also periodically checking for timeouts. +func (d *Downloader) fetchReceipts(from uint64) error { + glog.V(logger.Debug).Infof("Downloading receipts from #%d", from) + + var ( + deliver = func(packet interface{}) error { + pack := packet.(*receiptPack) + return d.queue.DeliverReceipts(pack.peerId, pack.receipts) + } + expire = func() []string { return d.queue.ExpireReceipts(bodyHardTTL) } + fetch = func(p *peer, req *fetchRequest) error { return p.FetchReceipts(req) } + capacity = func(p *peer) int { return p.ReceiptCapacity() } + setIdle = func(p *peer) { p.SetReceiptsIdle() } + ) + err := d.fetchParts(from, errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, + d.queue.PendingReceipts, d.queue.ThrottleReceipts, d.queue.ReserveReceipts, d.receiptFetchHook, + fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt") + + glog.V(logger.Debug).Infof("Receipt download terminated: %v", err) + return err +} + +// fetchParts iteratively downloads scheduled block parts, taking any available +// peers, reserving a chunk of fetch requests for each, waiting for delivery and +// also periodically checking for timeouts. 
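fetchBodies and fetchReceipts above have collapsed into thin wrappers that only differ in the closures they hand to fetchParts, which follows. A toy sketch of the same closure-driven pattern, independent of the real peer and queue types; every name below is made up for illustration.

package main

import (
	"errors"
	"fmt"
)

// fetchLoop is a toy version of the closure-driven fetcher pattern: the same
// loop body serves different "kinds" of data because everything data-specific
// is passed in as a function value.
func fetchLoop(kind string,
	pending func() int, // how many items still need fetching
	reserve func(max int) []int, // grab up to max work items
	fetch func(items []int) error, // perform the network request
	capacity func() int, // per-peer request size
) error {
	for pending() > 0 {
		batch := reserve(capacity())
		if len(batch) == 0 {
			return errors.New("no fetchable items despite pending work")
		}
		if err := fetch(batch); err != nil {
			return fmt.Errorf("%s fetch failed: %v", kind, err)
		}
		fmt.Printf("fetched %d %s item(s)\n", len(batch), kind)
	}
	return nil
}

func main() {
	// One "instantiation" of the loop, as fetchBodies or fetchReceipts would do.
	queue := []int{1, 2, 3, 4, 5}
	take := func(max int) []int {
		if max > len(queue) {
			max = len(queue)
		}
		batch := queue[:max]
		queue = queue[max:]
		return batch
	}
	_ = fetchLoop("body", func() int { return len(queue) }, take,
		func([]int) error { return nil }, func() int { return 2 })
}

The benefit is that the timeout, throttling and peer-demotion logic exists only once, and adding a new downloadable part only means supplying another set of closures.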
+func (d *Downloader) fetchParts(from uint64, errCancel error, deliveryCh chan dataPack, deliver func(packet interface{}) error, wakeCh chan bool, + expire func() []string, pending func() int, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error), fetchHook func([]*types.Header), + fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int, idle func() ([]*peer, int), setIdle func(*peer), kind string) error { + + // Create a ticker to detect expired retrieval tasks ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() update := make(chan struct{}, 1) - // Prepare the queue and fetch block bodies until the block header fetcher's done - d.queue.Prepare(from) + // Prepare the queue and fetch block parts until the block header fetcher's done finished := false - for { select { case <-d.cancelCh: - return errCancelBlockFetch + return errCancel case <-d.hashCh: // Out of bounds eth/61 hashes received, ignore them @@ -1156,42 +1309,41 @@ func (d *Downloader) fetchBodies(from uint64) error { case <-d.blockCh: // Out of bounds eth/61 blocks received, ignore them - case bodyPack := <-d.bodyCh: + case packet := <-deliveryCh: // If the peer was previously banned and failed to deliver its pack // in a reasonable time frame, ignore its message. - if peer := d.peers.Peer(bodyPack.peerId); peer != nil { - // Deliver the received chunk of bodies, and demote in case of errors - err := d.queue.Deliver(bodyPack.peerId, bodyPack.transactions, bodyPack.uncles) - switch err { + if peer := d.peers.Peer(packet.PeerId()); peer != nil { - // Deliver the received chunk of data, and demote in case of errors + switch err := deliver(packet); err { case nil: - // If no blocks were delivered, demote the peer (need the delivery above) - if len(bodyPack.transactions) == 0 || len(bodyPack.uncles) == 0 { + // If no blocks were delivered, demote the peer (need the delivery above to clean internal queue!) + if packet.Empty() { peer.Demote() - peer.SetIdle() - glog.V(logger.Detail).Infof("%s: no block bodies delivered", peer) + setIdle(peer) + glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind)) break } // All was successful, promote the peer and potentially start processing peer.Promote() - peer.SetIdle() - glog.V(logger.Detail).Infof("%s: delivered %d:%d block bodies", peer, len(bodyPack.transactions), len(bodyPack.uncles)) + setIdle(peer) + glog.V(logger.Detail).Infof("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind)) go d.process() case errInvalidChain: // The hash chain is invalid (blocks are not ordered properly), abort return err - case errInvalidBody: + case errInvalidBody, errInvalidReceipt: // The peer delivered something very bad, drop immediately - glog.V(logger.Error).Infof("%s: delivered invalid block, dropping", peer) + glog.V(logger.Error).Infof("%s: delivered invalid %s, dropping", peer, strings.ToLower(kind)) d.dropPeer(peer.id) case errNoFetchesPending: // Peer probably timed out with its delivery but came through // in the end, demote, but allow it to pull from this peer.
peer.Demote() - peer.SetIdle() - glog.V(logger.Detail).Infof("%s: out of bound delivery", peer) + setIdle(peer) + glog.V(logger.Detail).Infof("%s: out of bound %s delivery", peer, strings.ToLower(kind)) case errStaleDelivery: // Delivered something completely else than requested, usually @@ -1199,13 +1351,13 @@ func (d *Downloader) fetchBodies(from uint64) error { // Don't set it to idle as the original request should still be // in flight. peer.Demote() - glog.V(logger.Detail).Infof("%s: stale delivery", peer) + glog.V(logger.Detail).Infof("%s: %s stale delivery", peer, strings.ToLower(kind)) default: // Peer did something semi-useful, demote but keep it around peer.Demote() - peer.SetIdle() - glog.V(logger.Detail).Infof("%s: delivery partially failed: %v", peer, err) + setIdle(peer) + glog.V(logger.Detail).Infof("%s: %s delivery partially failed: %v", peer, strings.ToLower(kind), err) go d.process() } } @@ -1215,7 +1367,7 @@ func (d *Downloader) fetchBodies(from uint64) error { default: } - case cont := <-d.wakeCh: + case cont := <-wakeCh: // The header fetcher sent a continuation flag, check if it's done if !cont { finished = true @@ -1238,65 +1390,69 @@ func (d *Downloader) fetchBodies(from uint64) error { if d.peers.Len() == 0 { return errNoPeers } - // Check for block body request timeouts and demote the responsible peers - for _, pid := range d.queue.Expire(bodyHardTTL) { + // Check for fetch request timeouts and demote the responsible peers + for _, pid := range expire() { if peer := d.peers.Peer(pid); peer != nil { peer.Demote() - glog.V(logger.Detail).Infof("%s: block body delivery timeout", peer) + glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind)) } } - // If there's noting more to fetch, wait or terminate - if d.queue.Pending() == 0 { + // If there's nothing more to fetch, wait or terminate + if pending() == 0 { if d.queue.InFlight() == 0 && finished { - glog.V(logger.Debug).Infof("Block body fetching completed") + glog.V(logger.Debug).Infof("%s fetching completed", kind) return nil } break } // Send a download request to all idle peers, until throttled - queuedEmptyBlocks, throttled := false, false - for _, peer := range d.peers.IdlePeers(eth62) { + progressed, throttled := false, false + idles, total := idle() + + for _, peer := range idles { // Short circuit if throttling activated - if d.queue.Throttle() { + if throttle() { throttled = true break } - // Reserve a chunk of hashes for a peer. A nil can mean either that - // no more hashes are available, or that the peer is known not to + // Reserve a chunk of fetches for a peer. A nil can mean either that + // no more headers are available, or that the peer is known not to // have them. 
- request, process, err := d.queue.Reserve(peer, peer.Capacity()) + request, progress, err := reserve(peer, capacity(peer)) if err != nil { return err } - if process { - queuedEmptyBlocks = true + if progress { + progressed = true go d.process() } if request == nil { continue } if glog.V(logger.Detail) { - glog.Infof("%s: requesting %d block bodies", peer, len(request.Headers)) + glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number) } // Fetch the chunk and make sure any errors return the hashes to the queue - if d.bodyFetchHook != nil { - d.bodyFetchHook(request.Headers) + if fetchHook != nil { + fetchHook(request.Headers) } - if err := peer.Fetch(request); err != nil { - glog.V(logger.Error).Infof("%v: fetch failed, rescheduling", peer) - d.queue.Cancel(request) + if err := fetch(peer, request); err != nil { + glog.V(logger.Error).Infof("%v: %s fetch failed, rescheduling", peer, strings.ToLower(kind)) + cancel(request) } } // Make sure that we have peers available for fetching. If all peers have been tried // and all failed throw an error - if !queuedEmptyBlocks && !throttled && d.queue.InFlight() == 0 { + if !progressed && !throttled && d.queue.InFlight() == 0 && len(idles) == total { return errPeersUnavailable } } } } -// process takes blocks from the queue and tries to import them into the chain. +// process takes fetch results from the queue and tries to import them into the +// chain. The type of import operation will depend on the result contents: +// - // // The algorithmic flow is as follows: // - The `processing` flag is swapped to 1 to ensure singleton access @@ -1317,10 +1473,10 @@ func (d *Downloader) process() { } // If the processor just exited, but there are freshly pending items, try to // reenter. This is needed because the goroutine spinned up for processing - // the fresh blocks might have been rejected entry to to this present thread + // the fresh results might have been rejected entry to to this present thread // not yet releasing the `processing` state. defer func() { - if atomic.LoadInt32(&d.interrupt) == 0 && d.queue.GetHeadBlock() != nil { + if atomic.LoadInt32(&d.interrupt) == 0 && d.queue.GetHeadResult() != nil { d.process() } }() @@ -1328,38 +1484,64 @@ func (d *Downloader) process() { // the import statistics to zero. 
defer atomic.StoreInt32(&d.processing, 0) - // Repeat the processing as long as there are blocks to import + // Repeat the processing as long as there are results to process for { - // Fetch the next batch of blocks - blocks := d.queue.TakeBlocks() - if len(blocks) == 0 { + // Fetch the next batch of results + results := d.queue.TakeResults() + if len(results) == 0 { return } if d.chainInsertHook != nil { - d.chainInsertHook(blocks) + d.chainInsertHook(results) } // Actually import the blocks - glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].RawBlock.Number(), blocks[len(blocks)-1].RawBlock.Number()) - for len(blocks) != 0 { + if glog.V(logger.Debug) { + first, last := results[0].Header, results[len(results)-1].Header + glog.V(logger.Debug).Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4]) + } + for len(results) != 0 { // Check for any termination requests if atomic.LoadInt32(&d.interrupt) == 1 { return } - // Retrieve the first batch of blocks to insert - max := int(math.Min(float64(len(blocks)), float64(maxBlockProcess))) - raw := make(types.Blocks, 0, max) - for _, block := range blocks[:max] { - raw = append(raw, block.RawBlock) + // Retrieve the a batch of results to import + var ( + headers = make([]*types.Header, 0, maxResultsProcess) + blocks = make([]*types.Block, 0, maxResultsProcess) + receipts = make([]types.Receipts, 0, maxResultsProcess) + ) + items := int(math.Min(float64(len(results)), float64(maxResultsProcess))) + for _, result := range results[:items] { + switch { + case d.mode == FullSync: + blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)) + case d.mode == FastSync: + blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)) + receipts = append(receipts, result.Receipts) + case d.mode == LightSync: + headers = append(headers, result.Header) + } + } + // Try to process the results, aborting if there's an error + var ( + err error + index int + ) + switch { + case d.mode == FullSync: + index, err = d.insertBlocks(blocks) + case d.mode == FastSync: + index, err = d.insertReceipts(blocks, receipts) + case d.mode == LightSync: + index, err = d.insertHeaders(headers, true) } - // Try to inset the blocks, drop the originating peer if there's an error - index, err := d.insertChain(raw) if err != nil { - glog.V(logger.Debug).Infof("Block #%d import failed: %v", raw[index].NumberU64(), err) - d.dropPeer(blocks[index].OriginPeer) + glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash(), err) d.cancel() return } - blocks = blocks[max:] + // Shift the results to the next batch + results = results[items:] } } } @@ -1468,7 +1650,34 @@ func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transactio d.cancelLock.RUnlock() select { - case d.bodyCh <- bodyPack{id, transactions, uncles}: + case d.bodyCh <- &bodyPack{id, transactions, uncles}: + return nil + + case <-cancel: + return errNoSyncActive + } +} + +// DeliverReceipts injects a new batch of receipts received from a remote node. 
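The reworked processing loop above turns each batch of fetch results into a different kind of import depending on the sync mode: full sync assembles and executes whole blocks, fast sync also writes the downloaded receipts, and light sync imports headers only. A compact sketch of that dispatch with placeholder types; result and the three insert callbacks are hypothetical, not the queue's fetchResult API.

package main

import "fmt"

type SyncMode int

const (
	FullSync SyncMode = iota
	FastSync
	LightSync
)

// result is a stand-in for a downloaded fetch result: a header plus the
// optional body and receipts that belong to it.
type result struct {
	Header   string
	Body     string
	Receipts string
}

// importBatch routes one batch of results to the right insertion callback,
// mirroring the mode switch in the processing loop above.
func importBatch(mode SyncMode, batch []result,
	insertHeaders func(headers []string) error,
	insertBlocks func(headers, bodies []string) error,
	insertReceipts func(headers, bodies, receipts []string) error) error {

	var headers, bodies, receipts []string
	for _, r := range batch {
		headers = append(headers, r.Header)
		bodies = append(bodies, r.Body)
		receipts = append(receipts, r.Receipts)
	}
	switch mode {
	case FullSync:
		return insertBlocks(headers, bodies)
	case FastSync:
		return insertReceipts(headers, bodies, receipts)
	default: // LightSync
		return insertHeaders(headers)
	}
}

func main() {
	batch := []result{{"h1", "b1", "r1"}, {"h2", "b2", "r2"}}
	err := importBatch(FastSync, batch,
		func(h []string) error { return nil },
		func(h, b []string) error { return nil },
		func(h, b, r []string) error { fmt.Println("fast-importing", len(h), "blocks with receipts"); return nil })
	fmt.Println("err:", err)
}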
+func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) { + // Update the delivery metrics for both good and failed deliveries + receiptInMeter.Mark(int64(len(receipts))) + defer func() { + if err != nil { + receiptDropMeter.Mark(int64(len(receipts))) + } + }() + // Make sure the downloader is active + if atomic.LoadInt32(&d.synchronising) == 0 { + return errNoSyncActive + } + // Deliver or abort if the sync is canceled while queuing + d.cancelLock.RLock() + cancel := d.cancelCh + d.cancelLock.RUnlock() + + select { + case d.receiptCh <- &receiptPack{id, receipts}: return nil case <-cancel: diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 96096527e..18bdb56dd 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -45,7 +45,8 @@ var ( // the returned hash chain is ordered head->parent. In addition, every 3rd block // contains a transaction and every 5th an uncle to allow testing correct block // reassembly. -func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) { +func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block) { + // Generate the block chain blocks := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) { block.SetCoinbase(common.Address{seed}) @@ -62,59 +63,80 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))}) } }) + // Convert the block-chain into a hash-chain and header/block maps hashes := make([]common.Hash, n+1) hashes[len(hashes)-1] = parent.Hash() + + headerm := make(map[common.Hash]*types.Header, n+1) + headerm[parent.Hash()] = parent.Header() + blockm := make(map[common.Hash]*types.Block, n+1) blockm[parent.Hash()] = parent + for i, b := range blocks { hashes[len(hashes)-i-2] = b.Hash() + headerm[b.Hash()] = b.Header() blockm[b.Hash()] = b } - return hashes, blockm + return hashes, headerm, blockm } // makeChainFork creates two chains of length n, such that h1[:f] and // h2[:f] are different but have a common suffix of length n-f. -func makeChainFork(n, f int, parent *types.Block) (h1, h2 []common.Hash, b1, b2 map[common.Hash]*types.Block) { - // Create the common suffix. - h, b := makeChain(n-f, 0, parent) - // Create the forks. - h1, b1 = makeChain(f, 1, b[h[0]]) - h1 = append(h1, h[1:]...) - h2, b2 = makeChain(f, 2, b[h[0]]) - h2 = append(h2, h[1:]...) - for hash, block := range b { - b1[hash] = block - b2[hash] = block - } - return h1, h2, b1, b2 +func makeChainFork(n, f int, parent *types.Block) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block) { + // Create the common suffix + hashes, headers, blocks := makeChain(n-f, 0, parent) + + // Create the forks + hashes1, headers1, blocks1 := makeChain(f, 1, blocks[hashes[0]]) + hashes1 = append(hashes1, hashes[1:]...) + + hashes2, headers2, blocks2 := makeChain(f, 2, blocks[hashes[0]]) + hashes2 = append(hashes2, hashes[1:]...) 
+ + for hash, header := range headers { + headers1[hash] = header + headers2[hash] = header + } + for hash, block := range blocks { + blocks1[hash] = block + blocks2[hash] = block + } + return hashes1, hashes2, headers1, headers2, blocks1, blocks2 } // downloadTester is a test simulator for mocking out local block chain. type downloadTester struct { downloader *Downloader - ownHashes []common.Hash // Hash chain belonging to the tester - ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester - ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain - peerHashes map[string][]common.Hash // Hash chain belonging to different test peers - peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers - peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains + ownHashes []common.Hash // Hash chain belonging to the tester + ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester + ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester + ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester + ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain + peerHashes map[string][]common.Hash // Hash chain belonging to different test peers + peerHeaders map[string]map[common.Hash]*types.Header // Headers belonging to different test peers + peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers + peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains lock sync.RWMutex } // newTester creates a new downloader test mocker. -func newTester() *downloadTester { +func newTester(mode SyncMode) *downloadTester { tester := &downloadTester{ ownHashes: []common.Hash{genesis.Hash()}, + ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()}, ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis}, + ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): genesis.Receipts()}, ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()}, peerHashes: make(map[string][]common.Hash), + peerHeaders: make(map[string]map[common.Hash]*types.Header), peerBlocks: make(map[string]map[common.Hash]*types.Block), peerChainTds: make(map[string]map[common.Hash]*big.Int), } - tester.downloader = New(new(event.TypeMux), tester.hasBlock, tester.getBlock, tester.headBlock, tester.getTd, tester.insertChain, tester.dropPeer) + tester.downloader = New(mode, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, tester.getBlock, + tester.headHeader, tester.headBlock, tester.getTd, tester.insertHeaders, tester.insertBlocks, tester.insertConfirmedBlocks, tester.dropPeer) return tester } @@ -135,8 +157,7 @@ func (dl *downloadTester) sync(id string, td *big.Int) error { err := dl.downloader.synchronise(id, hash, td) for { // If the queue is empty and processing stopped, break - hashes, blocks := dl.downloader.queue.Size() - if hashes+blocks == 0 && atomic.LoadInt32(&dl.downloader.processing) == 0 { + if dl.downloader.queue.Idle() && atomic.LoadInt32(&dl.downloader.processing) == 0 { break } // Otherwise sleep a bit and retry @@ -145,12 +166,22 @@ func (dl *downloadTester) sync(id string, td *big.Int) error { return err } -// hasBlock checks if a block is pres ent in the testers canonical chain. +// hasHeader checks if a header is present in the testers canonical chain. 
+func (dl *downloadTester) hasHeader(hash common.Hash) bool { + return dl.getHeader(hash) != nil +} + +// hasBlock checks if a block is present in the testers canonical chain. func (dl *downloadTester) hasBlock(hash common.Hash) bool { + return dl.getBlock(hash) != nil +} + +// getHeader retrieves a header from the testers canonical chain. +func (dl *downloadTester) getHeader(hash common.Hash) *types.Header { dl.lock.RLock() defer dl.lock.RUnlock() - return dl.getBlock(hash) != nil + return dl.ownHeaders[hash] } // getBlock retrieves a block from the testers canonical chain. @@ -161,12 +192,25 @@ func (dl *downloadTester) getBlock(hash common.Hash) *types.Block { return dl.ownBlocks[hash] } +// headHeader retrieves the current head header from the canonical chain. +func (dl *downloadTester) headHeader() *types.Header { + dl.lock.RLock() + defer dl.lock.RUnlock() + + return dl.getHeader(dl.ownHashes[len(dl.ownHashes)-1]) +} + // headBlock retrieves the current head block from the canonical chain. func (dl *downloadTester) headBlock() *types.Block { dl.lock.RLock() defer dl.lock.RUnlock() - return dl.getBlock(dl.ownHashes[len(dl.ownHashes)-1]) + for i := len(dl.ownHashes) - 1; i >= 0; i-- { + if block := dl.getBlock(dl.ownHashes[i]); block != nil { + return block + } + } + return nil } // getTd retrieves the block's total difficulty from the canonical chain. @@ -177,8 +221,24 @@ func (dl *downloadTester) getTd(hash common.Hash) *big.Int { return dl.ownChainTd[hash] } -// insertChain injects a new batch of blocks into the simulated chain. -func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) { +// insertHeaders injects a new batch of headers into the simulated chain. +func (dl *downloadTester) insertHeaders(headers []*types.Header, verify bool) (int, error) { + dl.lock.Lock() + defer dl.lock.Unlock() + + for i, header := range headers { + if _, ok := dl.ownHeaders[header.ParentHash]; !ok { + return i, errors.New("unknown parent") + } + dl.ownHashes = append(dl.ownHashes, header.Hash()) + dl.ownHeaders[header.Hash()] = header + dl.ownChainTd[header.Hash()] = dl.ownChainTd[header.ParentHash] + } + return len(headers), nil +} + +// insertBlocks injects a new batch of blocks into the simulated chain. +func (dl *downloadTester) insertBlocks(blocks types.Blocks) (int, error) { dl.lock.Lock() defer dl.lock.Unlock() @@ -187,47 +247,74 @@ func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) { return i, errors.New("unknown parent") } dl.ownHashes = append(dl.ownHashes, block.Hash()) + dl.ownHeaders[block.Hash()] = block.Header() dl.ownBlocks[block.Hash()] = block dl.ownChainTd[block.Hash()] = dl.ownChainTd[block.ParentHash()] } return len(blocks), nil } +// insertBlocks injects a new batch of blocks into the simulated chain. +func (dl *downloadTester) insertConfirmedBlocks(blocks types.Blocks, receipts []types.Receipts) (int, error) { + dl.lock.Lock() + defer dl.lock.Unlock() + + for i := 0; i < len(blocks) && i < len(receipts); i++ { + if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok { + return i, errors.New("unknown parent") + } + dl.ownHashes = append(dl.ownHashes, blocks[i].Hash()) + dl.ownHeaders[blocks[i].Hash()] = blocks[i].Header() + dl.ownBlocks[blocks[i].Hash()] = blocks[i] + dl.ownReceipts[blocks[i].Hash()] = blocks[i].Receipts() + dl.ownChainTd[blocks[i].Hash()] = dl.ownChainTd[blocks[i].ParentHash()] + } + return len(blocks), nil +} + // newPeer registers a new block download source into the downloader. 
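The tester's insertHeaders, insertBlocks and insertConfirmedBlocks above all follow the same bookkeeping: refuse anything whose parent is unknown, then record the item and extend the parent's total difficulty. A small self-contained example of that TD accumulation over hash-keyed maps, with simplified types rather than the downloadTester itself.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// header is a stand-in with just enough fields for TD bookkeeping.
type header struct {
	Hash, Parent string
	Difficulty   *big.Int
}

// chain mimics the tester's ownHeaders/ownChainTd maps.
type chain struct {
	headers map[string]*header
	tds     map[string]*big.Int
}

// insert appends headers, refusing any whose parent is unknown, and keeps the
// cumulative total difficulty per hash, like insertHeaders in the test mock.
func (c *chain) insert(headers []*header) (int, error) {
	for i, h := range headers {
		parentTd, ok := c.tds[h.Parent]
		if !ok {
			return i, errors.New("unknown parent")
		}
		c.headers[h.Hash] = h
		c.tds[h.Hash] = new(big.Int).Add(parentTd, h.Difficulty)
	}
	return len(headers), nil
}

func main() {
	c := &chain{
		headers: map[string]*header{"genesis": {Hash: "genesis", Difficulty: big.NewInt(1)}},
		tds:     map[string]*big.Int{"genesis": big.NewInt(1)},
	}
	n, err := c.insert([]*header{
		{Hash: "a", Parent: "genesis", Difficulty: big.NewInt(2)},
		{Hash: "b", Parent: "a", Difficulty: big.NewInt(2)},
	})
	fmt.Println(n, err, "TD(b) =", c.tds["b"]) // 2 <nil> TD(b) = 5
}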
-func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, blocks map[common.Hash]*types.Block) error { - return dl.newSlowPeer(id, version, hashes, blocks, 0) +func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block) error { + return dl.newSlowPeer(id, version, hashes, headers, blocks, 0) } // newSlowPeer registers a new block download source into the downloader, with a // specific delay time on processing the network packets sent to it, simulating // potentially slow network IO. -func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, blocks map[common.Hash]*types.Block, delay time.Duration) error { +func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, delay time.Duration) error { dl.lock.Lock() defer dl.lock.Unlock() var err error switch version { case 61: - err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay), nil, nil, nil) + err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay), nil, nil, nil, nil) case 62: - err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay)) + err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil) case 63: - err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay)) + err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay)) case 64: - err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay)) + err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay)) } if err == nil { - // Assign the owned hashes and blocks to the peer (deep copy) + // Assign the owned hashes, headers and blocks to the peer (deep copy) dl.peerHashes[id] = make([]common.Hash, len(hashes)) copy(dl.peerHashes[id], hashes) + dl.peerHeaders[id] = make(map[common.Hash]*types.Header) dl.peerBlocks[id] = make(map[common.Hash]*types.Block) dl.peerChainTds[id] = make(map[common.Hash]*big.Int) + for _, hash := range hashes { + if header, ok := headers[hash]; ok { + dl.peerHeaders[id][hash] = header + if _, ok := dl.peerHeaders[id][header.ParentHash]; ok { + dl.peerChainTds[id][hash] = new(big.Int).Add(header.Difficulty, dl.peerChainTds[id][header.ParentHash]) + } + } if block, ok := blocks[hash]; ok { dl.peerBlocks[id][hash] = block - if parent, ok := dl.peerBlocks[id][block.ParentHash()]; ok { - dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][parent.Hash()]) + if _, ok := dl.peerBlocks[id][block.ParentHash()]; ok { + dl.peerChainTds[id][hash] = 
new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()]) } } } @@ -241,6 +328,7 @@ func (dl *downloadTester) dropPeer(id string) { defer dl.lock.Unlock() delete(dl.peerHashes, id) + delete(dl.peerHeaders, id) delete(dl.peerBlocks, id) delete(dl.peerChainTds, id) @@ -358,13 +446,13 @@ func (dl *downloadTester) peerGetAbsHeadersFn(id string, delay time.Duration) fu dl.lock.RLock() defer dl.lock.RUnlock() - // Gather the next batch of hashes + // Gather the next batch of headers hashes := dl.peerHashes[id] - blocks := dl.peerBlocks[id] + headers := dl.peerHeaders[id] result := make([]*types.Header, 0, amount) for i := 0; i < amount && len(hashes)-int(origin)-1-i >= 0; i++ { - if block, ok := blocks[hashes[len(hashes)-int(origin)-1-i]]; ok { - result = append(result, block.Header()) + if header, ok := headers[hashes[len(hashes)-int(origin)-1-i]]; ok { + result = append(result, header) } } // Delay delivery a bit to allow attacks to unfold @@ -403,50 +491,99 @@ func (dl *downloadTester) peerGetBodiesFn(id string, delay time.Duration) func([ } } +// peerGetReceiptsFn constructs a getReceipts method associated with a particular +// peer in the download tester. The returned function can be used to retrieve +// batches of block receipts from the particularly requested peer. +func (dl *downloadTester) peerGetReceiptsFn(id string, delay time.Duration) func([]common.Hash) error { + return func(hashes []common.Hash) error { + time.Sleep(delay) + + dl.lock.RLock() + defer dl.lock.RUnlock() + + blocks := dl.peerBlocks[id] + + receipts := make([][]*types.Receipt, 0, len(hashes)) + for _, hash := range hashes { + if block, ok := blocks[hash]; ok { + receipts = append(receipts, block.Receipts()) + } + } + go dl.downloader.DeliverReceipts(id, receipts) + + return nil + } +} + +// assertOwnChain checks if the local chain contains the correct number of items +// of the various chain components. +func assertOwnChain(t *testing.T, tester *downloadTester, length int) { + headers, blocks, receipts := length, length, length + switch tester.downloader.mode { + case FullSync: + receipts = 1 + case LightSync: + blocks, receipts = 1, 1 + } + + if hs := len(tester.ownHeaders); hs != headers { + t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) + } + if bs := len(tester.ownBlocks); bs != blocks { + t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks) + } + if rs := len(tester.ownReceipts); rs != receipts { + t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts) + } +} + // Tests that simple synchronization against a canonical chain works correctly. // In this test common ancestor lookup should be short circuited and not require // binary searching. 
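assertOwnChain above encodes what each sync mode is expected to leave in the local chain: full sync keeps headers and blocks but only the pre-existing genesis receipt entry, fast sync keeps all three, and light sync keeps only headers. The same expectation expressed as a small pure function, convenient when reasoning about the test matrix; names are illustrative.

package main

import "fmt"

type SyncMode int

const (
	FullSync SyncMode = iota
	FastSync
	LightSync
)

// expectedCounts returns how many headers, blocks and receipts a chain of the
// given length should hold after syncing in a particular mode, mirroring the
// defaults in assertOwnChain (the "1" entries are just the genesis records).
func expectedCounts(mode SyncMode, length int) (headers, blocks, receipts int) {
	headers, blocks, receipts = length, length, length
	switch mode {
	case FullSync:
		receipts = 1
	case LightSync:
		blocks, receipts = 1, 1
	}
	return
}

func main() {
	for _, mode := range []SyncMode{FullSync, FastSync, LightSync} {
		h, b, r := expectedCounts(mode, 100)
		fmt.Printf("mode %d: headers=%d blocks=%d receipts=%d\n", mode, h, b, r)
	}
}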
-func TestCanonicalSynchronisation61(t *testing.T) { testCanonicalSynchronisation(t, 61) } -func TestCanonicalSynchronisation62(t *testing.T) { testCanonicalSynchronisation(t, 62) } -func TestCanonicalSynchronisation63(t *testing.T) { testCanonicalSynchronisation(t, 63) } -func TestCanonicalSynchronisation64(t *testing.T) { testCanonicalSynchronisation(t, 64) } - -func testCanonicalSynchronisation(t *testing.T, protocol int) { +func TestCanonicalSynchronisation61(t *testing.T) { testCanonicalSynchronisation(t, 61, FullSync) } +func TestCanonicalSynchronisation62(t *testing.T) { testCanonicalSynchronisation(t, 62, FullSync) } +func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) } +func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) } +func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) } +func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) } +func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) } + +func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) - tester := newTester() - tester.newPeer("peer", protocol, hashes, blocks) + tester := newTester(mode) + tester.newPeer("peer", protocol, hashes, headers, blocks) - // Synchronise with the peer and make sure all blocks were retrieved + // Synchronise with the peer and make sure all relevant data was retrieved if err := tester.sync("peer", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if imported := len(tester.ownBlocks); imported != targetBlocks+1 { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1) - } + assertOwnChain(t, tester, targetBlocks+1) } // Tests that if a large batch of blocks are being downloaded, it is throttled // until the cached blocks are retrieved. 
-func TestThrottling61(t *testing.T) { testThrottling(t, 61) } -func TestThrottling62(t *testing.T) { testThrottling(t, 62) } -func TestThrottling63(t *testing.T) { testThrottling(t, 63) } -func TestThrottling64(t *testing.T) { testThrottling(t, 64) } - -func testThrottling(t *testing.T, protocol int) { +func TestThrottling61(t *testing.T) { testThrottling(t, 61, FullSync) } +func TestThrottling62(t *testing.T) { testThrottling(t, 62, FullSync) } +func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) } +func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) } +func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) } +func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) } + +func testThrottling(t *testing.T, protocol int, mode SyncMode) { // Create a long block chain to download and the tester targetBlocks := 8 * blockCacheLimit - hashes, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) - tester := newTester() - tester.newPeer("peer", protocol, hashes, blocks) + tester := newTester(mode) + tester.newPeer("peer", protocol, hashes, headers, blocks) // Wrap the importer to allow stepping blocked, proceed := uint32(0), make(chan struct{}) - tester.downloader.chainInsertHook = func(blocks []*Block) { - atomic.StoreUint32(&blocked, uint32(len(blocks))) + tester.downloader.chainInsertHook = func(results []*fetchResult) { + atomic.StoreUint32(&blocked, uint32(len(results))) <-proceed } // Start a synchronisation concurrently @@ -469,7 +606,12 @@ func testThrottling(t *testing.T, protocol int) { time.Sleep(25 * time.Millisecond) tester.downloader.queue.lock.RLock() - cached = len(tester.downloader.queue.blockPool) + cached = len(tester.downloader.queue.blockDonePool) + if mode == FastSync { + if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached { + cached = receipts + } + } tester.downloader.queue.lock.RUnlock() if cached == blockCacheLimit || len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) == targetBlocks+1 { @@ -488,9 +630,7 @@ func testThrottling(t *testing.T, protocol int) { } } // Check that we haven't pulled more blocks than available - if len(tester.ownBlocks) > targetBlocks+1 { - t.Fatalf("target block count mismatch: have %v, want %v", len(tester.ownBlocks), targetBlocks+1) - } + assertOwnChain(t, tester, targetBlocks+1) if err := <-errc; err != nil { t.Fatalf("block synchronization failed: %v", err) } @@ -499,39 +639,39 @@ func testThrottling(t *testing.T, protocol int) { // Tests that simple synchronization against a forked chain works correctly. In // this test common ancestor lookup should *not* be short circuited, and a full // binary search should be executed. 
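testThrottling above works by installing a chainInsertHook that records how many results are waiting and then parks the import goroutine on a channel, letting the test inspect the backlog before releasing it. A stripped-down sketch of that hook-and-release pattern with hypothetical names, outside the downloader.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var blocked uint32             // how many items the importer is currently holding
	proceed := make(chan struct{}) // poked by the test to let the import continue

	// importHook is what the test installs instead of the real import path.
	importHook := func(batch []int) {
		atomic.StoreUint32(&blocked, uint32(len(batch)))
		<-proceed // park until the test decides to step forward
	}

	// Simulated importer goroutine delivering one throttled batch.
	done := make(chan struct{})
	go func() {
		importHook([]int{1, 2, 3})
		close(done)
	}()

	// The test side: observe the backlog, then release the importer.
	for atomic.LoadUint32(&blocked) == 0 {
		time.Sleep(time.Millisecond)
	}
	fmt.Println("importer blocked with", atomic.LoadUint32(&blocked), "items")
	proceed <- struct{}{}
	<-done
}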
-func TestForkedSynchronisation61(t *testing.T) { testForkedSynchronisation(t, 61) } -func TestForkedSynchronisation62(t *testing.T) { testForkedSynchronisation(t, 62) } -func TestForkedSynchronisation63(t *testing.T) { testForkedSynchronisation(t, 63) } -func TestForkedSynchronisation64(t *testing.T) { testForkedSynchronisation(t, 64) } - -func testForkedSynchronisation(t *testing.T, protocol int) { +func TestForkedSynchronisation61(t *testing.T) { testForkedSynchronisation(t, 61, FullSync) } +func TestForkedSynchronisation62(t *testing.T) { testForkedSynchronisation(t, 62, FullSync) } +func TestForkedSynchronisation63Full(t *testing.T) { testForkedSynchronisation(t, 63, FullSync) } +func TestForkedSynchronisation63Fast(t *testing.T) { testForkedSynchronisation(t, 63, FastSync) } +func TestForkedSynchronisation64Full(t *testing.T) { testForkedSynchronisation(t, 64, FullSync) } +func TestForkedSynchronisation64Fast(t *testing.T) { testForkedSynchronisation(t, 64, FastSync) } +func TestForkedSynchronisation64Light(t *testing.T) { testForkedSynchronisation(t, 64, LightSync) } + +func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) { // Create a long enough forked chain common, fork := MaxHashFetch, 2*MaxHashFetch - hashesA, hashesB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis) + hashesA, hashesB, headersA, headersB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis) - tester := newTester() - tester.newPeer("fork A", protocol, hashesA, blocksA) - tester.newPeer("fork B", protocol, hashesB, blocksB) + tester := newTester(mode) + tester.newPeer("fork A", protocol, hashesA, headersA, blocksA) + tester.newPeer("fork B", protocol, hashesB, headersB, blocksB) // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("fork A", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if imported := len(tester.ownBlocks); imported != common+fork+1 { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, common+fork+1) - } + assertOwnChain(t, tester, common+fork+1) + // Synchronise with the second peer and make sure that fork is pulled too if err := tester.sync("fork B", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if imported := len(tester.ownBlocks); imported != common+2*fork+1 { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, common+2*fork+1) - } + assertOwnChain(t, tester, common+2*fork+1) } // Tests that an inactive downloader will not accept incoming hashes and blocks. func TestInactiveDownloader61(t *testing.T) { - tester := newTester() + tester := newTester(FullSync) // Check that neither hashes nor blocks are accepted if err := tester.downloader.DeliverHashes61("bad peer", []common.Hash{}); err != errNoSyncActive { @@ -542,9 +682,10 @@ func TestInactiveDownloader61(t *testing.T) { } } -// Tests that an inactive downloader will not accept incoming block headers and bodies. +// Tests that an inactive downloader will not accept incoming block headers and +// bodies. func TestInactiveDownloader62(t *testing.T) { - tester := newTester() + tester := newTester(FullSync) // Check that neither block headers nor bodies are accepted if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { @@ -555,13 +696,33 @@ func TestInactiveDownloader62(t *testing.T) { } } -// Tests that a canceled download wipes all previously accumulated state. 
-func TestCancel61(t *testing.T) { testCancel(t, 61) } -func TestCancel62(t *testing.T) { testCancel(t, 62) } -func TestCancel63(t *testing.T) { testCancel(t, 63) } -func TestCancel64(t *testing.T) { testCancel(t, 64) } +// Tests that an inactive downloader will not accept incoming block headers, +// bodies and receipts. +func TestInactiveDownloader63(t *testing.T) { + tester := newTester(FullSync) + + // Check that neither block headers nor bodies are accepted + if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { + t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) + } + if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive { + t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) + } + if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive { + t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) + } +} -func testCancel(t *testing.T, protocol int) { +// Tests that a canceled download wipes all previously accumulated state. +func TestCancel61(t *testing.T) { testCancel(t, 61, FullSync) } +func TestCancel62(t *testing.T) { testCancel(t, 62, FullSync) } +func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) } +func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) } +func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) } +func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) } +func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) } + +func testCancel(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download and the tester targetBlocks := blockCacheLimit - 15 if targetBlocks >= MaxHashFetch { @@ -570,80 +731,81 @@ func testCancel(t *testing.T, protocol int) { if targetBlocks >= MaxHeaderFetch { targetBlocks = MaxHeaderFetch - 15 } - hashes, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) - tester := newTester() - tester.newPeer("peer", protocol, hashes, blocks) + tester := newTester(mode) + tester.newPeer("peer", protocol, hashes, headers, blocks) // Make sure canceling works with a pristine downloader tester.downloader.cancel() - downloading, importing := tester.downloader.queue.Size() - if downloading > 0 || importing > 0 { - t.Errorf("download or import count mismatch: %d downloading, %d importing, want 0", downloading, importing) + if !tester.downloader.queue.Idle() { + t.Errorf("download queue not idle") } // Synchronise with the peer, but cancel afterwards if err := tester.sync("peer", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } tester.downloader.cancel() - downloading, importing = tester.downloader.queue.Size() - if downloading > 0 || importing > 0 { - t.Errorf("download or import count mismatch: %d downloading, %d importing, want 0", downloading, importing) + if !tester.downloader.queue.Idle() { + t.Errorf("download queue not idle") } } // Tests that synchronisation from multiple peers works as intended (multi thread sanity test). 
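The three delivery checks in TestInactiveDownloader63 can also be read table-driven; the variant below is not part of the patch, merely a compact restatement of the same requirement: every delivery made while no synchronisation is in flight must fail with errNoSyncActive.

// TestInactiveDownloader63Compact is an equivalent, table-driven phrasing of
// the test above; it exercises exactly the same three delivery entry points.
func TestInactiveDownloader63Compact(t *testing.T) {
	tester := newTester(FullSync)

	deliveries := map[string]error{
		"headers":  tester.downloader.DeliverHeaders("bad peer", []*types.Header{}),
		"bodies":   tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}),
		"receipts": tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}),
	}
	for kind, err := range deliveries {
		if err != errNoSyncActive {
			t.Errorf("%s error mismatch: have %v, want %v", kind, err, errNoSyncActive)
		}
	}
}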
-func TestMultiSynchronisation61(t *testing.T) { testMultiSynchronisation(t, 61) } -func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62) } -func TestMultiSynchronisation63(t *testing.T) { testMultiSynchronisation(t, 63) } -func TestMultiSynchronisation64(t *testing.T) { testMultiSynchronisation(t, 64) } - -func testMultiSynchronisation(t *testing.T, protocol int) { +func TestMultiSynchronisation61(t *testing.T) { testMultiSynchronisation(t, 61, FullSync) } +func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62, FullSync) } +func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) } +func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) } +func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) } +func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) } +func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) } + +func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) { // Create various peers with various parts of the chain targetPeers := 8 targetBlocks := targetPeers*blockCacheLimit - 15 - hashes, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) - tester := newTester() + tester := newTester(mode) for i := 0; i < targetPeers; i++ { id := fmt.Sprintf("peer #%d", i) - tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], blocks) + tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks) } // Synchronise with the middle peer and make sure half of the blocks were retrieved id := fmt.Sprintf("peer #%d", targetPeers/2) if err := tester.sync(id, nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if imported := len(tester.ownBlocks); imported != len(tester.peerHashes[id]) { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, len(tester.peerHashes[id])) - } + assertOwnChain(t, tester, len(tester.peerHashes[id])) + // Synchronise with the best peer and make sure everything is retrieved if err := tester.sync("peer #0", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if imported := len(tester.ownBlocks); imported != targetBlocks+1 { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1) - } + assertOwnChain(t, tester, targetBlocks+1) } // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havok on other nodes in the network. 
-func TestMultiProtocolSynchronisation61(t *testing.T) { testMultiProtocolSynchronisation(t, 61) } -func TestMultiProtocolSynchronisation62(t *testing.T) { testMultiProtocolSynchronisation(t, 62) } -func TestMultiProtocolSynchronisation63(t *testing.T) { testMultiProtocolSynchronisation(t, 63) } -func TestMultiProtocolSynchronisation64(t *testing.T) { testMultiProtocolSynchronisation(t, 64) } - -func testMultiProtocolSynchronisation(t *testing.T, protocol int) { +func TestMultiProtoSynchronisation61(t *testing.T) { testMultiProtoSync(t, 61, FullSync) } +func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) } +func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) } +func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) } +func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) } +func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) } +func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) } + +func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) // Create peers of every type - tester := newTester() - tester.newPeer("peer 61", 61, hashes, blocks) - tester.newPeer("peer 62", 62, hashes, blocks) - tester.newPeer("peer 63", 63, hashes, blocks) - tester.newPeer("peer 64", 64, hashes, blocks) + tester := newTester(mode) + tester.newPeer("peer 61", 61, hashes, headers, blocks) + tester.newPeer("peer 62", 62, hashes, headers, blocks) + tester.newPeer("peer 63", 63, hashes, headers, blocks) + tester.newPeer("peer 64", 64, hashes, headers, blocks) // Synchronise with the requestd peer and make sure all blocks were retrieved if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil); err != nil { @@ -661,150 +823,181 @@ func testMultiProtocolSynchronisation(t *testing.T, protocol int) { } } -// Tests that if a block is empty (i.e. header only), no body request should be +// Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. 
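This test leans on the version matching done when idle peers are selected (see BlockIdlePeers further down): an eth/61 sync only hands work to eth/61 peers, while an eth/62+ sync can use any eth/62+ peer, so nodes speaking the other protocol family are simply left alone rather than dropped. The predicate, restated on its own:

// compatible restates the capability check used when picking idle peers: the
// hash/block based eth/61 sync cannot mix with the header/body protocols,
// whereas eth/62 and eth/63+ peers are interchangeable for header driven syncs.
func compatible(syncVersion, peerVersion int) bool {
	return (syncVersion == 61 && peerVersion == 61) ||
		(syncVersion >= 62 && peerVersion >= 62)
}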
-func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) } -func TestEmptyBlockShortCircuit63(t *testing.T) { testEmptyBlockShortCircuit(t, 63) } -func TestEmptyBlockShortCircuit64(t *testing.T) { testEmptyBlockShortCircuit(t, 64) } - -func testEmptyBlockShortCircuit(t *testing.T, protocol int) { +func TestEmptyShortCircuit62(t *testing.T) { testEmptyShortCircuit(t, 62, FullSync) } +func TestEmptyShortCircuit63Full(t *testing.T) { testEmptyShortCircuit(t, 63, FullSync) } +func TestEmptyShortCircuit63Fast(t *testing.T) { testEmptyShortCircuit(t, 63, FastSync) } +func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) } +func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) } +func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) } + +func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) - tester := newTester() - tester.newPeer("peer", protocol, hashes, blocks) + tester := newTester(mode) + tester.newPeer("peer", protocol, hashes, headers, blocks) // Instrument the downloader to signal body requests - requested := int32(0) + bodies, receipts := int32(0), int32(0) tester.downloader.bodyFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&requested, int32(len(headers))) + atomic.AddInt32(&bodies, int32(len(headers))) + } + tester.downloader.receiptFetchHook = func(headers []*types.Header) { + atomic.AddInt32(&receipts, int32(len(headers))) } // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("peer", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if imported := len(tester.ownBlocks); imported != targetBlocks+1 { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1) - } + assertOwnChain(t, tester, targetBlocks+1) + // Validate the number of block bodies that should have been requested - needed := 0 + bodiesNeeded, receiptsNeeded := 0, 0 for _, block := range blocks { - if block != genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { - needed++ + if mode != LightSync && block != genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { + bodiesNeeded++ } + if mode == FastSync && block != genesis && len(block.Receipts()) > 0 { + receiptsNeeded++ + } + } + if int(bodies) != bodiesNeeded { + t.Errorf("body retrieval count mismatch: have %v, want %v", bodies, bodiesNeeded) } - if int(requested) != needed { - t.Fatalf("block body retrieval count mismatch: have %v, want %v", requested, needed) + if int(receipts) != receiptsNeeded { + t.Errorf("receipt retrieval count mismatch: have %v, want %v", receipts, receiptsNeeded) } } // Tests that headers are enqueued continuously, preventing malicious nodes from // stalling the downloader by feeding gapped header chains. 
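The expectation counting above boils down to which parts each mode still needs to fetch for a given block; a compact restatement (the genesis exclusion stays with the caller), using only calls that already appear in the hunk:

// partsNeeded restates the accounting in testEmptyShortCircuit: a body is only
// requested when the block actually carries transactions or uncles (and we are
// not light syncing), a receipt set only in fast sync and only when non-empty.
func partsNeeded(mode SyncMode, block *types.Block) (body, receipt bool) {
	body = mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0)
	receipt = mode == FastSync && len(block.Receipts()) > 0
	return body, receipt
}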
-func TestMissingHeaderAttack62(t *testing.T) { testMissingHeaderAttack(t, 62) } -func TestMissingHeaderAttack63(t *testing.T) { testMissingHeaderAttack(t, 63) } -func TestMissingHeaderAttack64(t *testing.T) { testMissingHeaderAttack(t, 64) } - -func testMissingHeaderAttack(t *testing.T, protocol int) { +func TestMissingHeaderAttack62(t *testing.T) { testMissingHeaderAttack(t, 62, FullSync) } +func TestMissingHeaderAttack63Full(t *testing.T) { testMissingHeaderAttack(t, 63, FullSync) } +func TestMissingHeaderAttack63Fast(t *testing.T) { testMissingHeaderAttack(t, 63, FastSync) } +func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) } +func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) } +func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) } + +func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) - tester := newTester() + tester := newTester(mode) // Attempt a full sync with an attacker feeding gapped headers - tester.newPeer("attack", protocol, hashes, blocks) + tester.newPeer("attack", protocol, hashes, headers, blocks) missing := targetBlocks / 2 + delete(tester.peerHeaders["attack"], hashes[missing]) delete(tester.peerBlocks["attack"], hashes[missing]) if err := tester.sync("attack", nil); err == nil { t.Fatalf("succeeded attacker synchronisation") } // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, hashes, blocks) + tester.newPeer("valid", protocol, hashes, headers, blocks) if err := tester.sync("valid", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if imported := len(tester.ownBlocks); imported != len(hashes) { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, len(hashes)) - } + assertOwnChain(t, tester, targetBlocks+1) } // Tests that if requested headers are shifted (i.e. first is missing), the queue // detects the invalid numbering. 
-func TestShiftedHeaderAttack62(t *testing.T) { testShiftedHeaderAttack(t, 62) } -func TestShiftedHeaderAttack63(t *testing.T) { testShiftedHeaderAttack(t, 63) } -func TestShiftedHeaderAttack64(t *testing.T) { testShiftedHeaderAttack(t, 64) } - -func testShiftedHeaderAttack(t *testing.T, protocol int) { +func TestShiftedHeaderAttack62(t *testing.T) { testShiftedHeaderAttack(t, 62, FullSync) } +func TestShiftedHeaderAttack63Full(t *testing.T) { testShiftedHeaderAttack(t, 63, FullSync) } +func TestShiftedHeaderAttack63Fast(t *testing.T) { testShiftedHeaderAttack(t, 63, FastSync) } +func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) } +func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) } +func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) } + +func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) - tester := newTester() + tester := newTester(mode) // Attempt a full sync with an attacker feeding shifted headers - tester.newPeer("attack", protocol, hashes, blocks) + tester.newPeer("attack", protocol, hashes, headers, blocks) + delete(tester.peerHeaders["attack"], hashes[len(hashes)-2]) delete(tester.peerBlocks["attack"], hashes[len(hashes)-2]) if err := tester.sync("attack", nil); err == nil { t.Fatalf("succeeded attacker synchronisation") } // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, hashes, blocks) + tester.newPeer("valid", protocol, hashes, headers, blocks) if err := tester.sync("valid", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if imported := len(tester.ownBlocks); imported != len(hashes) { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, len(hashes)) - } + assertOwnChain(t, tester, targetBlocks+1) } -// Tests that if a peer sends an invalid body for a requested block, it gets -// dropped immediately by the downloader. -func TestInvalidBlockBodyAttack62(t *testing.T) { testInvalidBlockBodyAttack(t, 62) } -func TestInvalidBlockBodyAttack63(t *testing.T) { testInvalidBlockBodyAttack(t, 63) } -func TestInvalidBlockBodyAttack64(t *testing.T) { testInvalidBlockBodyAttack(t, 64) } +// Tests that if a peer sends an invalid block piece (body or receipt) for a +// requested block, it gets dropped immediately by the downloader. 
+func TestInvalidContentAttack62(t *testing.T) { testInvalidContentAttack(t, 62, FullSync) } +func TestInvalidContentAttack63Full(t *testing.T) { testInvalidContentAttack(t, 63, FullSync) } +func TestInvalidContentAttack63Fast(t *testing.T) { testInvalidContentAttack(t, 63, FastSync) } +func TestInvalidContentAttack64Full(t *testing.T) { testInvalidContentAttack(t, 64, FullSync) } +func TestInvalidContentAttack64Fast(t *testing.T) { testInvalidContentAttack(t, 64, FastSync) } +func TestInvalidContentAttack64Light(t *testing.T) { testInvalidContentAttack(t, 64, LightSync) } -func testInvalidBlockBodyAttack(t *testing.T, protocol int) { +func testInvalidContentAttack(t *testing.T, protocol int, mode SyncMode) { // Create two peers, one feeding invalid block bodies targetBlocks := 4*blockCacheLimit - 15 - hashes, validBlocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, validBlocks := makeChain(targetBlocks, 0, genesis) invalidBlocks := make(map[common.Hash]*types.Block) for hash, block := range validBlocks { invalidBlocks[hash] = types.NewBlockWithHeader(block.Header()) } + invalidReceipts := make(map[common.Hash]*types.Block) + for hash, block := range validBlocks { + invalidReceipts[hash] = types.NewBlockWithHeader(block.Header()).WithBody(block.Transactions(), block.Uncles()) + } - tester := newTester() - tester.newPeer("valid", protocol, hashes, validBlocks) - tester.newPeer("attack", protocol, hashes, invalidBlocks) - + tester := newTester(mode) + tester.newPeer("valid", protocol, hashes, headers, validBlocks) + if mode != LightSync { + tester.newPeer("body attack", protocol, hashes, headers, invalidBlocks) + } + if mode == FastSync { + tester.newPeer("receipt attack", protocol, hashes, headers, invalidReceipts) + } // Synchronise with the valid peer (will pull contents from the attacker too) if err := tester.sync("valid", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if imported := len(tester.ownBlocks); imported != len(hashes) { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, len(hashes)) - } + assertOwnChain(t, tester, targetBlocks+1) + // Make sure the attacker was detected and dropped in the mean time - if _, ok := tester.peerHashes["attack"]; ok { + if _, ok := tester.peerHashes["body attack"]; ok { t.Fatalf("block body attacker not detected/dropped") } + if _, ok := tester.peerHashes["receipt attack"]; ok { + t.Fatalf("receipt attacker not detected/dropped") + } } // Tests that a peer advertising an high TD doesn't get to stall the downloader // afterwards by not sending any useful hashes. 
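The two attacker flavours above differ only in which component they corrupt: the body attacker serves header-only blocks, while the receipt attacker serves correct bodies that still lack matching receipts. The construction, factored into a standalone sketch with the same calls as the hunk (the function name is illustrative):

// corruptPeers builds the two malicious block maps used by the attack test:
// one stripped of bodies and receipts, one keeping the transactions and uncles
// but nothing that would satisfy a receipt request.
func corruptPeers(validBlocks map[common.Hash]*types.Block) (bodyAttack, receiptAttack map[common.Hash]*types.Block) {
	bodyAttack = make(map[common.Hash]*types.Block)
	receiptAttack = make(map[common.Hash]*types.Block)
	for hash, block := range validBlocks {
		bodyAttack[hash] = types.NewBlockWithHeader(block.Header())
		receiptAttack[hash] = types.NewBlockWithHeader(block.Header()).WithBody(block.Transactions(), block.Uncles())
	}
	return bodyAttack, receiptAttack
}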
-func TestHighTDStarvationAttack61(t *testing.T) { testHighTDStarvationAttack(t, 61) } -func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62) } -func TestHighTDStarvationAttack63(t *testing.T) { testHighTDStarvationAttack(t, 63) } -func TestHighTDStarvationAttack64(t *testing.T) { testHighTDStarvationAttack(t, 64) } - -func testHighTDStarvationAttack(t *testing.T, protocol int) { - tester := newTester() - hashes, blocks := makeChain(0, 0, genesis) - - tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, blocks) +func TestHighTDStarvationAttack61(t *testing.T) { testHighTDStarvationAttack(t, 61, FullSync) } +func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62, FullSync) } +func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) } +func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) } +func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) } +func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) } +func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) } + +func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) { + tester := newTester(mode) + hashes, headers, blocks := makeChain(0, 0, genesis) + + tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks) if err := tester.sync("attack", big.NewInt(1000000)); err != errStallingPeer { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) } @@ -834,18 +1027,20 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop + {errInvalidBlock, false}, // A bad peer was detected, but not the sync origin {errInvalidBody, false}, // A bad peer was detected, but not the sync origin + {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin {errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop {errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop {errCancelHeaderFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop {errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop } // Run the tests and check disconnection status - tester := newTester() + tester := newTester(FullSync) for i, tt := range tests { // Register a new peer and ensure it's presence id := fmt.Sprintf("test %d", i) - if err := tester.newPeer(id, protocol, []common.Hash{genesis.Hash()}, nil); err != nil { + if err := tester.newPeer(id, protocol, []common.Hash{genesis.Hash()}, nil, nil); err != nil { t.Fatalf("test %d: failed to register new peer: %v", i, err) } if _, ok := tester.peerHashes[id]; !ok { @@ -861,67 +1056,26 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { } } -// Tests that feeding bad blocks will result in a peer drop. 
-func TestBlockBodyAttackerDropping61(t *testing.T) { testBlockBodyAttackerDropping(t, 61) } -func TestBlockBodyAttackerDropping62(t *testing.T) { testBlockBodyAttackerDropping(t, 62) } -func TestBlockBodyAttackerDropping63(t *testing.T) { testBlockBodyAttackerDropping(t, 63) } -func TestBlockBodyAttackerDropping64(t *testing.T) { testBlockBodyAttackerDropping(t, 64) } - -func testBlockBodyAttackerDropping(t *testing.T, protocol int) { - // Define the disconnection requirement for individual block import errors - tests := []struct { - failure bool - drop bool - }{ - {true, true}, - {false, false}, - } - - // Run the tests and check disconnection status - tester := newTester() - for i, tt := range tests { - // Register a new peer and ensure it's presence - id := fmt.Sprintf("test %d", i) - if err := tester.newPeer(id, protocol, []common.Hash{common.Hash{}}, nil); err != nil { - t.Fatalf("test %d: failed to register new peer: %v", i, err) - } - if _, ok := tester.peerHashes[id]; !ok { - t.Fatalf("test %d: registered peer not found", i) - } - // Assemble a good or bad block, depending of the test - raw := core.GenerateChain(genesis, testdb, 1, nil)[0] - if tt.failure { - parent := types.NewBlock(&types.Header{}, nil, nil, nil) - raw = core.GenerateChain(parent, testdb, 1, nil)[0] - } - block := &Block{OriginPeer: id, RawBlock: raw} - - // Simulate block processing and check the result - tester.downloader.queue.blockCache[0] = block - tester.downloader.process() - if _, ok := tester.peerHashes[id]; !ok != tt.drop { - t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.failure, !ok, tt.drop) - } - } -} - // Tests that synchronisation boundaries (origin block number and highest block // number) is tracked and updated correctly. -func TestSyncBoundaries61(t *testing.T) { testSyncBoundaries(t, 61) } -func TestSyncBoundaries62(t *testing.T) { testSyncBoundaries(t, 62) } -func TestSyncBoundaries63(t *testing.T) { testSyncBoundaries(t, 63) } -func TestSyncBoundaries64(t *testing.T) { testSyncBoundaries(t, 64) } - -func testSyncBoundaries(t *testing.T, protocol int) { +func TestSyncBoundaries61(t *testing.T) { testSyncBoundaries(t, 61, FullSync) } +func TestSyncBoundaries62(t *testing.T) { testSyncBoundaries(t, 62, FullSync) } +func TestSyncBoundaries63Full(t *testing.T) { testSyncBoundaries(t, 63, FullSync) } +func TestSyncBoundaries63Fast(t *testing.T) { testSyncBoundaries(t, 63, FastSync) } +func TestSyncBoundaries64Full(t *testing.T) { testSyncBoundaries(t, 64, FullSync) } +func TestSyncBoundaries64Fast(t *testing.T) { testSyncBoundaries(t, 64, FastSync) } +func TestSyncBoundaries64Light(t *testing.T) { testSyncBoundaries(t, 64, LightSync) } + +func testSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) // Set a sync init hook to catch boundary changes starting := make(chan struct{}) progress := make(chan struct{}) - tester := newTester() + tester := newTester(mode) tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress @@ -931,7 +1085,7 @@ func testSyncBoundaries(t *testing.T, protocol int) { t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) } // Synchronise half the blocks and check initial boundaries - tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], blocks) + 
tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks) pending := new(sync.WaitGroup) pending.Add(1) @@ -949,7 +1103,7 @@ func testSyncBoundaries(t *testing.T, protocol int) { pending.Wait() // Synchronise all the blocks and check continuation boundaries - tester.newPeer("peer-full", protocol, hashes, blocks) + tester.newPeer("peer-full", protocol, hashes, headers, blocks) pending.Add(1) go func() { @@ -969,21 +1123,24 @@ func testSyncBoundaries(t *testing.T, protocol int) { // Tests that synchronisation boundaries (origin block number and highest block // number) is tracked and updated correctly in case of a fork (or manual head // revertal). -func TestForkedSyncBoundaries61(t *testing.T) { testForkedSyncBoundaries(t, 61) } -func TestForkedSyncBoundaries62(t *testing.T) { testForkedSyncBoundaries(t, 62) } -func TestForkedSyncBoundaries63(t *testing.T) { testForkedSyncBoundaries(t, 63) } -func TestForkedSyncBoundaries64(t *testing.T) { testForkedSyncBoundaries(t, 64) } - -func testForkedSyncBoundaries(t *testing.T, protocol int) { +func TestForkedSyncBoundaries61(t *testing.T) { testForkedSyncBoundaries(t, 61, FullSync) } +func TestForkedSyncBoundaries62(t *testing.T) { testForkedSyncBoundaries(t, 62, FullSync) } +func TestForkedSyncBoundaries63Full(t *testing.T) { testForkedSyncBoundaries(t, 63, FullSync) } +func TestForkedSyncBoundaries63Fast(t *testing.T) { testForkedSyncBoundaries(t, 63, FastSync) } +func TestForkedSyncBoundaries64Full(t *testing.T) { testForkedSyncBoundaries(t, 64, FullSync) } +func TestForkedSyncBoundaries64Fast(t *testing.T) { testForkedSyncBoundaries(t, 64, FastSync) } +func TestForkedSyncBoundaries64Light(t *testing.T) { testForkedSyncBoundaries(t, 64, LightSync) } + +func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { // Create a forked chain to simulate origin revertal common, fork := MaxHashFetch, 2*MaxHashFetch - hashesA, hashesB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis) + hashesA, hashesB, headersA, headersB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis) // Set a sync init hook to catch boundary changes starting := make(chan struct{}) progress := make(chan struct{}) - tester := newTester() + tester := newTester(mode) tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress @@ -993,7 +1150,7 @@ func testForkedSyncBoundaries(t *testing.T, protocol int) { t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) } // Synchronise with one of the forks and check boundaries - tester.newPeer("fork A", protocol, hashesA, blocksA) + tester.newPeer("fork A", protocol, hashesA, headersA, blocksA) pending := new(sync.WaitGroup) pending.Add(1) @@ -1014,7 +1171,7 @@ func testForkedSyncBoundaries(t *testing.T, protocol int) { tester.downloader.syncStatsOrigin = tester.downloader.syncStatsHeight // Synchronise with the second fork and check boundary resets - tester.newPeer("fork B", protocol, hashesB, blocksB) + tester.newPeer("fork B", protocol, hashesB, headersB, blocksB) pending.Add(1) go func() { @@ -1034,21 +1191,24 @@ func testForkedSyncBoundaries(t *testing.T, protocol int) { // Tests that if synchronisation is aborted due to some failure, then the boundary // origin is not updated in the next sync cycle, as it should be considered the // continuation of the previous sync and not a new instance. 
-func TestFailedSyncBoundaries61(t *testing.T) { testFailedSyncBoundaries(t, 61) } -func TestFailedSyncBoundaries62(t *testing.T) { testFailedSyncBoundaries(t, 62) } -func TestFailedSyncBoundaries63(t *testing.T) { testFailedSyncBoundaries(t, 63) } -func TestFailedSyncBoundaries64(t *testing.T) { testFailedSyncBoundaries(t, 64) } - -func testFailedSyncBoundaries(t *testing.T, protocol int) { +func TestFailedSyncBoundaries61(t *testing.T) { testFailedSyncBoundaries(t, 61, FullSync) } +func TestFailedSyncBoundaries62(t *testing.T) { testFailedSyncBoundaries(t, 62, FullSync) } +func TestFailedSyncBoundaries63Full(t *testing.T) { testFailedSyncBoundaries(t, 63, FullSync) } +func TestFailedSyncBoundaries63Fast(t *testing.T) { testFailedSyncBoundaries(t, 63, FastSync) } +func TestFailedSyncBoundaries64Full(t *testing.T) { testFailedSyncBoundaries(t, 64, FullSync) } +func TestFailedSyncBoundaries64Fast(t *testing.T) { testFailedSyncBoundaries(t, 64, FastSync) } +func TestFailedSyncBoundaries64Light(t *testing.T) { testFailedSyncBoundaries(t, 64, LightSync) } + +func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) // Set a sync init hook to catch boundary changes starting := make(chan struct{}) progress := make(chan struct{}) - tester := newTester() + tester := newTester(mode) tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress @@ -1058,8 +1218,9 @@ func testFailedSyncBoundaries(t *testing.T, protocol int) { t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) } // Attempt a full sync with a faulty peer - tester.newPeer("faulty", protocol, hashes, blocks) + tester.newPeer("faulty", protocol, hashes, headers, blocks) missing := targetBlocks / 2 + delete(tester.peerHeaders["faulty"], hashes[missing]) delete(tester.peerBlocks["faulty"], hashes[missing]) pending := new(sync.WaitGroup) @@ -1079,7 +1240,7 @@ func testFailedSyncBoundaries(t *testing.T, protocol int) { pending.Wait() // Synchronise with a good peer and check that the boundary origin remind the same after a failure - tester.newPeer("valid", protocol, hashes, blocks) + tester.newPeer("valid", protocol, hashes, headers, blocks) pending.Add(1) go func() { @@ -1098,21 +1259,24 @@ func testFailedSyncBoundaries(t *testing.T, protocol int) { // Tests that if an attacker fakes a chain height, after the attack is detected, // the boundary height is successfully reduced at the next sync invocation. 
-func TestFakedSyncBoundaries61(t *testing.T) { testFakedSyncBoundaries(t, 61) } -func TestFakedSyncBoundaries62(t *testing.T) { testFakedSyncBoundaries(t, 62) } -func TestFakedSyncBoundaries63(t *testing.T) { testFakedSyncBoundaries(t, 63) } -func TestFakedSyncBoundaries64(t *testing.T) { testFakedSyncBoundaries(t, 64) } - -func testFakedSyncBoundaries(t *testing.T, protocol int) { +func TestFakedSyncBoundaries61(t *testing.T) { testFakedSyncBoundaries(t, 61, FullSync) } +func TestFakedSyncBoundaries62(t *testing.T) { testFakedSyncBoundaries(t, 62, FullSync) } +func TestFakedSyncBoundaries63Full(t *testing.T) { testFakedSyncBoundaries(t, 63, FullSync) } +func TestFakedSyncBoundaries63Fast(t *testing.T) { testFakedSyncBoundaries(t, 63, FastSync) } +func TestFakedSyncBoundaries64Full(t *testing.T) { testFakedSyncBoundaries(t, 64, FullSync) } +func TestFakedSyncBoundaries64Fast(t *testing.T) { testFakedSyncBoundaries(t, 64, FastSync) } +func TestFakedSyncBoundaries64Light(t *testing.T) { testFakedSyncBoundaries(t, 64, LightSync) } + +func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { // Create a small block chain targetBlocks := blockCacheLimit - 15 - hashes, blocks := makeChain(targetBlocks+3, 0, genesis) + hashes, headers, blocks := makeChain(targetBlocks+3, 0, genesis) // Set a sync init hook to catch boundary changes starting := make(chan struct{}) progress := make(chan struct{}) - tester := newTester() + tester := newTester(mode) tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress @@ -1122,8 +1286,9 @@ func testFakedSyncBoundaries(t *testing.T, protocol int) { t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) } // Create and sync with an attacker that promises a higher chain than available - tester.newPeer("attack", protocol, hashes, blocks) + tester.newPeer("attack", protocol, hashes, headers, blocks) for i := 1; i < 3; i++ { + delete(tester.peerHeaders["attack"], hashes[i]) delete(tester.peerBlocks["attack"], hashes[i]) } @@ -1144,7 +1309,7 @@ func testFakedSyncBoundaries(t *testing.T, protocol int) { pending.Wait() // Synchronise with a good peer and check that the boundary height has been reduced to the true value - tester.newPeer("valid", protocol, hashes[3:], blocks) + tester.newPeer("valid", protocol, hashes[3:], headers, blocks) pending.Add(1) go func() { diff --git a/eth/downloader/metrics.go b/eth/downloader/metrics.go index fd926affd..92acb6ba8 100644 --- a/eth/downloader/metrics.go +++ b/eth/downloader/metrics.go @@ -42,4 +42,9 @@ var ( bodyReqTimer = metrics.NewTimer("eth/downloader/bodies/req") bodyDropMeter = metrics.NewMeter("eth/downloader/bodies/drop") bodyTimeoutMeter = metrics.NewMeter("eth/downloader/bodies/timeout") + + receiptInMeter = metrics.NewMeter("eth/downloader/receipts/in") + receiptReqTimer = metrics.NewTimer("eth/downloader/receipts/req") + receiptDropMeter = metrics.NewMeter("eth/downloader/receipts/drop") + receiptTimeoutMeter = metrics.NewMeter("eth/downloader/receipts/timeout") ) diff --git a/eth/downloader/modes.go b/eth/downloader/modes.go new file mode 100644 index 000000000..8916dbb79 --- /dev/null +++ b/eth/downloader/modes.go @@ -0,0 +1,26 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package downloader + +// SyncMode represents the synchronisation mode of the downloader. +type SyncMode int + +const ( + FullSync SyncMode = iota // Synchronise the entire block-chain history from full blocks + FastSync // Quikcly download the headers, full sync only at the chain head + LightSync // Download only the headers and terminate afterwards +) diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index c1d20ac61..5fc0db587 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -36,10 +36,11 @@ type relativeHashFetcherFn func(common.Hash) error type absoluteHashFetcherFn func(uint64, int) error type blockFetcherFn func([]common.Hash) error -// Block header and body fethers belonging to eth/62 and above +// Block header and body fetchers belonging to eth/62 and above type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error type absoluteHeaderFetcherFn func(uint64, int, int, bool) error type blockBodyFetcherFn func([]common.Hash) error +type receiptFetcherFn func([]common.Hash) error var ( errAlreadyFetching = errors.New("already fetching blocks from peer") @@ -52,11 +53,14 @@ type peer struct { id string // Unique identifier of the peer head common.Hash // Hash of the peers latest known block - idle int32 // Current activity state of the peer (idle = 0, active = 1) - rep int32 // Simple peer reputation + blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1) + receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1) + rep int32 // Simple peer reputation - capacity int32 // Number of blocks allowed to fetch per request - started time.Time // Time instance when the last fetch was started + blockCapacity int32 // Number of blocks (bodies) allowed to fetch per request + receiptCapacity int32 // Number of receipts allowed to fetch per request + blockStarted time.Time // Time instance when the last block (body)fetch was started + receiptStarted time.Time // Time instance when the last receipt fetch was started ignored *set.Set // Set of hashes not to request (didn't have previously) @@ -68,6 +72,8 @@ type peer struct { getAbsHeaders absoluteHeaderFetcherFn // [eth/62] Method to retrieve a batch of headers from an absolute position getBlockBodies blockBodyFetcherFn // [eth/62] Method to retrieve a batch of block bodies + getReceipts receiptFetcherFn // [eth/63] Method to retrieve a batch of block transaction receipts + version int // Eth protocol version number to switch strategies } @@ -75,12 +81,14 @@ type peer struct { // mechanisms. 
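The SyncMode constants above are plain iota values; a String method such as the one below is not part of this patch, but shows how they might be surfaced in log output. The peer struct changes just shown split the former single idle/capacity pair into separate block and receipt counterparts, which the constructor that follows wires up.

// String names the three synchronisation modes for logging; it is a
// convenience added for illustration only, not present in the patch.
func (mode SyncMode) String() string {
	switch mode {
	case FullSync:
		return "full"
	case FastSync:
		return "fast"
	case LightSync:
		return "light"
	default:
		return "unknown"
	}
}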
func newPeer(id string, version int, head common.Hash, getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading - getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn) *peer { + getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn, + getReceipts receiptFetcherFn) *peer { return &peer{ - id: id, - head: head, - capacity: 1, - ignored: set.New(), + id: id, + head: head, + blockCapacity: 1, + receiptCapacity: 1, + ignored: set.New(), getRelHashes: getRelHashes, getAbsHashes: getAbsHashes, @@ -90,24 +98,28 @@ func newPeer(id string, version int, head common.Hash, getAbsHeaders: getAbsHeaders, getBlockBodies: getBlockBodies, + getReceipts: getReceipts, + version: version, } } // Reset clears the internal state of a peer entity. func (p *peer) Reset() { - atomic.StoreInt32(&p.idle, 0) - atomic.StoreInt32(&p.capacity, 1) + atomic.StoreInt32(&p.blockIdle, 0) + atomic.StoreInt32(&p.receiptIdle, 0) + atomic.StoreInt32(&p.blockCapacity, 1) + atomic.StoreInt32(&p.receiptCapacity, 1) p.ignored.Clear() } // Fetch61 sends a block retrieval request to the remote peer. func (p *peer) Fetch61(request *fetchRequest) error { // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.idle, 0, 1) { + if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { return errAlreadyFetching } - p.started = time.Now() + p.blockStarted = time.Now() // Convert the hash set to a retrievable slice hashes := make([]common.Hash, 0, len(request.Hashes)) @@ -119,13 +131,13 @@ func (p *peer) Fetch61(request *fetchRequest) error { return nil } -// Fetch sends a block body retrieval request to the remote peer. -func (p *peer) Fetch(request *fetchRequest) error { +// FetchBodies sends a block body retrieval request to the remote peer. +func (p *peer) FetchBodies(request *fetchRequest) error { // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.idle, 0, 1) { + if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { return errAlreadyFetching } - p.started = time.Now() + p.blockStarted = time.Now() // Convert the header set to a retrievable slice hashes := make([]common.Hash, 0, len(request.Headers)) @@ -137,55 +149,64 @@ func (p *peer) Fetch(request *fetchRequest) error { return nil } -// SetIdle61 sets the peer to idle, allowing it to execute new retrieval requests. -// Its block retrieval allowance will also be updated either up- or downwards, -// depending on whether the previous fetch completed in time or not. -func (p *peer) SetIdle61() { - // Update the peer's download allowance based on previous performance - scale := 2.0 - if time.Since(p.started) > blockSoftTTL { - scale = 0.5 - if time.Since(p.started) > blockHardTTL { - scale = 1 / float64(MaxBlockFetch) // reduces capacity to 1 - } +// FetchReceipts sends a receipt retrieval request to the remote peer. 
+func (p *peer) FetchReceipts(request *fetchRequest) error { + // Short circuit if the peer is already fetching + if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { + return errAlreadyFetching } - for { - // Calculate the new download bandwidth allowance - prev := atomic.LoadInt32(&p.capacity) - next := int32(math.Max(1, math.Min(float64(MaxBlockFetch), float64(prev)*scale))) + p.receiptStarted = time.Now() - // Try to update the old value - if atomic.CompareAndSwapInt32(&p.capacity, prev, next) { - // If we're having problems at 1 capacity, try to find better peers - if next == 1 { - p.Demote() - } - break - } + // Convert the header set to a retrievable slice + hashes := make([]common.Hash, 0, len(request.Headers)) + for _, header := range request.Headers { + hashes = append(hashes, header.Hash()) } - // Set the peer to idle to allow further block requests - atomic.StoreInt32(&p.idle, 0) + go p.getReceipts(hashes) + + return nil +} + +// SetBlocksIdle sets the peer to idle, allowing it to execute new retrieval requests. +// Its block retrieval allowance will also be updated either up- or downwards, +// depending on whether the previous fetch completed in time or not. +func (p *peer) SetBlocksIdle() { + p.setIdle(p.blockStarted, blockSoftTTL, blockHardTTL, MaxBlockFetch, &p.blockCapacity, &p.blockIdle) } -// SetIdle sets the peer to idle, allowing it to execute new retrieval requests. +// SetBodiesIdle sets the peer to idle, allowing it to execute new retrieval requests. // Its block body retrieval allowance will also be updated either up- or downwards, // depending on whether the previous fetch completed in time or not. -func (p *peer) SetIdle() { +func (p *peer) SetBodiesIdle() { + p.setIdle(p.blockStarted, bodySoftTTL, bodyHardTTL, MaxBlockFetch, &p.blockCapacity, &p.blockIdle) +} + +// SetReceiptsIdle sets the peer to idle, allowing it to execute new retrieval requests. +// Its receipt retrieval allowance will also be updated either up- or downwards, +// depending on whether the previous fetch completed in time or not. +func (p *peer) SetReceiptsIdle() { + p.setIdle(p.receiptStarted, receiptSoftTTL, receiptHardTTL, MaxReceiptFetch, &p.receiptCapacity, &p.receiptIdle) +} + +// setIdle sets the peer to idle, allowing it to execute new retrieval requests. +// Its data retrieval allowance will also be updated either up- or downwards, +// depending on whether the previous fetch completed in time or not. 
+func (p *peer) setIdle(started time.Time, softTTL, hardTTL time.Duration, maxFetch int, capacity, idle *int32) { // Update the peer's download allowance based on previous performance scale := 2.0 - if time.Since(p.started) > bodySoftTTL { + if time.Since(started) > softTTL { scale = 0.5 - if time.Since(p.started) > bodyHardTTL { - scale = 1 / float64(MaxBodyFetch) // reduces capacity to 1 + if time.Since(started) > hardTTL { + scale = 1 / float64(maxFetch) // reduces capacity to 1 } } for { // Calculate the new download bandwidth allowance - prev := atomic.LoadInt32(&p.capacity) - next := int32(math.Max(1, math.Min(float64(MaxBodyFetch), float64(prev)*scale))) + prev := atomic.LoadInt32(capacity) + next := int32(math.Max(1, math.Min(float64(maxFetch), float64(prev)*scale))) // Try to update the old value - if atomic.CompareAndSwapInt32(&p.capacity, prev, next) { + if atomic.CompareAndSwapInt32(capacity, prev, next) { // If we're having problems at 1 capacity, try to find better peers if next == 1 { p.Demote() @@ -193,14 +214,20 @@ func (p *peer) SetIdle() { break } } - // Set the peer to idle to allow further block requests - atomic.StoreInt32(&p.idle, 0) + // Set the peer to idle to allow further fetch requests + atomic.StoreInt32(idle, 0) +} + +// BlockCapacity retrieves the peers block download allowance based on its +// previously discovered bandwidth capacity. +func (p *peer) BlockCapacity() int { + return int(atomic.LoadInt32(&p.blockCapacity)) } -// Capacity retrieves the peers block download allowance based on its previously -// discovered bandwidth capacity. -func (p *peer) Capacity() int { - return int(atomic.LoadInt32(&p.capacity)) +// ReceiptCapacity retrieves the peers block download allowance based on its +// previously discovered bandwidth capacity. +func (p *peer) ReceiptCapacity() int { + return int(atomic.LoadInt32(&p.receiptCapacity)) } // Promote increases the peer's reputation. @@ -226,7 +253,8 @@ func (p *peer) Demote() { func (p *peer) String() string { return fmt.Sprintf("Peer %s [%s]", p.id, fmt.Sprintf("reputation %3d, ", atomic.LoadInt32(&p.rep))+ - fmt.Sprintf("capacity %3d, ", atomic.LoadInt32(&p.capacity))+ + fmt.Sprintf("block cap %3d, ", atomic.LoadInt32(&p.blockCapacity))+ + fmt.Sprintf("receipt cap %3d, ", atomic.LoadInt32(&p.receiptCapacity))+ fmt.Sprintf("ignored %4d", p.ignored.Size()), ) } @@ -310,26 +338,52 @@ func (ps *peerSet) AllPeers() []*peer { return list } -// IdlePeers retrieves a flat list of all the currently idle peers within the +// BlockIdlePeers retrieves a flat list of all the currently idle peers within the // active peer set, ordered by their reputation. 
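The generic setIdle above applies one scaling rule to every data kind: a reply within the soft TTL doubles the allowance, missing the soft TTL halves it, and missing the hard TTL collapses it to a single item, always clamped to [1, maxFetch]. The arithmetic in isolation (needs the math and time imports):

// nextCapacity restates the bandwidth-allowance update performed by setIdle,
// without the atomics: double on a fast reply, halve on a slow one, fall back
// to a single item after a hard timeout, clamped to [1, maxFetch].
func nextCapacity(prev int32, elapsed, softTTL, hardTTL time.Duration, maxFetch int) int32 {
	scale := 2.0
	if elapsed > softTTL {
		scale = 0.5
		if elapsed > hardTTL {
			scale = 1 / float64(maxFetch) // reduces capacity to 1
		}
	}
	return int32(math.Max(1, math.Min(float64(maxFetch), float64(prev)*scale)))
}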
-func (ps *peerSet) IdlePeers(version int) []*peer { +func (ps *peerSet) BlockIdlePeers(version int) ([]*peer, int) { ps.lock.RLock() defer ps.lock.RUnlock() - list := make([]*peer, 0, len(ps.peers)) + idle, total := make([]*peer, 0, len(ps.peers)), 0 for _, p := range ps.peers { - if (version == eth61 && p.version == eth61) || (version >= eth62 && p.version >= eth62) { - if atomic.LoadInt32(&p.idle) == 0 { - list = append(list, p) + if (version == 61 && p.version == 61) || (version >= 62 && p.version >= 62) { + if atomic.LoadInt32(&p.blockIdle) == 0 { + idle = append(idle, p) } + total++ } } - for i := 0; i < len(list); i++ { - for j := i + 1; j < len(list); j++ { - if atomic.LoadInt32(&list[i].rep) < atomic.LoadInt32(&list[j].rep) { - list[i], list[j] = list[j], list[i] + for i := 0; i < len(idle); i++ { + for j := i + 1; j < len(idle); j++ { + if atomic.LoadInt32(&idle[i].rep) < atomic.LoadInt32(&idle[j].rep) { + idle[i], idle[j] = idle[j], idle[i] } } } - return list + return idle, total +} + +// ReceiptIdlePeers retrieves a flat list of all the currently idle peers within the +// active peer set, ordered by their reputation. +func (ps *peerSet) ReceiptIdlePeers() ([]*peer, int) { + ps.lock.RLock() + defer ps.lock.RUnlock() + + idle, total := make([]*peer, 0, len(ps.peers)), 0 + for _, p := range ps.peers { + if p.version >= 63 { + if atomic.LoadInt32(&p.receiptIdle) == 0 { + idle = append(idle, p) + } + total++ + } + } + for i := 0; i < len(idle); i++ { + for j := i + 1; j < len(idle); j++ { + if atomic.LoadInt32(&idle[i].rep) < atomic.LoadInt32(&idle[j].rep) { + idle[i], idle[j] = idle[j], idle[i] + } + } + } + return idle, total } diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 49d1046fb..c53ad939e 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -29,11 +29,12 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" + "github.com/rcrowley/go-metrics" "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) var ( - blockCacheLimit = 8 * MaxBlockFetch // Maximum number of blocks to cache before throttling the download + blockCacheLimit = 1024 // Maximum number of blocks to cache before throttling the download ) var ( @@ -41,29 +42,47 @@ var ( errStaleDelivery = errors.New("stale delivery") ) -// fetchRequest is a currently running block retrieval operation. +// fetchRequest is a currently running data retrieval operation. type fetchRequest struct { Peer *peer // Peer to which the request was sent - Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority) + Hashes map[common.Hash]int // [eth/61] Requested block with their insertion index (priority) Headers []*types.Header // [eth/62] Requested headers, sorted by request order Time time.Time // Time when the request was made } +// fetchResult is the assembly collecting partial results from potentially more +// than one fetcher routines, until all outstanding retrievals complete and the +// result as a whole can be processed. 
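Before the queue rework below, a sketch of how the split accessors are meant to be consumed (the real call sites live in downloader.go, outside this excerpt): the idle slice drives request assignment, while the returned total distinguishes "every capable peer is busy" from "nobody can serve this data at all". The helper name is illustrative.

// assignBodyFetches sums up how many block bodies could be requested right now
// from the idle peers of a compatible version, and how many capable peers are
// currently busy; the split lets the caller tell throttling from starvation.
func assignBodyFetches(ps *peerSet, version int) (assignable, busy int) {
	idle, total := ps.BlockIdlePeers(version)
	for _, p := range idle {
		assignable += p.BlockCapacity() // at most this many bodies per request
	}
	return assignable, total - len(idle)
}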
+type fetchResult struct { + Pending int // Number of data fetches still pending + + Header *types.Header + Uncles []*types.Header + Transactions types.Transactions + Receipts types.Receipts +} + // queue represents hashes that are either need fetching or are being fetched type queue struct { hashPool map[common.Hash]int // [eth/61] Pending hashes, mapping to their insertion index (priority) hashQueue *prque.Prque // [eth/61] Priority queue of the block hashes to fetch hashCounter int // [eth/61] Counter indexing the added hashes to ensure retrieval order - headerPool map[common.Hash]*types.Header // [eth/62] Pending headers, mapping from their hashes - headerQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the bodies for - headerHead common.Hash // [eth/62] Hash of the last queued header to verify order + headerHead common.Hash // [eth/62] Hash of the last queued header to verify order - pendPool map[string]*fetchRequest // Currently pending block retrieval operations + blockTaskPool map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers + blockTaskQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for + blockPendPool map[string]*fetchRequest // [eth/62] Currently pending block (body) retrieval operations + blockDonePool map[common.Hash]struct{} // [eth/62] Set of the completed block (body) fetches - blockPool map[common.Hash]uint64 // Hash-set of the downloaded data blocks, mapping to cache indexes - blockCache []*Block // Downloaded but not yet delivered blocks - blockOffset uint64 // Offset of the first cached block in the block-chain + receiptTaskPool map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers + receiptTaskQueue *prque.Prque // [eth/63] Priority queue of the headers to fetch the receipts for + receiptPendPool map[string]*fetchRequest // [eth/63] Currently pending receipt retrieval operations + receiptDonePool map[common.Hash]struct{} // [eth/63] Set of the completed receipt fetches + + resultCache []*fetchResult // Downloaded but not yet delivered fetch results + resultOffset uint64 // Offset of the first cached fetch result in the block-chain + resultParts int // Number of fetch components required to complete an item lock sync.RWMutex } @@ -71,13 +90,17 @@ type queue struct { // newQueue creates a new download queue for scheduling block retrieval. 
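The Pending counter is what ties the block and receipt pipelines to a single cache slot: it starts at the number of components still outstanding (one for a body-only sync, two when receipts are fetched as well) and the result becomes importable only once it reaches zero. A sketch of the delivery-side bookkeeping, with an illustrative function name:

// completeBody fills in the body half of a fetch result and reports whether
// the slot is now fully assembled; in fast sync the receipts delivery performs
// the matching decrement on the same counter.
func completeBody(result *fetchResult, txs types.Transactions, uncles []*types.Header) bool {
	result.Transactions = txs
	result.Uncles = uncles
	result.Pending--
	return result.Pending == 0
}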
func newQueue() *queue { return &queue{ - hashPool: make(map[common.Hash]int), - hashQueue: prque.New(), - headerPool: make(map[common.Hash]*types.Header), - headerQueue: prque.New(), - pendPool: make(map[string]*fetchRequest), - blockPool: make(map[common.Hash]uint64), - blockCache: make([]*Block, blockCacheLimit), + hashPool: make(map[common.Hash]int), + hashQueue: prque.New(), + blockTaskPool: make(map[common.Hash]*types.Header), + blockTaskQueue: prque.New(), + blockPendPool: make(map[string]*fetchRequest), + blockDonePool: make(map[common.Hash]struct{}), + receiptTaskPool: make(map[common.Hash]*types.Header), + receiptTaskQueue: prque.New(), + receiptPendPool: make(map[string]*fetchRequest), + receiptDonePool: make(map[common.Hash]struct{}), + resultCache: make([]*fetchResult, blockCacheLimit), } } @@ -90,32 +113,37 @@ func (q *queue) Reset() { q.hashQueue.Reset() q.hashCounter = 0 - q.headerPool = make(map[common.Hash]*types.Header) - q.headerQueue.Reset() q.headerHead = common.Hash{} - q.pendPool = make(map[string]*fetchRequest) + q.blockTaskPool = make(map[common.Hash]*types.Header) + q.blockTaskQueue.Reset() + q.blockPendPool = make(map[string]*fetchRequest) + q.blockDonePool = make(map[common.Hash]struct{}) + + q.receiptTaskPool = make(map[common.Hash]*types.Header) + q.receiptTaskQueue.Reset() + q.receiptPendPool = make(map[string]*fetchRequest) + q.receiptDonePool = make(map[common.Hash]struct{}) - q.blockPool = make(map[common.Hash]uint64) - q.blockOffset = 0 - q.blockCache = make([]*Block, blockCacheLimit) + q.resultCache = make([]*fetchResult, blockCacheLimit) + q.resultOffset = 0 + q.resultParts = 0 } -// Size retrieves the number of blocks in the queue, returning separately for -// pending and already downloaded. -func (q *queue) Size() (int, int) { +// PendingBlocks retrieves the number of block (body) requests pending for retrieval. +func (q *queue) PendingBlocks() int { q.lock.RLock() defer q.lock.RUnlock() - return len(q.hashPool) + len(q.headerPool), len(q.blockPool) + return q.hashQueue.Size() + q.blockTaskQueue.Size() } -// Pending retrieves the number of blocks pending for retrieval. -func (q *queue) Pending() int { +// PendingReceipts retrieves the number of block receipts pending for retrieval. +func (q *queue) PendingReceipts() int { q.lock.RLock() defer q.lock.RUnlock() - return q.hashQueue.Size() + q.headerQueue.Size() + return q.receiptTaskQueue.Size() } // InFlight retrieves the number of fetch requests currently in flight. @@ -123,44 +151,55 @@ func (q *queue) InFlight() int { q.lock.RLock() defer q.lock.RUnlock() - return len(q.pendPool) + return len(q.blockPendPool) + len(q.receiptPendPool) +} + +// Idle returns if the queue is fully idle or has some data still inside. This +// method is used by the tester to detect termination events. +func (q *queue) Idle() bool { + q.lock.RLock() + defer q.lock.RUnlock() + + queued := q.hashQueue.Size() + q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + pending := len(q.blockPendPool) + len(q.receiptPendPool) + cached := len(q.blockDonePool) + len(q.receiptDonePool) + + return (queued + pending + cached) == 0 } -// Throttle checks if the download should be throttled (active block fetches -// exceed block cache). -func (q *queue) Throttle() bool { +// ThrottleBlocks checks if the download should be throttled (active block (body) +// fetches exceed block cache). 
+func (q *queue) ThrottleBlocks() bool { q.lock.RLock() defer q.lock.RUnlock() - // Calculate the currently in-flight block requests + // Calculate the currently in-flight block (body) requests pending := 0 - for _, request := range q.pendPool { + for _, request := range q.blockPendPool { pending += len(request.Hashes) + len(request.Headers) } - // Throttle if more blocks are in-flight than free space in the cache - return pending >= len(q.blockCache)-len(q.blockPool) + // Throttle if more blocks (bodies) are in-flight than free space in the cache + return pending >= len(q.resultCache)-len(q.blockDonePool) } -// Has checks if a hash is within the download queue or not. -func (q *queue) Has(hash common.Hash) bool { +// ThrottleReceipts checks if the download should be throttled (active receipt +// fetches exceed block cache). +func (q *queue) ThrottleReceipts() bool { q.lock.RLock() defer q.lock.RUnlock() - if _, ok := q.hashPool[hash]; ok { - return true - } - if _, ok := q.headerPool[hash]; ok { - return true - } - if _, ok := q.blockPool[hash]; ok { - return true + // Calculate the currently in-flight receipt requests + pending := 0 + for _, request := range q.receiptPendPool { + pending += len(request.Headers) } - return false + // Throttle if more receipts are in-flight than free space in the cache + return pending >= len(q.resultCache)-len(q.receiptDonePool) } -// Insert61 adds a set of hashes for the download queue for scheduling, returning +// Schedule61 adds a set of hashes for the download queue for scheduling, returning // the new hashes encountered. -func (q *queue) Insert61(hashes []common.Hash, fifo bool) []common.Hash { +func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash { q.lock.Lock() defer q.lock.Unlock() @@ -186,22 +225,17 @@ func (q *queue) Insert61(hashes []common.Hash, fifo bool) []common.Hash { return inserts } -// Insert adds a set of headers for the download queue for scheduling, returning +// Schedule adds a set of headers for the download queue for scheduling, returning // the new headers encountered. 
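Both throttle checks reduce to the same comparison: the number of items already requested from peers must stay below the free space left in the result cache, where "free" discounts slots whose data has arrived but has not yet been taken. A small sketch of that rule with plain integers (the names are illustrative, not the real fields):

package main

import "fmt"

// shouldThrottle reports whether new fetches should be held back because the
// requests already in flight would overrun the space left in the result cache
// (cacheSize slots, of which done are filled and awaiting delivery).
func shouldThrottle(inflight []int, cacheSize, done int) bool {
	pending := 0
	for _, n := range inflight {
		pending += n // items requested from each peer, not yet answered
	}
	return pending >= cacheSize-done
}

func main() {
	// 3 peers with 100 outstanding items each against a 256-slot cache that
	// already holds 40 finished-but-undelivered results.
	fmt.Println(shouldThrottle([]int{100, 100, 100}, 256, 40)) // true: back off
	fmt.Println(shouldThrottle([]int{10, 20}, 256, 40))        // false: keep fetching
}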
-func (q *queue) Insert(headers []*types.Header, from uint64) []*types.Header { +func (q *queue) Schedule(headers []*types.Header, from uint64, receipts bool) []*types.Header { q.lock.Lock() defer q.lock.Unlock() // Insert all the headers prioritized by the contained block number inserts := make([]*types.Header, 0, len(headers)) for _, header := range headers { - // Make sure no duplicate requests are executed - hash := header.Hash() - if _, ok := q.headerPool[hash]; ok { - glog.V(logger.Warn).Infof("Header #%d [%x] already scheduled", header.Number.Uint64(), hash[:4]) - continue - } // Make sure chain order is honored and preserved throughout + hash := header.Hash() if header.Number == nil || header.Number.Uint64() != from { glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ordering, expected %d", header.Number, hash[:4], from) break @@ -210,69 +244,72 @@ func (q *queue) Insert(headers []*types.Header, from uint64) []*types.Header { glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ancestry", header.Number, hash[:4]) break } - // Queue the header for body retrieval + // Make sure no duplicate requests are executed + if _, ok := q.blockTaskPool[hash]; ok { + glog.V(logger.Warn).Infof("Header #%d [%x] already scheduled for block fetch", header.Number.Uint64(), hash[:4]) + continue + } + if _, ok := q.receiptTaskPool[hash]; ok { + glog.V(logger.Warn).Infof("Header #%d [%x] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4]) + continue + } + // Queue the header for content retrieval + q.blockTaskPool[hash] = header + q.blockTaskQueue.Push(header, -float32(header.Number.Uint64())) + if receipts { + q.receiptTaskPool[hash] = header + q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64())) + } inserts = append(inserts, header) - q.headerPool[hash] = header - q.headerQueue.Push(header, -float32(header.Number.Uint64())) q.headerHead = hash from++ } return inserts } -// GetHeadBlock retrieves the first block from the cache, or nil if it hasn't +// GetHeadResult retrieves the first fetch result from the cache, or nil if it hasn't // been downloaded yet (or simply non existent). -func (q *queue) GetHeadBlock() *Block { +func (q *queue) GetHeadResult() *fetchResult { q.lock.RLock() defer q.lock.RUnlock() - if len(q.blockCache) == 0 { + if len(q.resultCache) == 0 || q.resultCache[0] == nil { return nil } - return q.blockCache[0] -} - -// GetBlock retrieves a downloaded block, or nil if non-existent. -func (q *queue) GetBlock(hash common.Hash) *Block { - q.lock.RLock() - defer q.lock.RUnlock() - - // Short circuit if the block hasn't been downloaded yet - index, ok := q.blockPool[hash] - if !ok { + if q.resultCache[0].Pending > 0 { return nil } - // Return the block if it's still available in the cache - if q.blockOffset <= index && index < q.blockOffset+uint64(len(q.blockCache)) { - return q.blockCache[index-q.blockOffset] - } - return nil + return q.resultCache[0] } -// TakeBlocks retrieves and permanently removes a batch of blocks from the cache. -func (q *queue) TakeBlocks() []*Block { +// TakeResults retrieves and permanently removes a batch of fetch results from +// the cache. 
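The negative block number used as the queue priority makes the priority queue pop headers in ascending chain order. A compact illustration of why the sign flip yields oldest-first retrieval, using a plain sorted slice as a stand-in for the prque package used by the real code:

package main

import (
	"fmt"
	"sort"
)

type task struct {
	number   uint64
	priority float32 // -number: a higher priority means a lower block number
}

func main() {
	// Headers arrive out of order; each is queued with priority -number.
	tasks := []task{
		{number: 103, priority: -103},
		{number: 101, priority: -101},
		{number: 102, priority: -102},
	}
	// A max-priority queue pops the largest priority first; emulate that by
	// sorting descending on priority.
	sort.Slice(tasks, func(i, j int) bool { return tasks[i].priority > tasks[j].priority })

	for _, t := range tasks {
		fmt.Println("fetch body for block", t.number) // 101, 102, 103: ascending chain order
	}
}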
+func (q *queue) TakeResults() []*fetchResult { q.lock.Lock() defer q.lock.Unlock() - // Accumulate all available blocks - blocks := []*Block{} - for _, block := range q.blockCache { - if block == nil { + // Accumulate all available results + results := []*fetchResult{} + for _, result := range q.resultCache { + if result == nil || result.Pending > 0 { break } - blocks = append(blocks, block) - delete(q.blockPool, block.RawBlock.Hash()) + results = append(results, result) + + hash := result.Header.Hash() + delete(q.blockDonePool, hash) + delete(q.receiptDonePool, hash) } - // Delete the blocks from the slice and let them be garbage collected - // without this slice trick the blocks would stay in memory until nil - // would be assigned to q.blocks - copy(q.blockCache, q.blockCache[len(blocks):]) - for k, n := len(q.blockCache)-len(blocks), len(q.blockCache); k < n; k++ { - q.blockCache[k] = nil + // Delete the results from the slice and let them be garbage collected + // without this slice trick the results would stay in memory until nil + // would be assigned to them. + copy(q.resultCache, q.resultCache[len(results):]) + for k, n := len(q.resultCache)-len(results), len(q.resultCache); k < n; k++ { + q.resultCache[k] = nil } - q.blockOffset += uint64(len(blocks)) + q.resultOffset += uint64(len(results)) - return blocks + return results } // Reserve61 reserves a set of hashes for the given peer, skipping any previously @@ -286,12 +323,12 @@ func (q *queue) Reserve61(p *peer, count int) *fetchRequest { if q.hashQueue.Empty() { return nil } - if _, ok := q.pendPool[p.id]; ok { + if _, ok := q.blockPendPool[p.id]; ok { return nil } // Calculate an upper limit on the hashes we might fetch (i.e. throttling) - space := len(q.blockCache) - len(q.blockPool) - for _, request := range q.pendPool { + space := len(q.resultCache) - len(q.blockDonePool) + for _, request := range q.blockPendPool { space -= len(request.Hashes) } // Retrieve a batch of hashes, skipping previously failed ones @@ -319,49 +356,82 @@ func (q *queue) Reserve61(p *peer, count int) *fetchRequest { Hashes: send, Time: time.Now(), } - q.pendPool[p.id] = request + q.blockPendPool[p.id] = request return request } -// Reserve reserves a set of headers for the given peer, skipping any previously -// failed download. Beside the next batch of needed fetches, it also returns a -// flag whether empty blocks were queued requiring processing. -func (q *queue) Reserve(p *peer, count int) (*fetchRequest, bool, error) { +// ReserveBlocks reserves a set of body fetches for the given peer, skipping any +// previously failed downloads. Beside the next batch of needed fetches, it also +// returns a flag whether empty blocks were queued requiring processing. +func (q *queue) ReserveBlocks(p *peer, count int) (*fetchRequest, bool, error) { + noop := func(header *types.Header) bool { + return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash + } + return q.reserveFetch(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, noop) +} + +// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping +// any previously failed downloads. Beside the next batch of needed fetches, it +// also returns a flag whether empty receipts were queued requiring importing. 
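TakeResults drains the completed prefix of the cache and then shifts the remaining slots to the front, explicitly nil-ing the tail so the delivered results become garbage collectable while the backing array is reused. The same pattern in isolation, with strings standing in for fetch results:

package main

import "fmt"

func main() {
	// Four result slots: the first two are complete and about to be delivered,
	// the third is still being fetched, the fourth is empty.
	cache := []*string{str("done-0"), str("done-1"), str("pending-2"), nil}
	offset, taken := uint64(100), 2

	// Shift the undelivered slots to the front of the same backing array...
	copy(cache, cache[taken:])
	// ...and nil the tail so the delivered results can be garbage collected.
	for i := len(cache) - taken; i < len(cache); i++ {
		cache[i] = nil
	}
	offset += uint64(taken) // slot 0 now corresponds to block number 102

	fmt.Println(offset)                       // 102
	fmt.Println(*cache[0])                    // pending-2
	fmt.Println(cache[1], cache[2], cache[3]) // <nil> <nil> <nil>
}

func str(s string) *string { return &s }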
+func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error) { + noop := func(header *types.Header) bool { + return header.ReceiptHash == types.EmptyRootHash + } + return q.reserveFetch(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, noop) +} + +// reserveFetch reserves a set of data download operations for a given peer, +// skipping any previously failed ones. This method is a generic version used +// by the individual special reservation functions. +func (q *queue) reserveFetch(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, + pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, noop func(*types.Header) bool) (*fetchRequest, bool, error) { q.lock.Lock() defer q.lock.Unlock() // Short circuit if the pool has been depleted, or if the peer's already // downloading something (sanity check not to corrupt state) - if q.headerQueue.Empty() { + if taskQueue.Empty() { return nil, false, nil } - if _, ok := q.pendPool[p.id]; ok { + if _, ok := pendPool[p.id]; ok { return nil, false, nil } - // Calculate an upper limit on the bodies we might fetch (i.e. throttling) - space := len(q.blockCache) - len(q.blockPool) - for _, request := range q.pendPool { + // Calculate an upper limit on the items we might fetch (i.e. throttling) + space := len(q.resultCache) - len(donePool) + for _, request := range pendPool { space -= len(request.Headers) } - // Retrieve a batch of headers, skipping previously failed ones + // Retrieve a batch of tasks, skipping previously failed ones send := make([]*types.Header, 0, count) skip := make([]*types.Header, 0) - process := false - for proc := 0; proc < space && len(send) < count && !q.headerQueue.Empty(); proc++ { - header := q.headerQueue.PopItem().(*types.Header) + progress := false + for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ { + header := taskQueue.PopItem().(*types.Header) - // If the header defines an empty block, deliver straight - if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) { - if err := q.enqueue("", types.NewBlockWithHeader(header)); err != nil { - return nil, false, errInvalidChain + // If we're the first to request this task, initialize the result container + index := int(header.Number.Int64() - int64(q.resultOffset)) + if index >= len(q.resultCache) || index < 0 { + return nil, false, errInvalidChain + } + if q.resultCache[index] == nil { + q.resultCache[index] = &fetchResult{ + Pending: q.resultParts, + Header: header, } - delete(q.headerPool, header.Hash()) - process, space, proc = true, space-1, proc-1 + } + // If this fetch task is a noop, skip this fetch operation + if noop(header) { + donePool[header.Hash()] = struct{}{} + delete(taskPool, header.Hash()) + + space, proc = space-1, proc-1 + q.resultCache[index].Pending-- + progress = true continue } - // If it's a content block, add to the body fetch request + // Otherwise if not a known unknown block, add to the retrieve list if p.ignored.Has(header.Hash()) { skip = append(skip, header) } else { @@ -370,24 +440,41 @@ func (q *queue) Reserve(p *peer, count int) (*fetchRequest, bool, error) { } // Merge all the skipped headers back for _, header := range skip { - q.headerQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -float32(header.Number.Uint64())) } // Assemble and return the block download request if len(send) == 0 { - return nil, process, nil + 
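The two noop predicates rely on the fact that an "empty" component is fully determined by the header alone: a body is empty when the transaction and uncle roots equal the well-known empty roots, and a receipt set is empty when the receipt root does, so neither ever needs a network round trip. A hedged sketch of filtering a header batch with such predicates, assuming the core/types package of this tree (the filterNoop helper is illustrative, not queue API):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// emptyBody mirrors the body noop predicate: nothing to fetch if both the
// transaction root and the uncle root are the canonical empty roots.
func emptyBody(h *types.Header) bool {
	return h.TxHash == types.EmptyRootHash && h.UncleHash == types.EmptyUncleHash
}

// emptyReceipts mirrors the receipt noop predicate.
func emptyReceipts(h *types.Header) bool {
	return h.ReceiptHash == types.EmptyRootHash
}

// filterNoop splits a header batch into ones that need a network fetch and
// ones that can be completed locally (illustrative helper only).
func filterNoop(headers []*types.Header, noop func(*types.Header) bool) (fetch, skip []*types.Header) {
	for _, h := range headers {
		if noop(h) {
			skip = append(skip, h)
			continue
		}
		fetch = append(fetch, h)
	}
	return fetch, skip
}

func main() {
	empty := &types.Header{TxHash: types.EmptyRootHash, UncleHash: types.EmptyUncleHash, ReceiptHash: types.EmptyRootHash}
	fetch, skip := filterNoop([]*types.Header{empty}, emptyBody)
	fmt.Println(len(fetch), len(skip)) // 0 1: empty body is completed locally
	fmt.Println(emptyReceipts(empty))  // true: no receipt fetch needed either
}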
return nil, progress, nil } request := &fetchRequest{ Peer: p, Headers: send, Time: time.Now(), } - q.pendPool[p.id] = request + pendPool[p.id] = request + + return request, progress, nil +} + +// Cancel61 aborts a fetch request, returning all pending hashes to the queue. +func (q *queue) Cancel61(request *fetchRequest) { + q.cancel(request, nil, q.blockPendPool) +} + +// CancelBlocks aborts a body fetch request, returning all pending hashes to the +// task queue. +func (q *queue) CancelBlocks(request *fetchRequest) { + q.cancel(request, q.blockTaskQueue, q.blockPendPool) +} - return request, process, nil +// CancelReceipts aborts a body fetch request, returning all pending hashes to +// the task queue. +func (q *queue) CancelReceipts(request *fetchRequest) { + q.cancel(request, q.receiptTaskQueue, q.receiptPendPool) } -// Cancel aborts a fetch request, returning all pending hashes to the queue. -func (q *queue) Cancel(request *fetchRequest) { +// Cancel aborts a fetch request, returning all pending hashes to the task queue. +func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) { q.lock.Lock() defer q.lock.Unlock() @@ -395,20 +482,62 @@ func (q *queue) Cancel(request *fetchRequest) { q.hashQueue.Push(hash, float32(index)) } for _, header := range request.Headers { - q.headerQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -float32(header.Number.Uint64())) } - delete(q.pendPool, request.Peer.id) + delete(pendPool, request.Peer.id) } -// Expire checks for in flight requests that exceeded a timeout allowance, +// Revoke cancels all pending requests belonging to a given peer. This method is +// meant to be called during a peer drop to quickly reassign owned data fetches +// to remaining nodes. +func (q *queue) Revoke(peerId string) { + q.lock.Lock() + defer q.lock.Unlock() + + if request, ok := q.blockPendPool[peerId]; ok { + for hash, index := range request.Hashes { + q.hashQueue.Push(hash, float32(index)) + } + for _, header := range request.Headers { + q.blockTaskQueue.Push(header, -float32(header.Number.Uint64())) + } + delete(q.blockPendPool, peerId) + } + if request, ok := q.receiptPendPool[peerId]; ok { + for _, header := range request.Headers { + q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64())) + } + delete(q.receiptPendPool, peerId) + } +} + +// Expire61 checks for in flight requests that exceeded a timeout allowance, // canceling them and returning the responsible peers for penalization. -func (q *queue) Expire(timeout time.Duration) []string { +func (q *queue) Expire61(timeout time.Duration) []string { + return q.expire(timeout, q.blockPendPool, nil) +} + +// ExpireBlocks checks for in flight block body requests that exceeded a timeout +// allowance, canceling them and returning the responsible peers for penalization. +func (q *queue) ExpireBlocks(timeout time.Duration) []string { + return q.expire(timeout, q.blockPendPool, q.blockTaskQueue) +} + +// ExpireReceipts checks for in flight receipt requests that exceeded a timeout +// allowance, canceling them and returning the responsible peers for penalization. +func (q *queue) ExpireReceipts(timeout time.Duration) []string { + return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue) +} + +// expire is the generic check that move expired tasks from a pending pool back +// into a task pool, returning all entities caught with expired tasks. 
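Revoke is the piece that keeps a peer drop from stalling the download: whatever the departing peer had reserved is simply pushed back onto the task queues so the remaining peers can pick it up. A self-contained sketch of that hand-back with simplified stand-in types (a map of per-peer reservations plus a shared work queue):

package main

import "fmt"

type request struct {
	peer    string
	headers []uint64 // block numbers reserved by this peer (stand-in for *types.Header)
}

// revoke returns everything a departing peer had reserved to the shared task
// queue and forgets its pending request, mirroring queue.Revoke.
func revoke(peer string, pending map[string]*request, taskQueue *[]uint64) {
	req, ok := pending[peer]
	if !ok {
		return
	}
	*taskQueue = append(*taskQueue, req.headers...) // hand tasks back for reassignment
	delete(pending, peer)
}

func main() {
	tasks := []uint64{}
	pending := map[string]*request{
		"peer-a": {peer: "peer-a", headers: []uint64{101, 102}},
		"peer-b": {peer: "peer-b", headers: []uint64{103}},
	}
	revoke("peer-a", pending, &tasks)
	fmt.Println(tasks, len(pending)) // [101 102] 1
}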
+func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque) []string { q.lock.Lock() defer q.lock.Unlock() // Iterate over the expired requests and return each to the queue peers := []string{} - for id, request := range q.pendPool { + for id, request := range pendPool { if time.Since(request.Time) > timeout { // Update the metrics with the timeout if len(request.Hashes) > 0 { @@ -421,14 +550,14 @@ func (q *queue) Expire(timeout time.Duration) []string { q.hashQueue.Push(hash, float32(index)) } for _, header := range request.Headers { - q.headerQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -float32(header.Number.Uint64())) } peers = append(peers, id) } } // Remove the expired requests from the pending pool for _, id := range peers { - delete(q.pendPool, id) + delete(pendPool, id) } return peers } @@ -439,12 +568,12 @@ func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) { defer q.lock.Unlock() // Short circuit if the blocks were never requested - request := q.pendPool[id] + request := q.blockPendPool[id] if request == nil { return errNoFetchesPending } blockReqTimer.UpdateSince(request.Time) - delete(q.pendPool, id) + delete(q.blockPendPool, id) // If no blocks were retrieved, mark them as unavailable for the origin peer if len(blocks) == 0 { @@ -461,10 +590,19 @@ func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) { errs = append(errs, fmt.Errorf("non-requested block %x", hash)) continue } - // Queue the block up for processing - if err := q.enqueue(id, block); err != nil { - return err + // Reconstruct the next result if contents match up + index := int(block.Number().Int64() - int64(q.resultOffset)) + if index >= len(q.resultCache) || index < 0 { + errs = []error{errInvalidChain} + break + } + q.resultCache[index] = &fetchResult{ + Header: block.Header(), + Transactions: block.Transactions(), + Uncles: block.Uncles(), } + q.blockDonePool[block.Hash()] = struct{}{} + delete(request.Hashes, hash) delete(q.hashPool, hash) } @@ -473,60 +611,94 @@ func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) { q.hashQueue.Push(hash, float32(index)) } // If none of the blocks were good, it's a stale delivery - if len(errs) != 0 { - if len(errs) == len(blocks) { - return errStaleDelivery - } + switch { + case len(errs) == 0: + return nil + + case len(errs) == 1 && (errs[0] == errInvalidChain || errs[0] == errInvalidBlock): + return errs[0] + + case len(errs) == len(request.Headers): + return errStaleDelivery + + default: return fmt.Errorf("multiple failures: %v", errs) } - return nil } -// Deliver injects a block body retrieval response into the download queue. -func (q *queue) Deliver(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) error { +// DeliverBlocks injects a block (body) retrieval response into the results queue. 
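The delivery error handling collapses a per-item error list into a single verdict: no errors means success, a single fatal validation error is surfaced as-is, everything failing means the delivery was stale, and anything else is reported as a mixed failure. The same decision in isolation (the error values are stand-ins for the downloader's sentinel errors):

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalid = errors.New("invalid data")   // stand-in for errInvalidChain/Body/Receipt
	errStale   = errors.New("stale delivery") // peer answered a request we no longer track
)

// classify folds the per-item errors of one delivery into a single outcome.
func classify(errs []error, requested int) error {
	switch {
	case len(errs) == 0:
		return nil // everything checked out
	case len(errs) == 1 && errs[0] == errInvalid:
		return errs[0] // one fatal validation failure aborts the whole delivery
	case len(errs) == requested:
		return errStale // nothing matched: the response belongs to an old request
	default:
		return fmt.Errorf("multiple failures: %v", errs)
	}
}

func main() {
	fmt.Println(classify(nil, 3))                         // <nil>
	fmt.Println(classify([]error{errInvalid}, 3))         // invalid data
	fmt.Println(classify([]error{errStale, errStale}, 2)) // stale delivery
}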
+func (q *queue) DeliverBlocks(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) error { + reconstruct := func(header *types.Header, index int, result *fetchResult) error { + if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash { + return errInvalidBody + } + result.Transactions = txLists[index] + result.Uncles = uncleLists[index] + return nil + } + return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct) +} + +// DeliverReceipts injects a receipt retrieval response into the results queue. +func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) error { + reconstruct := func(header *types.Header, index int, result *fetchResult) error { + if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash { + return errInvalidReceipt + } + result.Receipts = receiptList[index] + return nil + } + return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct) +} + +// deliver injects a data retrieval response into the results queue. +func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, pendPool map[string]*fetchRequest, + donePool map[common.Hash]struct{}, reqTimer metrics.Timer, results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) error { q.lock.Lock() defer q.lock.Unlock() - // Short circuit if the block bodies were never requested - request := q.pendPool[id] + // Short circuit if the data was never requested + request := pendPool[id] if request == nil { return errNoFetchesPending } - bodyReqTimer.UpdateSince(request.Time) - delete(q.pendPool, id) + reqTimer.UpdateSince(request.Time) + delete(pendPool, id) - // If no block bodies were retrieved, mark them as unavailable for the origin peer - if len(txLists) == 0 || len(uncleLists) == 0 { + // If no data items were retrieved, mark them as unavailable for the origin peer + if results == 0 { for hash, _ := range request.Headers { request.Peer.ignored.Add(hash) } } - // Assemble each of the block bodies with their headers and queue for processing + // Assemble each of the results with their headers and retrieved data parts errs := make([]error, 0) for i, header := range request.Headers { - // Short circuit block assembly if no more bodies are found - if i >= len(txLists) || i >= len(uncleLists) { + // Short circuit assembly if no more fetch results are found + if i >= results { break } - // Reconstruct the next block if contents match up - if types.DeriveSha(types.Transactions(txLists[i])) != header.TxHash || types.CalcUncleHash(uncleLists[i]) != header.UncleHash { - errs = []error{errInvalidBody} + // Reconstruct the next result if contents match up + index := int(header.Number.Int64() - int64(q.resultOffset)) + if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil { + errs = []error{errInvalidChain} break } - block := types.NewBlockWithHeader(header).WithBody(txLists[i], uncleLists[i]) - - // Queue the block up for processing - if err := q.enqueue(id, block); err != nil { + if err := reconstruct(header, i, q.resultCache[index]); err != nil { errs = []error{err} break } + donePool[header.Hash()] = struct{}{} + q.resultCache[index].Pending-- + + // Clean up a successful fetch request.Headers[i] = nil - delete(q.headerPool, header.Hash()) + delete(taskPool, 
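Each delivery is verified purely against the already-validated header: the transactions must re-hash to the header's TxHash, the uncles to its UncleHash, and the receipts to its ReceiptHash, so a peer cannot slip mismatched data into a result slot. A hedged sketch of the body check using the same go-ethereum helpers invoked above (it assumes the core/types package of this tree; verifyBody is an illustrative wrapper, not queue API):

package main

import (
	"errors"

	"github.com/ethereum/go-ethereum/core/types"
)

var errInvalidBody = errors.New("retrieved block body is invalid")

// verifyBody checks a delivered (transactions, uncles) pair against the header
// it was requested for, the same way the DeliverBlocks reconstruct closure does.
func verifyBody(header *types.Header, txs []*types.Transaction, uncles []*types.Header) error {
	if types.DeriveSha(types.Transactions(txs)) != header.TxHash {
		return errInvalidBody // transaction list does not hash to the committed root
	}
	if types.CalcUncleHash(uncles) != header.UncleHash {
		return errInvalidBody // uncle list does not hash to the committed root
	}
	return nil
}

func main() {
	// An empty body should verify against a header committing to the empty roots.
	header := &types.Header{TxHash: types.EmptyRootHash, UncleHash: types.EmptyUncleHash}
	if err := verifyBody(header, nil, nil); err != nil {
		panic(err)
	}
}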
header.Hash()) } // Return all failed or missing fetches to the queue for _, header := range request.Headers { if header != nil { - q.headerQueue.Push(header, -float32(header.Number.Uint64())) + taskQueue.Push(header, -float32(header.Number.Uint64())) } } // If none of the blocks were good, it's a stale delivery @@ -534,11 +706,8 @@ func (q *queue) Deliver(id string, txLists [][]*types.Transaction, uncleLists [] case len(errs) == 0: return nil - case len(errs) == 1 && errs[0] == errInvalidBody: - return errInvalidBody - - case len(errs) == 1 && errs[0] == errInvalidChain: - return errInvalidChain + case len(errs) == 1 && (errs[0] == errInvalidChain || errs[0] == errInvalidBody || errs[0] == errInvalidReceipt): + return errs[0] case len(errs) == len(request.Headers): return errStaleDelivery @@ -548,29 +717,14 @@ func (q *queue) Deliver(id string, txLists [][]*types.Transaction, uncleLists [] } } -// enqueue inserts a new block into the final delivery queue, waiting for pickup -// by the processor. -func (q *queue) enqueue(origin string, block *types.Block) error { - // If a requested block falls out of the range, the hash chain is invalid - index := int(int64(block.NumberU64()) - int64(q.blockOffset)) - if index >= len(q.blockCache) || index < 0 { - return errInvalidChain - } - // Otherwise merge the block and mark the hash done - q.blockCache[index] = &Block{ - RawBlock: block, - OriginPeer: origin, - } - q.blockPool[block.Header().Hash()] = block.NumberU64() - return nil -} - -// Prepare configures the block cache offset to allow accepting inbound blocks. -func (q *queue) Prepare(offset uint64) { +// Prepare configures the result cache to allow accepting and caching inbound +// fetch results. +func (q *queue) Prepare(offset uint64, parts int) { q.lock.Lock() defer q.lock.Unlock() - if q.blockOffset < offset { - q.blockOffset = offset + if q.resultOffset < offset { + q.resultOffset = offset } + q.resultParts = parts } diff --git a/eth/handler.go b/eth/handler.go index 021be1024..daa285730 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -120,15 +120,25 @@ func NewProtocolManager(mode Mode, networkId int, mux *event.TypeMux, txpool txP return nil, errIncompatibleConfig } // Construct the different synchronisation mechanisms - manager.downloader = downloader.New(manager.eventMux, manager.blockchain.HasBlock, manager.blockchain.GetBlock, manager.blockchain.CurrentBlock, manager.blockchain.GetTd, manager.blockchain.InsertChain, manager.removePeer) + var syncMode downloader.SyncMode + switch mode { + case ArchiveMode: + syncMode = downloader.FullSync + case FullMode: + syncMode = downloader.FastSync + case LightMode: + syncMode = downloader.LightSync + } + manager.downloader = downloader.New(syncMode, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock, + blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, nil, manager.removePeer) validator := func(block *types.Block, parent *types.Block) error { return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false) } heighter := func() uint64 { - return manager.blockchain.CurrentBlock().NumberU64() + return blockchain.CurrentBlock().NumberU64() } - manager.fetcher = fetcher.New(manager.blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, manager.blockchain.InsertChain, manager.removePeer) + manager.fetcher = fetcher.New(blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, 
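Prepare fixes both where the result window starts and how many components each slot must collect before it counts as done. The per-mode component count is not spelled out in these hunks; a plausible reading, sketched here as an explicit assumption, is that a full sync needs only the body while a fast sync needs body plus receipts, and a light sync schedules no block content at all:

package main

import "fmt"

type SyncMode int

const (
	FullSync  SyncMode = iota // download and execute full blocks
	FastSync                  // download headers, bodies and receipts
	LightSync                 // download headers only
)

// resultParts guesses the number of fetch components per block for each mode.
// Assumption: 1 (body) for full sync, 2 (body + receipts) for fast sync.
func resultParts(mode SyncMode) int {
	switch mode {
	case FastSync:
		return 2
	case FullSync:
		return 1
	default:
		return 0
	}
}

func main() {
	fmt.Println(resultParts(FullSync), resultParts(FastSync), resultParts(LightSync)) // 1 2 0
}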
blockchain.InsertChain, manager.removePeer) return manager, nil } @@ -210,7 +220,7 @@ func (pm *ProtocolManager) handle(p *peer) error { // Register the peer in the downloader. If the downloader considers it banned, we disconnect if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(), p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks, - p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies); err != nil { + p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies, p.RequestReceipts); err != nil { return err } // Propagate existing transactions. new transactions appearing @@ -514,22 +524,31 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { var ( hash common.Hash bytes int - receipts []*types.Receipt + receipts []rlp.RawValue ) - for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptsFetch { - // Retrieve the hash of the next transaction receipt + for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch { + // Retrieve the hash of the next block if err := msgStream.Decode(&hash); err == rlp.EOL { break } else if err != nil { return errResp(ErrDecode, "msg %v: %v", msg, err) } - // Retrieve the requested receipt, stopping if enough was found - if receipt := core.GetReceipt(pm.chaindb, hash); receipt != nil { - receipts = append(receipts, receipt) - bytes += len(receipt.RlpEncode()) + // Retrieve the requested block's receipts, skipping if unknown to us + results := core.GetBlockReceipts(pm.chaindb, hash) + if results == nil { + if header := pm.blockchain.GetHeader(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { + continue + } + } + // If known, encode and queue for response packet + if encoded, err := rlp.EncodeToBytes(results); err != nil { + glog.V(logger.Error).Infof("failed to encode receipt: %v", err) + } else { + receipts = append(receipts, encoded) + bytes += len(encoded) } } - return p.SendReceipts(receipts) + return p.SendReceiptsRLP(receipts) case msg.Code == NewBlockHashesMsg: // Retrieve and deseralize the remote new block hashes notification diff --git a/eth/handler_test.go b/eth/handler_test.go index 8ab5c1aad..5ddfc4a8f 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -535,15 +535,12 @@ func testGetReceipt(t *testing.T, protocol int) { defer peer.close() // Collect the hashes to request, and the response to expect - hashes := []common.Hash{} + hashes, receipts := []common.Hash{}, []types.Receipts{} for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { - for _, tx := range pm.blockchain.GetBlockByNumber(i).Transactions() { - hashes = append(hashes, tx.Hash()) - } - } - receipts := make([]*types.Receipt, len(hashes)) - for i, hash := range hashes { - receipts[i] = core.GetReceipt(pm.chaindb, hash) + block := pm.blockchain.GetBlockByNumber(i) + + hashes = append(hashes, block.Hash()) + receipts = append(receipts, core.GetBlockReceipts(pm.chaindb, block.Hash())) } // Send the hash request and verify the response p2p.Send(peer.app, 0x0f, hashes) diff --git a/eth/peer.go b/eth/peer.go index 603b49b88..e24be97f1 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -197,9 +197,9 @@ func (p *peer) SendNodeData(data [][]byte) error { return p2p.Send(p.rw, NodeDataMsg, data) } -// SendReceipts sends a batch of transaction receipts, corresponding to the ones -// requested. 
-func (p *peer) SendReceipts(receipts []*types.Receipt) error { +// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the +// ones requested from an already RLP encoded format. +func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error { return p2p.Send(p.rw, ReceiptsMsg, receipts) } -- cgit v1.2.3 From 42c8afd44006b170c20159abaadc31cc7545bec2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 29 Sep 2015 19:36:16 +0300 Subject: core: differentiate receipt concensus and storage decoding --- eth/filters/filter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'eth') diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 2e81ea177..ff192cdf6 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -138,7 +138,7 @@ func (self *Filter) getLogs(start, end uint64) (logs vm.Logs) { unfiltered vm.Logs ) for _, receipt := range receipts { - unfiltered = append(unfiltered, receipt.Logs()...) + unfiltered = append(unfiltered, receipt.Logs...) } logs = append(logs, self.FilterLogs(unfiltered)...) } -- cgit v1.2.3 From 832b37c8221e330896c36eb419d92af6b1fdc9dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 30 Sep 2015 19:23:31 +0300 Subject: core, eth: receipt chain reconstruction --- eth/downloader/downloader.go | 55 ++++++---- eth/downloader/downloader_test.go | 225 ++++++++++++++++++++++---------------- eth/fetcher/fetcher_test.go | 2 +- eth/handler.go | 60 ++++++---- eth/helper_test.go | 2 +- eth/protocol.go | 2 +- 6 files changed, 205 insertions(+), 141 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 7ae7aa221..24ba3da17 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -102,6 +102,9 @@ type headHeaderRetrievalFn func() *types.Header // headBlockRetrievalFn is a callback type for retrieving the head block from the local chain. type headBlockRetrievalFn func() *types.Block +// headFastBlockRetrievalFn is a callback type for retrieving the head fast block from the local chain. +type headFastBlockRetrievalFn func() *types.Block + // tdRetrievalFn is a callback type for retrieving the total difficulty of a local block. 
type tdRetrievalFn func(common.Hash) *big.Int @@ -188,17 +191,18 @@ type Downloader struct { syncStatsLock sync.RWMutex // Lock protecting the sync stats fields // Callbacks - hasHeader headerCheckFn // Checks if a header is present in the chain - hasBlock blockCheckFn // Checks if a block is present in the chain - getHeader headerRetrievalFn // Retrieves a header from the chain - getBlock blockRetrievalFn // Retrieves a block from the chain - headHeader headHeaderRetrievalFn // Retrieves the head header from the chain - headBlock headBlockRetrievalFn // Retrieves the head block from the chain - getTd tdRetrievalFn // Retrieves the TD of a block from the chain - insertHeaders headerChainInsertFn // Injects a batch of headers into the chain - insertBlocks blockChainInsertFn // Injects a batch of blocks into the chain - insertReceipts receiptChainInsertFn // Injects a batch of blocks and their receipts into the chain - dropPeer peerDropFn // Drops a peer for misbehaving + hasHeader headerCheckFn // Checks if a header is present in the chain + hasBlock blockCheckFn // Checks if a block is present in the chain + getHeader headerRetrievalFn // Retrieves a header from the chain + getBlock blockRetrievalFn // Retrieves a block from the chain + headHeader headHeaderRetrievalFn // Retrieves the head header from the chain + headBlock headBlockRetrievalFn // Retrieves the head block from the chain + headFastBlock headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain + getTd tdRetrievalFn // Retrieves the TD of a block from the chain + insertHeaders headerChainInsertFn // Injects a batch of headers into the chain + insertBlocks blockChainInsertFn // Injects a batch of blocks into the chain + insertReceipts receiptChainInsertFn // Injects a batch of blocks and their receipts into the chain + dropPeer peerDropFn // Drops a peer for misbehaving // Status synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing @@ -229,8 +233,8 @@ type Downloader struct { // New creates a new downloader to fetch hashes and blocks from remote peers. 
func New(mode SyncMode, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock blockCheckFn, getHeader headerRetrievalFn, getBlock blockRetrievalFn, - headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, - insertReceipts receiptChainInsertFn, dropPeer peerDropFn) *Downloader { + headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, headFastBlock headFastBlockRetrievalFn, getTd tdRetrievalFn, + insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, dropPeer peerDropFn) *Downloader { return &Downloader{ mode: mode, @@ -243,6 +247,7 @@ func New(mode SyncMode, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock bl getBlock: getBlock, headHeader: headHeader, headBlock: headBlock, + headFastBlock: headFastBlock, getTd: getTd, insertHeaders: insertHeaders, insertBlocks: insertBlocks, @@ -393,7 +398,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e }() glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version) - defer glog.V(logger.Debug).Infof("Synchronisation terminated") + defer func(start time.Time) { + glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start)) + }(time.Now()) switch { case p.version == 61: @@ -989,6 +996,8 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) { head := d.headHeader().Number.Uint64() if d.mode == FullSync { head = d.headBlock().NumberU64() + } else if d.mode == FastSync { + head = d.headFastBlock().NumberU64() } from := int64(head) - int64(MaxHeaderFetch) + 1 if from < 0 { @@ -1020,7 +1029,7 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) { // Check if a common ancestor was found finished = true for i := len(headers) - 1; i >= 0; i-- { - if (d.mode == FullSync && d.hasBlock(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) { + if (d.mode != LightSync && d.hasBlock(headers[i].Hash())) || (d.mode == LightSync && d.hasHeader(headers[i].Hash())) { number, hash = headers[i].Number.Uint64(), headers[i].Hash() break } @@ -1182,17 +1191,18 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { // Otherwise insert all the new headers, aborting in case of junk glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headerPack.headers), from) + if d.mode == FastSync || d.mode == LightSync { + if n, err := d.insertHeaders(headerPack.headers, false); err != nil { + glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headerPack.headers[n].Number, headerPack.headers[n].Hash().Bytes()[:4], err) + return errInvalidChain + } + } if d.mode == FullSync || d.mode == FastSync { inserts := d.queue.Schedule(headerPack.headers, from, d.mode == FastSync) if len(inserts) != len(headerPack.headers) { glog.V(logger.Debug).Infof("%v: stale headers", p) return errBadPeer } - } else { - if n, err := d.insertHeaders(headerPack.headers, true); err != nil { - glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headerPack.headers[n].Number, headerPack.headers[n].Hash().Bytes()[:4], err) - return errInvalidChain - } } // Notify the content fetchers of new headers, but stop if queue is full cont := d.queue.PendingBlocks() < maxQueuedHeaders || d.queue.PendingReceipts() < maxQueuedHeaders @@ -1394,6 +1404,7 @@ func (d *Downloader) fetchParts(from uint64, errCancel error, deliveryCh chan da for _, pid := range expire() { if 
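The common-ancestor search starts from a different notion of the local head per mode: the last fully imported block for a full sync, the last fast-synced block for a fast sync, and the last known header otherwise. A small sketch of that selection, with the three callbacks standing in for headBlock, headFastBlock and headHeader:

package main

import "fmt"

type SyncMode int

const (
	FullSync SyncMode = iota
	FastSync
	LightSync
)

// localHead picks the height the ancestor search should begin from, mirroring
// the mode switch in findAncestor; the callbacks stand in for the chain accessors.
func localHead(mode SyncMode, headBlock, headFastBlock, headHeader func() uint64) uint64 {
	switch mode {
	case FullSync:
		return headBlock() // only fully imported blocks count
	case FastSync:
		return headFastBlock() // fast-synced blocks (with receipts) count
	default:
		return headHeader() // light sync only tracks headers
	}
}

func main() {
	headBlock := func() uint64 { return 90 }
	headFast := func() uint64 { return 150 }
	headHeader := func() uint64 { return 200 }
	fmt.Println(localHead(FullSync, headBlock, headFast, headHeader))  // 90
	fmt.Println(localHead(FastSync, headBlock, headFast, headHeader))  // 150
	fmt.Println(localHead(LightSync, headBlock, headFast, headHeader)) // 200
}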
peer := d.peers.Peer(pid); peer != nil { peer.Demote() + setIdle(peer) glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind)) } } @@ -1497,7 +1508,7 @@ func (d *Downloader) process() { // Actually import the blocks if glog.V(logger.Debug) { first, last := results[0].Header, results[len(results)-1].Header - glog.V(logger.Debug).Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4]) + glog.Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4]) } for len(results) != 0 { // Check for any termination requests @@ -1536,7 +1547,7 @@ func (d *Downloader) process() { index, err = d.insertHeaders(headers, true) } if err != nil { - glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash(), err) + glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err) d.cancel() return } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 18bdb56dd..68c4ca26e 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -45,9 +45,9 @@ var ( // the returned hash chain is ordered head->parent. In addition, every 3rd block // contains a transaction and every 5th an uncle to allow testing correct block // reassembly. -func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block) { +func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) { // Generate the block chain - blocks := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) { + blocks, receipts := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) { block.SetCoinbase(common.Address{seed}) // If the block number is multiple of 3, send a bonus transaction to the miner @@ -73,25 +73,29 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common blockm := make(map[common.Hash]*types.Block, n+1) blockm[parent.Hash()] = parent + receiptm := make(map[common.Hash]types.Receipts, n+1) + receiptm[parent.Hash()] = parentReceipts + for i, b := range blocks { hashes[len(hashes)-i-2] = b.Hash() headerm[b.Hash()] = b.Header() blockm[b.Hash()] = b + receiptm[b.Hash()] = receipts[i] } - return hashes, headerm, blockm + return hashes, headerm, blockm, receiptm } // makeChainFork creates two chains of length n, such that h1[:f] and // h2[:f] are different but have a common suffix of length n-f. 
-func makeChainFork(n, f int, parent *types.Block) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block) { +func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) { // Create the common suffix - hashes, headers, blocks := makeChain(n-f, 0, parent) + hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts) // Create the forks - hashes1, headers1, blocks1 := makeChain(f, 1, blocks[hashes[0]]) + hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]]) hashes1 = append(hashes1, hashes[1:]...) - hashes2, headers2, blocks2 := makeChain(f, 2, blocks[hashes[0]]) + hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]]) hashes2 = append(hashes2, hashes[1:]...) for hash, header := range headers { @@ -102,22 +106,28 @@ func makeChainFork(n, f int, parent *types.Block) ([]common.Hash, []common.Hash, blocks1[hash] = block blocks2[hash] = block } - return hashes1, hashes2, headers1, headers2, blocks1, blocks2 + for hash, receipt := range receipts { + receipts1[hash] = receipt + receipts2[hash] = receipt + } + return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2 } // downloadTester is a test simulator for mocking out local block chain. type downloadTester struct { downloader *Downloader - ownHashes []common.Hash // Hash chain belonging to the tester - ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester - ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester - ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester - ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain - peerHashes map[string][]common.Hash // Hash chain belonging to different test peers - peerHeaders map[string]map[common.Hash]*types.Header // Headers belonging to different test peers - peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers - peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains + ownHashes []common.Hash // Hash chain belonging to the tester + ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester + ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester + ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester + ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain + + peerHashes map[string][]common.Hash // Hash chain belonging to different test peers + peerHeaders map[string]map[common.Hash]*types.Header // Headers belonging to different test peers + peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers + peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers + peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains lock sync.RWMutex } @@ -128,15 +138,16 @@ func newTester(mode SyncMode) *downloadTester { ownHashes: []common.Hash{genesis.Hash()}, ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()}, ownBlocks: 
map[common.Hash]*types.Block{genesis.Hash(): genesis}, - ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): genesis.Receipts()}, + ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): nil}, ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()}, peerHashes: make(map[string][]common.Hash), peerHeaders: make(map[string]map[common.Hash]*types.Header), peerBlocks: make(map[string]map[common.Hash]*types.Block), + peerReceipts: make(map[string]map[common.Hash]types.Receipts), peerChainTds: make(map[string]map[common.Hash]*big.Int), } tester.downloader = New(mode, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, tester.getBlock, - tester.headHeader, tester.headBlock, tester.getTd, tester.insertHeaders, tester.insertBlocks, tester.insertConfirmedBlocks, tester.dropPeer) + tester.headHeader, tester.headBlock, tester.headFastBlock, tester.getTd, tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.dropPeer) return tester } @@ -197,7 +208,12 @@ func (dl *downloadTester) headHeader() *types.Header { dl.lock.RLock() defer dl.lock.RUnlock() - return dl.getHeader(dl.ownHashes[len(dl.ownHashes)-1]) + for i := len(dl.ownHashes) - 1; i >= 0; i-- { + if header := dl.getHeader(dl.ownHashes[i]); header != nil { + return header + } + } + return nil } // headBlock retrieves the current head block from the canonical chain. @@ -213,6 +229,21 @@ func (dl *downloadTester) headBlock() *types.Block { return nil } +// headFastBlock retrieves the current head fast-sync block from the canonical chain. +func (dl *downloadTester) headFastBlock() *types.Block { + dl.lock.RLock() + defer dl.lock.RUnlock() + + for i := len(dl.ownHashes) - 1; i >= 0; i-- { + if block := dl.getBlock(dl.ownHashes[i]); block != nil { + if _, ok := dl.ownReceipts[block.Hash()]; ok { + return block + } + } + } + return nil +} + // getTd retrieves the block's total difficulty from the canonical chain. func (dl *downloadTester) getTd(hash common.Hash) *big.Int { dl.lock.RLock() @@ -227,6 +258,9 @@ func (dl *downloadTester) insertHeaders(headers []*types.Header, verify bool) (i defer dl.lock.Unlock() for i, header := range headers { + if _, ok := dl.ownHeaders[header.Hash()]; ok { + continue + } if _, ok := dl.ownHeaders[header.ParentHash]; !ok { return i, errors.New("unknown parent") } @@ -254,33 +288,33 @@ func (dl *downloadTester) insertBlocks(blocks types.Blocks) (int, error) { return len(blocks), nil } -// insertBlocks injects a new batch of blocks into the simulated chain. -func (dl *downloadTester) insertConfirmedBlocks(blocks types.Blocks, receipts []types.Receipts) (int, error) { +// insertReceipts injects a new batch of blocks into the simulated chain. +func (dl *downloadTester) insertReceipts(blocks types.Blocks, receipts []types.Receipts) (int, error) { dl.lock.Lock() defer dl.lock.Unlock() for i := 0; i < len(blocks) && i < len(receipts); i++ { + if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok { + return i, errors.New("unknown owner") + } if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok { return i, errors.New("unknown parent") } - dl.ownHashes = append(dl.ownHashes, blocks[i].Hash()) - dl.ownHeaders[blocks[i].Hash()] = blocks[i].Header() dl.ownBlocks[blocks[i].Hash()] = blocks[i] - dl.ownReceipts[blocks[i].Hash()] = blocks[i].Receipts() - dl.ownChainTd[blocks[i].Hash()] = dl.ownChainTd[blocks[i].ParentHash()] + dl.ownReceipts[blocks[i].Hash()] = receipts[i] } return len(blocks), nil } // newPeer registers a new block download source into the downloader. 
-func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block) error { - return dl.newSlowPeer(id, version, hashes, headers, blocks, 0) +func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error { + return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0) } // newSlowPeer registers a new block download source into the downloader, with a // specific delay time on processing the network packets sent to it, simulating // potentially slow network IO. -func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, delay time.Duration) error { +func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error { dl.lock.Lock() defer dl.lock.Unlock() @@ -302,6 +336,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha dl.peerHeaders[id] = make(map[common.Hash]*types.Header) dl.peerBlocks[id] = make(map[common.Hash]*types.Block) + dl.peerReceipts[id] = make(map[common.Hash]types.Receipts) dl.peerChainTds[id] = make(map[common.Hash]*big.Int) for _, hash := range hashes { @@ -317,6 +352,9 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()]) } } + if receipt, ok := receipts[hash]; ok { + dl.peerReceipts[id][hash] = receipt + } } } return err @@ -501,15 +539,15 @@ func (dl *downloadTester) peerGetReceiptsFn(id string, delay time.Duration) func dl.lock.RLock() defer dl.lock.RUnlock() - blocks := dl.peerBlocks[id] + receipts := dl.peerReceipts[id] - receipts := make([][]*types.Receipt, 0, len(hashes)) + results := make([][]*types.Receipt, 0, len(hashes)) for _, hash := range hashes { - if block, ok := blocks[hash]; ok { - receipts = append(receipts, block.Receipts()) + if receipt, ok := receipts[hash]; ok { + results = append(results, receipt) } } - go dl.downloader.DeliverReceipts(id, receipts) + go dl.downloader.DeliverReceipts(id, results) return nil } @@ -551,10 +589,10 @@ func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronis func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) tester := newTester(mode) - tester.newPeer("peer", protocol, hashes, headers, blocks) + tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Synchronise with the peer and make sure all relevant data was retrieved if err := tester.sync("peer", nil); err != nil { @@ -575,10 +613,10 @@ func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) } func testThrottling(t *testing.T, protocol int, mode SyncMode) { // Create a long block chain to download and the tester targetBlocks := 8 * blockCacheLimit - hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) tester 
:= newTester(mode) - tester.newPeer("peer", protocol, hashes, headers, blocks) + tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Wrap the importer to allow stepping blocked, proceed := uint32(0), make(chan struct{}) @@ -650,11 +688,11 @@ func TestForkedSynchronisation64Light(t *testing.T) { testForkedSynchronisation( func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) { // Create a long enough forked chain common, fork := MaxHashFetch, 2*MaxHashFetch - hashesA, hashesB, headersA, headersB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis) + hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil) tester := newTester(mode) - tester.newPeer("fork A", protocol, hashesA, headersA, blocksA) - tester.newPeer("fork B", protocol, hashesB, headersB, blocksB) + tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA) + tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB) // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("fork A", nil); err != nil { @@ -731,10 +769,10 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) { if targetBlocks >= MaxHeaderFetch { targetBlocks = MaxHeaderFetch - 15 } - hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) tester := newTester(mode) - tester.newPeer("peer", protocol, hashes, headers, blocks) + tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Make sure canceling works with a pristine downloader tester.downloader.cancel() @@ -764,12 +802,12 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) { // Create various peers with various parts of the chain targetPeers := 8 targetBlocks := targetPeers*blockCacheLimit - 15 - hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) tester := newTester(mode) for i := 0; i < targetPeers; i++ { id := fmt.Sprintf("peer #%d", i) - tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks) + tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts) } // Synchronise with the middle peer and make sure half of the blocks were retrieved id := fmt.Sprintf("peer #%d", targetPeers/2) @@ -798,22 +836,21 @@ func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) // Create peers of every type tester := newTester(mode) - tester.newPeer("peer 61", 61, hashes, headers, blocks) - tester.newPeer("peer 62", 62, hashes, headers, blocks) - tester.newPeer("peer 63", 63, hashes, headers, blocks) - tester.newPeer("peer 64", 64, hashes, headers, blocks) + tester.newPeer("peer 61", 61, hashes, headers, blocks, receipts) + tester.newPeer("peer 62", 62, hashes, headers, blocks, receipts) + tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts) + tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts) // Synchronise with the requestd peer and make sure all blocks were retrieved if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil); err != nil 
{ t.Fatalf("failed to synchronise blocks: %v", err) } - if imported := len(tester.ownBlocks); imported != targetBlocks+1 { - t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1) - } + assertOwnChain(t, tester, targetBlocks+1) + // Check that no peers have been dropped off for _, version := range []int{61, 62, 63, 64} { peer := fmt.Sprintf("peer %d", version) @@ -835,18 +872,18 @@ func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, L func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) tester := newTester(mode) - tester.newPeer("peer", protocol, hashes, headers, blocks) + tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Instrument the downloader to signal body requests - bodies, receipts := int32(0), int32(0) + bodiesHave, receiptsHave := int32(0), int32(0) tester.downloader.bodyFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&bodies, int32(len(headers))) + atomic.AddInt32(&bodiesHave, int32(len(headers))) } tester.downloader.receiptFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&receipts, int32(len(headers))) + atomic.AddInt32(&receiptsHave, int32(len(headers))) } // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("peer", nil); err != nil { @@ -860,15 +897,17 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { if mode != LightSync && block != genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { bodiesNeeded++ } - if mode == FastSync && block != genesis && len(block.Receipts()) > 0 { + } + for _, receipt := range receipts { + if mode == FastSync && len(receipt) > 0 { receiptsNeeded++ } } - if int(bodies) != bodiesNeeded { - t.Errorf("body retrieval count mismatch: have %v, want %v", bodies, bodiesNeeded) + if int(bodiesHave) != bodiesNeeded { + t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded) } - if int(receipts) != receiptsNeeded { - t.Errorf("receipt retrieval count mismatch: have %v, want %v", receipts, receiptsNeeded) + if int(receiptsHave) != receiptsNeeded { + t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded) } } @@ -884,21 +923,20 @@ func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 6 func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) tester := newTester(mode) // Attempt a full sync with an attacker feeding gapped headers - tester.newPeer("attack", protocol, hashes, headers, blocks) + tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) missing := targetBlocks / 2 delete(tester.peerHeaders["attack"], hashes[missing]) - delete(tester.peerBlocks["attack"], hashes[missing]) if err := tester.sync("attack", nil); err == nil { t.Fatalf("succeeded attacker synchronisation") } // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, hashes, headers, blocks) + tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) if err := 
tester.sync("valid", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } @@ -917,20 +955,21 @@ func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 6 func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) tester := newTester(mode) // Attempt a full sync with an attacker feeding shifted headers - tester.newPeer("attack", protocol, hashes, headers, blocks) + tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) delete(tester.peerHeaders["attack"], hashes[len(hashes)-2]) delete(tester.peerBlocks["attack"], hashes[len(hashes)-2]) + delete(tester.peerReceipts["attack"], hashes[len(hashes)-2]) if err := tester.sync("attack", nil); err == nil { t.Fatalf("succeeded attacker synchronisation") } // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, hashes, headers, blocks) + tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) if err := tester.sync("valid", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } @@ -949,24 +988,24 @@ func TestInvalidContentAttack64Light(t *testing.T) { testInvalidContentAttack(t, func testInvalidContentAttack(t *testing.T, protocol int, mode SyncMode) { // Create two peers, one feeding invalid block bodies targetBlocks := 4*blockCacheLimit - 15 - hashes, headers, validBlocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, validBlocks, validReceipts := makeChain(targetBlocks, 0, genesis, nil) invalidBlocks := make(map[common.Hash]*types.Block) for hash, block := range validBlocks { invalidBlocks[hash] = types.NewBlockWithHeader(block.Header()) } - invalidReceipts := make(map[common.Hash]*types.Block) - for hash, block := range validBlocks { - invalidReceipts[hash] = types.NewBlockWithHeader(block.Header()).WithBody(block.Transactions(), block.Uncles()) + invalidReceipts := make(map[common.Hash]types.Receipts) + for hash, _ := range validReceipts { + invalidReceipts[hash] = types.Receipts{&types.Receipt{}} } tester := newTester(mode) - tester.newPeer("valid", protocol, hashes, headers, validBlocks) + tester.newPeer("valid", protocol, hashes, headers, validBlocks, validReceipts) if mode != LightSync { - tester.newPeer("body attack", protocol, hashes, headers, invalidBlocks) + tester.newPeer("body attack", protocol, hashes, headers, invalidBlocks, validReceipts) } if mode == FastSync { - tester.newPeer("receipt attack", protocol, hashes, headers, invalidReceipts) + tester.newPeer("receipt attack", protocol, hashes, headers, validBlocks, invalidReceipts) } // Synchronise with the valid peer (will pull contents from the attacker too) if err := tester.sync("valid", nil); err != nil { @@ -995,9 +1034,9 @@ func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttac func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) { tester := newTester(mode) - hashes, headers, blocks := makeChain(0, 0, genesis) + hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil) - tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks) + tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts) if err := tester.sync("attack", big.NewInt(1000000)); err != errStallingPeer { t.Fatalf("synchronisation error 
mismatch: have %v, want %v", err, errStallingPeer) } @@ -1040,7 +1079,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { for i, tt := range tests { // Register a new peer and ensure it's presence id := fmt.Sprintf("test %d", i) - if err := tester.newPeer(id, protocol, []common.Hash{genesis.Hash()}, nil, nil); err != nil { + if err := tester.newPeer(id, protocol, []common.Hash{genesis.Hash()}, nil, nil, nil); err != nil { t.Fatalf("test %d: failed to register new peer: %v", i, err) } if _, ok := tester.peerHashes[id]; !ok { @@ -1069,7 +1108,7 @@ func TestSyncBoundaries64Light(t *testing.T) { testSyncBoundaries(t, 64, LightSy func testSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) // Set a sync init hook to catch boundary changes starting := make(chan struct{}) @@ -1085,7 +1124,7 @@ func testSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) } // Synchronise half the blocks and check initial boundaries - tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks) + tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts) pending := new(sync.WaitGroup) pending.Add(1) @@ -1103,7 +1142,7 @@ func testSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { pending.Wait() // Synchronise all the blocks and check continuation boundaries - tester.newPeer("peer-full", protocol, hashes, headers, blocks) + tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts) pending.Add(1) go func() { @@ -1134,7 +1173,7 @@ func TestForkedSyncBoundaries64Light(t *testing.T) { testForkedSyncBoundaries(t, func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { // Create a forked chain to simulate origin revertal common, fork := MaxHashFetch, 2*MaxHashFetch - hashesA, hashesB, headersA, headersB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis) + hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil) // Set a sync init hook to catch boundary changes starting := make(chan struct{}) @@ -1150,7 +1189,7 @@ func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) } // Synchronise with one of the forks and check boundaries - tester.newPeer("fork A", protocol, hashesA, headersA, blocksA) + tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA) pending := new(sync.WaitGroup) pending.Add(1) @@ -1171,7 +1210,7 @@ func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { tester.downloader.syncStatsOrigin = tester.downloader.syncStatsHeight // Synchronise with the second fork and check boundary resets - tester.newPeer("fork B", protocol, hashesB, headersB, blocksB) + tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB) pending.Add(1) go func() { @@ -1202,7 +1241,7 @@ func TestFailedSyncBoundaries64Light(t *testing.T) { testFailedSyncBoundaries(t, func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks := 
makeChain(targetBlocks, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) // Set a sync init hook to catch boundary changes starting := make(chan struct{}) @@ -1218,10 +1257,11 @@ func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) } // Attempt a full sync with a faulty peer - tester.newPeer("faulty", protocol, hashes, headers, blocks) + tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts) missing := targetBlocks / 2 delete(tester.peerHeaders["faulty"], hashes[missing]) delete(tester.peerBlocks["faulty"], hashes[missing]) + delete(tester.peerReceipts["faulty"], hashes[missing]) pending := new(sync.WaitGroup) pending.Add(1) @@ -1240,7 +1280,7 @@ func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { pending.Wait() // Synchronise with a good peer and check that the boundary origin remind the same after a failure - tester.newPeer("valid", protocol, hashes, headers, blocks) + tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) pending.Add(1) go func() { @@ -1270,7 +1310,7 @@ func TestFakedSyncBoundaries64Light(t *testing.T) { testFakedSyncBoundaries(t, 6 func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { // Create a small block chain targetBlocks := blockCacheLimit - 15 - hashes, headers, blocks := makeChain(targetBlocks+3, 0, genesis) + hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil) // Set a sync init hook to catch boundary changes starting := make(chan struct{}) @@ -1286,10 +1326,11 @@ func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) } // Create and sync with an attacker that promises a higher chain than available - tester.newPeer("attack", protocol, hashes, headers, blocks) + tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) for i := 1; i < 3; i++ { delete(tester.peerHeaders["attack"], hashes[i]) delete(tester.peerBlocks["attack"], hashes[i]) + delete(tester.peerReceipts["attack"], hashes[i]) } pending := new(sync.WaitGroup) @@ -1309,7 +1350,7 @@ func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { pending.Wait() // Synchronise with a good peer and check that the boundary height has been reduced to the true value - tester.newPeer("valid", protocol, hashes[3:], headers, blocks) + tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts) pending.Add(1) go func() { diff --git a/eth/fetcher/fetcher_test.go b/eth/fetcher/fetcher_test.go index 707d8d758..170a80aba 100644 --- a/eth/fetcher/fetcher_test.go +++ b/eth/fetcher/fetcher_test.go @@ -45,7 +45,7 @@ var ( // contains a transaction and every 5th an uncle to allow testing correct block // reassembly. 
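// core.GenerateChain now returns a second value alongside the blocks, so the
// fetcher's makeChain below simply discards it. A minimal sketch of the new
// call shape; treating the second return value as the per-block receipts is an
// assumption in this note (only the downloader tests above rely on it), and
// the helper name is illustrative:
func makeChainBlocksOnly(n int, seed byte, parent *types.Block) []*types.Block {
	blocks, _ := core.GenerateChain(parent, testdb, n, func(i int, gen *core.BlockGen) {
		// Same per-block mutation the real makeChain applies
		gen.SetCoinbase(common.Address{seed})
	})
	return blocks
}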
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) { - blocks := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) { + blocks, _ := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) { block.SetCoinbase(common.Address{seed}) // If the block number is multiple of 3, send a bonus transaction to the miner diff --git a/eth/handler.go b/eth/handler.go index daa285730..1117cb1b7 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -129,8 +129,9 @@ func NewProtocolManager(mode Mode, networkId int, mux *event.TypeMux, txpool txP case LightMode: syncMode = downloader.LightSync } - manager.downloader = downloader.New(syncMode, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock, - blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, nil, manager.removePeer) + manager.downloader = downloader.New(syncMode, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, + blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.GetTd, + blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, manager.removePeer) validator := func(block *types.Block, parent *types.Block) error { return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false) @@ -438,28 +439,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } } - case p.version >= eth62 && msg.Code == BlockBodiesMsg: - // A batch of block bodies arrived to one of our previous requests - var request blockBodiesData - if err := msg.Decode(&request); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Deliver them all to the downloader for queuing - trasactions := make([][]*types.Transaction, len(request)) - uncles := make([][]*types.Header, len(request)) - - for i, body := range request { - trasactions[i] = body.Transactions - uncles[i] = body.Uncles - } - // Filter out any explicitly requested bodies, deliver the rest to the downloader - if trasactions, uncles := pm.fetcher.FilterBodies(trasactions, uncles, time.Now()); len(trasactions) > 0 || len(uncles) > 0 { - err := pm.downloader.DeliverBodies(p.id, trasactions, uncles) - if err != nil { - glog.V(logger.Debug).Infoln(err) - } - } - case p.version >= eth62 && msg.Code == GetBlockBodiesMsg: // Decode the retrieval message msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) @@ -487,6 +466,28 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } return p.SendBlockBodiesRLP(bodies) + case p.version >= eth62 && msg.Code == BlockBodiesMsg: + // A batch of block bodies arrived to one of our previous requests + var request blockBodiesData + if err := msg.Decode(&request); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + // Deliver them all to the downloader for queuing + trasactions := make([][]*types.Transaction, len(request)) + uncles := make([][]*types.Header, len(request)) + + for i, body := range request { + trasactions[i] = body.Transactions + uncles[i] = body.Uncles + } + // Filter out any explicitly requested bodies, deliver the rest to the downloader + if trasactions, uncles := pm.fetcher.FilterBodies(trasactions, uncles, time.Now()); len(trasactions) > 0 || len(uncles) > 0 { + err := pm.downloader.DeliverBodies(p.id, trasactions, uncles) + if err != nil { + glog.V(logger.Debug).Infoln(err) + } + } + case p.version 
>= eth63 && msg.Code == GetNodeDataMsg: // Decode the retrieval message msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) @@ -550,6 +551,17 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } return p.SendReceiptsRLP(receipts) + case p.version >= eth63 && msg.Code == ReceiptsMsg: + // A batch of receipts arrived to one of our previous requests + var receipts [][]*types.Receipt + if err := msg.Decode(&receipts); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + // Deliver all to the downloader + if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil { + glog.V(logger.Debug).Infof("failed to deliver receipts: %v", err) + } + case msg.Code == NewBlockHashesMsg: // Retrieve and deseralize the remote new block hashes notification type announce struct { diff --git a/eth/helper_test.go b/eth/helper_test.go index bd65b49f8..ede0e3f15 100644 --- a/eth/helper_test.go +++ b/eth/helper_test.go @@ -38,7 +38,7 @@ func newTestProtocolManager(mode Mode, blocks int, generator func(int, *core.Blo blockproc = core.NewBlockProcessor(db, pow, blockchain, evmux) ) blockchain.SetProcessor(blockproc) - chain := core.GenerateChain(genesis, db, blocks, generator) + chain, _ := core.GenerateChain(genesis, db, blocks, generator) if _, err := blockchain.InsertChain(chain); err != nil { panic(err) } diff --git a/eth/protocol.go b/eth/protocol.go index 0d2b5128d..f2b98a8b1 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -55,7 +55,7 @@ var minimumProtocolVersion = map[Mode]uint{ var ProtocolVersions = []uint{eth64, eth63, eth62, eth61} // Number of implemented message corresponding to different protocol versions. -var ProtocolLengths = []uint64{15, 12, 8, 9} +var ProtocolLengths = []uint64{19, 17, 8, 9} const ( NetworkId = 1 -- cgit v1.2.3 From ab27bee25a845be90bd60e774ff68d2ea1501772 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 5 Oct 2015 19:37:56 +0300 Subject: core, eth, trie: direct state trie synchronization --- eth/downloader/downloader.go | 568 ++++++++++++++++---------------------- eth/downloader/downloader_test.go | 123 ++++++--- eth/downloader/metrics.go | 5 + eth/downloader/peer.go | 107 +++++-- eth/downloader/queue.go | 271 ++++++++++++++---- eth/downloader/types.go | 137 +++++++++ eth/handler.go | 25 +- eth/peer.go | 2 +- 8 files changed, 779 insertions(+), 459 deletions(-) create mode 100644 eth/downloader/types.go (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 24ba3da17..96177ae8a 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -19,7 +19,6 @@ package downloader import ( "errors" - "fmt" "math" "math/big" "strings" @@ -29,9 +28,11 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" + "github.com/rcrowley/go-metrics" ) var ( @@ -39,8 +40,8 @@ var ( MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request - MaxStateFetch = 384 // Amount of node state values to allow fetching per request MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request + MaxStateFetch = 384 // Amount of node state values to allow fetching per 
request hashTTL = 5 * time.Second // [eth/61] Time it takes for a hash request to time out blockSoftTTL = 3 * time.Second // [eth/61] Request completion threshold for increasing or decreasing a peer's bandwidth @@ -49,10 +50,13 @@ var ( bodySoftTTL = 3 * time.Second // [eth/62] Request completion threshold for increasing or decreasing a peer's bandwidth bodyHardTTL = 3 * bodySoftTTL // [eth/62] Maximum time allowance before a block body request is considered expired receiptSoftTTL = 3 * time.Second // [eth/63] Request completion threshold for increasing or decreasing a peer's bandwidth - receiptHardTTL = 3 * receiptSoftTTL // [eth/63] Maximum time allowance before a block body request is considered expired + receiptHardTTL = 3 * receiptSoftTTL // [eth/63] Maximum time allowance before a receipt request is considered expired + stateSoftTTL = 2 * time.Second // [eth/63] Request completion threshold for increasing or decreasing a peer's bandwidth + stateHardTTL = 3 * stateSoftTTL // [eth/63] Maximum time allowance before a node data request is considered expired maxQueuedHashes = 256 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection) maxQueuedHeaders = 256 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) + maxQueuedStates = 256 * 1024 // [eth/63] Maximum number of state requests to queue (DOS protection) maxResultsProcess = 256 // Number of download results to import at once into the chain headerCheckFrequency = 64 // Verification frequency of the downloaded headers during fast sync @@ -84,98 +88,6 @@ var ( errNoSyncActive = errors.New("no sync active") ) -// headerCheckFn is a callback type for verifying a header's presence in the local chain. -type headerCheckFn func(common.Hash) bool - -// blockCheckFn is a callback type for verifying a block's presence in the local chain. -type blockCheckFn func(common.Hash) bool - -// headerRetrievalFn is a callback type for retrieving a header from the local chain. -type headerRetrievalFn func(common.Hash) *types.Header - -// blockRetrievalFn is a callback type for retrieving a block from the local chain. -type blockRetrievalFn func(common.Hash) *types.Block - -// headHeaderRetrievalFn is a callback type for retrieving the head header from the local chain. -type headHeaderRetrievalFn func() *types.Header - -// headBlockRetrievalFn is a callback type for retrieving the head block from the local chain. -type headBlockRetrievalFn func() *types.Block - -// headFastBlockRetrievalFn is a callback type for retrieving the head fast block from the local chain. -type headFastBlockRetrievalFn func() *types.Block - -// tdRetrievalFn is a callback type for retrieving the total difficulty of a local block. -type tdRetrievalFn func(common.Hash) *big.Int - -// headerChainInsertFn is a callback type to insert a batch of headers into the local chain. -type headerChainInsertFn func([]*types.Header, bool) (int, error) - -// blockChainInsertFn is a callback type to insert a batch of blocks into the local chain. -type blockChainInsertFn func(types.Blocks) (int, error) - -// receiptChainInsertFn is a callback type to insert a batch of receipts into the local chain. -type receiptChainInsertFn func(types.Blocks, []types.Receipts) (int, error) - -// peerDropFn is a callback type for dropping a peer detected as malicious. -type peerDropFn func(id string) - -// dataPack is a data message returned by a peer for some query. 
-type dataPack interface {
-	PeerId() string
-	Empty() bool
-	Stats() string
-}
-
-// hashPack is a batch of block hashes returned by a peer (eth/61).
-type hashPack struct {
-	peerId string
-	hashes []common.Hash
-}
-
-// blockPack is a batch of blocks returned by a peer (eth/61).
-type blockPack struct {
-	peerId string
-	blocks []*types.Block
-}
-
-// headerPack is a batch of block headers returned by a peer.
-type headerPack struct {
-	peerId  string
-	headers []*types.Header
-}
-
-// bodyPack is a batch of block bodies returned by a peer.
-type bodyPack struct {
-	peerId       string
-	transactions [][]*types.Transaction
-	uncles       [][]*types.Header
-}
-
-// PeerId retrieves the origin peer who sent this block body packet.
-func (p *bodyPack) PeerId() string { return p.peerId }
-
-// Empty returns whether the no block bodies were delivered.
-func (p *bodyPack) Empty() bool { return len(p.transactions) == 0 || len(p.uncles) == 0 }
-
-// Stats creates a textual stats report for logging purposes.
-func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) }
-
-// receiptPack is a batch of receipts returned by a peer.
-type receiptPack struct {
-	peerId   string
-	receipts [][]*types.Receipt
-}
-
-// PeerId retrieves the origin peer who sent this receipt packet.
-func (p *receiptPack) PeerId() string { return p.peerId }
-
-// Empty returns whether the no receipts were delivered.
-func (p *receiptPack) Empty() bool { return len(p.receipts) == 0 }
-
-// Stats creates a textual stats report for logging purposes.
-func (p *receiptPack) Stats() string { return fmt.Sprintf("%d", len(p.receipts)) }
-
 type Downloader struct {
 	mode SyncMode       // Synchronisation mode defining the strategies used
 	mux  *event.TypeMux // Event multiplexer to announce sync operation events
@@ -186,23 +98,26 @@ type Downloader struct {
 	interrupt int32 // Atomic boolean to signal termination

 	// Statistics
-	syncStatsOrigin uint64       // Origin block number where syncing started at
-	syncStatsHeight uint64       // Highest block number known when syncing started
-	syncStatsLock   sync.RWMutex // Lock protecting the sync stats fields
+	syncStatsChainOrigin uint64       // Origin block number where syncing started at
+	syncStatsChainHeight uint64       // Highest block number known when syncing started
+	syncStatsStateTotal  uint64       // Total number of node state entries known so far
+	syncStatsStateDone   uint64       // Number of state trie entries already pulled
+	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields

 	// Callbacks
-	hasHeader headerCheckFn // Checks if a header is present in the chain
-	hasBlock  blockCheckFn  // 
Checks if a block is present in the chain + getHeader headerRetrievalFn // Retrieves a header from the chain + getBlock blockRetrievalFn // Retrieves a block from the chain + headHeader headHeaderRetrievalFn // Retrieves the head header from the chain + headBlock headBlockRetrievalFn // Retrieves the head block from the chain + headFastBlock headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain + commitHeadBlock headBlockCommitterFn // Commits a manually assembled block as the chain head + getTd tdRetrievalFn // Retrieves the TD of a block from the chain + insertHeaders headerChainInsertFn // Injects a batch of headers into the chain + insertBlocks blockChainInsertFn // Injects a batch of blocks into the chain + insertReceipts receiptChainInsertFn // Injects a batch of blocks and their receipts into the chain + dropPeer peerDropFn // Drops a peer for misbehaving // Status synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing @@ -212,14 +127,16 @@ type Downloader struct { // Channels newPeerCh chan *peer - hashCh chan hashPack // [eth/61] Channel receiving inbound hashes - blockCh chan blockPack // [eth/61] Channel receiving inbound blocks - headerCh chan headerPack // [eth/62] Channel receiving inbound block headers - bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies - receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts - blockWakeCh chan bool // [eth/61] Channel to signal the block fetcher of new tasks - bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks - receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks + hashCh chan dataPack // [eth/61] Channel receiving inbound hashes + blockCh chan dataPack // [eth/61] Channel receiving inbound blocks + headerCh chan dataPack // [eth/62] Channel receiving inbound block headers + bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies + receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts + stateCh chan dataPack // [eth/63] Channel receiving inbound node state data + blockWakeCh chan bool // [eth/61] Channel to signal the block fetcher of new tasks + bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks + receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks + stateWakeCh chan bool // [eth/63] Channel to signal the state fetcher of new tasks cancelCh chan struct{} // Channel to cancel mid-flight syncs cancelLock sync.RWMutex // Lock to protect the cancel channel in delivers @@ -232,36 +149,40 @@ type Downloader struct { } // New creates a new downloader to fetch hashes and blocks from remote peers. 
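// The callback and packet types removed above are relocated rather than
// dropped: this commit's diffstat adds eth/downloader/types.go, which is not
// part of this excerpt. A minimal sketch of the reworked packet abstraction as
// it is used below (packet.PeerId(), packet.Items(), and the statePack literal
// in DeliverNodeData); anything beyond those observed calls is an assumption:
type dataPack interface {
	PeerId() string
	Items() int
	Stats() string
}

// statePack is a batch of trie node data returned by a peer.
type statePack struct {
	peerId string
	states [][]byte
}

func (p *statePack) PeerId() string { return p.peerId }
func (p *statePack) Items() int     { return len(p.states) }
func (p *statePack) Stats() string  { return fmt.Sprintf("%d", len(p.states)) }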
-func New(mode SyncMode, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock blockCheckFn, getHeader headerRetrievalFn, getBlock blockRetrievalFn, - headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, headFastBlock headFastBlockRetrievalFn, getTd tdRetrievalFn, - insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, dropPeer peerDropFn) *Downloader { +func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock blockCheckFn, getHeader headerRetrievalFn, + getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, headFastBlock headFastBlockRetrievalFn, + commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, + insertReceipts receiptChainInsertFn, dropPeer peerDropFn) *Downloader { return &Downloader{ - mode: mode, - mux: mux, - queue: newQueue(), - peers: newPeerSet(), - hasHeader: hasHeader, - hasBlock: hasBlock, - getHeader: getHeader, - getBlock: getBlock, - headHeader: headHeader, - headBlock: headBlock, - headFastBlock: headFastBlock, - getTd: getTd, - insertHeaders: insertHeaders, - insertBlocks: insertBlocks, - insertReceipts: insertReceipts, - dropPeer: dropPeer, - newPeerCh: make(chan *peer, 1), - hashCh: make(chan hashPack, 1), - blockCh: make(chan blockPack, 1), - headerCh: make(chan headerPack, 1), - bodyCh: make(chan dataPack, 1), - receiptCh: make(chan dataPack, 1), - blockWakeCh: make(chan bool, 1), - bodyWakeCh: make(chan bool, 1), - receiptWakeCh: make(chan bool, 1), + mode: mode, + mux: mux, + queue: newQueue(stateDb), + peers: newPeerSet(), + hasHeader: hasHeader, + hasBlock: hasBlock, + getHeader: getHeader, + getBlock: getBlock, + headHeader: headHeader, + headBlock: headBlock, + headFastBlock: headFastBlock, + commitHeadBlock: commitHeadBlock, + getTd: getTd, + insertHeaders: insertHeaders, + insertBlocks: insertBlocks, + insertReceipts: insertReceipts, + dropPeer: dropPeer, + newPeerCh: make(chan *peer, 1), + hashCh: make(chan dataPack, 1), + blockCh: make(chan dataPack, 1), + headerCh: make(chan dataPack, 1), + bodyCh: make(chan dataPack, 1), + receiptCh: make(chan dataPack, 1), + stateCh: make(chan dataPack, 1), + blockWakeCh: make(chan bool, 1), + bodyWakeCh: make(chan bool, 1), + receiptWakeCh: make(chan bool, 1), + stateWakeCh: make(chan bool, 1), } } @@ -272,7 +193,7 @@ func (d *Downloader) Boundaries() (uint64, uint64) { d.syncStatsLock.RLock() defer d.syncStatsLock.RUnlock() - return d.syncStatsOrigin, d.syncStatsHeight + return d.syncStatsChainOrigin, d.syncStatsChainHeight } // Synchronising returns whether the downloader is currently retrieving blocks. @@ -284,10 +205,11 @@ func (d *Downloader) Synchronising() bool { // used for fetching hashes and blocks from. 
func (d *Downloader) RegisterPeer(id string, version int, head common.Hash, getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading - getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn, getReceipts receiptFetcherFn) error { + getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn, + getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error { glog.V(logger.Detail).Infoln("Registering peer", id) - if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts)); err != nil { + if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil { glog.V(logger.Error).Infoln("Register failed:", err) return err } @@ -357,12 +279,18 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int) error d.queue.Reset() d.peers.Reset() - for _, ch := range []chan bool{d.blockWakeCh, d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.blockWakeCh, d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { select { case <-ch: default: } } + // Reset and ephemeral sync statistics + d.syncStatsLock.Lock() + d.syncStatsStateTotal = 0 + d.syncStatsStateDone = 0 + d.syncStatsLock.Unlock() + // Create cancel channel for aborting mid-flight d.cancelLock.Lock() d.cancelCh = make(chan struct{}) @@ -414,17 +342,17 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e return err } d.syncStatsLock.Lock() - if d.syncStatsHeight <= origin || d.syncStatsOrigin > origin { - d.syncStatsOrigin = origin + if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { + d.syncStatsChainOrigin = origin } - d.syncStatsHeight = latest + d.syncStatsChainHeight = latest d.syncStatsLock.Unlock() // Initiate the sync using a concurrent hash and block retrieval algorithm if d.syncInitHook != nil { d.syncInitHook(origin, latest) } - d.queue.Prepare(origin+1, 1) + d.queue.Prepare(origin+1, d.mode, 0) errc := make(chan error, 2) go func() { errc <- d.fetchHashes61(p, td, origin+1) }() @@ -449,26 +377,27 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e return err } d.syncStatsLock.Lock() - if d.syncStatsHeight <= origin || d.syncStatsOrigin > origin { - d.syncStatsOrigin = origin + if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { + d.syncStatsChainOrigin = origin } - d.syncStatsHeight = latest + d.syncStatsChainHeight = latest d.syncStatsLock.Unlock() // Initiate the sync using a concurrent header and content retrieval algorithm - parts := 1 - if d.mode == FastSync { - parts = 2 // receipts are fetched too + pivot := uint64(0) + if latest > uint64(minFullBlocks) { + pivot = latest - uint64(minFullBlocks) } - d.queue.Prepare(origin+1, parts) + d.queue.Prepare(origin+1, d.mode, pivot) if d.syncInitHook != nil { d.syncInitHook(origin, latest) } - errc := make(chan error, 3) + errc := make(chan error, 4) go func() { errc <- d.fetchHeaders(p, td, origin+1) }() // Headers are always retrieved + go func() { errc <- d.fetchBodies(origin + 1) }() // Bodies are retrieved during normal and fast sync go func() { errc <- d.fetchReceipts(origin + 1) }() // Receipts are retrieved during fast sync - go func() { errc <- 
d.fetchBodies(origin + 1) }() // Bodies are retrieved during normal sync + go func() { errc <- d.fetchNodeData() }() // Node state data is retrieved during fast sync // If any fetcher fails, cancel the others var fail error @@ -538,14 +467,14 @@ func (d *Downloader) fetchHeight61(p *peer) (uint64, error) { case <-d.hashCh: // Out of bounds hashes received, ignore them - case blockPack := <-d.blockCh: + case packet := <-d.blockCh: // Discard anything not from the origin peer - if blockPack.peerId != p.id { - glog.V(logger.Debug).Infof("Received blocks from incorrect peer(%s)", blockPack.peerId) + if packet.PeerId() != p.id { + glog.V(logger.Debug).Infof("Received blocks from incorrect peer(%s)", packet.PeerId()) break } // Make sure the peer actually gave something valid - blocks := blockPack.blocks + blocks := packet.(*blockPack).blocks if len(blocks) != 1 { glog.V(logger.Debug).Infof("%v: invalid number of head blocks: %d != 1", p, len(blocks)) return 0, errBadPeer @@ -584,14 +513,14 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) { case <-d.cancelCh: return 0, errCancelHashFetch - case hashPack := <-d.hashCh: + case packet := <-d.hashCh: // Discard anything not from the origin peer - if hashPack.peerId != p.id { - glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId) + if packet.PeerId() != p.id { + glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId()) break } // Make sure the peer actually gave something valid - hashes := hashPack.hashes + hashes := packet.(*hashPack).hashes if len(hashes) == 0 { glog.V(logger.Debug).Infof("%v: empty head hash set", p) return 0, errEmptyHashSet @@ -639,14 +568,14 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) { case <-d.cancelCh: return 0, errCancelHashFetch - case hashPack := <-d.hashCh: + case packet := <-d.hashCh: // Discard anything not from the origin peer - if hashPack.peerId != p.id { - glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId) + if packet.PeerId() != p.id { + glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId()) break } // Make sure the peer actually gave something valid - hashes := hashPack.hashes + hashes := packet.(*hashPack).hashes if len(hashes) != 1 { glog.V(logger.Debug).Infof("%v: invalid search hash set (%d)", p, len(hashes)) return 0, errBadPeer @@ -716,17 +645,17 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error { case <-d.bodyCh: // Out of bounds eth/62 block bodies received, ignore them - case hashPack := <-d.hashCh: + case packet := <-d.hashCh: // Make sure the active peer is giving us the hashes - if hashPack.peerId != p.id { - glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId) + if packet.PeerId() != p.id { + glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId()) break } hashReqTimer.UpdateSince(request) timeout.Stop() // If no more hashes are inbound, notify the block fetcher and return - if len(hashPack.hashes) == 0 { + if packet.Items() == 0 { glog.V(logger.Debug).Infof("%v: no available hashes", p) select { @@ -751,12 +680,13 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error { return nil } gotHashes = true + hashes := packet.(*hashPack).hashes // Otherwise insert all the new hashes, aborting in case of junk - glog.V(logger.Detail).Infof("%v: scheduling %d hashes from #%d", p, len(hashPack.hashes), from) + 
glog.V(logger.Detail).Infof("%v: scheduling %d hashes from #%d", p, len(hashes), from) - inserts := d.queue.Schedule61(hashPack.hashes, true) - if len(inserts) != len(hashPack.hashes) { + inserts := d.queue.Schedule61(hashes, true) + if len(inserts) != len(hashes) { glog.V(logger.Debug).Infof("%v: stale hashes", p) return errBadPeer } @@ -776,7 +706,7 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error { return nil } // Queue not yet full, fetch the next batch - from += uint64(len(hashPack.hashes)) + from += uint64(len(hashes)) getHashes(from) case <-timeout.C: @@ -813,16 +743,17 @@ func (d *Downloader) fetchBlocks61(from uint64) error { case <-d.bodyCh: // Out of bounds eth/62 block bodies received, ignore them - case blockPack := <-d.blockCh: + case packet := <-d.blockCh: // If the peer was previously banned and failed to deliver it's pack // in a reasonable time frame, ignore it's message. - if peer := d.peers.Peer(blockPack.peerId); peer != nil { + if peer := d.peers.Peer(packet.PeerId()); peer != nil { // Deliver the received chunk of blocks, and demote in case of errors - err := d.queue.Deliver61(blockPack.peerId, blockPack.blocks) + blocks := packet.(*blockPack).blocks + err := d.queue.DeliverBlocks(peer.id, blocks) switch err { case nil: // If no blocks were delivered, demote the peer (need the delivery above) - if len(blockPack.blocks) == 0 { + if len(blocks) == 0 { peer.Demote() peer.SetBlocksIdle() glog.V(logger.Detail).Infof("%s: no blocks delivered", peer) @@ -831,7 +762,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { // All was successful, promote the peer and potentially start processing peer.Promote() peer.SetBlocksIdle() - glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks)) + glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blocks)) go d.process() case errInvalidChain: @@ -891,7 +822,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { return errNoPeers } // Check for block request timeouts and demote the responsible peers - for _, pid := range d.queue.Expire61(blockHardTTL) { + for _, pid := range d.queue.ExpireBlocks(blockHardTTL) { if peer := d.peers.Peer(pid); peer != nil { peer.Demote() glog.V(logger.Detail).Infof("%s: block delivery timeout", peer) @@ -907,7 +838,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { } // Send a download request to all idle peers, until throttled throttled := false - idles, total := d.peers.BlockIdlePeers(61) + idles, total := d.peers.BlockIdlePeers() for _, peer := range idles { // Short circuit if throttling activated @@ -918,7 +849,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { // Reserve a chunk of hashes for a peer. A nil can mean either that // no more hashes are available, or that the peer is known not to // have them. - request := d.queue.Reserve61(peer, peer.BlockCapacity()) + request := d.queue.ReserveBlocks(peer, peer.BlockCapacity()) if request == nil { continue } @@ -928,7 +859,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { // Fetch the chunk and make sure any errors return the hashes to the queue if err := peer.Fetch61(request); err != nil { glog.V(logger.Error).Infof("%v: fetch failed, rescheduling", peer) - d.queue.Cancel61(request) + d.queue.CancelBlocks(request) } } // Make sure that we have peers available for fetching. 
If all peers have been tried @@ -954,14 +885,14 @@ func (d *Downloader) fetchHeight(p *peer) (uint64, error) { case <-d.cancelCh: return 0, errCancelBlockFetch - case headerPack := <-d.headerCh: + case packet := <-d.headerCh: // Discard anything not from the origin peer - if headerPack.peerId != p.id { - glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", headerPack.peerId) + if packet.PeerId() != p.id { + glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId()) break } // Make sure the peer actually gave something valid - headers := headerPack.headers + headers := packet.(*headerPack).headers if len(headers) != 1 { glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers)) return 0, errBadPeer @@ -1014,14 +945,14 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) { case <-d.cancelCh: return 0, errCancelHashFetch - case headerPack := <-d.headerCh: + case packet := <-d.headerCh: // Discard anything not from the origin peer - if headerPack.peerId != p.id { - glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", headerPack.peerId) + if packet.PeerId() != p.id { + glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId()) break } // Make sure the peer actually gave something valid - headers := headerPack.headers + headers := packet.(*headerPack).headers if len(headers) == 0 { glog.V(logger.Debug).Infof("%v: empty head header set", p) return 0, errEmptyHeaderSet @@ -1069,14 +1000,14 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) { case <-d.cancelCh: return 0, errCancelHashFetch - case headerPack := <-d.headerCh: + case packer := <-d.headerCh: // Discard anything not from the origin peer - if headerPack.peerId != p.id { - glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", headerPack.peerId) + if packer.PeerId() != p.id { + glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packer.PeerId()) break } // Make sure the peer actually gave something valid - headers := headerPack.headers + headers := packer.(*headerPack).headers if len(headers) != 1 { glog.V(logger.Debug).Infof("%v: invalid search header set (%d)", p, len(headers)) return 0, errBadPeer @@ -1150,20 +1081,20 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { case <-d.blockCh: // Out of bounds eth/61 blocks received, ignore them - case headerPack := <-d.headerCh: + case packet := <-d.headerCh: // Make sure the active peer is giving us the headers - if headerPack.peerId != p.id { - glog.V(logger.Debug).Infof("Received headers from incorrect peer (%s)", headerPack.peerId) + if packet.PeerId() != p.id { + glog.V(logger.Debug).Infof("Received headers from incorrect peer (%s)", packet.PeerId()) break } headerReqTimer.UpdateSince(request) timeout.Stop() // If no more headers are inbound, notify the content fetchers and return - if len(headerPack.headers) == 0 { + if packet.Items() == 0 { glog.V(logger.Debug).Infof("%v: no available headers", p) - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { select { case ch <- false: case <-d.cancelCh: @@ -1187,26 +1118,27 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { return nil } gotHeaders = true + headers := packet.(*headerPack).headers // Otherwise insert all the new headers, aborting in case of junk - glog.V(logger.Detail).Infof("%v: schedule %d headers 
from #%d", p, len(headerPack.headers), from) + glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from) if d.mode == FastSync || d.mode == LightSync { - if n, err := d.insertHeaders(headerPack.headers, false); err != nil { - glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headerPack.headers[n].Number, headerPack.headers[n].Hash().Bytes()[:4], err) + if n, err := d.insertHeaders(headers, false); err != nil { + glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headers[n].Number, headers[n].Hash().Bytes()[:4], err) return errInvalidChain } } if d.mode == FullSync || d.mode == FastSync { - inserts := d.queue.Schedule(headerPack.headers, from, d.mode == FastSync) - if len(inserts) != len(headerPack.headers) { + inserts := d.queue.Schedule(headers, from) + if len(inserts) != len(headers) { glog.V(logger.Debug).Infof("%v: stale headers", p) return errBadPeer } } // Notify the content fetchers of new headers, but stop if queue is full cont := d.queue.PendingBlocks() < maxQueuedHeaders || d.queue.PendingReceipts() < maxQueuedHeaders - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { if cont { // We still have headers to fetch, send continuation wake signal (potential) select { @@ -1223,7 +1155,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { } } // Queue not yet full, fetch the next batch - from += uint64(len(headerPack.headers)) + from += uint64(len(headers)) getHeaders(from) case <-timeout.C: @@ -1233,7 +1165,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { d.dropPeer(p.id) // Finish the sync gracefully instead of dumping the gathered data though - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} { select { case ch <- false: case <-d.cancelCh: @@ -1251,19 +1183,19 @@ func (d *Downloader) fetchBodies(from uint64) error { glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from) var ( - deliver = func(packet interface{}) error { + deliver = func(packet dataPack) error { pack := packet.(*bodyPack) - return d.queue.DeliverBlocks(pack.peerId, pack.transactions, pack.uncles) + return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles) } - expire = func() []string { return d.queue.ExpireBlocks(bodyHardTTL) } + expire = func() []string { return d.queue.ExpireBodies(bodyHardTTL) } fetch = func(p *peer, req *fetchRequest) error { return p.FetchBodies(req) } capacity = func(p *peer) int { return p.BlockCapacity() } - getIdles = func() ([]*peer, int) { return d.peers.BlockIdlePeers(62) } + getIdles = func() ([]*peer, int) { return d.peers.BodyIdlePeers() } setIdle = func(p *peer) { p.SetBlocksIdle() } ) - err := d.fetchParts(from, errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, - d.queue.PendingBlocks, d.queue.ThrottleBlocks, d.queue.ReserveBlocks, d.bodyFetchHook, - fetch, d.queue.CancelBlocks, capacity, getIdles, setIdle, "Body") + err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, + d.queue.PendingBlocks, d.queue.ThrottleBlocks, d.queue.ReserveBodies, d.bodyFetchHook, + fetch, d.queue.CancelBodies, capacity, getIdles, setIdle, "Body") glog.V(logger.Debug).Infof("Block body download terminated: %v", err) return err @@ -1276,7 +1208,7 @@ func (d *Downloader) fetchReceipts(from uint64) error { 
glog.V(logger.Debug).Infof("Downloading receipts from #%d", from) var ( - deliver = func(packet interface{}) error { + deliver = func(packet dataPack) error { pack := packet.(*receiptPack) return d.queue.DeliverReceipts(pack.peerId, pack.receipts) } @@ -1285,7 +1217,7 @@ func (d *Downloader) fetchReceipts(from uint64) error { capacity = func(p *peer) int { return p.ReceiptCapacity() } setIdle = func(p *peer) { p.SetReceiptsIdle() } ) - err := d.fetchParts(from, errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, + err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, d.queue.PendingReceipts, d.queue.ThrottleReceipts, d.queue.ReserveReceipts, d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt") @@ -1293,10 +1225,46 @@ func (d *Downloader) fetchReceipts(from uint64) error { return err } +// fetchNodeData iteratively downloads the scheduled state trie nodes, taking any +// available peers, reserving a chunk of nodes for each, waiting for delivery and +// also periodically checking for timeouts. +func (d *Downloader) fetchNodeData() error { + glog.V(logger.Debug).Infof("Downloading node state data") + + var ( + deliver = func(packet dataPack) error { + start := time.Now() + done, found, err := d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states) + + d.syncStatsLock.Lock() + totalDone, totalKnown := d.syncStatsStateDone+uint64(done), d.syncStatsStateTotal+uint64(found) + d.syncStatsStateDone, d.syncStatsStateTotal = totalDone, totalKnown + d.syncStatsLock.Unlock() + + glog.V(logger.Info).Infof("imported %d [%d / %d] state entries in %v.", done, totalDone, totalKnown, time.Since(start)) + return err + } + expire = func() []string { return d.queue.ExpireNodeData(stateHardTTL) } + throttle = func() bool { return false } + reserve = func(p *peer, count int) (*fetchRequest, bool, error) { + return d.queue.ReserveNodeData(p, count), false, nil + } + fetch = func(p *peer, req *fetchRequest) error { return p.FetchNodeData(req) } + capacity = func(p *peer) int { return p.NodeDataCapacity() } + setIdle = func(p *peer) { p.SetNodeDataIdle() } + ) + err := d.fetchParts(errCancelReceiptFetch, d.stateCh, deliver, d.stateWakeCh, expire, + d.queue.PendingNodeData, throttle, reserve, nil, fetch, d.queue.CancelNodeData, + capacity, d.peers.ReceiptIdlePeers, setIdle, "State") + + glog.V(logger.Debug).Infof("Node state data download terminated: %v", err) + return err +} + // fetchParts iteratively downloads scheduled block parts, taking any available // peers, reserving a chunk of fetch requests for each, waiting for delivery and // also periodically checking for timeouts. 
-func (d *Downloader) fetchParts(from uint64, errCancel error, deliveryCh chan dataPack, deliver func(packet interface{}) error, wakeCh chan bool, +func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(packet dataPack) error, wakeCh chan bool, expire func() []string, pending func() int, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error), fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int, idle func() ([]*peer, int), setIdle func(*peer), kind string) error { @@ -1327,7 +1295,7 @@ func (d *Downloader) fetchParts(from uint64, errCancel error, deliveryCh chan da switch err := deliver(packet); err { case nil: // If no blocks were delivered, demote the peer (need the delivery above to clean internal queue!) - if packet.Empty() { + if packet.Items() == 0 { peer.Demote() setIdle(peer) glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind)) @@ -1441,7 +1409,11 @@ func (d *Downloader) fetchParts(from uint64, errCancel error, deliveryCh chan da continue } if glog.V(logger.Detail) { - glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number) + if len(request.Headers) > 0 { + glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number) + } else { + glog.Infof("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind)) + } } // Fetch the chunk and make sure any errors return the hashes to the queue if fetchHook != nil { @@ -1528,7 +1500,9 @@ func (d *Downloader) process() { blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)) case d.mode == FastSync: blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)) - receipts = append(receipts, result.Receipts) + if result.Header.Number.Uint64() <= d.queue.fastSyncPivot { + receipts = append(receipts, result.Receipts) + } case d.mode == LightSync: headers = append(headers, result.Header) } @@ -1539,12 +1513,16 @@ func (d *Downloader) process() { index int ) switch { - case d.mode == FullSync: - index, err = d.insertBlocks(blocks) - case d.mode == FastSync: - index, err = d.insertReceipts(blocks, receipts) - case d.mode == LightSync: + case len(headers) > 0: index, err = d.insertHeaders(headers, true) + + case len(receipts) > 0: + index, err = d.insertReceipts(blocks, receipts) + if err == nil && blocks[len(blocks)-1].NumberU64() == d.queue.fastSyncPivot { + err = d.commitHeadBlock(blocks[len(blocks)-1].Hash()) + } + default: + index, err = d.insertBlocks(blocks) } if err != nil { glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err) @@ -1557,125 +1535,47 @@ func (d *Downloader) process() { } } -// DeliverHashes61 injects a new batch of hashes received from a remote node into +// DeliverHashes injects a new batch of hashes received from a remote node into // the download schedule. This is usually invoked through the BlockHashesMsg by // the protocol handler. 
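// In fast sync, process() above only keeps receipts for results at or below
// the queue's fastSyncPivot and commits the pivot block's state as the new
// head; everything past the pivot is imported as full blocks. A conceptual
// sketch of that split; splitAtPivot and the fetchResult type name are
// illustrative, not identifiers taken from this patch:
func splitAtPivot(results []*fetchResult, pivot uint64) (ancient, recent []*fetchResult) {
	for _, result := range results {
		if result.Header.Number.Uint64() <= pivot {
			ancient = append(ancient, result) // imported with receipts via insertReceipts
		} else {
			recent = append(recent, result) // imported as full blocks via insertBlocks
		}
	}
	return ancient, recent
}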
-func (d *Downloader) DeliverHashes61(id string, hashes []common.Hash) (err error) { - // Update the delivery metrics for both good and failed deliveries - hashInMeter.Mark(int64(len(hashes))) - defer func() { - if err != nil { - hashDropMeter.Mark(int64(len(hashes))) - } - }() - // Make sure the downloader is active - if atomic.LoadInt32(&d.synchronising) == 0 { - return errNoSyncActive - } - // Deliver or abort if the sync is canceled while queuing - d.cancelLock.RLock() - cancel := d.cancelCh - d.cancelLock.RUnlock() - - select { - case d.hashCh <- hashPack{id, hashes}: - return nil - - case <-cancel: - return errNoSyncActive - } +func (d *Downloader) DeliverHashes(id string, hashes []common.Hash) (err error) { + return d.deliver(id, d.hashCh, &hashPack{id, hashes}, hashInMeter, hashDropMeter) } -// DeliverBlocks61 injects a new batch of blocks received from a remote node. +// DeliverBlocks injects a new batch of blocks received from a remote node. // This is usually invoked through the BlocksMsg by the protocol handler. -func (d *Downloader) DeliverBlocks61(id string, blocks []*types.Block) (err error) { - // Update the delivery metrics for both good and failed deliveries - blockInMeter.Mark(int64(len(blocks))) - defer func() { - if err != nil { - blockDropMeter.Mark(int64(len(blocks))) - } - }() - // Make sure the downloader is active - if atomic.LoadInt32(&d.synchronising) == 0 { - return errNoSyncActive - } - // Deliver or abort if the sync is canceled while queuing - d.cancelLock.RLock() - cancel := d.cancelCh - d.cancelLock.RUnlock() - - select { - case d.blockCh <- blockPack{id, blocks}: - return nil - - case <-cancel: - return errNoSyncActive - } +func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) (err error) { + return d.deliver(id, d.blockCh, &blockPack{id, blocks}, blockInMeter, blockDropMeter) } // DeliverHeaders injects a new batch of blck headers received from a remote // node into the download schedule. func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) { - // Update the delivery metrics for both good and failed deliveries - headerInMeter.Mark(int64(len(headers))) - defer func() { - if err != nil { - headerDropMeter.Mark(int64(len(headers))) - } - }() - // Make sure the downloader is active - if atomic.LoadInt32(&d.synchronising) == 0 { - return errNoSyncActive - } - // Deliver or abort if the sync is canceled while queuing - d.cancelLock.RLock() - cancel := d.cancelCh - d.cancelLock.RUnlock() - - select { - case d.headerCh <- headerPack{id, headers}: - return nil - - case <-cancel: - return errNoSyncActive - } + return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter) } // DeliverBodies injects a new batch of block bodies received from a remote node. 
func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) { - // Update the delivery metrics for both good and failed deliveries - bodyInMeter.Mark(int64(len(transactions))) - defer func() { - if err != nil { - bodyDropMeter.Mark(int64(len(transactions))) - } - }() - // Make sure the downloader is active - if atomic.LoadInt32(&d.synchronising) == 0 { - return errNoSyncActive - } - // Deliver or abort if the sync is canceled while queuing - d.cancelLock.RLock() - cancel := d.cancelCh - d.cancelLock.RUnlock() - - select { - case d.bodyCh <- &bodyPack{id, transactions, uncles}: - return nil - - case <-cancel: - return errNoSyncActive - } + return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter) } // DeliverReceipts injects a new batch of receipts received from a remote node. func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) { + return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter) +} + +// DeliverNodeData injects a new batch of node state data received from a remote node. +func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) { + return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter) +} + +// deliver injects a new batch of data received from a remote node. +func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { // Update the delivery metrics for both good and failed deliveries - receiptInMeter.Mark(int64(len(receipts))) + inMeter.Mark(int64(packet.Items())) defer func() { if err != nil { - receiptDropMeter.Mark(int64(len(receipts))) + dropMeter.Mark(int64(packet.Items())) } }() // Make sure the downloader is active @@ -1688,7 +1588,7 @@ func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (er d.cancelLock.RUnlock() select { - case d.receiptCh <- &receiptPack{id, receipts}: + case destCh <- packet: return nil case <-cancel: diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 68c4ca26e..8944ae4b0 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -27,11 +27,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -115,6 +117,7 @@ func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts) // downloadTester is a test simulator for mocking out local block chain. 
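// DeliverNodeData above is the entry point the eth protocol handler would use
// for inbound node data; the corresponding handler hunk is outside this
// excerpt, so the following is a sketch mirroring the ReceiptsMsg case shown
// earlier. NodeDataMsg, the helper name and the log wording are assumptions:
func (pm *ProtocolManager) deliverNodeData(p *peer, msg p2p.Msg) error {
	// A batch of node state data arrived to one of our previous requests
	var data [][]byte
	if err := msg.Decode(&data); err != nil {
		return errResp(ErrDecode, "msg %v: %v", msg, err)
	}
	// Deliver all to the downloader, which routes it onto stateCh via deliver()
	if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
		glog.V(logger.Debug).Infof("failed to deliver node state data: %v", err)
	}
	return nil
}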
type downloadTester struct { + stateDb ethdb.Database downloader *Downloader ownHashes []common.Hash // Hash chain belonging to the tester @@ -146,8 +149,10 @@ func newTester(mode SyncMode) *downloadTester { peerReceipts: make(map[string]map[common.Hash]types.Receipts), peerChainTds: make(map[string]map[common.Hash]*big.Int), } - tester.downloader = New(mode, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, tester.getBlock, - tester.headHeader, tester.headBlock, tester.headFastBlock, tester.getTd, tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.dropPeer) + tester.stateDb, _ = ethdb.NewMemDatabase() + tester.downloader = New(mode, tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, + tester.getBlock, tester.headHeader, tester.headBlock, tester.headFastBlock, tester.commitHeadBlock, tester.getTd, + tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.dropPeer) return tester } @@ -213,7 +218,7 @@ func (dl *downloadTester) headHeader() *types.Header { return header } } - return nil + return genesis.Header() } // headBlock retrieves the current head block from the canonical chain. @@ -223,10 +228,12 @@ func (dl *downloadTester) headBlock() *types.Block { for i := len(dl.ownHashes) - 1; i >= 0; i-- { if block := dl.getBlock(dl.ownHashes[i]); block != nil { - return block + if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil { + return block + } } } - return nil + return genesis } // headFastBlock retrieves the current head fast-sync block from the canonical chain. @@ -236,12 +243,20 @@ func (dl *downloadTester) headFastBlock() *types.Block { for i := len(dl.ownHashes) - 1; i >= 0; i-- { if block := dl.getBlock(dl.ownHashes[i]); block != nil { - if _, ok := dl.ownReceipts[block.Hash()]; ok { - return block - } + return block } } - return nil + return genesis +} + +// commitHeadBlock manually sets the head block to a given hash. +func (dl *downloadTester) commitHeadBlock(hash common.Hash) error { + // For now only check that the state trie is correct + if block := dl.getBlock(hash); block != nil { + _, err := trie.NewSecure(block.Root(), dl.stateDb) + return err + } + return fmt.Errorf("non existent block: %x", hash[:4]) } // getTd retrieves the block's total difficulty from the canonical chain. 
@@ -283,6 +298,7 @@ func (dl *downloadTester) insertBlocks(blocks types.Blocks) (int, error) { dl.ownHashes = append(dl.ownHashes, block.Hash()) dl.ownHeaders[block.Hash()] = block.Header() dl.ownBlocks[block.Hash()] = block + dl.stateDb.Put(block.Root().Bytes(), []byte{}) dl.ownChainTd[block.Hash()] = dl.ownChainTd[block.ParentHash()] } return len(blocks), nil @@ -321,13 +337,13 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha var err error switch version { case 61: - err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay), nil, nil, nil, nil) + err = dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, delay), dl.peerGetBlocksFn(id, delay), nil, nil, nil, nil, nil) case 62: - err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil) + err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), nil, nil) case 63: - err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay)) + err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay)) case 64: - err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay)) + err = dl.downloader.RegisterPeer(id, version, hashes[0], nil, nil, nil, dl.peerGetRelHeadersFn(id, delay), dl.peerGetAbsHeadersFn(id, delay), dl.peerGetBodiesFn(id, delay), dl.peerGetReceiptsFn(id, delay), dl.peerGetNodeDataFn(id, delay)) } if err == nil { // Assign the owned hashes, headers and blocks to the peer (deep copy) @@ -399,7 +415,7 @@ func (dl *downloadTester) peerGetRelHashesFn(id string, delay time.Duration) fun // Delay delivery a bit to allow attacks to unfold go func() { time.Sleep(time.Millisecond) - dl.downloader.DeliverHashes61(id, result) + dl.downloader.DeliverHashes(id, result) }() return nil } @@ -424,7 +440,7 @@ func (dl *downloadTester) peerGetAbsHashesFn(id string, delay time.Duration) fun // Delay delivery a bit to allow attacks to unfold go func() { time.Sleep(time.Millisecond) - dl.downloader.DeliverHashes61(id, result) + dl.downloader.DeliverHashes(id, result) }() return nil } @@ -447,7 +463,7 @@ func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([ result = append(result, block) } } - go dl.downloader.DeliverBlocks61(id, result) + go dl.downloader.DeliverBlocks(id, result) return nil } @@ -553,17 +569,54 @@ func (dl *downloadTester) peerGetReceiptsFn(id string, delay time.Duration) func } } +// peerGetNodeDataFn constructs a getNodeData method associated with a particular +// peer in the download tester. The returned function can be used to retrieve +// batches of node state data from the particularly requested peer. 
+func (dl *downloadTester) peerGetNodeDataFn(id string, delay time.Duration) func([]common.Hash) error { + return func(hashes []common.Hash) error { + time.Sleep(delay) + + dl.lock.RLock() + defer dl.lock.RUnlock() + + results := make([][]byte, 0, len(hashes)) + for _, hash := range hashes { + if data, err := testdb.Get(hash.Bytes()); err == nil { + results = append(results, data) + } + } + go dl.downloader.DeliverNodeData(id, results) + + return nil + } +} + // assertOwnChain checks if the local chain contains the correct number of items // of the various chain components. func assertOwnChain(t *testing.T, tester *downloadTester, length int) { - headers, blocks, receipts := length, length, length + assertOwnForkedChain(t, tester, 1, []int{length}) +} + +// assertOwnForkedChain checks if the local forked chain contains the correct +// number of items of the various chain components. +func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) { + // Initialize the counters for the first fork + headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-minFullBlocks + if receipts < 0 { + receipts = 1 + } + // Update the counters for each subsequent fork + for _, length := range lengths[1:] { + headers += length - common + blocks += length - common + receipts += length - common - minFullBlocks + } switch tester.downloader.mode { case FullSync: receipts = 1 case LightSync: blocks, receipts = 1, 1 } - if hs := len(tester.ownHeaders); hs != headers { t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) } @@ -573,6 +626,14 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { if rs := len(tester.ownReceipts); rs != receipts { t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts) } + // Verify the state trie too for fast syncs + if tester.downloader.mode == FastSync { + if index := lengths[len(lengths)-1] - minFullBlocks - 1; index > 0 { + if statedb := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, tester.stateDb); statedb == nil { + t.Fatalf("state reconstruction failed") + } + } + } } // Tests that simple synchronization against a canonical chain works correctly. @@ -647,7 +708,9 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) { cached = len(tester.downloader.queue.blockDonePool) if mode == FastSync { if receipts := len(tester.downloader.queue.receiptDonePool); receipts < cached { - cached = receipts + if tester.downloader.queue.resultCache[receipts].Header.Number.Uint64() < tester.downloader.queue.fastSyncPivot { + cached = receipts + } } } tester.downloader.queue.lock.RUnlock() @@ -704,7 +767,7 @@ func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) { if err := tester.sync("fork B", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, common+2*fork+1) + assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1}) } // Tests that an inactive downloader will not accept incoming hashes and blocks. 
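For reference, the fork accounting used by assertOwnForkedChain reduces to the small calculation below. The minFullBlocks value here is made up for the illustration and the mode-specific overrides for full and light sync are omitted, so treat this only as a sketch of the arithmetic.

package main

import "fmt"

// expectedChainItems mirrors the counting done by the test helper: every fork
// shares `common` ancestors with the first one, so only the part beyond the
// common prefix adds new headers and blocks. In fast sync, the last few blocks
// (minFullBlocks of them) are imported fully and need no separate receipts.
func expectedChainItems(common, minFullBlocks int, lengths []int) (headers, blocks, receipts int) {
    headers, blocks, receipts = lengths[0], lengths[0], lengths[0]-minFullBlocks
    if receipts < 0 {
        receipts = 1
    }
    for _, length := range lengths[1:] {
        headers += length - common
        blocks += length - common
        receipts += length - common - minFullBlocks
    }
    return headers, blocks, receipts
}

func main() {
    // Two forks of 25 blocks each sharing a 10 block common prefix (plus the
    // genesis block), with a hypothetical 5 blocks kept back for full import.
    h, b, r := expectedChainItems(10+1, 5, []int{25 + 1, 25 + 1})
    fmt.Println(h, b, r) // 41 41 31
}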
@@ -712,10 +775,10 @@ func TestInactiveDownloader61(t *testing.T) { tester := newTester(FullSync) // Check that neither hashes nor blocks are accepted - if err := tester.downloader.DeliverHashes61("bad peer", []common.Hash{}); err != errNoSyncActive { + if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive { t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) } - if err := tester.downloader.DeliverBlocks61("bad peer", []*types.Block{}); err != errNoSyncActive { + if err := tester.downloader.DeliverBlocks("bad peer", []*types.Block{}); err != errNoSyncActive { t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) } } @@ -809,14 +872,6 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) { id := fmt.Sprintf("peer #%d", i) tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts) } - // Synchronise with the middle peer and make sure half of the blocks were retrieved - id := fmt.Sprintf("peer #%d", targetPeers/2) - if err := tester.sync(id, nil); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(tester.peerHashes[id])) - - // Synchronise with the best peer and make sure everything is retrieved if err := tester.sync("peer #0", nil); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } @@ -870,8 +925,8 @@ func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, F func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) } func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { - // Create a small enough block chain to download - targetBlocks := blockCacheLimit - 15 + // Create a block chain to download + targetBlocks := 2*blockCacheLimit - 15 hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) tester := newTester(mode) @@ -898,8 +953,8 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { bodiesNeeded++ } } - for _, receipt := range receipts { - if mode == FastSync && len(receipt) > 0 { + for hash, receipt := range receipts { + if mode == FastSync && len(receipt) > 0 && headers[hash].Number.Uint64() <= uint64(targetBlocks-minFullBlocks) { receiptsNeeded++ } } diff --git a/eth/downloader/metrics.go b/eth/downloader/metrics.go index 92acb6ba8..d6fcfa25c 100644 --- a/eth/downloader/metrics.go +++ b/eth/downloader/metrics.go @@ -47,4 +47,9 @@ var ( receiptReqTimer = metrics.NewTimer("eth/downloader/receipts/req") receiptDropMeter = metrics.NewMeter("eth/downloader/receipts/drop") receiptTimeoutMeter = metrics.NewMeter("eth/downloader/receipts/timeout") + + stateInMeter = metrics.NewMeter("eth/downloader/states/in") + stateReqTimer = metrics.NewTimer("eth/downloader/states/req") + stateDropMeter = metrics.NewMeter("eth/downloader/states/drop") + stateTimeoutMeter = metrics.NewMeter("eth/downloader/states/timeout") ) diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 5fc0db587..5011d5d46 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -41,6 +41,7 @@ type relativeHeaderFetcherFn func(common.Hash, int, int, bool) error type absoluteHeaderFetcherFn func(uint64, int, int, bool) error type blockBodyFetcherFn func([]common.Hash) error type receiptFetcherFn func([]common.Hash) error +type stateFetcherFn func([]common.Hash) error var ( errAlreadyFetching = errors.New("already fetching blocks from peer") @@ -55,12 +56,16 @@ type peer struct { blockIdle int32 // Current block activity 
state of the peer (idle = 0, active = 1) receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1) + stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1) rep int32 // Simple peer reputation - blockCapacity int32 // Number of blocks (bodies) allowed to fetch per request - receiptCapacity int32 // Number of receipts allowed to fetch per request - blockStarted time.Time // Time instance when the last block (body)fetch was started - receiptStarted time.Time // Time instance when the last receipt fetch was started + blockCapacity int32 // Number of blocks (bodies) allowed to fetch per request + receiptCapacity int32 // Number of receipts allowed to fetch per request + stateCapacity int32 // Number of node data pieces allowed to fetch per request + + blockStarted time.Time // Time instance when the last block (body)fetch was started + receiptStarted time.Time // Time instance when the last receipt fetch was started + stateStarted time.Time // Time instance when the last node data fetch was started ignored *set.Set // Set of hashes not to request (didn't have previously) @@ -73,6 +78,7 @@ type peer struct { getBlockBodies blockBodyFetcherFn // [eth/62] Method to retrieve a batch of block bodies getReceipts receiptFetcherFn // [eth/63] Method to retrieve a batch of block transaction receipts + getNodeData stateFetcherFn // [eth/63] Method to retrieve a batch of state trie data version int // Eth protocol version number to switch strategies } @@ -82,12 +88,13 @@ type peer struct { func newPeer(id string, version int, head common.Hash, getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn, - getReceipts receiptFetcherFn) *peer { + getReceipts receiptFetcherFn, getNodeData stateFetcherFn) *peer { return &peer{ id: id, head: head, blockCapacity: 1, receiptCapacity: 1, + stateCapacity: 1, ignored: set.New(), getRelHashes: getRelHashes, @@ -99,6 +106,7 @@ func newPeer(id string, version int, head common.Hash, getBlockBodies: getBlockBodies, getReceipts: getReceipts, + getNodeData: getNodeData, version: version, } @@ -110,6 +118,7 @@ func (p *peer) Reset() { atomic.StoreInt32(&p.receiptIdle, 0) atomic.StoreInt32(&p.blockCapacity, 1) atomic.StoreInt32(&p.receiptCapacity, 1) + atomic.StoreInt32(&p.stateCapacity, 1) p.ignored.Clear() } @@ -167,6 +176,24 @@ func (p *peer) FetchReceipts(request *fetchRequest) error { return nil } +// FetchNodeData sends a node state data retrieval request to the remote peer. +func (p *peer) FetchNodeData(request *fetchRequest) error { + // Short circuit if the peer is already fetching + if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { + return errAlreadyFetching + } + p.stateStarted = time.Now() + + // Convert the hash set to a retrievable slice + hashes := make([]common.Hash, 0, len(request.Hashes)) + for hash, _ := range request.Hashes { + hashes = append(hashes, hash) + } + go p.getNodeData(hashes) + + return nil +} + // SetBlocksIdle sets the peer to idle, allowing it to execute new retrieval requests. // Its block retrieval allowance will also be updated either up- or downwards, // depending on whether the previous fetch completed in time or not. 
@@ -188,6 +215,13 @@ func (p *peer) SetReceiptsIdle() { p.setIdle(p.receiptStarted, receiptSoftTTL, receiptHardTTL, MaxReceiptFetch, &p.receiptCapacity, &p.receiptIdle) } +// SetNodeDataIdle sets the peer to idle, allowing it to execute new retrieval +// requests. Its node data retrieval allowance will also be updated either up- or +// downwards, depending on whether the previous fetch completed in time or not. +func (p *peer) SetNodeDataIdle() { + p.setIdle(p.stateStarted, stateSoftTTL, stateSoftTTL, MaxStateFetch, &p.stateCapacity, &p.stateIdle) +} + // setIdle sets the peer to idle, allowing it to execute new retrieval requests. // Its data retrieval allowance will also be updated either up- or downwards, // depending on whether the previous fetch completed in time or not. @@ -230,6 +264,12 @@ func (p *peer) ReceiptCapacity() int { return int(atomic.LoadInt32(&p.receiptCapacity)) } +// NodeDataCapacity retrieves the peers block download allowance based on its +// previously discovered bandwidth capacity. +func (p *peer) NodeDataCapacity() int { + return int(atomic.LoadInt32(&p.stateCapacity)) +} + // Promote increases the peer's reputation. func (p *peer) Promote() { atomic.AddInt32(&p.rep, 1) @@ -340,39 +380,50 @@ func (ps *peerSet) AllPeers() []*peer { // BlockIdlePeers retrieves a flat list of all the currently idle peers within the // active peer set, ordered by their reputation. -func (ps *peerSet) BlockIdlePeers(version int) ([]*peer, int) { - ps.lock.RLock() - defer ps.lock.RUnlock() - - idle, total := make([]*peer, 0, len(ps.peers)), 0 - for _, p := range ps.peers { - if (version == 61 && p.version == 61) || (version >= 62 && p.version >= 62) { - if atomic.LoadInt32(&p.blockIdle) == 0 { - idle = append(idle, p) - } - total++ - } +func (ps *peerSet) BlockIdlePeers() ([]*peer, int) { + idle := func(p *peer) bool { + return atomic.LoadInt32(&p.blockIdle) == 0 } - for i := 0; i < len(idle); i++ { - for j := i + 1; j < len(idle); j++ { - if atomic.LoadInt32(&idle[i].rep) < atomic.LoadInt32(&idle[j].rep) { - idle[i], idle[j] = idle[j], idle[i] - } - } + return ps.idlePeers(61, 61, idle) +} + +// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within +// the active peer set, ordered by their reputation. +func (ps *peerSet) BodyIdlePeers() ([]*peer, int) { + idle := func(p *peer) bool { + return atomic.LoadInt32(&p.blockIdle) == 0 } - return idle, total + return ps.idlePeers(62, 64, idle) } -// ReceiptIdlePeers retrieves a flat list of all the currently idle peers within the -// active peer set, ordered by their reputation. +// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers +// within the active peer set, ordered by their reputation. func (ps *peerSet) ReceiptIdlePeers() ([]*peer, int) { + idle := func(p *peer) bool { + return atomic.LoadInt32(&p.receiptIdle) == 0 + } + return ps.idlePeers(63, 64, idle) +} + +// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle +// peers within the active peer set, ordered by their reputation. +func (ps *peerSet) NodeDataIdlePeers() ([]*peer, int) { + idle := func(p *peer) bool { + return atomic.LoadInt32(&p.stateIdle) == 0 + } + return ps.idlePeers(63, 64, idle) +} + +// idlePeers retrieves a flat list of all currently idle peers satisfying the +// protocol version constraints, using the provided function to check idleness. 
+func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peer) bool) ([]*peer, int) { ps.lock.RLock() defer ps.lock.RUnlock() idle, total := make([]*peer, 0, len(ps.peers)), 0 for _, p := range ps.peers { - if p.version >= 63 { - if atomic.LoadInt32(&p.receiptIdle) == 0 { + if p.version >= minProtocol && p.version <= maxProtocol { + if idleCheck(p) { idle = append(idle, p) } total++ diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index c53ad939e..942ed0d63 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -26,9 +26,13 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/trie" "github.com/rcrowley/go-metrics" "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) @@ -39,13 +43,14 @@ var ( var ( errNoFetchesPending = errors.New("no fetches pending") + errStateSyncPending = errors.New("state trie sync already scheduled") errStaleDelivery = errors.New("stale delivery") ) // fetchRequest is a currently running data retrieval operation. type fetchRequest struct { Peer *peer // Peer to which the request was sent - Hashes map[common.Hash]int // [eth/61] Requested block with their insertion index (priority) + Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority) Headers []*types.Header // [eth/62] Requested headers, sorted by request order Time time.Time // Time when the request was made } @@ -64,6 +69,9 @@ type fetchResult struct { // queue represents hashes that are either need fetching or are being fetched type queue struct { + mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching + fastSyncPivot uint64 // Block number where the fast sync pivots into archive synchronisation mode + hashPool map[common.Hash]int // [eth/61] Pending hashes, mapping to their insertion index (priority) hashQueue *prque.Prque // [eth/61] Priority queue of the block hashes to fetch hashCounter int // [eth/61] Counter indexing the added hashes to ensure retrieval order @@ -80,15 +88,22 @@ type queue struct { receiptPendPool map[string]*fetchRequest // [eth/63] Currently pending receipt retrieval operations receiptDonePool map[common.Hash]struct{} // [eth/63] Set of the completed receipt fetches + stateTaskIndex int // [eth/63] Counter indexing the added hashes to ensure prioritized retrieval order + stateTaskPool map[common.Hash]int // [eth/63] Pending node data retrieval tasks, mapping to their priority + stateTaskQueue *prque.Prque // [eth/63] Priority queue of the hashes to fetch the node data for + statePendPool map[string]*fetchRequest // [eth/63] Currently pending node data retrieval operations + + stateDatabase ethdb.Database // [eth/63] Trie database to populate during state reassembly + stateScheduler *state.StateSync // [eth/63] State trie synchronisation scheduler and integrator + resultCache []*fetchResult // Downloaded but not yet delivered fetch results resultOffset uint64 // Offset of the first cached fetch result in the block-chain - resultParts int // Number of fetch components required to complete an item lock sync.RWMutex } // newQueue creates a new download queue for scheduling block retrieval. 
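The per-type idle flags and the generic idlePeers filter work together: each retrieval kind owns an atomic flag, a CAS guards the fetch, and one helper selects idle peers within a protocol range. The sketch below restates both with stand-in types; examplePeer is not the downloader's peer, and sort.Slice replaces the hand-rolled reputation sort.

package main

import (
    "errors"
    "fmt"
    "sort"
    "sync/atomic"
)

var errAlreadyFetching = errors.New("already fetching from peer")

// examplePeer carries one atomic idle flag per retrieval type, so body,
// receipt and node-data fetches can run against the same peer independently.
type examplePeer struct {
    id      string
    version int
    rep     int32

    blockIdle, receiptIdle, stateIdle int32
}

// fetchNodeData flips the state flag with a compare-and-swap so that only one
// node-data request can be in flight per peer at any time.
func (p *examplePeer) fetchNodeData() error {
    if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {
        return errAlreadyFetching
    }
    // ... send the actual request asynchronously here ...
    return nil
}

// idlePeers is the generic filter from the patch: bound the protocol version,
// apply a caller-supplied idleness check, and order the result by reputation.
func idlePeers(peers []*examplePeer, minProto, maxProto int, idle func(*examplePeer) bool) ([]*examplePeer, int) {
    list, total := make([]*examplePeer, 0, len(peers)), 0
    for _, p := range peers {
        if p.version >= minProto && p.version <= maxProto {
            if idle(p) {
                list = append(list, p)
            }
            total++
        }
    }
    sort.Slice(list, func(i, j int) bool {
        return atomic.LoadInt32(&list[i].rep) > atomic.LoadInt32(&list[j].rep)
    })
    return list, total
}

func main() {
    peers := []*examplePeer{
        {id: "a", version: 63, rep: 2},
        {id: "b", version: 61, rep: 5}, // too old for node data, filtered out
        {id: "c", version: 64, rep: 7},
    }
    stateIdle := func(p *examplePeer) bool { return atomic.LoadInt32(&p.stateIdle) == 0 }

    idle, total := idlePeers(peers, 63, 64, stateIdle)
    fmt.Println(len(idle), total, idle[0].id) // 2 2 c

    // Mark the best peer busy and filter again.
    _ = idle[0].fetchNodeData()
    idle, _ = idlePeers(peers, 63, 64, stateIdle)
    fmt.Println(len(idle), idle[0].id) // 1 a
}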
-func newQueue() *queue { +func newQueue(stateDb ethdb.Database) *queue { return &queue{ hashPool: make(map[common.Hash]int), hashQueue: prque.New(), @@ -100,6 +115,10 @@ func newQueue() *queue { receiptTaskQueue: prque.New(), receiptPendPool: make(map[string]*fetchRequest), receiptDonePool: make(map[common.Hash]struct{}), + stateTaskPool: make(map[common.Hash]int), + stateTaskQueue: prque.New(), + statePendPool: make(map[string]*fetchRequest), + stateDatabase: stateDb, resultCache: make([]*fetchResult, blockCacheLimit), } } @@ -109,6 +128,9 @@ func (q *queue) Reset() { q.lock.Lock() defer q.lock.Unlock() + q.mode = FullSync + q.fastSyncPivot = 0 + q.hashPool = make(map[common.Hash]int) q.hashQueue.Reset() q.hashCounter = 0 @@ -125,9 +147,14 @@ func (q *queue) Reset() { q.receiptPendPool = make(map[string]*fetchRequest) q.receiptDonePool = make(map[common.Hash]struct{}) + q.stateTaskIndex = 0 + q.stateTaskPool = make(map[common.Hash]int) + q.stateTaskQueue.Reset() + q.statePendPool = make(map[string]*fetchRequest) + q.stateScheduler = nil + q.resultCache = make([]*fetchResult, blockCacheLimit) q.resultOffset = 0 - q.resultParts = 0 } // PendingBlocks retrieves the number of block (body) requests pending for retrieval. @@ -146,12 +173,20 @@ func (q *queue) PendingReceipts() int { return q.receiptTaskQueue.Size() } +// PendingNodeData retrieves the number of node data entries pending for retrieval. +func (q *queue) PendingNodeData() int { + q.lock.RLock() + defer q.lock.RUnlock() + + return q.stateTaskQueue.Size() +} + // InFlight retrieves the number of fetch requests currently in flight. func (q *queue) InFlight() int { q.lock.RLock() defer q.lock.RUnlock() - return len(q.blockPendPool) + len(q.receiptPendPool) + return len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool) } // Idle returns if the queue is fully idle or has some data still inside. This @@ -160,8 +195,8 @@ func (q *queue) Idle() bool { q.lock.RLock() defer q.lock.RUnlock() - queued := q.hashQueue.Size() + q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() - pending := len(q.blockPendPool) + len(q.receiptPendPool) + queued := q.hashQueue.Size() + q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() + q.stateTaskQueue.Size() + pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool) cached := len(q.blockDonePool) + len(q.receiptDonePool) return (queued + pending + cached) == 0 @@ -227,7 +262,7 @@ func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash { // Schedule adds a set of headers for the download queue for scheduling, returning // the new headers encountered. 
-func (q *queue) Schedule(headers []*types.Header, from uint64, receipts bool) []*types.Header { +func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { q.lock.Lock() defer q.lock.Unlock() @@ -256,10 +291,21 @@ func (q *queue) Schedule(headers []*types.Header, from uint64, receipts bool) [] // Queue the header for content retrieval q.blockTaskPool[hash] = header q.blockTaskQueue.Push(header, -float32(header.Number.Uint64())) - if receipts { + + if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot { + // Fast phase of the fast sync, retrieve receipts too q.receiptTaskPool[hash] = header q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64())) } + if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot { + // Pivoting point of the fast sync, retrieve the state tries + q.stateScheduler = state.NewStateSync(header.Root, q.stateDatabase) + for _, hash := range q.stateScheduler.Missing(0) { + q.stateTaskPool[hash] = q.stateTaskIndex + q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex)) + q.stateTaskIndex++ + } + } inserts = append(inserts, header) q.headerHead = hash from++ @@ -279,6 +325,9 @@ func (q *queue) GetHeadResult() *fetchResult { if q.resultCache[0].Pending > 0 { return nil } + if q.mode == FastSync && q.resultCache[0].Header.Number.Uint64() == q.fastSyncPivot && len(q.stateTaskPool) > 0 { + return nil + } return q.resultCache[0] } @@ -291,9 +340,18 @@ func (q *queue) TakeResults() []*fetchResult { // Accumulate all available results results := []*fetchResult{} for _, result := range q.resultCache { + // Stop if no more results are ready if result == nil || result.Pending > 0 { break } + // The fast sync pivot block may only be processed after state fetch completes + if q.mode == FastSync && result.Header.Number.Uint64() == q.fastSyncPivot && len(q.stateTaskPool) > 0 { + break + } + // If we've just inserted the fast sync pivot, stop as the following batch needs different insertion + if q.mode == FastSync && result.Header.Number.Uint64() == q.fastSyncPivot+1 && len(results) > 0 { + break + } results = append(results, result) hash := result.Header.Hash() @@ -312,31 +370,45 @@ func (q *queue) TakeResults() []*fetchResult { return results } -// Reserve61 reserves a set of hashes for the given peer, skipping any previously -// failed download. -func (q *queue) Reserve61(p *peer, count int) *fetchRequest { +// ReserveBlocks reserves a set of block hashes for the given peer, skipping any +// previously failed download. +func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest { + return q.reserveHashes(p, count, q.hashQueue, q.blockPendPool, len(q.resultCache)-len(q.blockDonePool)) +} + +// ReserveNodeData reserves a set of node data hashes for the given peer, skipping +// any previously failed download. +func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest { + return q.reserveHashes(p, count, q.stateTaskQueue, q.statePendPool, 0) +} + +// reserveHashes reserves a set of hashes for the given peer, skipping previously +// failed ones. 
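Condensed into a single decision function, the scheduling rule introduced in Schedule looks roughly like this. It is a sketch with a made-up pivot number; the real code additionally wires the state fetch through state.NewStateSync rather than returning a flag.

package main

import "fmt"

type syncMode int

const (
    fullSync syncMode = iota
    fastSync
    lightSync
)

// tasksFor restates the scheduling rule: bodies are always queued, receipts
// only at or below the fast sync pivot, and the state trie download is kicked
// off exactly once, at the pivot header itself.
func tasksFor(mode syncMode, pivot, number uint64) (body, receipts, state bool) {
    body = true
    if mode == fastSync && number <= pivot {
        receipts = true
    }
    if mode == fastSync && number == pivot {
        state = true
    }
    return
}

func main() {
    const pivot = 100 // hypothetical pivot block number
    for _, n := range []uint64{50, 100, 101} {
        b, r, s := tasksFor(fastSync, pivot, n)
        fmt.Printf("block %3d: body=%v receipts=%v state=%v\n", n, b, r, s)
    }
}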
+func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, pendPool map[string]*fetchRequest, maxPending int) *fetchRequest { q.lock.Lock() defer q.lock.Unlock() // Short circuit if the pool has been depleted, or if the peer's already // downloading something (sanity check not to corrupt state) - if q.hashQueue.Empty() { + if taskQueue.Empty() { return nil } - if _, ok := q.blockPendPool[p.id]; ok { + if _, ok := pendPool[p.id]; ok { return nil } // Calculate an upper limit on the hashes we might fetch (i.e. throttling) - space := len(q.resultCache) - len(q.blockDonePool) - for _, request := range q.blockPendPool { - space -= len(request.Hashes) + allowance := maxPending + if allowance > 0 { + for _, request := range pendPool { + allowance -= len(request.Hashes) + } } // Retrieve a batch of hashes, skipping previously failed ones send := make(map[common.Hash]int) skip := make(map[common.Hash]int) - for proc := 0; proc < space && len(send) < count && !q.hashQueue.Empty(); proc++ { - hash, priority := q.hashQueue.Pop() + for proc := 0; (allowance == 0 || proc < allowance) && len(send) < count && !taskQueue.Empty(); proc++ { + hash, priority := taskQueue.Pop() if p.ignored.Has(hash) { skip[hash.(common.Hash)] = int(priority) } else { @@ -345,7 +417,7 @@ func (q *queue) Reserve61(p *peer, count int) *fetchRequest { } // Merge all the skipped hashes back for hash, index := range skip { - q.hashQueue.Push(hash, float32(index)) + taskQueue.Push(hash, float32(index)) } // Assemble and return the block download request if len(send) == 0 { @@ -356,19 +428,19 @@ func (q *queue) Reserve61(p *peer, count int) *fetchRequest { Hashes: send, Time: time.Now(), } - q.blockPendPool[p.id] = request + pendPool[p.id] = request return request } -// ReserveBlocks reserves a set of body fetches for the given peer, skipping any +// ReserveBodies reserves a set of body fetches for the given peer, skipping any // previously failed downloads. Beside the next batch of needed fetches, it also // returns a flag whether empty blocks were queued requiring processing. -func (q *queue) ReserveBlocks(p *peer, count int) (*fetchRequest, bool, error) { +func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) { noop := func(header *types.Header) bool { return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash } - return q.reserveFetch(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, noop) + return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, noop) } // ReserveReceipts reserves a set of receipt fetches for the given peer, skipping @@ -378,13 +450,13 @@ func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error) noop := func(header *types.Header) bool { return header.ReceiptHash == types.EmptyRootHash } - return q.reserveFetch(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, noop) + return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, noop) } -// reserveFetch reserves a set of data download operations for a given peer, +// reserveHeaders reserves a set of data download operations for a given peer, // skipping any previously failed ones. This method is a generic version used // by the individual special reservation functions. 
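A simplified, self-contained version of the reserveHashes pattern, using a plain slice instead of the priority queue and string keys instead of common.Hash; the allowance and ignore-set behaviour match the helper above.

package main

import "fmt"

// reservation pairs the hashes handed to a peer with their original priority
// so they can be pushed back verbatim if the request is cancelled or times out.
type reservation map[string]int

// reserveHashes pops up to `count` tasks for one peer, honouring an optional
// allowance (0 = unlimited) and skipping hashes the peer previously failed to
// serve; skipped entries are immediately returned to the task queue.
func reserveHashes(taskQueue *[]string, priorities map[string]int, ignored map[string]bool, count, allowance int) reservation {
    send, skip := reservation{}, []string{}
    for proc := 0; (allowance == 0 || proc < allowance) && len(send) < count && len(*taskQueue) > 0; proc++ {
        hash := (*taskQueue)[0]
        *taskQueue = (*taskQueue)[1:]
        if ignored[hash] {
            skip = append(skip, hash)
            continue
        }
        send[hash] = priorities[hash]
    }
    // Merge the skipped hashes back so other peers can still pick them up.
    *taskQueue = append(skip, *taskQueue...)
    return send
}

func main() {
    queue := []string{"h1", "h2", "h3", "h4"}
    prio := map[string]int{"h1": 0, "h2": 1, "h3": 2, "h4": 3}
    ignored := map[string]bool{"h2": true} // this peer failed to serve h2 before

    got := reserveHashes(&queue, prio, ignored, 2, 0)
    fmt.Println(got, queue) // map[h1:0 h3:2] [h2 h4]
}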
-func (q *queue) reserveFetch(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, +func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, noop func(*types.Header) bool) (*fetchRequest, bool, error) { q.lock.Lock() defer q.lock.Unlock() @@ -416,8 +488,12 @@ func (q *queue) reserveFetch(p *peer, count int, taskPool map[common.Hash]*types return nil, false, errInvalidChain } if q.resultCache[index] == nil { + components := 1 + if q.mode == FastSync && header.Number.Uint64() <= q.fastSyncPivot { + components = 2 + } q.resultCache[index] = &fetchResult{ - Pending: q.resultParts, + Pending: components, Header: header, } } @@ -456,30 +532,36 @@ func (q *queue) reserveFetch(p *peer, count int, taskPool map[common.Hash]*types return request, progress, nil } -// Cancel61 aborts a fetch request, returning all pending hashes to the queue. -func (q *queue) Cancel61(request *fetchRequest) { - q.cancel(request, nil, q.blockPendPool) +// CancelBlocks aborts a fetch request, returning all pending hashes to the queue. +func (q *queue) CancelBlocks(request *fetchRequest) { + q.cancel(request, q.hashQueue, q.blockPendPool) } -// CancelBlocks aborts a body fetch request, returning all pending hashes to the +// CancelBodies aborts a body fetch request, returning all pending headers to the // task queue. -func (q *queue) CancelBlocks(request *fetchRequest) { +func (q *queue) CancelBodies(request *fetchRequest) { q.cancel(request, q.blockTaskQueue, q.blockPendPool) } -// CancelReceipts aborts a body fetch request, returning all pending hashes to +// CancelReceipts aborts a body fetch request, returning all pending headers to // the task queue. func (q *queue) CancelReceipts(request *fetchRequest) { q.cancel(request, q.receiptTaskQueue, q.receiptPendPool) } +// CancelNodeData aborts a node state data fetch request, returning all pending +// hashes to the task queue. +func (q *queue) CancelNodeData(request *fetchRequest) { + q.cancel(request, q.stateTaskQueue, q.statePendPool) +} + // Cancel aborts a fetch request, returning all pending hashes to the task queue. func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) { q.lock.Lock() defer q.lock.Unlock() for hash, index := range request.Hashes { - q.hashQueue.Push(hash, float32(index)) + taskQueue.Push(hash, float32(index)) } for _, header := range request.Headers { taskQueue.Push(header, -float32(header.Number.Uint64())) @@ -509,29 +591,41 @@ func (q *queue) Revoke(peerId string) { } delete(q.receiptPendPool, peerId) } + if request, ok := q.statePendPool[peerId]; ok { + for hash, index := range request.Hashes { + q.stateTaskQueue.Push(hash, float32(index)) + } + delete(q.statePendPool, peerId) + } } -// Expire61 checks for in flight requests that exceeded a timeout allowance, +// ExpireBlocks checks for in flight requests that exceeded a timeout allowance, // canceling them and returning the responsible peers for penalization. 
-func (q *queue) Expire61(timeout time.Duration) []string { - return q.expire(timeout, q.blockPendPool, nil) +func (q *queue) ExpireBlocks(timeout time.Duration) []string { + return q.expire(timeout, q.blockPendPool, q.hashQueue, blockTimeoutMeter) } -// ExpireBlocks checks for in flight block body requests that exceeded a timeout +// ExpireBodies checks for in flight block body requests that exceeded a timeout // allowance, canceling them and returning the responsible peers for penalization. -func (q *queue) ExpireBlocks(timeout time.Duration) []string { - return q.expire(timeout, q.blockPendPool, q.blockTaskQueue) +func (q *queue) ExpireBodies(timeout time.Duration) []string { + return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter) } // ExpireReceipts checks for in flight receipt requests that exceeded a timeout // allowance, canceling them and returning the responsible peers for penalization. func (q *queue) ExpireReceipts(timeout time.Duration) []string { - return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue) + return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter) +} + +// ExpireNodeData checks for in flight node data requests that exceeded a timeout +// allowance, canceling them and returning the responsible peers for penalization. +func (q *queue) ExpireNodeData(timeout time.Duration) []string { + return q.expire(timeout, q.statePendPool, q.stateTaskQueue, stateTimeoutMeter) } // expire is the generic check that move expired tasks from a pending pool back // into a task pool, returning all entities caught with expired tasks. -func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque) []string { +func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) []string { q.lock.Lock() defer q.lock.Unlock() @@ -540,14 +634,11 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, for id, request := range pendPool { if time.Since(request.Time) > timeout { // Update the metrics with the timeout - if len(request.Hashes) > 0 { - blockTimeoutMeter.Mark(1) - } else { - bodyTimeoutMeter.Mark(1) - } + timeoutMeter.Mark(1) + // Return any non satisfied requests to the pool for hash, index := range request.Hashes { - q.hashQueue.Push(hash, float32(index)) + taskQueue.Push(hash, float32(index)) } for _, header := range request.Headers { taskQueue.Push(header, -float32(header.Number.Uint64())) @@ -562,8 +653,8 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, return peers } -// Deliver61 injects a block retrieval response into the download queue. -func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) { +// DeliverBlocks injects a block retrieval response into the download queue. +func (q *queue) DeliverBlocks(id string, blocks []*types.Block) error { q.lock.Lock() defer q.lock.Unlock() @@ -626,8 +717,8 @@ func (q *queue) Deliver61(id string, blocks []*types.Block) (err error) { } } -// DeliverBlocks injects a block (body) retrieval response into the results queue. -func (q *queue) DeliverBlocks(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) error { +// DeliverBodies injects a block body retrieval response into the results queue. 
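The unified expiry check can be illustrated independently of the queue internals. The snippet below uses a trimmed-down request type and a requeue callback in place of the prque task queues and timeout meters.

package main

import (
    "fmt"
    "time"
)

// pendingRequest is a trimmed-down fetchRequest: which hashes a peer was
// asked for (with their queue priorities) and when the request went out.
type pendingRequest struct {
    hashes map[string]int
    sent   time.Time
}

// expire moves every timed-out request from the pending pool back onto the
// shared task queue and reports the peers responsible so the caller can
// penalise them; it is the same loop for bodies, receipts and node data.
func expire(timeout time.Duration, pending map[string]*pendingRequest, requeue func(hash string, prio int)) []string {
    peers := []string{}
    for id, req := range pending {
        if time.Since(req.sent) > timeout {
            for hash, prio := range req.hashes {
                requeue(hash, prio)
            }
            delete(pending, id)
            peers = append(peers, id)
        }
    }
    return peers
}

func main() {
    pending := map[string]*pendingRequest{
        "slow": {hashes: map[string]int{"h1": 0}, sent: time.Now().Add(-10 * time.Second)},
        "fast": {hashes: map[string]int{"h2": 1}, sent: time.Now()},
    }
    var requeued []string
    bad := expire(5*time.Second, pending, func(h string, _ int) { requeued = append(requeued, h) })
    fmt.Println(bad, requeued, len(pending)) // [slow] [h1] 1
}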
+func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) error { reconstruct := func(header *types.Header, index int, result *fetchResult) error { if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash { return errInvalidBody @@ -717,14 +808,84 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ } } +// DeliverNodeData injects a node state data retrieval response into the queue. +func (q *queue) DeliverNodeData(id string, data [][]byte) (int, int, error) { + q.lock.Lock() + defer q.lock.Unlock() + + // Short circuit if the data was never requested + request := q.statePendPool[id] + if request == nil { + return 0, 0, errNoFetchesPending + } + stateReqTimer.UpdateSince(request.Time) + delete(q.statePendPool, id) + + // If no data was retrieved, mark them as unavailable for the origin peer + if len(data) == 0 { + for hash, _ := range request.Hashes { + request.Peer.ignored.Add(hash) + } + } + // Iterate over the downloaded data and verify each of them + errs := make([]error, 0) + processed := 0 + for _, blob := range data { + // Skip any blocks that were not requested + hash := common.BytesToHash(crypto.Sha3(blob)) + if _, ok := request.Hashes[hash]; !ok { + errs = append(errs, fmt.Errorf("non-requested state data %x", hash)) + continue + } + // Inject the next state trie item into the database + if err := q.stateScheduler.Process([]trie.SyncResult{{hash, blob}}); err != nil { + errs = []error{err} + break + } + processed++ + + delete(request.Hashes, hash) + delete(q.stateTaskPool, hash) + } + // Return all failed or missing fetches to the queue + for hash, index := range request.Hashes { + q.stateTaskQueue.Push(hash, float32(index)) + } + // Also enqueue any newly required state trie nodes + discovered := 0 + if len(q.stateTaskPool) < maxQueuedStates { + for _, hash := range q.stateScheduler.Missing(4 * MaxStateFetch) { + q.stateTaskPool[hash] = q.stateTaskIndex + q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex)) + q.stateTaskIndex++ + discovered++ + } + } + // If none of the data items were good, it's a stale delivery + switch { + case len(errs) == 0: + return processed, discovered, nil + + case len(errs) == len(request.Hashes): + return processed, discovered, errStaleDelivery + + default: + return processed, discovered, fmt.Errorf("multiple failures: %v", errs) + } +} + // Prepare configures the result cache to allow accepting and caching inbound // fetch results. -func (q *queue) Prepare(offset uint64, parts int) { +func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64) { q.lock.Lock() defer q.lock.Unlock() if q.resultOffset < offset { q.resultOffset = offset } - q.resultParts = parts + q.fastSyncPivot = 0 + if mode == FastSync { + q.fastSyncPivot = pivot + } + q.mode = mode } diff --git a/eth/downloader/types.go b/eth/downloader/types.go new file mode 100644 index 000000000..221ef38f6 --- /dev/null +++ b/eth/downloader/types.go @@ -0,0 +1,137 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
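DeliverNodeData relies on node data being content addressed: a blob is only accepted if its hash was actually requested, and whatever is still missing afterwards is rescheduled. A standalone sketch of that verification step, using sha256 purely as a stand-in for the Keccak hash the real code computes via crypto.Sha3:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// verifyNodeData hashes each returned blob and accepts it only if that hash
// was requested; everything still outstanding is returned for rescheduling.
func verifyNodeData(requested map[string]bool, blobs [][]byte) (accepted map[string][]byte, missing []string) {
    accepted = make(map[string][]byte)
    for _, blob := range blobs {
        sum := sha256.Sum256(blob)
        hash := hex.EncodeToString(sum[:])
        if !requested[hash] {
            continue // unsolicited or corrupted data, ignore it
        }
        accepted[hash] = blob
        delete(requested, hash)
    }
    for hash := range requested {
        missing = append(missing, hash)
    }
    return accepted, missing
}

func main() {
    want := []byte("trie node A")
    sum := sha256.Sum256(want)
    requested := map[string]bool{hex.EncodeToString(sum[:]): true, "deadbeef": true}

    accepted, missing := verifyNodeData(requested, [][]byte{want, []byte("garbage")})
    fmt.Println(len(accepted), missing) // 1 [deadbeef]
}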
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package downloader + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// headerCheckFn is a callback type for verifying a header's presence in the local chain. +type headerCheckFn func(common.Hash) bool + +// blockCheckFn is a callback type for verifying a block's presence in the local chain. +type blockCheckFn func(common.Hash) bool + +// headerRetrievalFn is a callback type for retrieving a header from the local chain. +type headerRetrievalFn func(common.Hash) *types.Header + +// blockRetrievalFn is a callback type for retrieving a block from the local chain. +type blockRetrievalFn func(common.Hash) *types.Block + +// headHeaderRetrievalFn is a callback type for retrieving the head header from the local chain. +type headHeaderRetrievalFn func() *types.Header + +// headBlockRetrievalFn is a callback type for retrieving the head block from the local chain. +type headBlockRetrievalFn func() *types.Block + +// headFastBlockRetrievalFn is a callback type for retrieving the head fast block from the local chain. +type headFastBlockRetrievalFn func() *types.Block + +// headBlockCommitterFn is a callback for directly committing the head block to a certain entity. +type headBlockCommitterFn func(common.Hash) error + +// tdRetrievalFn is a callback type for retrieving the total difficulty of a local block. +type tdRetrievalFn func(common.Hash) *big.Int + +// headerChainInsertFn is a callback type to insert a batch of headers into the local chain. +type headerChainInsertFn func([]*types.Header, bool) (int, error) + +// blockChainInsertFn is a callback type to insert a batch of blocks into the local chain. +type blockChainInsertFn func(types.Blocks) (int, error) + +// receiptChainInsertFn is a callback type to insert a batch of receipts into the local chain. +type receiptChainInsertFn func(types.Blocks, []types.Receipts) (int, error) + +// peerDropFn is a callback type for dropping a peer detected as malicious. +type peerDropFn func(id string) + +// dataPack is a data message returned by a peer for some query. +type dataPack interface { + PeerId() string + Items() int + Stats() string +} + +// hashPack is a batch of block hashes returned by a peer (eth/61). +type hashPack struct { + peerId string + hashes []common.Hash +} + +func (p *hashPack) PeerId() string { return p.peerId } +func (p *hashPack) Items() int { return len(p.hashes) } +func (p *hashPack) Stats() string { return fmt.Sprintf("%d", len(p.hashes)) } + +// blockPack is a batch of blocks returned by a peer (eth/61). +type blockPack struct { + peerId string + blocks []*types.Block +} + +func (p *blockPack) PeerId() string { return p.peerId } +func (p *blockPack) Items() int { return len(p.blocks) } +func (p *blockPack) Stats() string { return fmt.Sprintf("%d", len(p.blocks)) } + +// headerPack is a batch of block headers returned by a peer. 
+type headerPack struct { + peerId string + headers []*types.Header +} + +func (p *headerPack) PeerId() string { return p.peerId } +func (p *headerPack) Items() int { return len(p.headers) } +func (p *headerPack) Stats() string { return fmt.Sprintf("%d", len(p.headers)) } + +// bodyPack is a batch of block bodies returned by a peer. +type bodyPack struct { + peerId string + transactions [][]*types.Transaction + uncles [][]*types.Header +} + +func (p *bodyPack) PeerId() string { return p.peerId } +func (p *bodyPack) Items() int { + if len(p.transactions) <= len(p.uncles) { + return len(p.transactions) + } + return len(p.uncles) +} +func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) } + +// receiptPack is a batch of receipts returned by a peer. +type receiptPack struct { + peerId string + receipts [][]*types.Receipt +} + +func (p *receiptPack) PeerId() string { return p.peerId } +func (p *receiptPack) Items() int { return len(p.receipts) } +func (p *receiptPack) Stats() string { return fmt.Sprintf("%d", len(p.receipts)) } + +// statePack is a batch of states returned by a peer. +type statePack struct { + peerId string + states [][]byte +} + +func (p *statePack) PeerId() string { return p.peerId } +func (p *statePack) Items() int { return len(p.states) } +func (p *statePack) Stats() string { return fmt.Sprintf("%d", len(p.states)) } diff --git a/eth/handler.go b/eth/handler.go index 1117cb1b7..b0916d50b 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -129,9 +129,9 @@ func NewProtocolManager(mode Mode, networkId int, mux *event.TypeMux, txpool txP case LightMode: syncMode = downloader.LightSync } - manager.downloader = downloader.New(syncMode, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, - blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.GetTd, - blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, manager.removePeer) + manager.downloader = downloader.New(syncMode, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, + blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, + blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, manager.removePeer) validator := func(block *types.Block, parent *types.Block) error { return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false) @@ -220,8 +220,8 @@ func (pm *ProtocolManager) handle(p *peer) error { // Register the peer in the downloader. If the downloader considers it banned, we disconnect if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(), - p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks, - p.RequestHeadersByHash, p.RequestHeadersByNumber, p.RequestBodies, p.RequestReceipts); err != nil { + p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks, p.RequestHeadersByHash, + p.RequestHeadersByNumber, p.RequestBodies, p.RequestReceipts, p.RequestNodeData); err != nil { return err } // Propagate existing transactions. 
new transactions appearing @@ -307,7 +307,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { break } // Deliver them all to the downloader for queuing - err := pm.downloader.DeliverHashes61(p.id, hashes) + err := pm.downloader.DeliverHashes(p.id, hashes) if err != nil { glog.V(logger.Debug).Infoln(err) } @@ -353,7 +353,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } // Filter out any explicitly requested blocks, deliver the rest to the downloader if blocks := pm.fetcher.FilterBlocks(blocks); len(blocks) > 0 { - pm.downloader.DeliverBlocks61(p.id, blocks) + pm.downloader.DeliverBlocks(p.id, blocks) } // Block header query, collect the requested headers and reply @@ -515,6 +515,17 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } return p.SendNodeData(data) + case p.version >= eth63 && msg.Code == NodeDataMsg: + // A batch of node state data arrived to one of our previous requests + var data [][]byte + if err := msg.Decode(&data); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + // Deliver all to the downloader + if err := pm.downloader.DeliverNodeData(p.id, data); err != nil { + glog.V(logger.Debug).Infof("failed to deliver node state data: %v", err) + } + case p.version >= eth63 && msg.Code == GetReceiptsMsg: // Decode the retrieval message msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) diff --git a/eth/peer.go b/eth/peer.go index e24be97f1..68ce903a6 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -191,7 +191,7 @@ func (p *peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error { return p2p.Send(p.rw, BlockBodiesMsg, bodies) } -// SendNodeData sends a batch of arbitrary internal data, corresponding to the +// SendNodeDataRLP sends a batch of arbitrary internal data, corresponding to the // hashes requested. func (p *peer) SendNodeData(data [][]byte) error { return p2p.Send(p.rw, NodeDataMsg, data) -- cgit v1.2.3 From b97e34a8e4d06b315cc495819ba6612f89dec54f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 7 Oct 2015 12:14:30 +0300 Subject: eth/downloader: concurrent receipt and state processing --- eth/downloader/downloader.go | 64 +++++++++------ eth/downloader/downloader_test.go | 4 +- eth/downloader/queue.go | 160 +++++++++++++++++++++++++++----------- eth/downloader/types.go | 2 +- 4 files changed, 158 insertions(+), 72 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 96177ae8a..e19b70dfd 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -830,7 +830,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { } // If there's nothing more to fetch, wait or terminate if d.queue.PendingBlocks() == 0 { - if d.queue.InFlight() == 0 && finished { + if !d.queue.InFlightBlocks() && finished { glog.V(logger.Debug).Infof("Block fetching completed") return nil } @@ -864,7 +864,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { } // Make sure that we have peers available for fetching. 
If all peers have been tried // and all failed throw an error - if !throttled && d.queue.InFlight() == 0 && len(idles) == total { + if !throttled && !d.queue.InFlightBlocks() && len(idles) == total { return errPeersUnavailable } } @@ -1124,7 +1124,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from) if d.mode == FastSync || d.mode == LightSync { - if n, err := d.insertHeaders(headers, false); err != nil { + if n, err := d.insertHeaders(headers, headerCheckFrequency); err != nil { glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headers[n].Number, headers[n].Hash().Bytes()[:4], err) return errInvalidChain } @@ -1194,8 +1194,8 @@ func (d *Downloader) fetchBodies(from uint64) error { setIdle = func(p *peer) { p.SetBlocksIdle() } ) err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, - d.queue.PendingBlocks, d.queue.ThrottleBlocks, d.queue.ReserveBodies, d.bodyFetchHook, - fetch, d.queue.CancelBodies, capacity, getIdles, setIdle, "Body") + d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ThrottleBlocks, d.queue.ReserveBodies, + d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, getIdles, setIdle, "Body") glog.V(logger.Debug).Infof("Block body download terminated: %v", err) return err @@ -1218,8 +1218,8 @@ func (d *Downloader) fetchReceipts(from uint64) error { setIdle = func(p *peer) { p.SetReceiptsIdle() } ) err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, - d.queue.PendingReceipts, d.queue.ThrottleReceipts, d.queue.ReserveReceipts, d.receiptFetchHook, - fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt") + d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ThrottleReceipts, d.queue.ReserveReceipts, + d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt") glog.V(logger.Debug).Infof("Receipt download terminated: %v", err) return err @@ -1234,15 +1234,29 @@ func (d *Downloader) fetchNodeData() error { var ( deliver = func(packet dataPack) error { start := time.Now() - done, found, err := d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states) - - d.syncStatsLock.Lock() - totalDone, totalKnown := d.syncStatsStateDone+uint64(done), d.syncStatsStateTotal+uint64(found) - d.syncStatsStateDone, d.syncStatsStateTotal = totalDone, totalKnown - d.syncStatsLock.Unlock() + return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(err error, delivered int) { + if err != nil { + // If the node data processing failed, the root hash is very wrong, abort + glog.V(logger.Error).Infof("peer %d: state processing failed: %v", packet.PeerId(), err) + d.cancel() + return + } + // Processing succeeded, notify state fetcher and processor of continuation + if d.queue.PendingNodeData() == 0 { + go d.process() + } else { + select { + case d.stateWakeCh <- true: + default: + } + } + // Log a message to the user and return + d.syncStatsLock.Lock() + defer d.syncStatsLock.Unlock() - glog.V(logger.Info).Infof("imported %d [%d / %d] state entries in %v.", done, totalDone, totalKnown, time.Since(start)) - return err + d.syncStatsStateDone += uint64(delivered) + glog.V(logger.Info).Infof("imported %d state entries in %v: processed %d in total", delivered, time.Since(start), d.syncStatsStateDone) + }) } expire = func() []string { return d.queue.ExpireNodeData(stateHardTTL) } throttle = 
func() bool { return false } @@ -1254,8 +1268,8 @@ func (d *Downloader) fetchNodeData() error { setIdle = func(p *peer) { p.SetNodeDataIdle() } ) err := d.fetchParts(errCancelReceiptFetch, d.stateCh, deliver, d.stateWakeCh, expire, - d.queue.PendingNodeData, throttle, reserve, nil, fetch, d.queue.CancelNodeData, - capacity, d.peers.ReceiptIdlePeers, setIdle, "State") + d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch, + d.queue.CancelNodeData, capacity, d.peers.ReceiptIdlePeers, setIdle, "State") glog.V(logger.Debug).Infof("Node state data download terminated: %v", err) return err @@ -1265,8 +1279,9 @@ func (d *Downloader) fetchNodeData() error { // peers, reserving a chunk of fetch requests for each, waiting for delivery and // also periodically checking for timeouts. func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(packet dataPack) error, wakeCh chan bool, - expire func() []string, pending func() int, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error), fetchHook func([]*types.Header), - fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int, idle func() ([]*peer, int), setIdle func(*peer), kind string) error { + expire func() []string, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error), + fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int, + idle func() ([]*peer, int), setIdle func(*peer), kind string) error { // Create a ticker to detect expired retreival tasks ticker := time.NewTicker(100 * time.Millisecond) @@ -1378,14 +1393,14 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv } // If there's nothing more to fetch, wait or terminate if pending() == 0 { - if d.queue.InFlight() == 0 && finished { + if !inFlight() && finished { glog.V(logger.Debug).Infof("%s fetching completed", kind) return nil } break } // Send a download request to all idle peers, until throttled - progressed, throttled := false, false + progressed, throttled, running := false, false, inFlight() idles, total := idle() for _, peer := range idles { @@ -1423,10 +1438,11 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv glog.V(logger.Error).Infof("%v: %s fetch failed, rescheduling", peer, strings.ToLower(kind)) cancel(request) } + running = true } // Make sure that we have peers available for fetching. 
If all peers have been tried // and all failed throw an error - if !progressed && !throttled && d.queue.InFlight() == 0 && len(idles) == total { + if !progressed && !throttled && !running && len(idles) == total && pending() > 0 { return errPeersUnavailable } } @@ -1514,12 +1530,12 @@ func (d *Downloader) process() { ) switch { case len(headers) > 0: - index, err = d.insertHeaders(headers, true) + index, err = d.insertHeaders(headers, headerCheckFrequency) case len(receipts) > 0: index, err = d.insertReceipts(blocks, receipts) if err == nil && blocks[len(blocks)-1].NumberU64() == d.queue.fastSyncPivot { - err = d.commitHeadBlock(blocks[len(blocks)-1].Hash()) + index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash()) } default: index, err = d.insertBlocks(blocks) diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 8944ae4b0..0e60371b3 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -268,7 +268,7 @@ func (dl *downloadTester) getTd(hash common.Hash) *big.Int { } // insertHeaders injects a new batch of headers into the simulated chain. -func (dl *downloadTester) insertHeaders(headers []*types.Header, verify bool) (int, error) { +func (dl *downloadTester) insertHeaders(headers []*types.Header, checkFreq int) (int, error) { dl.lock.Lock() defer dl.lock.Unlock() @@ -1262,7 +1262,7 @@ func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { pending.Wait() // Simulate a successful sync above the fork - tester.downloader.syncStatsOrigin = tester.downloader.syncStatsHeight + tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight // Synchronise with the second fork and check boundary resets tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB) diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 942ed0d63..bb8d892cd 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -93,8 +94,10 @@ type queue struct { stateTaskQueue *prque.Prque // [eth/63] Priority queue of the hashes to fetch the node data for statePendPool map[string]*fetchRequest // [eth/63] Currently pending node data retrieval operations - stateDatabase ethdb.Database // [eth/63] Trie database to populate during state reassembly - stateScheduler *state.StateSync // [eth/63] State trie synchronisation scheduler and integrator + stateDatabase ethdb.Database // [eth/63] Trie database to populate during state reassembly + stateScheduler *state.StateSync // [eth/63] State trie synchronisation scheduler and integrator + stateProcessors int32 // [eth/63] Number of currently running state processors + stateSchedLock sync.RWMutex // [eth/63] Lock serializing access to the state scheduler resultCache []*fetchResult // Downloaded but not yet delivered fetch results resultOffset uint64 // Offset of the first cached fetch result in the block-chain @@ -175,18 +178,40 @@ func (q *queue) PendingReceipts() int { // PendingNodeData retrieves the number of node data entries pending for retrieval. func (q *queue) PendingNodeData() int { + q.stateSchedLock.RLock() + defer q.stateSchedLock.RUnlock() + + if q.stateScheduler != nil { + return q.stateScheduler.Pending() + } + return 0 +} + +// InFlightBlocks retrieves whether there are block fetch requests currently in +// flight. 
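The insertHeaders callback above now takes an integer check frequency in place of the old boolean verify flag; as a rough sketch of the assumed semantics (illustrative helper only, not the core implementation), a frequency of N verifies every N-th header while 1 verifies them all:

// shouldVerify illustrates one plausible reading of checkFreq: fully verify
// every checkFreq-th header (and the final one), with checkFreq <= 1 meaning
// "verify everything". Purely a sketch for this patch series; names assumed.
func shouldVerify(index, total, checkFreq int) bool {
	if checkFreq <= 1 {
		return true
	}
	return index%checkFreq == 0 || index == total-1
}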
+func (q *queue) InFlightBlocks() bool { q.lock.RLock() defer q.lock.RUnlock() - return q.stateTaskQueue.Size() + return len(q.blockPendPool) > 0 } -// InFlight retrieves the number of fetch requests currently in flight. -func (q *queue) InFlight() int { +// InFlightReceipts retrieves whether there are receipt fetch requests currently +// in flight. +func (q *queue) InFlightReceipts() bool { q.lock.RLock() defer q.lock.RUnlock() - return len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool) + return len(q.receiptPendPool) > 0 +} + +// InFlightNodeData retrieves whether there are node data entry fetch requests +// currently in flight. +func (q *queue) InFlightNodeData() bool { + q.lock.RLock() + defer q.lock.RUnlock() + + return len(q.statePendPool)+int(atomic.LoadInt32(&q.stateProcessors)) > 0 } // Idle returns if the queue is fully idle or has some data still inside. This @@ -199,6 +224,12 @@ func (q *queue) Idle() bool { pending := len(q.blockPendPool) + len(q.receiptPendPool) + len(q.statePendPool) cached := len(q.blockDonePool) + len(q.receiptDonePool) + q.stateSchedLock.RLock() + if q.stateScheduler != nil { + queued += q.stateScheduler.Pending() + } + q.stateSchedLock.RUnlock() + return (queued + pending + cached) == 0 } @@ -299,12 +330,9 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { } if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot { // Pivoting point of the fast sync, retrieve the state tries + q.stateSchedLock.Lock() q.stateScheduler = state.NewStateSync(header.Root, q.stateDatabase) - for _, hash := range q.stateScheduler.Missing(0) { - q.stateTaskPool[hash] = q.stateTaskIndex - q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex)) - q.stateTaskIndex++ - } + q.stateSchedLock.Unlock() } inserts = append(inserts, header) q.headerHead = hash @@ -325,8 +353,13 @@ func (q *queue) GetHeadResult() *fetchResult { if q.resultCache[0].Pending > 0 { return nil } - if q.mode == FastSync && q.resultCache[0].Header.Number.Uint64() == q.fastSyncPivot && len(q.stateTaskPool) > 0 { - return nil + if q.mode == FastSync && q.resultCache[0].Header.Number.Uint64() == q.fastSyncPivot { + if len(q.stateTaskPool) > 0 { + return nil + } + if q.PendingNodeData() > 0 { + return nil + } } return q.resultCache[0] } @@ -345,8 +378,13 @@ func (q *queue) TakeResults() []*fetchResult { break } // The fast sync pivot block may only be processed after state fetch completes - if q.mode == FastSync && result.Header.Number.Uint64() == q.fastSyncPivot && len(q.stateTaskPool) > 0 { - break + if q.mode == FastSync && result.Header.Number.Uint64() == q.fastSyncPivot { + if len(q.stateTaskPool) > 0 { + break + } + if q.PendingNodeData() > 0 { + break + } } // If we've just inserted the fast sync pivot, stop as the following batch needs different insertion if q.mode == FastSync && result.Header.Number.Uint64() == q.fastSyncPivot+1 && len(results) > 0 { @@ -373,26 +411,34 @@ func (q *queue) TakeResults() []*fetchResult { // ReserveBlocks reserves a set of block hashes for the given peer, skipping any // previously failed download. func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest { - return q.reserveHashes(p, count, q.hashQueue, q.blockPendPool, len(q.resultCache)-len(q.blockDonePool)) + return q.reserveHashes(p, count, q.hashQueue, nil, q.blockPendPool, len(q.resultCache)-len(q.blockDonePool)) } // ReserveNodeData reserves a set of node data hashes for the given peer, skipping // any previously failed download. 
func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest { - return q.reserveHashes(p, count, q.stateTaskQueue, q.statePendPool, 0) + // Create a task generator to fetch status-fetch tasks if all schedules ones are done + generator := func(max int) { + q.stateSchedLock.Lock() + defer q.stateSchedLock.Unlock() + + for _, hash := range q.stateScheduler.Missing(max) { + q.stateTaskPool[hash] = q.stateTaskIndex + q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex)) + q.stateTaskIndex++ + } + } + return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, count) } // reserveHashes reserves a set of hashes for the given peer, skipping previously // failed ones. -func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, pendPool map[string]*fetchRequest, maxPending int) *fetchRequest { +func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, taskGen func(int), pendPool map[string]*fetchRequest, maxPending int) *fetchRequest { q.lock.Lock() defer q.lock.Unlock() - // Short circuit if the pool has been depleted, or if the peer's already - // downloading something (sanity check not to corrupt state) - if taskQueue.Empty() { - return nil - } + // Short circuit if the peer's already downloading something (sanity check not + // to corrupt state) if _, ok := pendPool[p.id]; ok { return nil } @@ -403,6 +449,13 @@ func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, pendPo allowance -= len(request.Hashes) } } + // If there's a task generator, ask it to fill our task queue + if taskGen != nil && taskQueue.Size() < allowance { + taskGen(allowance - taskQueue.Size()) + } + if taskQueue.Empty() { + return nil + } // Retrieve a batch of hashes, skipping previously failed ones send := make(map[common.Hash]int) skip := make(map[common.Hash]int) @@ -809,14 +862,14 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ } // DeliverNodeData injects a node state data retrieval response into the queue. 
-func (q *queue) DeliverNodeData(id string, data [][]byte) (int, int, error) { +func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, int)) error { q.lock.Lock() defer q.lock.Unlock() // Short circuit if the data was never requested request := q.statePendPool[id] if request == nil { - return 0, 0, errNoFetchesPending + return errNoFetchesPending } stateReqTimer.UpdateSince(request.Time) delete(q.statePendPool, id) @@ -829,7 +882,7 @@ func (q *queue) DeliverNodeData(id string, data [][]byte) (int, int, error) { } // Iterate over the downloaded data and verify each of them errs := make([]error, 0) - processed := 0 + process := []trie.SyncResult{} for _, blob := range data { // Skip any blocks that were not requested hash := common.BytesToHash(crypto.Sha3(blob)) @@ -837,41 +890,58 @@ func (q *queue) DeliverNodeData(id string, data [][]byte) (int, int, error) { errs = append(errs, fmt.Errorf("non-requested state data %x", hash)) continue } - // Inject the next state trie item into the database - if err := q.stateScheduler.Process([]trie.SyncResult{{hash, blob}}); err != nil { - errs = []error{err} - break - } - processed++ + // Inject the next state trie item into the processing queue + process = append(process, trie.SyncResult{hash, blob}) delete(request.Hashes, hash) delete(q.stateTaskPool, hash) } + // Start the asynchronous node state data injection + atomic.AddInt32(&q.stateProcessors, 1) + go func() { + defer atomic.AddInt32(&q.stateProcessors, -1) + q.deliverNodeData(process, callback) + }() // Return all failed or missing fetches to the queue for hash, index := range request.Hashes { q.stateTaskQueue.Push(hash, float32(index)) } - // Also enqueue any newly required state trie nodes - discovered := 0 - if len(q.stateTaskPool) < maxQueuedStates { - for _, hash := range q.stateScheduler.Missing(4 * MaxStateFetch) { - q.stateTaskPool[hash] = q.stateTaskIndex - q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex)) - q.stateTaskIndex++ - discovered++ - } - } // If none of the data items were good, it's a stale delivery switch { case len(errs) == 0: - return processed, discovered, nil + return nil case len(errs) == len(request.Hashes): - return processed, discovered, errStaleDelivery + return errStaleDelivery default: - return processed, discovered, fmt.Errorf("multiple failures: %v", errs) + return fmt.Errorf("multiple failures: %v", errs) + } +} + +// deliverNodeData is the asynchronous node data processor that injects a batch +// of sync results into the state scheduler. 
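Condensed sketch of the callback-driven flow that DeliverNodeData above and deliverNodeData below implement, with hypothetical names, simplified types and none of the queue's locking; the point is that delivery returns immediately, while an atomic counter keeps the asynchronous injection visible to the in-flight check until it finishes:

import "sync/atomic"

// nodeDataWork is a stand-in for the queue's state-sync bookkeeping.
type nodeDataWork struct {
	processors int32 // running asynchronous injection goroutines
}

// deliver hands a batch to an injector on a fresh goroutine and reports back
// through the callback: the failing error plus items processed so far, or nil
// and the full batch size on success.
func (w *nodeDataWork) deliver(batch [][]byte, inject func([]byte) error, callback func(error, int)) {
	atomic.AddInt32(&w.processors, 1)
	go func() {
		defer atomic.AddInt32(&w.processors, -1)
		for i, blob := range batch {
			if err := inject(blob); err != nil {
				callback(err, i)
				return
			}
		}
		callback(nil, len(batch))
	}()
}

// busy mirrors InFlightNodeData: outstanding requests plus running processors.
func (w *nodeDataWork) busy(pendingRequests int) bool {
	return pendingRequests+int(atomic.LoadInt32(&w.processors)) > 0
}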
+func (q *queue) deliverNodeData(results []trie.SyncResult, callback func(error, int)) { + // Process results one by one to permit task fetches in between + for i, result := range results { + q.stateSchedLock.Lock() + + if q.stateScheduler == nil { + // Syncing aborted since this async delivery started, bail out + q.stateSchedLock.Unlock() + callback(errNoFetchesPending, i) + return + } + if _, err := q.stateScheduler.Process([]trie.SyncResult{result}); err != nil { + // Processing a state result failed, bail out + q.stateSchedLock.Unlock() + callback(err, i) + return + } + // Item processing succeeded, release the lock (temporarily) + q.stateSchedLock.Unlock() } + callback(nil, len(results)) } // Prepare configures the result cache to allow accepting and caching inbound diff --git a/eth/downloader/types.go b/eth/downloader/types.go index 221ef38f6..60d9a2b12 100644 --- a/eth/downloader/types.go +++ b/eth/downloader/types.go @@ -52,7 +52,7 @@ type headBlockCommitterFn func(common.Hash) error type tdRetrievalFn func(common.Hash) *big.Int // headerChainInsertFn is a callback type to insert a batch of headers into the local chain. -type headerChainInsertFn func([]*types.Header, bool) (int, error) +type headerChainInsertFn func([]*types.Header, int) (int, error) // blockChainInsertFn is a callback type to insert a batch of blocks into the local chain. type blockChainInsertFn func(types.Blocks) (int, error) -- cgit v1.2.3 From a9d8dfc8e77330412b1f21e25a69b96d59567e36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 9 Oct 2015 16:21:47 +0300 Subject: core, eth: roll back uncertain headers in failed fast syncs --- eth/downloader/downloader.go | 60 +++++++++++++++++++++++++----- eth/downloader/downloader_test.go | 78 ++++++++++++++++++++++++++++++++++++++- eth/downloader/types.go | 3 ++ eth/handler.go | 2 +- 4 files changed, 131 insertions(+), 12 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index e19b70dfd..0298dfa0b 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -59,8 +59,8 @@ var ( maxQueuedStates = 256 * 1024 // [eth/63] Maximum number of state requests to queue (DOS protection) maxResultsProcess = 256 // Number of download results to import at once into the chain - headerCheckFrequency = 64 // Verification frequency of the downloaded headers during fast sync - minCheckedHeaders = 1024 // Number of headers to verify fully when approaching the chain head + headerCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync + minCheckedHeaders = 2048 // Number of headers to verify fully when approaching the chain head minFullBlocks = 1024 // Number of blocks to retrieve fully even in fast sync ) @@ -117,6 +117,7 @@ type Downloader struct { insertHeaders headerChainInsertFn // Injects a batch of headers into the chain insertBlocks blockChainInsertFn // Injects a batch of blocks into the chain insertReceipts receiptChainInsertFn // Injects a batch of blocks and their receipts into the chain + rollback chainRollbackFn // Removes a batch of recently added chain links dropPeer peerDropFn // Drops a peer for misbehaving // Status @@ -152,7 +153,7 @@ type Downloader struct { func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock blockCheckFn, getHeader headerRetrievalFn, getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, headFastBlock headFastBlockRetrievalFn, 
commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, - insertReceipts receiptChainInsertFn, dropPeer peerDropFn) *Downloader { + insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader { return &Downloader{ mode: mode, @@ -171,6 +172,7 @@ func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, hasHeader he insertHeaders: insertHeaders, insertBlocks: insertBlocks, insertReceipts: insertReceipts, + rollback: rollback, dropPeer: dropPeer, newPeerCh: make(chan *peer, 1), hashCh: make(chan dataPack, 1), @@ -383,7 +385,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e d.syncStatsChainHeight = latest d.syncStatsLock.Unlock() - // Initiate the sync using a concurrent header and content retrieval algorithm + // Initiate the sync using a concurrent header and content retrieval algorithm pivot := uint64(0) if latest > uint64(minFullBlocks) { pivot = latest - uint64(minFullBlocks) @@ -394,10 +396,10 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e d.syncInitHook(origin, latest) } errc := make(chan error, 4) - go func() { errc <- d.fetchHeaders(p, td, origin+1) }() // Headers are always retrieved - go func() { errc <- d.fetchBodies(origin + 1) }() // Bodies are retrieved during normal and fast sync - go func() { errc <- d.fetchReceipts(origin + 1) }() // Receipts are retrieved during fast sync - go func() { errc <- d.fetchNodeData() }() // Node state data is retrieved during fast sync + go func() { errc <- d.fetchHeaders(p, td, origin+1, latest) }() // Headers are always retrieved + go func() { errc <- d.fetchBodies(origin + 1) }() // Bodies are retrieved during normal and fast sync + go func() { errc <- d.fetchReceipts(origin + 1) }() // Receipts are retrieved during fast sync + go func() { errc <- d.fetchNodeData() }() // Node state data is retrieved during fast sync // If any fetcher fails, cancel the others var fail error @@ -1049,10 +1051,28 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) { // // The queue parameter can be used to switch between queuing headers for block // body download too, or directly import as pure header chains. 
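For a concrete feel of the pivot computed in syncWithPeer above (numbers purely illustrative): with a peer head at block 1,000,000 and minFullBlocks = 1024, the pivot lands at 998,976. Blocks up to the pivot are filled in from headers, bodies and receipts, with the state trie fetched at the pivot itself, while the final 1024 blocks are downloaded and executed as in a full sync.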
-func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { +func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from, latest uint64) error { glog.V(logger.Debug).Infof("%v: downloading headers from #%d", p, from) defer glog.V(logger.Debug).Infof("%v: header download terminated", p) + // Keep a count of uncertain headers to roll back + rollback := []*types.Header{} + defer func() { + if len(rollback) > 0 { + hashes := make([]common.Hash, len(rollback)) + for i, header := range rollback { + hashes[i] = header.Hash() + } + d.rollback(hashes) + } + }() + // Calculate the pivoting point for switching from fast to slow sync + pivot := uint64(0) + if d.mode == FastSync && latest > uint64(minFullBlocks) { + pivot = latest - uint64(minFullBlocks) + } else if d.mode == LightSync { + pivot = latest + } // Create a timeout timer, and the associated hash fetcher request := time.Now() // time of the last fetch request timeout := time.NewTimer(0) // timer to dump a non-responsive active peer @@ -1124,10 +1144,30 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from) if d.mode == FastSync || d.mode == LightSync { - if n, err := d.insertHeaders(headers, headerCheckFrequency); err != nil { + // Collect the yet unknown headers to mark them as uncertain + unknown := make([]*types.Header, 0, len(headers)) + for _, header := range headers { + if !d.hasHeader(header.Hash()) { + unknown = append(unknown, header) + } + } + // If we're importing pure headers, verify based on their recentness + frequency := headerCheckFrequency + if headers[len(headers)-1].Number.Uint64()+uint64(minCheckedHeaders) > pivot { + frequency = 1 + } + if n, err := d.insertHeaders(headers, frequency); err != nil { glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headers[n].Number, headers[n].Hash().Bytes()[:4], err) return errInvalidChain } + // All verifications passed, store newly found uncertain headers + rollback = append(rollback, unknown...) + if len(rollback) > minCheckedHeaders { + rollback = append(rollback[:0], rollback[len(rollback)-minCheckedHeaders:]...) 
+ } + if headers[len(headers)-1].Number.Uint64() >= pivot { + rollback = rollback[:0] + } } if d.mode == FullSync || d.mode == FastSync { inserts := d.queue.Schedule(headers, from) diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 0e60371b3..f01650ebd 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -152,7 +152,7 @@ func newTester(mode SyncMode) *downloadTester { tester.stateDb, _ = ethdb.NewMemDatabase() tester.downloader = New(mode, tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, tester.getBlock, tester.headHeader, tester.headBlock, tester.headFastBlock, tester.commitHeadBlock, tester.getTd, - tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.dropPeer) + tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.rollback, tester.dropPeer) return tester } @@ -272,6 +272,16 @@ func (dl *downloadTester) insertHeaders(headers []*types.Header, checkFreq int) dl.lock.Lock() defer dl.lock.Unlock() + // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anthing in case of errors + if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok { + return 0, errors.New("unknown parent") + } + for i := 1; i < len(headers); i++ { + if headers[i].ParentHash != headers[i-1].Hash() { + return i, errors.New("unknown parent") + } + } + // Do a full insert if pre-checks passed for i, header := range headers { if _, ok := dl.ownHeaders[header.Hash()]; ok { continue @@ -322,6 +332,22 @@ func (dl *downloadTester) insertReceipts(blocks types.Blocks, receipts []types.R return len(blocks), nil } +// rollback removes some recently added elements from the chain. +func (dl *downloadTester) rollback(hashes []common.Hash) { + dl.lock.Lock() + defer dl.lock.Unlock() + + for i := len(hashes) - 1; i >= 0; i-- { + if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] { + dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1] + } + delete(dl.ownChainTd, hashes[i]) + delete(dl.ownHeaders, hashes[i]) + delete(dl.ownReceipts, hashes[i]) + delete(dl.ownBlocks, hashes[i]) + } +} + // newPeer registers a new block download source into the downloader. 
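The testing rollback above exercises the safety net kept during header sync; on the downloader side the bookkeeping is essentially a sliding window over headers that were accepted without full verification (a minimal sketch with assumed names, hashes abbreviated to strings):

// trackUncertain appends newly accepted-but-unverified header hashes and keeps
// only the most recent safetyNet of them, bounding how far a later
// verification failure can force the local chain to unwind.
func trackUncertain(window, unknown []string, safetyNet int) []string {
	window = append(window, unknown...)
	if len(window) > safetyNet {
		window = window[len(window)-safetyNet:]
	}
	return window
}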
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error { return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0) @@ -1031,6 +1057,56 @@ func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { assertOwnChain(t, tester, targetBlocks+1) } +// Tests that upon detecting an invalid header, the recent ones are rolled back +func TestInvalidHeaderRollback63Fast(t *testing.T) { testInvalidHeaderRollback(t, 63, FastSync) } +func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) } +func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) } + +func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) { + // Create a small enough block chain to download + targetBlocks := 3*minCheckedHeaders + minFullBlocks + hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) + + tester := newTester(mode) + + // Attempt to sync with an attacker that feeds junk during the fast sync phase + tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts) + missing := minCheckedHeaders + MaxHeaderFetch + 1 + delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) + + if err := tester.sync("fast-attack", nil); err == nil { + t.Fatalf("succeeded fast attacker synchronisation") + } + if head := tester.headHeader().Number.Int64(); int(head) > MaxHeaderFetch { + t.Fatalf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch) + } + // Attempt to sync with an attacker that feeds junk during the block import phase + tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts) + missing = 3*minCheckedHeaders + MaxHeaderFetch + 1 + delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing]) + + if err := tester.sync("block-attack", nil); err == nil { + t.Fatalf("succeeded block attacker synchronisation") + } + if mode == FastSync { + // Fast sync should not discard anything below the verified pivot point + if head := tester.headHeader().Number.Int64(); int(head) < 3*minCheckedHeaders { + t.Fatalf("rollback head mismatch: have %v, want at least %v", head, 3*minCheckedHeaders) + } + } else if mode == LightSync { + // Light sync should still discard data as before + if head := tester.headHeader().Number.Int64(); int(head) > 3*minCheckedHeaders { + t.Fatalf("rollback head mismatch: have %v, want at most %v", head, 3*minCheckedHeaders) + } + } + // Synchronise with the valid peer and make sure sync succeeds + tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) + if err := tester.sync("valid", nil); err != nil { + t.Fatalf("failed to synchronise blocks: %v", err) + } + assertOwnChain(t, tester, targetBlocks+1) +} + // Tests that if a peer sends an invalid block piece (body or receipt) for a // requested block, it gets dropped immediately by the downloader. func TestInvalidContentAttack62(t *testing.T) { testInvalidContentAttack(t, 62, FullSync) } diff --git a/eth/downloader/types.go b/eth/downloader/types.go index 60d9a2b12..5937be606 100644 --- a/eth/downloader/types.go +++ b/eth/downloader/types.go @@ -60,6 +60,9 @@ type blockChainInsertFn func(types.Blocks) (int, error) // receiptChainInsertFn is a callback type to insert a batch of receipts into the local chain. 
type receiptChainInsertFn func(types.Blocks, []types.Receipts) (int, error) +// chainRollbackFn is a callback type to remove a few recently added elements from the local chain. +type chainRollbackFn func([]common.Hash) + // peerDropFn is a callback type for dropping a peer detected as malicious. type peerDropFn func(id string) diff --git a/eth/handler.go b/eth/handler.go index b0916d50b..40a578842 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -131,7 +131,7 @@ func NewProtocolManager(mode Mode, networkId int, mux *event.TypeMux, txpool txP } manager.downloader = downloader.New(syncMode, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, - blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, manager.removePeer) + blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback, manager.removePeer) validator := func(block *types.Block, parent *types.Block) error { return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false) -- cgit v1.2.3 From aa0538db0b5de2bb2c609d629b65d083649f9171 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 9 Oct 2015 18:36:31 +0300 Subject: eth: clean out light node notions from eth --- eth/backend.go | 4 ++-- eth/downloader/queue.go | 10 ++++++---- eth/handler.go | 17 ++++++----------- eth/handler_test.go | 27 +++++++++++---------------- eth/helper_test.go | 8 ++++---- eth/protocol.go | 27 ++------------------------- eth/protocol_test.go | 9 +++------ 7 files changed, 34 insertions(+), 68 deletions(-) (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index f4acc76cb..0a3791783 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -88,8 +88,8 @@ type Config struct { GenesisNonce int GenesisFile string GenesisBlock *types.Block // used by block tests + FastSync bool Olympic bool - Mode Mode BlockChainVersion int SkipBcVersionCheck bool // e.g. 
blockchain export @@ -399,7 +399,7 @@ func New(config *Config) (*Ethereum, error) { eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.blockchain, eth.EventMux()) eth.blockchain.SetProcessor(eth.blockProcessor) - if eth.protocolManager, err = NewProtocolManager(config.Mode, config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil { + if eth.protocolManager, err = NewProtocolManager(config.FastSync, config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil { return nil, err } eth.miner = miner.New(eth, eth.EventMux(), eth.pow) diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index bb8d892cd..17fbb1c7f 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -422,10 +422,12 @@ func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest { q.stateSchedLock.Lock() defer q.stateSchedLock.Unlock() - for _, hash := range q.stateScheduler.Missing(max) { - q.stateTaskPool[hash] = q.stateTaskIndex - q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex)) - q.stateTaskIndex++ + if q.stateScheduler != nil { + for _, hash := range q.stateScheduler.Missing(max) { + q.stateTaskPool[hash] = q.stateTaskIndex + q.stateTaskQueue.Push(hash, -float32(q.stateTaskIndex)) + q.stateTaskIndex++ + } } } return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, count) diff --git a/eth/handler.go b/eth/handler.go index 40a578842..725178035 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -55,7 +55,7 @@ type hashFetcherFn func(common.Hash) error type blockFetcherFn func([]common.Hash) error type ProtocolManager struct { - mode Mode + fastSync bool txpool txPool blockchain *core.BlockChain chaindb ethdb.Database @@ -83,10 +83,10 @@ type ProtocolManager struct { // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // with the ethereum network. 
-func NewProtocolManager(mode Mode, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) { +func NewProtocolManager(fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) { // Create the protocol manager with the base fields manager := &ProtocolManager{ - mode: mode, + fastSync: fastSync, eventMux: mux, txpool: txpool, blockchain: blockchain, @@ -100,7 +100,7 @@ func NewProtocolManager(mode Mode, networkId int, mux *event.TypeMux, txpool txP manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions)) for i, version := range ProtocolVersions { // Skip protocol version if incompatible with the mode of operation - if minimumProtocolVersion[mode] > version { + if fastSync && version < eth63 { continue } // Compatible, initialize the sub-protocol @@ -120,14 +120,9 @@ func NewProtocolManager(mode Mode, networkId int, mux *event.TypeMux, txpool txP return nil, errIncompatibleConfig } // Construct the different synchronisation mechanisms - var syncMode downloader.SyncMode - switch mode { - case ArchiveMode: - syncMode = downloader.FullSync - case FullMode: + syncMode := downloader.FullSync + if fastSync { syncMode = downloader.FastSync - case LightMode: - syncMode = downloader.LightSync } manager.downloader = downloader.New(syncMode, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, diff --git a/eth/handler_test.go b/eth/handler_test.go index 5ddfc4a8f..843b02fd4 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -22,12 +22,11 @@ func TestProtocolCompatibility(t *testing.T) { // Define the compatibility chart tests := []struct { version uint - mode Mode + fastSync bool compatible bool }{ - {61, ArchiveMode, true}, {62, ArchiveMode, true}, {63, ArchiveMode, true}, {64, ArchiveMode, true}, - {61, FullMode, false}, {62, FullMode, false}, {63, FullMode, true}, {64, FullMode, true}, - {61, LightMode, false}, {62, LightMode, false}, {63, LightMode, false}, {64, LightMode, true}, + {61, false, true}, {62, false, true}, {63, false, true}, + {61, true, false}, {62, true, false}, {63, true, true}, } // Make sure anything we screw up is restored backup := ProtocolVersions @@ -37,7 +36,7 @@ func TestProtocolCompatibility(t *testing.T) { for i, tt := range tests { ProtocolVersions = []uint{tt.version} - pm, err := newTestProtocolManager(tt.mode, 0, nil, nil) + pm, err := newTestProtocolManager(tt.fastSync, 0, nil, nil) if pm != nil { defer pm.Stop() } @@ -52,7 +51,7 @@ func TestProtocolCompatibility(t *testing.T) { func TestGetBlockHashes61(t *testing.T) { testGetBlockHashes(t, 61) } func testGetBlockHashes(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, ArchiveMode, downloader.MaxHashFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -95,7 +94,7 @@ func testGetBlockHashes(t *testing.T, protocol int) { func TestGetBlockHashesFromNumber61(t *testing.T) { testGetBlockHashesFromNumber(t, 61) } func testGetBlockHashesFromNumber(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, ArchiveMode, downloader.MaxHashFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, false, 
downloader.MaxHashFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -135,7 +134,7 @@ func testGetBlockHashesFromNumber(t *testing.T, protocol int) { func TestGetBlocks61(t *testing.T) { testGetBlocks(t, 61) } func testGetBlocks(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, ArchiveMode, downloader.MaxHashFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -204,10 +203,9 @@ func testGetBlocks(t *testing.T, protocol int) { // Tests that block headers can be retrieved from a remote chain based on user queries. func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) } func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) } -func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) } func testGetBlockHeaders(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, ArchiveMode, downloader.MaxHashFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -330,10 +328,9 @@ func testGetBlockHeaders(t *testing.T, protocol int) { // Tests that block contents can be retrieved from a remote chain based on their hashes. func TestGetBlockBodies62(t *testing.T) { testGetBlockBodies(t, 62) } func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) } -func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) } func testGetBlockBodies(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, ArchiveMode, downloader.MaxBlockFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, false, downloader.MaxBlockFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -402,7 +399,6 @@ func testGetBlockBodies(t *testing.T, protocol int) { // Tests that the node state database can be retrieved based on hashes. func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) } -func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) } func testGetNodeData(t *testing.T, protocol int) { // Define three accounts to simulate transactions with @@ -440,7 +436,7 @@ func testGetNodeData(t *testing.T, protocol int) { } } // Assemble the test environment - pm := newTestProtocolManagerMust(t, ArchiveMode, 4, generator, nil) + pm := newTestProtocolManagerMust(t, false, 4, generator, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -492,7 +488,6 @@ func testGetNodeData(t *testing.T, protocol int) { // Tests that the transaction receipts can be retrieved based on hashes. 
func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) } -func TestGetReceipt64(t *testing.T) { testGetReceipt(t, 64) } func testGetReceipt(t *testing.T, protocol int) { // Define three accounts to simulate transactions with @@ -530,7 +525,7 @@ func testGetReceipt(t *testing.T, protocol int) { } } // Assemble the test environment - pm := newTestProtocolManagerMust(t, ArchiveMode, 4, generator, nil) + pm := newTestProtocolManagerMust(t, false, 4, generator, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() diff --git a/eth/helper_test.go b/eth/helper_test.go index ede0e3f15..16907be8b 100644 --- a/eth/helper_test.go +++ b/eth/helper_test.go @@ -28,7 +28,7 @@ var ( // newTestProtocolManager creates a new protocol manager for testing purposes, // with the given number of blocks already known, and potential notification // channels for different events. -func newTestProtocolManager(mode Mode, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) { +func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) { var ( evmux = new(event.TypeMux) pow = new(core.FakePow) @@ -42,7 +42,7 @@ func newTestProtocolManager(mode Mode, blocks int, generator func(int, *core.Blo if _, err := blockchain.InsertChain(chain); err != nil { panic(err) } - pm, err := NewProtocolManager(mode, NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db) + pm, err := NewProtocolManager(fastSync, NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db) if err != nil { return nil, err } @@ -54,8 +54,8 @@ func newTestProtocolManager(mode Mode, blocks int, generator func(int, *core.Blo // with the given number of blocks already known, and potential notification // channels for different events. In case of an error, the constructor force- // fails the test. -func newTestProtocolManagerMust(t *testing.T, mode Mode, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager { - pm, err := newTestProtocolManager(mode, blocks, generator, newtx) +func newTestProtocolManagerMust(t *testing.T, fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager { + pm, err := newTestProtocolManager(fastSync, blocks, generator, newtx) if err != nil { t.Fatalf("Failed to create protocol manager: %v", err) } diff --git a/eth/protocol.go b/eth/protocol.go index f2b98a8b1..410347ed3 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -26,36 +26,18 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -// Mode represents the mode of operation of the eth client. -type Mode int - -const ( - ArchiveMode Mode = iota // Maintain the entire blockchain history - FullMode // Maintain only a recent view of the blockchain - LightMode // Don't maintain any history, rather fetch on demand -) - // Constants to match up protocol versions and messages const ( eth61 = 61 eth62 = 62 eth63 = 63 - eth64 = 64 ) -// minimumProtocolVersion is the minimum version of the protocol eth must run to -// support the desired mode of operation. -var minimumProtocolVersion = map[Mode]uint{ - ArchiveMode: eth61, - FullMode: eth63, - LightMode: eth64, -} - // Supported versions of the eth protocol (first is primary). 
-var ProtocolVersions = []uint{eth64, eth63, eth62, eth61} +var ProtocolVersions = []uint{eth63, eth62, eth61} // Number of implemented message corresponding to different protocol versions. -var ProtocolLengths = []uint64{19, 17, 8, 9} +var ProtocolLengths = []uint64{17, 8, 9} const ( NetworkId = 1 @@ -90,11 +72,6 @@ const ( NodeDataMsg = 0x0e GetReceiptsMsg = 0x0f ReceiptsMsg = 0x10 - - // Protocol messages belonging to eth/64 - GetAcctProofMsg = 0x11 - GetStorageDataProof = 0x12 - Proof = 0x13 ) type errCode int diff --git a/eth/protocol_test.go b/eth/protocol_test.go index bac519ae3..372c7e203 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -41,10 +41,9 @@ var testAccount = crypto.NewKey(rand.Reader) func TestStatusMsgErrors61(t *testing.T) { testStatusMsgErrors(t, 61) } func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) } func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) } -func TestStatusMsgErrors64(t *testing.T) { testStatusMsgErrors(t, 64) } func testStatusMsgErrors(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, ArchiveMode, 0, nil, nil) + pm := newTestProtocolManagerMust(t, false, 0, nil, nil) td, currentBlock, genesis := pm.blockchain.Status() defer pm.Stop() @@ -95,11 +94,10 @@ func testStatusMsgErrors(t *testing.T, protocol int) { func TestRecvTransactions61(t *testing.T) { testRecvTransactions(t, 61) } func TestRecvTransactions62(t *testing.T) { testRecvTransactions(t, 62) } func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) } -func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) } func testRecvTransactions(t *testing.T, protocol int) { txAdded := make(chan []*types.Transaction) - pm := newTestProtocolManagerMust(t, ArchiveMode, 0, nil, txAdded) + pm := newTestProtocolManagerMust(t, false, 0, nil, txAdded) p, _ := newTestPeer("peer", protocol, pm, true) defer pm.Stop() defer p.close() @@ -124,10 +122,9 @@ func testRecvTransactions(t *testing.T, protocol int) { func TestSendTransactions61(t *testing.T) { testSendTransactions(t, 61) } func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) } func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) } -func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) } func testSendTransactions(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, ArchiveMode, 0, nil, nil) + pm := newTestProtocolManagerMust(t, false, 0, nil, nil) defer pm.Stop() // Fill the pool with big transactions. 
-- cgit v1.2.3 From 5b0ee8ec304663898073b7a4c659e1def23716df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 13 Oct 2015 12:04:25 +0300 Subject: core, eth, trie: fix data races and merge/review issues --- eth/backend.go | 1 - eth/backend_test.go | 10 +- eth/downloader/downloader.go | 191 ++++++++++------ eth/downloader/downloader_test.go | 464 +++++++++++++++++++++----------------- eth/downloader/modes.go | 4 +- eth/downloader/peer.go | 28 ++- eth/downloader/queue.go | 178 ++++++++++----- eth/fetcher/fetcher.go | 26 ++- eth/fetcher/fetcher_test.go | 49 +++- eth/filters/filter_test.go | 33 ++- eth/handler.go | 17 +- eth/handler_test.go | 4 +- eth/metrics.go | 2 +- eth/sync.go | 18 +- eth/sync_test.go | 53 +++++ 15 files changed, 681 insertions(+), 397 deletions(-) create mode 100644 eth/sync_test.go (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index 0a3791783..a4f656ecd 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -391,7 +391,6 @@ func New(config *Config) (*Ethereum, error) { if err == core.ErrNoGenesis { return nil, fmt.Errorf(`Genesis block not found. Please supply a genesis block with the "--genesis /path/to/file" argument`) } - return nil, err } newPool := core.NewTxPool(eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit) diff --git a/eth/backend_test.go b/eth/backend_test.go index 220426c17..0379fc843 100644 --- a/eth/backend_test.go +++ b/eth/backend_test.go @@ -16,17 +16,17 @@ func TestMipmapUpgrade(t *testing.T) { addr := common.BytesToAddress([]byte("jeff")) genesis := core.WriteGenesisBlockForTesting(db) - chain := core.GenerateChain(genesis, db, 10, func(i int, gen *core.BlockGen) { + chain, receipts := core.GenerateChain(genesis, db, 10, func(i int, gen *core.BlockGen) { var receipts types.Receipts switch i { case 1: receipt := types.NewReceipt(nil, new(big.Int)) - receipt.SetLogs(vm.Logs{&vm.Log{Address: addr}}) + receipt.Logs = vm.Logs{&vm.Log{Address: addr}} gen.AddUncheckedReceipt(receipt) receipts = types.Receipts{receipt} case 2: receipt := types.NewReceipt(nil, new(big.Int)) - receipt.SetLogs(vm.Logs{&vm.Log{Address: addr}}) + receipt.Logs = vm.Logs{&vm.Log{Address: addr}} gen.AddUncheckedReceipt(receipt) receipts = types.Receipts{receipt} } @@ -37,7 +37,7 @@ func TestMipmapUpgrade(t *testing.T) { t.Fatal(err) } }) - for _, block := range chain { + for i, block := range chain { core.WriteBlock(db, block) if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { t.Fatalf("failed to insert block number: %v", err) @@ -45,7 +45,7 @@ func TestMipmapUpgrade(t *testing.T) { if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { t.Fatalf("failed to insert block number: %v", err) } - if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil { + if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil { t.Fatal("error writing block receipts:", err) } } diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 0298dfa0b..4bcbd8557 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -18,7 +18,9 @@ package downloader import ( + "crypto/rand" "errors" + "fmt" "math" "math/big" "strings" @@ -59,9 +61,11 @@ var ( maxQueuedStates = 256 * 1024 // [eth/63] Maximum number of state requests to queue (DOS protection) maxResultsProcess = 256 // Number of download results to import at once into the chain - headerCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync - 
minCheckedHeaders = 2048 // Number of headers to verify fully when approaching the chain head - minFullBlocks = 1024 // Number of blocks to retrieve fully even in fast sync + fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync + fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected + fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it + fsPivotInterval = 512 // Number of headers out of which to randomize the pivot point + fsMinFullBlocks = 1024 // Number of blocks to retrieve fully even in fast sync ) var ( @@ -85,12 +89,14 @@ var ( errCancelHeaderFetch = errors.New("block header download canceled (requested)") errCancelBodyFetch = errors.New("block body download canceled (requested)") errCancelReceiptFetch = errors.New("receipt download canceled (requested)") + errCancelStateFetch = errors.New("state data download canceled (requested)") errNoSyncActive = errors.New("no sync active") ) type Downloader struct { - mode SyncMode // Synchronisation mode defining the strategies used - mux *event.TypeMux // Event multiplexer to announce sync operation events + mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle) + noFast bool // Flag to disable fast syncing in case of a security error + mux *event.TypeMux // Event multiplexer to announce sync operation events queue *queue // Scheduler for selecting the hashes to download peers *peerSet // Set of active peers from which download can proceed @@ -150,13 +156,13 @@ type Downloader struct { } // New creates a new downloader to fetch hashes and blocks from remote peers. -func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock blockCheckFn, getHeader headerRetrievalFn, +func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock blockCheckFn, getHeader headerRetrievalFn, getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, headFastBlock headFastBlockRetrievalFn, commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader { return &Downloader{ - mode: mode, + mode: FullSync, mux: mux, queue: newQueue(stateDb), peers: newPeerSet(), @@ -188,19 +194,28 @@ func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, hasHeader he } } -// Boundaries retrieves the synchronisation boundaries, specifically the origin -// block where synchronisation started at (may have failed/suspended) and the -// latest known block which the synchonisation targets. -func (d *Downloader) Boundaries() (uint64, uint64) { +// Progress retrieves the synchronisation boundaries, specifically the origin +// block where synchronisation started at (may have failed/suspended); the block +// or header sync is currently at; and the latest known block which the sync targets. 
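As a usage sketch for the Progress method that follows (the helper name and percentage semantics are assumptions, not part of the change), a caller such as an eth_syncing-style API could reduce the triple to a completion figure:

// syncPercent turns the (origin, current, height) report into a percentage,
// guarding the degenerate case where the target equals the starting point.
func syncPercent(origin, current, height uint64) float64 {
	if height <= origin {
		return 100
	}
	if current < origin {
		current = origin
	}
	return 100 * float64(current-origin) / float64(height-origin)
}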
+func (d *Downloader) Progress() (uint64, uint64, uint64) { d.syncStatsLock.RLock() defer d.syncStatsLock.RUnlock() - return d.syncStatsChainOrigin, d.syncStatsChainHeight + current := uint64(0) + switch d.mode { + case FullSync: + current = d.headBlock().NumberU64() + case FastSync: + current = d.headFastBlock().NumberU64() + case LightSync: + current = d.headHeader().Number.Uint64() + } + return d.syncStatsChainOrigin, current, d.syncStatsChainHeight } // Synchronising returns whether the downloader is currently retrieving blocks. func (d *Downloader) Synchronising() bool { - return atomic.LoadInt32(&d.synchronising) > 0 + return atomic.LoadInt32(&d.synchronising) > 0 || atomic.LoadInt32(&d.processing) > 0 } // RegisterPeer injects a new download peer into the set of block source to be @@ -233,10 +248,10 @@ func (d *Downloader) UnregisterPeer(id string) error { // Synchronise tries to sync up our local block chain with a remote peer, both // adding various sanity checks as well as wrapping it with various log entries. -func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int) { +func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) { glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td) - switch err := d.synchronise(id, head, td); err { + switch err := d.synchronise(id, head, td, mode); err { case nil: glog.V(logger.Detail).Infof("Synchronisation completed") @@ -258,7 +273,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int) { // synchronise will select the peer and use it for synchronising. If an empty string is given // it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the // checks fail an error will be returned. This method is synchronous -func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int) error { +func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error { // Mock out the synchonisation if testing if d.synchroniseMock != nil { return d.synchroniseMock(id, hash) @@ -298,6 +313,11 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int) error d.cancelCh = make(chan struct{}) d.cancelLock.Unlock() + // Set the requested sync mode, unless it's forbidden + d.mode = mode + if d.mode == FastSync && d.noFast { + d.mode = FullSync + } // Retrieve the origin peer and initiate the downloading process p := d.peers.Peer(id) if p == nil { @@ -306,13 +326,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int) error return d.syncWithPeer(p, hash, td) } -/* -// Has checks if the downloader knows about a particular hash, meaning that its -// either already downloaded of pending retrieval. -func (d *Downloader) Has(hash common.Hash) bool { - return d.queue.Has(hash) -} -*/ // syncWithPeer starts a block synchronization based on the hash chain from the // specified peer and head hash. 
func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err error) { @@ -387,8 +400,28 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e // Initiate the sync using a concurrent header and content retrieval algorithm pivot := uint64(0) - if latest > uint64(minFullBlocks) { - pivot = latest - uint64(minFullBlocks) + switch d.mode { + case LightSync: + pivot = latest + + case FastSync: + // Calculate the new fast/slow sync pivot point + pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval))) + if err != nil { + panic(fmt.Sprintf("Failed to access crypto random source: %v", err)) + } + if latest > uint64(fsMinFullBlocks)+pivotOffset.Uint64() { + pivot = latest - uint64(fsMinFullBlocks) - pivotOffset.Uint64() + } + // If the point is below the origin, move origin back to ensure state download + if pivot < origin { + if pivot > 0 { + origin = pivot - 1 + } else { + origin = 0 + } + } + glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot) } d.queue.Prepare(origin+1, d.mode, pivot) @@ -396,10 +429,10 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e d.syncInitHook(origin, latest) } errc := make(chan error, 4) - go func() { errc <- d.fetchHeaders(p, td, origin+1, latest) }() // Headers are always retrieved - go func() { errc <- d.fetchBodies(origin + 1) }() // Bodies are retrieved during normal and fast sync - go func() { errc <- d.fetchReceipts(origin + 1) }() // Receipts are retrieved during fast sync - go func() { errc <- d.fetchNodeData() }() // Node state data is retrieved during fast sync + go func() { errc <- d.fetchHeaders(p, td, origin+1) }() // Headers are always retrieved + go func() { errc <- d.fetchBodies(origin + 1) }() // Bodies are retrieved during normal and fast sync + go func() { errc <- d.fetchReceipts(origin + 1) }() // Receipts are retrieved during fast sync + go func() { errc <- d.fetchNodeData() }() // Node state data is retrieved during fast sync // If any fetcher fails, cancel the others var fail error @@ -844,7 +877,7 @@ func (d *Downloader) fetchBlocks61(from uint64) error { for _, peer := range idles { // Short circuit if throttling activated - if d.queue.ThrottleBlocks() { + if d.queue.ShouldThrottleBlocks() { throttled = true break } @@ -860,8 +893,13 @@ func (d *Downloader) fetchBlocks61(from uint64) error { } // Fetch the chunk and make sure any errors return the hashes to the queue if err := peer.Fetch61(request); err != nil { - glog.V(logger.Error).Infof("%v: fetch failed, rescheduling", peer) - d.queue.CancelBlocks(request) + // Although we could try and make an attempt to fix this, this error really + // means that we've double allocated a fetch task to a peer. If that is the + // case, the internal state of the downloader and the queue is very wrong so + // better hard crash and note the error instead of silently accumulating into + // a much bigger issue. + panic(fmt.Sprintf("%v: fetch assignment failed, hard panic", peer)) + d.queue.CancelBlocks(request) // noop for now } } // Make sure that we have peers available for fetching. If all peers have been tried @@ -1051,28 +1089,34 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) { // // The queue parameter can be used to switch between queuing headers for block // body download too, or directly import as pure header chains. 
-func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from, latest uint64) error { +func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { glog.V(logger.Debug).Infof("%v: downloading headers from #%d", p, from) defer glog.V(logger.Debug).Infof("%v: header download terminated", p) + // Calculate the pivoting point for switching from fast to slow sync + pivot := d.queue.FastSyncPivot() + // Keep a count of uncertain headers to roll back rollback := []*types.Header{} defer func() { if len(rollback) > 0 { + // Flatten the headers and roll them back hashes := make([]common.Hash, len(rollback)) for i, header := range rollback { hashes[i] = header.Hash() } + lh, lfb, lb := d.headHeader().Number, d.headFastBlock().Number(), d.headBlock().Number() d.rollback(hashes) + glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)", + len(hashes), lh, d.headHeader().Number, lfb, d.headFastBlock().Number(), lb, d.headBlock().Number()) + + // If we're already past the pivot point, this could be an attack, disable fast sync + if rollback[len(rollback)-1].Number.Uint64() > pivot { + d.noFast = true + } } }() - // Calculate the pivoting point for switching from fast to slow sync - pivot := uint64(0) - if d.mode == FastSync && latest > uint64(minFullBlocks) { - pivot = latest - uint64(minFullBlocks) - } else if d.mode == LightSync { - pivot = latest - } + // Create a timeout timer, and the associated hash fetcher request := time.Now() // time of the last fetch request timeout := time.NewTimer(0) // timer to dump a non-responsive active peer @@ -1135,6 +1179,19 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from, latest uint64) err if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 { return errStallingPeer } + // If fast or light syncing, ensure promised headers are indeed delivered. This is + // needed to detect scenarios where an attacker feeds a bad pivot and then bails out + // of delivering the post-pivot blocks that would flag the invalid content. + // + // This check cannot be executed "as is" for full imports, since blocks may still be + // queued for processing when the header download completes. However, as long as the + // peer gave us something useful, we're already happy/progressed (above check). + if d.mode == FastSync || d.mode == LightSync { + if td.Cmp(d.getTd(d.headHeader().Hash())) > 0 { + return errStallingPeer + } + } + rollback = nil return nil } gotHeaders = true @@ -1152,8 +1209,8 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from, latest uint64) err } } // If we're importing pure headers, verify based on their recentness - frequency := headerCheckFrequency - if headers[len(headers)-1].Number.Uint64()+uint64(minCheckedHeaders) > pivot { + frequency := fsHeaderCheckFrequency + if headers[len(headers)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { frequency = 1 } if n, err := d.insertHeaders(headers, frequency); err != nil { @@ -1162,11 +1219,8 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from, latest uint64) err } // All verifications passed, store newly found uncertain headers rollback = append(rollback, unknown...) - if len(rollback) > minCheckedHeaders { - rollback = append(rollback[:0], rollback[len(rollback)-minCheckedHeaders:]...) - } - if headers[len(headers)-1].Number.Uint64() >= pivot { - rollback = rollback[:0] + if len(rollback) > fsHeaderSafetyNet { + rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) 
} } if d.mode == FullSync || d.mode == FastSync { @@ -1230,12 +1284,11 @@ func (d *Downloader) fetchBodies(from uint64) error { expire = func() []string { return d.queue.ExpireBodies(bodyHardTTL) } fetch = func(p *peer, req *fetchRequest) error { return p.FetchBodies(req) } capacity = func(p *peer) int { return p.BlockCapacity() } - getIdles = func() ([]*peer, int) { return d.peers.BodyIdlePeers() } - setIdle = func(p *peer) { p.SetBlocksIdle() } + setIdle = func(p *peer) { p.SetBodiesIdle() } ) err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, - d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ThrottleBlocks, d.queue.ReserveBodies, - d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, getIdles, setIdle, "Body") + d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies, + d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body") glog.V(logger.Debug).Infof("Block body download terminated: %v", err) return err @@ -1252,13 +1305,13 @@ func (d *Downloader) fetchReceipts(from uint64) error { pack := packet.(*receiptPack) return d.queue.DeliverReceipts(pack.peerId, pack.receipts) } - expire = func() []string { return d.queue.ExpireReceipts(bodyHardTTL) } + expire = func() []string { return d.queue.ExpireReceipts(receiptHardTTL) } fetch = func(p *peer, req *fetchRequest) error { return p.FetchReceipts(req) } capacity = func(p *peer) int { return p.ReceiptCapacity() } setIdle = func(p *peer) { p.SetReceiptsIdle() } ) err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, - d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ThrottleReceipts, d.queue.ReserveReceipts, + d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts, d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt") glog.V(logger.Debug).Infof("Receipt download terminated: %v", err) @@ -1307,9 +1360,9 @@ func (d *Downloader) fetchNodeData() error { capacity = func(p *peer) int { return p.NodeDataCapacity() } setIdle = func(p *peer) { p.SetNodeDataIdle() } ) - err := d.fetchParts(errCancelReceiptFetch, d.stateCh, deliver, d.stateWakeCh, expire, + err := d.fetchParts(errCancelStateFetch, d.stateCh, deliver, d.stateWakeCh, expire, d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch, - d.queue.CancelNodeData, capacity, d.peers.ReceiptIdlePeers, setIdle, "State") + d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State") glog.V(logger.Debug).Infof("Node state data download terminated: %v", err) return err @@ -1323,7 +1376,7 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int, idle func() ([]*peer, int), setIdle func(*peer), kind string) error { - // Create a ticker to detect expired retreival tasks + // Create a ticker to detect expired retrieval tasks ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() @@ -1366,11 +1419,6 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv // The hash chain is invalid (blocks are not ordered properly), abort return err - case errInvalidBody, errInvalidReceipt: - // The peer delivered something very bad, drop immediately - glog.V(logger.Error).Infof("%s: delivered invalid %s, dropping", peer, 
strings.ToLower(kind)) - d.dropPeer(peer.id) - case errNoFetchesPending: // Peer probably timed out with its delivery but came through // in the end, demote, but allow to to pull from this peer. @@ -1475,8 +1523,13 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv fetchHook(request.Headers) } if err := fetch(peer, request); err != nil { - glog.V(logger.Error).Infof("%v: %s fetch failed, rescheduling", peer, strings.ToLower(kind)) - cancel(request) + // Although we could try and make an attempt to fix this, this error really + // means that we've double allocated a fetch task to a peer. If that is the + // case, the internal state of the downloader and the queue is very wrong so + // better hard crash and note the error instead of silently accumulating into + // a much bigger issue. + panic(fmt.Sprintf("%v: %s fetch assignment failed, hard panic", peer, strings.ToLower(kind))) + cancel(request) // noop for now } running = true } @@ -1526,6 +1579,7 @@ func (d *Downloader) process() { // Repeat the processing as long as there are results to process for { // Fetch the next batch of results + pivot := d.queue.FastSyncPivot() // Fetch pivot before results to prevent reset race results := d.queue.TakeResults() if len(results) == 0 { return @@ -1545,7 +1599,6 @@ func (d *Downloader) process() { } // Retrieve the a batch of results to import var ( - headers = make([]*types.Header, 0, maxResultsProcess) blocks = make([]*types.Block, 0, maxResultsProcess) receipts = make([]types.Receipts, 0, maxResultsProcess) ) @@ -1556,11 +1609,9 @@ func (d *Downloader) process() { blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)) case d.mode == FastSync: blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)) - if result.Header.Number.Uint64() <= d.queue.fastSyncPivot { + if result.Header.Number.Uint64() <= pivot { receipts = append(receipts, result.Receipts) } - case d.mode == LightSync: - headers = append(headers, result.Header) } } // Try to process the results, aborting if there's an error @@ -1569,12 +1620,10 @@ func (d *Downloader) process() { index int ) switch { - case len(headers) > 0: - index, err = d.insertHeaders(headers, headerCheckFrequency) - case len(receipts) > 0: index, err = d.insertReceipts(blocks, receipts) - if err == nil && blocks[len(blocks)-1].NumberU64() == d.queue.fastSyncPivot { + if err == nil && blocks[len(blocks)-1].NumberU64() == pivot { + glog.V(logger.Debug).Infof("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4]) index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash()) } default: diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index f01650ebd..ef6f74a6b 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -136,7 +136,7 @@ type downloadTester struct { } // newTester creates a new downloader test mocker. 
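Stepping back to the reworked process loop above: during fast sync only results at or below the pivot keep their receipts, everything above the pivot is imported as a regular full block, and reaching the pivot triggers commitHeadBlock. The rough sketch below shows that split with plain stubs rather than the downloader's fetchResult; note the real loop never mixes the two sides in one batch, since TakeResults cuts the batch right after the pivot.

package sketch

// resultStub stands in for the downloader's fetchResult; only the block
// number matters for this illustration.
type resultStub struct {
	Number uint64
}

// partitionFastSync splits completed results the way the process loop above
// treats them: results at or below the pivot are fast-imported together with
// their receipts, results above it go through normal full-block processing,
// and the caller learns whether the pivot itself was reached so it can be
// committed as the new chain head.
func partitionFastSync(results []resultStub, pivot uint64) (fastImport, fullImport []resultStub, pivotReached bool) {
	for _, r := range results {
		if r.Number <= pivot {
			fastImport = append(fastImport, r)
			if r.Number == pivot {
				pivotReached = true
			}
		} else {
			fullImport = append(fullImport, r)
		}
	}
	return
}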
-func newTester(mode SyncMode) *downloadTester { +func newTester() *downloadTester { tester := &downloadTester{ ownHashes: []common.Hash{genesis.Hash()}, ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()}, @@ -150,7 +150,7 @@ func newTester(mode SyncMode) *downloadTester { peerChainTds: make(map[string]map[common.Hash]*big.Int), } tester.stateDb, _ = ethdb.NewMemDatabase() - tester.downloader = New(mode, tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, + tester.downloader = New(tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, tester.getBlock, tester.headHeader, tester.headBlock, tester.headFastBlock, tester.commitHeadBlock, tester.getTd, tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.rollback, tester.dropPeer) @@ -158,7 +158,7 @@ func newTester(mode SyncMode) *downloadTester { } // sync starts synchronizing with a remote peer, blocking until it completes. -func (dl *downloadTester) sync(id string, td *big.Int) error { +func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { dl.lock.RLock() hash := dl.peerHashes[id][0] // If no particular TD was requested, load from the peer's blockchain @@ -170,7 +170,7 @@ func (dl *downloadTester) sync(id string, td *big.Int) error { } dl.lock.RUnlock() - err := dl.downloader.synchronise(id, hash, td) + err := dl.downloader.synchronise(id, hash, td, mode) for { // If the queue is empty and processing stopped, break if dl.downloader.queue.Idle() && atomic.LoadInt32(&dl.downloader.processing) == 0 { @@ -214,7 +214,7 @@ func (dl *downloadTester) headHeader() *types.Header { defer dl.lock.RUnlock() for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if header := dl.getHeader(dl.ownHashes[i]); header != nil { + if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil { return header } } @@ -227,7 +227,7 @@ func (dl *downloadTester) headBlock() *types.Block { defer dl.lock.RUnlock() for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if block := dl.getBlock(dl.ownHashes[i]); block != nil { + if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil { return block } @@ -242,7 +242,7 @@ func (dl *downloadTester) headFastBlock() *types.Block { defer dl.lock.RUnlock() for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if block := dl.getBlock(dl.ownHashes[i]); block != nil { + if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { return block } } @@ -291,7 +291,7 @@ func (dl *downloadTester) insertHeaders(headers []*types.Header, checkFreq int) } dl.ownHashes = append(dl.ownHashes, header.Hash()) dl.ownHeaders[header.Hash()] = header - dl.ownChainTd[header.Hash()] = dl.ownChainTd[header.ParentHash] + dl.ownChainTd[header.Hash()] = new(big.Int).Add(dl.ownChainTd[header.ParentHash], header.Difficulty) } return len(headers), nil } @@ -305,11 +305,13 @@ func (dl *downloadTester) insertBlocks(blocks types.Blocks) (int, error) { if _, ok := dl.ownBlocks[block.ParentHash()]; !ok { return i, errors.New("unknown parent") } - dl.ownHashes = append(dl.ownHashes, block.Hash()) - dl.ownHeaders[block.Hash()] = block.Header() + if _, ok := dl.ownHeaders[block.Hash()]; !ok { + dl.ownHashes = append(dl.ownHashes, block.Hash()) + dl.ownHeaders[block.Hash()] = block.Header() + } dl.ownBlocks[block.Hash()] = block - dl.stateDb.Put(block.Root().Bytes(), []byte{}) - dl.ownChainTd[block.Hash()] = dl.ownChainTd[block.ParentHash()] + dl.stateDb.Put(block.Root().Bytes(), []byte{0x00}) 
+ dl.ownChainTd[block.Hash()] = new(big.Int).Add(dl.ownChainTd[block.ParentHash()], block.Difficulty()) } return len(blocks), nil } @@ -381,7 +383,19 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha dl.peerReceipts[id] = make(map[common.Hash]types.Receipts) dl.peerChainTds[id] = make(map[common.Hash]*big.Int) - for _, hash := range hashes { + genesis := hashes[len(hashes)-1] + if header := headers[genesis]; header != nil { + dl.peerHeaders[id][genesis] = header + dl.peerChainTds[id][genesis] = header.Difficulty + } + if block := blocks[genesis]; block != nil { + dl.peerBlocks[id][genesis] = block + dl.peerChainTds[id][genesis] = block.Difficulty() + } + + for i := len(hashes) - 2; i >= 0; i-- { + hash := hashes[i] + if header, ok := headers[hash]; ok { dl.peerHeaders[id][hash] = header if _, ok := dl.peerHeaders[id][header.ParentHash]; ok { @@ -627,21 +641,28 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { // number of items of the various chain components. func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) { // Initialize the counters for the first fork - headers, blocks, receipts := lengths[0], lengths[0], lengths[0]-minFullBlocks - if receipts < 0 { - receipts = 1 + headers, blocks := lengths[0], lengths[0] + + minReceipts, maxReceipts := lengths[0]-fsMinFullBlocks-fsPivotInterval, lengths[0]-fsMinFullBlocks + if minReceipts < 0 { + minReceipts = 1 + } + if maxReceipts < 0 { + maxReceipts = 1 } // Update the counters for each subsequent fork for _, length := range lengths[1:] { headers += length - common blocks += length - common - receipts += length - common - minFullBlocks + + minReceipts += length - common - fsMinFullBlocks - fsPivotInterval + maxReceipts += length - common - fsMinFullBlocks } switch tester.downloader.mode { case FullSync: - receipts = 1 + minReceipts, maxReceipts = 1, 1 case LightSync: - blocks, receipts = 1, 1 + blocks, minReceipts, maxReceipts = 1, 1, 1 } if hs := len(tester.ownHeaders); hs != headers { t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) @@ -649,14 +670,20 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng if bs := len(tester.ownBlocks); bs != blocks { t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks) } - if rs := len(tester.ownReceipts); rs != receipts { - t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts) + if rs := len(tester.ownReceipts); rs < minReceipts || rs > maxReceipts { + t.Fatalf("synchronised receipts mismatch: have %v, want between [%v, %v]", rs, minReceipts, maxReceipts) } // Verify the state trie too for fast syncs if tester.downloader.mode == FastSync { - if index := lengths[len(lengths)-1] - minFullBlocks - 1; index > 0 { - if statedb := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, tester.stateDb); statedb == nil { - t.Fatalf("state reconstruction failed") + index := 0 + if pivot := int(tester.downloader.queue.fastSyncPivot); pivot < common { + index = pivot + } else { + index = len(tester.ownHashes) - lengths[len(lengths)-1] + int(tester.downloader.queue.fastSyncPivot) + } + if index > 0 { + if statedb, err := state.New(tester.ownHeaders[tester.ownHashes[index]].Root, tester.stateDb); statedb == nil || err != nil { + t.Fatalf("state reconstruction failed: %v", err) } } } @@ -678,11 +705,11 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) { targetBlocks := blockCacheLimit - 15 hashes, 
headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) - tester := newTester(mode) + tester := newTester() tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Synchronise with the peer and make sure all relevant data was retrieved - if err := tester.sync("peer", nil); err != nil { + if err := tester.sync("peer", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } assertOwnChain(t, tester, targetBlocks+1) @@ -702,7 +729,7 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) { targetBlocks := 8 * blockCacheLimit hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) - tester := newTester(mode) + tester := newTester() tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Wrap the importer to allow stepping @@ -714,7 +741,7 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) { // Start a synchronisation concurrently errc := make(chan error) go func() { - errc <- tester.sync("peer", nil) + errc <- tester.sync("peer", nil, mode) }() // Iteratively take some blocks, always checking the retrieval count for { @@ -726,10 +753,11 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) { break } // Wait a bit for sync to throttle itself - var cached int + var cached, frozen int for start := time.Now(); time.Since(start) < time.Second; { time.Sleep(25 * time.Millisecond) + tester.lock.RLock() tester.downloader.queue.lock.RLock() cached = len(tester.downloader.queue.blockDonePool) if mode == FastSync { @@ -739,16 +767,23 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) { } } } + frozen = int(atomic.LoadUint32(&blocked)) + retrieved = len(tester.ownBlocks) tester.downloader.queue.lock.RUnlock() + tester.lock.RUnlock() - if cached == blockCacheLimit || len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) == targetBlocks+1 { + if cached == blockCacheLimit || retrieved+cached+frozen == targetBlocks+1 { break } } // Make sure we filled up the cache, then exhaust it time.Sleep(25 * time.Millisecond) // give it a chance to screw up - if cached != blockCacheLimit && len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) != targetBlocks+1 { - t.Fatalf("block count mismatch: have %v, want %v (owned %v, target %v)", cached, blockCacheLimit, len(tester.ownBlocks), targetBlocks+1) + + tester.lock.RLock() + retrieved = len(tester.ownBlocks) + tester.lock.RUnlock() + if cached != blockCacheLimit && retrieved+cached+frozen != targetBlocks+1 { + t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheLimit, retrieved, frozen, targetBlocks+1) } // Permit the blocked blocks to import if atomic.LoadUint32(&blocked) > 0 { @@ -779,18 +814,18 @@ func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) { common, fork := MaxHashFetch, 2*MaxHashFetch hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil) - tester := newTester(mode) + tester := newTester() tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA) tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB) // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("fork A", nil); err != nil { + if err := tester.sync("fork A", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } assertOwnChain(t, tester, common+fork+1) // Synchronise with the second peer and make sure that fork 
is pulled too - if err := tester.sync("fork B", nil); err != nil { + if err := tester.sync("fork B", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } assertOwnForkedChain(t, tester, common+1, []int{common + fork + 1, common + fork + 1}) @@ -798,7 +833,7 @@ func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) { // Tests that an inactive downloader will not accept incoming hashes and blocks. func TestInactiveDownloader61(t *testing.T) { - tester := newTester(FullSync) + tester := newTester() // Check that neither hashes nor blocks are accepted if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive { @@ -812,7 +847,7 @@ func TestInactiveDownloader61(t *testing.T) { // Tests that an inactive downloader will not accept incoming block headers and // bodies. func TestInactiveDownloader62(t *testing.T) { - tester := newTester(FullSync) + tester := newTester() // Check that neither block headers nor bodies are accepted if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { @@ -826,7 +861,7 @@ func TestInactiveDownloader62(t *testing.T) { // Tests that an inactive downloader will not accept incoming block headers, // bodies and receipts. func TestInactiveDownloader63(t *testing.T) { - tester := newTester(FullSync) + tester := newTester() // Check that neither block headers nor bodies are accepted if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { @@ -860,7 +895,7 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) { } hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) - tester := newTester(mode) + tester := newTester() tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Make sure canceling works with a pristine downloader @@ -869,7 +904,7 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) { t.Errorf("download queue not idle") } // Synchronise with the peer, but cancel afterwards - if err := tester.sync("peer", nil); err != nil { + if err := tester.sync("peer", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } tester.downloader.cancel() @@ -893,12 +928,12 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) { targetBlocks := targetPeers*blockCacheLimit - 15 hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) - tester := newTester(mode) + tester := newTester() for i := 0; i < targetPeers; i++ { id := fmt.Sprintf("peer #%d", i) tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts) } - if err := tester.sync("peer #0", nil); err != nil { + if err := tester.sync("peer #0", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } assertOwnChain(t, tester, targetBlocks+1) @@ -920,14 +955,14 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) // Create peers of every type - tester := newTester(mode) - tester.newPeer("peer 61", 61, hashes, headers, blocks, receipts) - tester.newPeer("peer 62", 62, hashes, headers, blocks, receipts) + tester := newTester() + tester.newPeer("peer 61", 61, hashes, nil, blocks, nil) + tester.newPeer("peer 62", 62, hashes, headers, blocks, nil) tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts) tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts) - // Synchronise with the requestd peer and 
make sure all blocks were retrieved - if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil); err != nil { + // Synchronise with the requested peer and make sure all blocks were retrieved + if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } assertOwnChain(t, tester, targetBlocks+1) @@ -955,7 +990,7 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { targetBlocks := 2*blockCacheLimit - 15 hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) - tester := newTester(mode) + tester := newTester() tester.newPeer("peer", protocol, hashes, headers, blocks, receipts) // Instrument the downloader to signal body requests @@ -967,7 +1002,7 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { atomic.AddInt32(&receiptsHave, int32(len(headers))) } // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("peer", nil); err != nil { + if err := tester.sync("peer", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } assertOwnChain(t, tester, targetBlocks+1) @@ -980,7 +1015,7 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { } } for hash, receipt := range receipts { - if mode == FastSync && len(receipt) > 0 && headers[hash].Number.Uint64() <= uint64(targetBlocks-minFullBlocks) { + if mode == FastSync && len(receipt) > 0 && headers[hash].Number.Uint64() <= tester.downloader.queue.fastSyncPivot { receiptsNeeded++ } } @@ -1006,19 +1041,19 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) { targetBlocks := blockCacheLimit - 15 hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) - tester := newTester(mode) + tester := newTester() // Attempt a full sync with an attacker feeding gapped headers tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) missing := targetBlocks / 2 delete(tester.peerHeaders["attack"], hashes[missing]) - if err := tester.sync("attack", nil); err == nil { + if err := tester.sync("attack", nil, mode); err == nil { t.Fatalf("succeeded attacker synchronisation") } // Synchronise with the valid peer and make sure sync succeeds tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) - if err := tester.sync("valid", nil); err != nil { + if err := tester.sync("valid", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } assertOwnChain(t, tester, targetBlocks+1) @@ -1038,7 +1073,7 @@ func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { targetBlocks := blockCacheLimit - 15 hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) - tester := newTester(mode) + tester := newTester() // Attempt a full sync with an attacker feeding shifted headers tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) @@ -1046,12 +1081,12 @@ func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { delete(tester.peerBlocks["attack"], hashes[len(hashes)-2]) delete(tester.peerReceipts["attack"], hashes[len(hashes)-2]) - if err := tester.sync("attack", nil); err == nil { + if err := tester.sync("attack", nil, mode); err == nil { t.Fatalf("succeeded attacker synchronisation") } // Synchronise with the valid peer and make sure sync succeeds tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) - if err := tester.sync("valid", nil); err != nil { + if err := tester.sync("valid", nil, mode); err != nil { 
t.Fatalf("failed to synchronise blocks: %v", err) } assertOwnChain(t, tester, targetBlocks+1) @@ -1064,92 +1099,81 @@ func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback( func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download - targetBlocks := 3*minCheckedHeaders + minFullBlocks + targetBlocks := 3*fsHeaderSafetyNet + fsMinFullBlocks hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) - tester := newTester(mode) + tester := newTester() - // Attempt to sync with an attacker that feeds junk during the fast sync phase + // Attempt to sync with an attacker that feeds junk during the fast sync phase. + // This should result in the last fsHeaderSafetyNet headers being rolled back. tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts) - missing := minCheckedHeaders + MaxHeaderFetch + 1 + missing := fsHeaderSafetyNet + MaxHeaderFetch + 1 delete(tester.peerHeaders["fast-attack"], hashes[len(hashes)-missing]) - if err := tester.sync("fast-attack", nil); err == nil { + if err := tester.sync("fast-attack", nil, mode); err == nil { t.Fatalf("succeeded fast attacker synchronisation") } if head := tester.headHeader().Number.Int64(); int(head) > MaxHeaderFetch { - t.Fatalf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch) + t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch) } - // Attempt to sync with an attacker that feeds junk during the block import phase + // Attempt to sync with an attacker that feeds junk during the block import phase. + // This should result in both the last fsHeaderSafetyNet number of headers being + // rolled back, and also the pivot point being reverted to a non-block status. tester.newPeer("block-attack", protocol, hashes, headers, blocks, receipts) - missing = 3*minCheckedHeaders + MaxHeaderFetch + 1 + missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 delete(tester.peerHeaders["block-attack"], hashes[len(hashes)-missing]) - if err := tester.sync("block-attack", nil); err == nil { + if err := tester.sync("block-attack", nil, mode); err == nil { t.Fatalf("succeeded block attacker synchronisation") } + if head := tester.headHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { + t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) + } if mode == FastSync { - // Fast sync should not discard anything below the verified pivot point - if head := tester.headHeader().Number.Int64(); int(head) < 3*minCheckedHeaders { - t.Fatalf("rollback head mismatch: have %v, want at least %v", head, 3*minCheckedHeaders) + if head := tester.headBlock().NumberU64(); head != 0 { + t.Errorf("fast sync pivot block #%d not rolled back", head) } - } else if mode == LightSync { - // Light sync should still discard data as before - if head := tester.headHeader().Number.Int64(); int(head) > 3*minCheckedHeaders { - t.Fatalf("rollback head mismatch: have %v, want at most %v", head, 3*minCheckedHeaders) - } - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) - if err := tester.sync("valid", nil); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, targetBlocks+1) -} + // Attempt to sync with an attacker that withholds promised blocks after the + // fast sync pivot point. 
This could be a trial to leave the node with a bad + // but already imported pivot block. + tester.newPeer("withhold-attack", protocol, hashes, headers, blocks, receipts) + missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 -// Tests that if a peer sends an invalid block piece (body or receipt) for a -// requested block, it gets dropped immediately by the downloader. -func TestInvalidContentAttack62(t *testing.T) { testInvalidContentAttack(t, 62, FullSync) } -func TestInvalidContentAttack63Full(t *testing.T) { testInvalidContentAttack(t, 63, FullSync) } -func TestInvalidContentAttack63Fast(t *testing.T) { testInvalidContentAttack(t, 63, FastSync) } -func TestInvalidContentAttack64Full(t *testing.T) { testInvalidContentAttack(t, 64, FullSync) } -func TestInvalidContentAttack64Fast(t *testing.T) { testInvalidContentAttack(t, 64, FastSync) } -func TestInvalidContentAttack64Light(t *testing.T) { testInvalidContentAttack(t, 64, LightSync) } - -func testInvalidContentAttack(t *testing.T, protocol int, mode SyncMode) { - // Create two peers, one feeding invalid block bodies - targetBlocks := 4*blockCacheLimit - 15 - hashes, headers, validBlocks, validReceipts := makeChain(targetBlocks, 0, genesis, nil) - - invalidBlocks := make(map[common.Hash]*types.Block) - for hash, block := range validBlocks { - invalidBlocks[hash] = types.NewBlockWithHeader(block.Header()) - } - invalidReceipts := make(map[common.Hash]types.Receipts) - for hash, _ := range validReceipts { - invalidReceipts[hash] = types.Receipts{&types.Receipt{}} + tester.downloader.noFast = false + tester.downloader.syncInitHook = func(uint64, uint64) { + for i := missing; i <= len(hashes); i++ { + delete(tester.peerHeaders["withhold-attack"], hashes[len(hashes)-i]) + } + tester.downloader.syncInitHook = nil } - tester := newTester(mode) - tester.newPeer("valid", protocol, hashes, headers, validBlocks, validReceipts) - if mode != LightSync { - tester.newPeer("body attack", protocol, hashes, headers, invalidBlocks, validReceipts) + if err := tester.sync("withhold-attack", nil, mode); err == nil { + t.Fatalf("succeeded withholding attacker synchronisation") + } + if head := tester.headHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { + t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) } if mode == FastSync { - tester.newPeer("receipt attack", protocol, hashes, headers, validBlocks, invalidReceipts) + if head := tester.headBlock().NumberU64(); head != 0 { + t.Errorf("fast sync pivot block #%d not rolled back", head) + } } - // Synchronise with the valid peer (will pull contents from the attacker too) - if err := tester.sync("valid", nil); err != nil { + // Synchronise with the valid peer and make sure sync succeeds. Since the last + // rollback should also disable fast syncing for this process, verify that we + // did a fresh full sync. Note, we can't assert anything about the receipts + // since we won't purge the database of them, hence we can't use asserOwnChain. 
+ tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) + if err := tester.sync("valid", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, targetBlocks+1) - - // Make sure the attacker was detected and dropped in the mean time - if _, ok := tester.peerHashes["body attack"]; ok { - t.Fatalf("block body attacker not detected/dropped") + if hs := len(tester.ownHeaders); hs != len(headers) { + t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, len(headers)) } - if _, ok := tester.peerHashes["receipt attack"]; ok { - t.Fatalf("receipt attacker not detected/dropped") + if mode != LightSync { + if bs := len(tester.ownBlocks); bs != len(blocks) { + t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(blocks)) + } } } @@ -1164,11 +1188,11 @@ func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttac func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) } func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) { - tester := newTester(mode) + tester := newTester() hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil) tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts) - if err := tester.sync("attack", big.NewInt(1000000)); err != errStallingPeer { + if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) } } @@ -1206,7 +1230,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { {errCancelBodyFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop } // Run the tests and check disconnection status - tester := newTester(FullSync) + tester := newTester() for i, tt := range tests { // Register a new peer and ensure it's presence id := fmt.Sprintf("test %d", i) @@ -1219,120 +1243,125 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { // Simulate a synchronisation and check the required result tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } - tester.downloader.Synchronise(id, genesis.Hash(), big.NewInt(1000)) + tester.downloader.Synchronise(id, genesis.Hash(), big.NewInt(1000), FullSync) if _, ok := tester.peerHashes[id]; !ok != tt.drop { t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) } } } -// Tests that synchronisation boundaries (origin block number and highest block -// number) is tracked and updated correctly. -func TestSyncBoundaries61(t *testing.T) { testSyncBoundaries(t, 61, FullSync) } -func TestSyncBoundaries62(t *testing.T) { testSyncBoundaries(t, 62, FullSync) } -func TestSyncBoundaries63Full(t *testing.T) { testSyncBoundaries(t, 63, FullSync) } -func TestSyncBoundaries63Fast(t *testing.T) { testSyncBoundaries(t, 63, FastSync) } -func TestSyncBoundaries64Full(t *testing.T) { testSyncBoundaries(t, 64, FullSync) } -func TestSyncBoundaries64Fast(t *testing.T) { testSyncBoundaries(t, 64, FastSync) } -func TestSyncBoundaries64Light(t *testing.T) { testSyncBoundaries(t, 64, LightSync) } - -func testSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { +// Tests that synchronisation progress (origin block number, current block number +// and highest block number) is tracked and updated correctly. 
+func TestSyncProgress61(t *testing.T) { testSyncProgress(t, 61, FullSync) } +func TestSyncProgress62(t *testing.T) { testSyncProgress(t, 62, FullSync) } +func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) } +func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) } +func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) } +func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) } +func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) } + +func testSyncProgress(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) - // Set a sync init hook to catch boundary changes + // Set a sync init hook to catch progress changes starting := make(chan struct{}) progress := make(chan struct{}) - tester := newTester(mode) + tester := newTester() tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress } - // Retrieve the sync boundaries and ensure they are zero (pristine sync) - if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 { - t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) + // Retrieve the sync progress and ensure they are zero (pristine sync) + if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 { + t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0) } - // Synchronise half the blocks and check initial boundaries + // Synchronise half the blocks and check initial progress tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts) pending := new(sync.WaitGroup) pending.Add(1) go func() { defer pending.Done() - if err := tester.sync("peer-half", nil); err != nil { + if err := tester.sync("peer-half", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } }() <-starting - if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks/2+1) { - t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks/2+1) + if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks/2+1) { + t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, targetBlocks/2+1) } progress <- struct{}{} pending.Wait() - // Synchronise all the blocks and check continuation boundaries + // Synchronise all the blocks and check continuation progress tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts) pending.Add(1) go func() { defer pending.Done() - if err := tester.sync("peer-full", nil); err != nil { + if err := tester.sync("peer-full", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } }() <-starting - if origin, latest := tester.downloader.Boundaries(); origin != uint64(targetBlocks/2+1) || latest != uint64(targetBlocks) { - t.Fatalf("Completing boundary mismatch: have %v/%v, want %v/%v", origin, latest, targetBlocks/2+1, targetBlocks) + if origin, current, latest := tester.downloader.Progress(); origin != uint64(targetBlocks/2+1) || current != uint64(targetBlocks/2+1) || latest != uint64(targetBlocks) { + t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, 
current, latest, targetBlocks/2+1, targetBlocks/2+1, targetBlocks) } progress <- struct{}{} pending.Wait() + + // Check final progress after successful sync + if origin, current, latest := tester.downloader.Progress(); origin != uint64(targetBlocks/2+1) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) { + t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, targetBlocks/2+1, targetBlocks, targetBlocks) + } } -// Tests that synchronisation boundaries (origin block number and highest block +// Tests that synchronisation progress (origin block number and highest block // number) is tracked and updated correctly in case of a fork (or manual head // revertal). -func TestForkedSyncBoundaries61(t *testing.T) { testForkedSyncBoundaries(t, 61, FullSync) } -func TestForkedSyncBoundaries62(t *testing.T) { testForkedSyncBoundaries(t, 62, FullSync) } -func TestForkedSyncBoundaries63Full(t *testing.T) { testForkedSyncBoundaries(t, 63, FullSync) } -func TestForkedSyncBoundaries63Fast(t *testing.T) { testForkedSyncBoundaries(t, 63, FastSync) } -func TestForkedSyncBoundaries64Full(t *testing.T) { testForkedSyncBoundaries(t, 64, FullSync) } -func TestForkedSyncBoundaries64Fast(t *testing.T) { testForkedSyncBoundaries(t, 64, FastSync) } -func TestForkedSyncBoundaries64Light(t *testing.T) { testForkedSyncBoundaries(t, 64, LightSync) } - -func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { +func TestForkedSyncProgress61(t *testing.T) { testForkedSyncProgress(t, 61, FullSync) } +func TestForkedSyncProgress62(t *testing.T) { testForkedSyncProgress(t, 62, FullSync) } +func TestForkedSyncProgress63Full(t *testing.T) { testForkedSyncProgress(t, 63, FullSync) } +func TestForkedSyncProgress63Fast(t *testing.T) { testForkedSyncProgress(t, 63, FastSync) } +func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) } +func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) } +func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) } + +func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) { // Create a forked chain to simulate origin revertal common, fork := MaxHashFetch, 2*MaxHashFetch hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil) - // Set a sync init hook to catch boundary changes + // Set a sync init hook to catch progress changes starting := make(chan struct{}) progress := make(chan struct{}) - tester := newTester(mode) + tester := newTester() tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress } - // Retrieve the sync boundaries and ensure they are zero (pristine sync) - if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 { - t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) + // Retrieve the sync progress and ensure they are zero (pristine sync) + if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 { + t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0) } - // Synchronise with one of the forks and check boundaries + // Synchronise with one of the forks and check progress tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA) pending := new(sync.WaitGroup) pending.Add(1) go func() { defer pending.Done() - if err := 
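The renamed tests exercise the new three-value Progress API (origin, current and highest block), together with the rule just stated that an aborted run keeps its origin so the next attempt counts as a continuation rather than a fresh sync. A toy tracker with those semantics is sketched below; the type and method names are illustrative only, not the downloader's implementation.

package sketch

import "sync"

// progressTracker is a toy stand-in for the sync progress bookkeeping the
// tests above exercise.
type progressTracker struct {
	lock                    sync.RWMutex
	origin, current, latest uint64
	failed                  bool
}

// start records a new sync target. If the previous attempt failed, the old
// origin is kept so the new run is treated as a continuation.
func (p *progressTracker) start(head, target uint64) {
	p.lock.Lock()
	defer p.lock.Unlock()
	if !p.failed {
		p.origin = head
	}
	p.current, p.latest, p.failed = head, target, false
}

// advance moves the current block forward as imports complete.
func (p *progressTracker) advance(n uint64) {
	p.lock.Lock()
	defer p.lock.Unlock()
	if n > p.current {
		p.current = n
	}
}

// fail marks the attempt as aborted without touching the origin.
func (p *progressTracker) fail() {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.failed = true
}

// Progress returns the three values the new API exposes: after a completed
// sync, current equals latest; after a failure, origin survives into the
// next cycle.
func (p *progressTracker) Progress() (origin, current, latest uint64) {
	p.lock.RLock()
	defer p.lock.RUnlock()
	return p.origin, p.current, p.latest
}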
tester.sync("fork A", nil); err != nil { + if err := tester.sync("fork A", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } }() <-starting - if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(len(hashesA)-1) { - t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, len(hashesA)-1) + if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(len(hashesA)-1) { + t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, len(hashesA)-1) } progress <- struct{}{} pending.Wait() @@ -1340,52 +1369,57 @@ func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { // Simulate a successful sync above the fork tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight - // Synchronise with the second fork and check boundary resets + // Synchronise with the second fork and check progress resets tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB) pending.Add(1) go func() { defer pending.Done() - if err := tester.sync("fork B", nil); err != nil { + if err := tester.sync("fork B", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } }() <-starting - if origin, latest := tester.downloader.Boundaries(); origin != uint64(common) || latest != uint64(len(hashesB)-1) { - t.Fatalf("Forking boundary mismatch: have %v/%v, want %v/%v", origin, latest, common, len(hashesB)-1) + if origin, current, latest := tester.downloader.Progress(); origin != uint64(common) || current != uint64(len(hashesA)-1) || latest != uint64(len(hashesB)-1) { + t.Fatalf("Forking progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, common, len(hashesA)-1, len(hashesB)-1) } progress <- struct{}{} pending.Wait() + + // Check final progress after successful sync + if origin, current, latest := tester.downloader.Progress(); origin != uint64(common) || current != uint64(len(hashesB)-1) || latest != uint64(len(hashesB)-1) { + t.Fatalf("Final progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, common, len(hashesB)-1, len(hashesB)-1) + } } -// Tests that if synchronisation is aborted due to some failure, then the boundary +// Tests that if synchronisation is aborted due to some failure, then the progress // origin is not updated in the next sync cycle, as it should be considered the // continuation of the previous sync and not a new instance. 
-func TestFailedSyncBoundaries61(t *testing.T) { testFailedSyncBoundaries(t, 61, FullSync) } -func TestFailedSyncBoundaries62(t *testing.T) { testFailedSyncBoundaries(t, 62, FullSync) } -func TestFailedSyncBoundaries63Full(t *testing.T) { testFailedSyncBoundaries(t, 63, FullSync) } -func TestFailedSyncBoundaries63Fast(t *testing.T) { testFailedSyncBoundaries(t, 63, FastSync) } -func TestFailedSyncBoundaries64Full(t *testing.T) { testFailedSyncBoundaries(t, 64, FullSync) } -func TestFailedSyncBoundaries64Fast(t *testing.T) { testFailedSyncBoundaries(t, 64, FastSync) } -func TestFailedSyncBoundaries64Light(t *testing.T) { testFailedSyncBoundaries(t, 64, LightSync) } - -func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { +func TestFailedSyncProgress61(t *testing.T) { testFailedSyncProgress(t, 61, FullSync) } +func TestFailedSyncProgress62(t *testing.T) { testFailedSyncProgress(t, 62, FullSync) } +func TestFailedSyncProgress63Full(t *testing.T) { testFailedSyncProgress(t, 63, FullSync) } +func TestFailedSyncProgress63Fast(t *testing.T) { testFailedSyncProgress(t, 63, FastSync) } +func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) } +func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) } +func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) } + +func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) { // Create a small enough block chain to download targetBlocks := blockCacheLimit - 15 hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil) - // Set a sync init hook to catch boundary changes + // Set a sync init hook to catch progress changes starting := make(chan struct{}) progress := make(chan struct{}) - tester := newTester(mode) + tester := newTester() tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress } - // Retrieve the sync boundaries and ensure they are zero (pristine sync) - if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 { - t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) + // Retrieve the sync progress and ensure they are zero (pristine sync) + if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 { + t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0) } // Attempt a full sync with a faulty peer tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts) @@ -1399,62 +1433,67 @@ func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { go func() { defer pending.Done() - if err := tester.sync("faulty", nil); err == nil { + if err := tester.sync("faulty", nil, mode); err == nil { t.Fatalf("succeeded faulty synchronisation") } }() <-starting - if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks) { - t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks) + if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks) { + t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, targetBlocks) } progress <- struct{}{} pending.Wait() - // Synchronise with a good peer and check that the boundary origin remind the same after a failure + // Synchronise with a good peer and check that the progress 
origin remind the same after a failure tester.newPeer("valid", protocol, hashes, headers, blocks, receipts) pending.Add(1) go func() { defer pending.Done() - if err := tester.sync("valid", nil); err != nil { + if err := tester.sync("valid", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } }() <-starting - if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks) { - t.Fatalf("Completing boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks) + if origin, current, latest := tester.downloader.Progress(); origin != 0 || current > uint64(targetBlocks/2) || latest != uint64(targetBlocks) { + t.Fatalf("Completing progress mismatch: have %v/%v/%v, want %v/0-%v/%v", origin, current, latest, 0, targetBlocks/2, targetBlocks) } progress <- struct{}{} pending.Wait() + + // Check final progress after successful sync + if origin, current, latest := tester.downloader.Progress(); origin > uint64(targetBlocks/2) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) { + t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", origin, current, latest, targetBlocks/2, targetBlocks, targetBlocks) + } } // Tests that if an attacker fakes a chain height, after the attack is detected, -// the boundary height is successfully reduced at the next sync invocation. -func TestFakedSyncBoundaries61(t *testing.T) { testFakedSyncBoundaries(t, 61, FullSync) } -func TestFakedSyncBoundaries62(t *testing.T) { testFakedSyncBoundaries(t, 62, FullSync) } -func TestFakedSyncBoundaries63Full(t *testing.T) { testFakedSyncBoundaries(t, 63, FullSync) } -func TestFakedSyncBoundaries63Fast(t *testing.T) { testFakedSyncBoundaries(t, 63, FastSync) } -func TestFakedSyncBoundaries64Full(t *testing.T) { testFakedSyncBoundaries(t, 64, FullSync) } -func TestFakedSyncBoundaries64Fast(t *testing.T) { testFakedSyncBoundaries(t, 64, FastSync) } -func TestFakedSyncBoundaries64Light(t *testing.T) { testFakedSyncBoundaries(t, 64, LightSync) } - -func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { +// the progress height is successfully reduced at the next sync invocation. 
+func TestFakedSyncProgress61(t *testing.T) { testFakedSyncProgress(t, 61, FullSync) } +func TestFakedSyncProgress62(t *testing.T) { testFakedSyncProgress(t, 62, FullSync) } +func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) } +func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) } +func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) } +func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) } +func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) } + +func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) { // Create a small block chain targetBlocks := blockCacheLimit - 15 hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil) - // Set a sync init hook to catch boundary changes + // Set a sync init hook to catch progress changes starting := make(chan struct{}) progress := make(chan struct{}) - tester := newTester(mode) + tester := newTester() tester.downloader.syncInitHook = func(origin, latest uint64) { starting <- struct{}{} <-progress } - // Retrieve the sync boundaries and ensure they are zero (pristine sync) - if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != 0 { - t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) + // Retrieve the sync progress and ensure they are zero (pristine sync) + if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != 0 { + t.Fatalf("Pristine progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, 0) } // Create and sync with an attacker that promises a higher chain than available tester.newPeer("attack", protocol, hashes, headers, blocks, receipts) @@ -1469,31 +1508,36 @@ func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { go func() { defer pending.Done() - if err := tester.sync("attack", nil); err == nil { + if err := tester.sync("attack", nil, mode); err == nil { t.Fatalf("succeeded attacker synchronisation") } }() <-starting - if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks+3) { - t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks+3) + if origin, current, latest := tester.downloader.Progress(); origin != 0 || current != 0 || latest != uint64(targetBlocks+3) { + t.Fatalf("Initial progress mismatch: have %v/%v/%v, want %v/%v/%v", origin, current, latest, 0, 0, targetBlocks+3) } progress <- struct{}{} pending.Wait() - // Synchronise with a good peer and check that the boundary height has been reduced to the true value + // Synchronise with a good peer and check that the progress height has been reduced to the true value tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts) pending.Add(1) go func() { defer pending.Done() - if err := tester.sync("valid", nil); err != nil { + if err := tester.sync("valid", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } }() <-starting - if origin, latest := tester.downloader.Boundaries(); origin != 0 || latest != uint64(targetBlocks) { - t.Fatalf("Initial boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, targetBlocks) + if origin, current, latest := tester.downloader.Progress(); origin != 0 || current > uint64(targetBlocks) || latest != uint64(targetBlocks) { + t.Fatalf("Completing progress mismatch: have 
%v/%v/%v, want %v/0-%v/%v", origin, current, latest, 0, targetBlocks, targetBlocks) } progress <- struct{}{} pending.Wait() + + // Check final progress after successful sync + if origin, current, latest := tester.downloader.Progress(); origin > uint64(targetBlocks) || current != uint64(targetBlocks) || latest != uint64(targetBlocks) { + t.Fatalf("Final progress mismatch: have %v/%v/%v, want 0-%v/%v/%v", origin, current, latest, targetBlocks, targetBlocks, targetBlocks) + } } diff --git a/eth/downloader/modes.go b/eth/downloader/modes.go index 8916dbb79..ec339c074 100644 --- a/eth/downloader/modes.go +++ b/eth/downloader/modes.go @@ -20,7 +20,7 @@ package downloader type SyncMode int const ( - FullSync SyncMode = iota // Synchronise the entire block-chain history from full blocks - FastSync // Quikcly download the headers, full sync only at the chain head + FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks + FastSync // Quickly download the headers, full sync only at the chain head LightSync // Download only the headers and terminate afterwards ) diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 5011d5d46..1f457cb15 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -124,6 +124,10 @@ func (p *peer) Reset() { // Fetch61 sends a block retrieval request to the remote peer. func (p *peer) Fetch61(request *fetchRequest) error { + // Sanity check the protocol version + if p.version != 61 { + panic(fmt.Sprintf("block fetch [eth/61] requested on eth/%d", p.version)) + } // Short circuit if the peer is already fetching if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { return errAlreadyFetching @@ -142,6 +146,10 @@ func (p *peer) Fetch61(request *fetchRequest) error { // FetchBodies sends a block body retrieval request to the remote peer. func (p *peer) FetchBodies(request *fetchRequest) error { + // Sanity check the protocol version + if p.version < 62 { + panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", p.version)) + } // Short circuit if the peer is already fetching if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { return errAlreadyFetching @@ -160,6 +168,10 @@ func (p *peer) FetchBodies(request *fetchRequest) error { // FetchReceipts sends a receipt retrieval request to the remote peer. func (p *peer) FetchReceipts(request *fetchRequest) error { + // Sanity check the protocol version + if p.version < 63 { + panic(fmt.Sprintf("body fetch [eth/63+] requested on eth/%d", p.version)) + } // Short circuit if the peer is already fetching if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { return errAlreadyFetching @@ -178,6 +190,10 @@ func (p *peer) FetchReceipts(request *fetchRequest) error { // FetchNodeData sends a node state data retrieval request to the remote peer. func (p *peer) FetchNodeData(request *fetchRequest) error { + // Sanity check the protocol version + if p.version < 63 { + panic(fmt.Sprintf("node data fetch [eth/63+] requested on eth/%d", p.version)) + } // Short circuit if the peer is already fetching if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { return errAlreadyFetching @@ -196,35 +212,35 @@ func (p *peer) FetchNodeData(request *fetchRequest) error { // SetBlocksIdle sets the peer to idle, allowing it to execute new retrieval requests. // Its block retrieval allowance will also be updated either up- or downwards, -// depending on whether the previous fetch completed in time or not. +// depending on whether the previous fetch completed in time. 
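The fetch methods now assert the peer's protocol version up front, so a mis-routed request (for example a body fetch aimed at an eth/61 peer) fails loudly instead of silently corrupting the download state. A generic form of that guard, using a hypothetical request-kind enum in place of the concrete fetch methods, might read:

package sketch

import "fmt"

// fetchKind is a hypothetical enumeration of the request types guarded above.
type fetchKind int

const (
	fetchBlocks   fetchKind = iota // eth/61 only
	fetchBodies                    // eth/62 and above
	fetchReceipts                  // eth/63 and above
	fetchNodeData                  // eth/63 and above
)

// checkFetchVersion panics if a request kind is dispatched to a peer whose
// protocol version cannot serve it, mirroring the sanity checks added above.
func checkFetchVersion(kind fetchKind, version int) {
	switch {
	case kind == fetchBlocks && version != 61:
		panic(fmt.Sprintf("block fetch [eth/61] requested on eth/%d", version))
	case kind == fetchBodies && version < 62:
		panic(fmt.Sprintf("body fetch [eth/62+] requested on eth/%d", version))
	case (kind == fetchReceipts || kind == fetchNodeData) && version < 63:
		panic(fmt.Sprintf("fetch [eth/63+] requested on eth/%d", version))
	}
}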
func (p *peer) SetBlocksIdle() { p.setIdle(p.blockStarted, blockSoftTTL, blockHardTTL, MaxBlockFetch, &p.blockCapacity, &p.blockIdle) } // SetBodiesIdle sets the peer to idle, allowing it to execute new retrieval requests. // Its block body retrieval allowance will also be updated either up- or downwards, -// depending on whether the previous fetch completed in time or not. +// depending on whether the previous fetch completed in time. func (p *peer) SetBodiesIdle() { - p.setIdle(p.blockStarted, bodySoftTTL, bodyHardTTL, MaxBlockFetch, &p.blockCapacity, &p.blockIdle) + p.setIdle(p.blockStarted, bodySoftTTL, bodyHardTTL, MaxBodyFetch, &p.blockCapacity, &p.blockIdle) } // SetReceiptsIdle sets the peer to idle, allowing it to execute new retrieval requests. // Its receipt retrieval allowance will also be updated either up- or downwards, -// depending on whether the previous fetch completed in time or not. +// depending on whether the previous fetch completed in time. func (p *peer) SetReceiptsIdle() { p.setIdle(p.receiptStarted, receiptSoftTTL, receiptHardTTL, MaxReceiptFetch, &p.receiptCapacity, &p.receiptIdle) } // SetNodeDataIdle sets the peer to idle, allowing it to execute new retrieval // requests. Its node data retrieval allowance will also be updated either up- or -// downwards, depending on whether the previous fetch completed in time or not. +// downwards, depending on whether the previous fetch completed in time. func (p *peer) SetNodeDataIdle() { p.setIdle(p.stateStarted, stateSoftTTL, stateSoftTTL, MaxStateFetch, &p.stateCapacity, &p.stateIdle) } // setIdle sets the peer to idle, allowing it to execute new retrieval requests. // Its data retrieval allowance will also be updated either up- or downwards, -// depending on whether the previous fetch completed in time or not. +// depending on whether the previous fetch completed in time. func (p *peer) setIdle(started time.Time, softTTL, hardTTL time.Duration, maxFetch int, capacity, idle *int32) { // Update the peer's download allowance based on previous performance scale := 2.0 diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 17fbb1c7f..56b46e285 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -56,9 +56,8 @@ type fetchRequest struct { Time time.Time // Time when the request was made } -// fetchResult is the assembly collecting partial results from potentially more -// than one fetcher routines, until all outstanding retrievals complete and the -// result as a whole can be processed. +// fetchResult is a struct collecting partial results from data fetchers until +// all outstanding pieces complete and the result as a whole can be processed. 
type fetchResult struct { Pending int // Number of data fetches still pending @@ -89,7 +88,7 @@ type queue struct { receiptPendPool map[string]*fetchRequest // [eth/63] Currently pending receipt retrieval operations receiptDonePool map[common.Hash]struct{} // [eth/63] Set of the completed receipt fetches - stateTaskIndex int // [eth/63] Counter indexing the added hashes to ensure prioritized retrieval order + stateTaskIndex int // [eth/63] Counter indexing the added hashes to ensure prioritised retrieval order stateTaskPool map[common.Hash]int // [eth/63] Pending node data retrieval tasks, mapping to their priority stateTaskQueue *prque.Prque // [eth/63] Priority queue of the hashes to fetch the node data for statePendPool map[string]*fetchRequest // [eth/63] Currently pending node data retrieval operations @@ -97,10 +96,10 @@ type queue struct { stateDatabase ethdb.Database // [eth/63] Trie database to populate during state reassembly stateScheduler *state.StateSync // [eth/63] State trie synchronisation scheduler and integrator stateProcessors int32 // [eth/63] Number of currently running state processors - stateSchedLock sync.RWMutex // [eth/63] Lock serializing access to the state scheduler + stateSchedLock sync.RWMutex // [eth/63] Lock serialising access to the state scheduler resultCache []*fetchResult // Downloaded but not yet delivered fetch results - resultOffset uint64 // Offset of the first cached fetch result in the block-chain + resultOffset uint64 // Offset of the first cached fetch result in the block chain lock sync.RWMutex } @@ -131,6 +130,9 @@ func (q *queue) Reset() { q.lock.Lock() defer q.lock.Unlock() + q.stateSchedLock.Lock() + defer q.stateSchedLock.Unlock() + q.mode = FullSync q.fastSyncPivot = 0 @@ -233,9 +235,17 @@ func (q *queue) Idle() bool { return (queued + pending + cached) == 0 } -// ThrottleBlocks checks if the download should be throttled (active block (body) +// FastSyncPivot retrieves the currently used fast sync pivot point. +func (q *queue) FastSyncPivot() uint64 { + q.lock.RLock() + defer q.lock.RUnlock() + + return q.fastSyncPivot +} + +// ShouldThrottleBlocks checks if the download should be throttled (active block (body) // fetches exceed block cache). -func (q *queue) ThrottleBlocks() bool { +func (q *queue) ShouldThrottleBlocks() bool { q.lock.RLock() defer q.lock.RUnlock() @@ -248,9 +258,9 @@ func (q *queue) ThrottleBlocks() bool { return pending >= len(q.resultCache)-len(q.blockDonePool) } -// ThrottleReceipts checks if the download should be throttled (active receipt +// ShouldThrottleReceipts checks if the download should be throttled (active receipt // fetches exceed block cache). 
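
The renamed ShouldThrottleBlocks/ShouldThrottleReceipts checks above boil down to one inequality: stop reserving new work once the fetches already in flight would overflow the free slots left in the result cache. A stripped-down version of that check (field names shortened, not the actual queue type):

package main

import "fmt"

// shouldThrottle reports whether new fetches should be postponed: once the
// number of items currently in flight reaches the free capacity of the result
// cache (total slots minus already completed ones), reserving more work only
// grows memory usage without improving throughput.
func shouldThrottle(pending, cacheSlots, done int) bool {
	return pending >= cacheSlots-done
}

func main() {
	fmt.Println(shouldThrottle(10, 64, 50)) // false: 14 free slots remain
	fmt.Println(shouldThrottle(14, 64, 50)) // true: the cache is saturated
}
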
-func (q *queue) ThrottleReceipts() bool { +func (q *queue) ShouldThrottleReceipts() bool { q.lock.RLock() defer q.lock.RUnlock() @@ -269,7 +279,7 @@ func (q *queue) Schedule61(hashes []common.Hash, fifo bool) []common.Hash { q.lock.Lock() defer q.lock.Unlock() - // Insert all the hashes prioritized in the arrival order + // Insert all the hashes prioritised in the arrival order inserts := make([]common.Hash, 0, len(hashes)) for _, hash := range hashes { // Skip anything we already have @@ -297,10 +307,10 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { q.lock.Lock() defer q.lock.Unlock() - // Insert all the headers prioritized by the contained block number + // Insert all the headers prioritised by the contained block number inserts := make([]*types.Header, 0, len(headers)) for _, header := range headers { - // Make sure chain order is honored and preserved throughout + // Make sure chain order is honoured and preserved throughout hash := header.Hash() if header.Number == nil || header.Number.Uint64() != from { glog.V(logger.Warn).Infof("Header #%v [%x] broke chain ordering, expected %d", header.Number, hash[:4], from) @@ -347,19 +357,29 @@ func (q *queue) GetHeadResult() *fetchResult { q.lock.RLock() defer q.lock.RUnlock() + // If there are no results pending, return nil if len(q.resultCache) == 0 || q.resultCache[0] == nil { return nil } + // If the next result is still incomplete, return nil if q.resultCache[0].Pending > 0 { return nil } + // If the next result is the fast sync pivot... if q.mode == FastSync && q.resultCache[0].Header.Number.Uint64() == q.fastSyncPivot { + // If the pivot state trie is still being pulled, return nil if len(q.stateTaskPool) > 0 { return nil } if q.PendingNodeData() > 0 { return nil } + // If the state is done, but not enough post-pivot headers were verified, stall... + for i := 0; i < fsHeaderForceVerify; i++ { + if i+1 >= len(q.resultCache) || q.resultCache[i+1] == nil { + return nil + } + } } return q.resultCache[0] } @@ -372,7 +392,7 @@ func (q *queue) TakeResults() []*fetchResult { // Accumulate all available results results := []*fetchResult{} - for _, result := range q.resultCache { + for i, result := range q.resultCache { // Stop if no more results are ready if result == nil || result.Pending > 0 { break @@ -385,6 +405,16 @@ func (q *queue) TakeResults() []*fetchResult { if q.PendingNodeData() > 0 { break } + // Even is state fetch is done, ensure post-pivot headers passed verifications + safe := true + for j := 0; j < fsHeaderForceVerify; j++ { + if i+j+1 >= len(q.resultCache) || q.resultCache[i+j+1] == nil { + safe = false + } + } + if !safe { + break + } } // If we've just inserted the fast sync pivot, stop as the following batch needs different insertion if q.mode == FastSync && result.Header.Number.Uint64() == q.fastSyncPivot+1 && len(results) > 0 { @@ -411,6 +441,9 @@ func (q *queue) TakeResults() []*fetchResult { // ReserveBlocks reserves a set of block hashes for the given peer, skipping any // previously failed download. 
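
The fast-sync pivot handling added to GetHeadResult and TakeResults above stalls the delivery pipeline at the pivot block until two conditions hold: the pivot's state trie download has finished, and at least fsHeaderForceVerify headers beyond the pivot are already complete, so the pivot is corroborated by verified descendants. The gist of that gate, as a simplified sketch rather than the real queue logic:

package main

import "fmt"

// pivotReady reports whether the fast-sync pivot result may be handed over for
// import. results mirrors the queue's result cache (false = not yet complete),
// statePending is the number of outstanding state-trie nodes, and forceVerify
// plays the role of fsHeaderForceVerify.
func pivotReady(results []bool, statePending, forceVerify int) bool {
	if statePending > 0 {
		return false // pivot state trie still downloading
	}
	for i := 0; i < forceVerify; i++ {
		if i+1 >= len(results) || !results[i+1] {
			return false // not enough completed post-pivot headers yet
		}
	}
	return true
}

func main() {
	fmt.Println(pivotReady([]bool{true, true, true}, 0, 2)) // true
	fmt.Println(pivotReady([]bool{true, true}, 0, 2))       // false: a post-pivot header is missing
}
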
func (q *queue) ReserveBlocks(p *peer, count int) *fetchRequest { + q.lock.Lock() + defer q.lock.Unlock() + return q.reserveHashes(p, count, q.hashQueue, nil, q.blockPendPool, len(q.resultCache)-len(q.blockDonePool)) } @@ -430,17 +463,21 @@ func (q *queue) ReserveNodeData(p *peer, count int) *fetchRequest { } } } + q.lock.Lock() + defer q.lock.Unlock() + return q.reserveHashes(p, count, q.stateTaskQueue, generator, q.statePendPool, count) } // reserveHashes reserves a set of hashes for the given peer, skipping previously // failed ones. +// +// Note, this method expects the queue lock to be already held for writing. The +// reason the lock is not obtained in here is because the parameters already need +// to access the queue, so they already need a lock anyway. func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, taskGen func(int), pendPool map[string]*fetchRequest, maxPending int) *fetchRequest { - q.lock.Lock() - defer q.lock.Unlock() - - // Short circuit if the peer's already downloading something (sanity check not - // to corrupt state) + // Short circuit if the peer's already downloading something (sanity check to + // not corrupt state) if _, ok := pendPool[p.id]; ok { return nil } @@ -492,30 +529,37 @@ func (q *queue) reserveHashes(p *peer, count int, taskQueue *prque.Prque, taskGe // previously failed downloads. Beside the next batch of needed fetches, it also // returns a flag whether empty blocks were queued requiring processing. func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) { - noop := func(header *types.Header) bool { + isNoop := func(header *types.Header) bool { return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash } - return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, noop) + q.lock.Lock() + defer q.lock.Unlock() + + return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop) } // ReserveReceipts reserves a set of receipt fetches for the given peer, skipping // any previously failed downloads. Beside the next batch of needed fetches, it // also returns a flag whether empty receipts were queued requiring importing. func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error) { - noop := func(header *types.Header) bool { + isNoop := func(header *types.Header) bool { return header.ReceiptHash == types.EmptyRootHash } - return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, noop) + q.lock.Lock() + defer q.lock.Unlock() + + return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, isNoop) } // reserveHeaders reserves a set of data download operations for a given peer, // skipping any previously failed ones. This method is a generic version used // by the individual special reservation functions. +// +// Note, this method expects the queue lock to be already held for writing. The +// reason the lock is not obtained in here is because the parameters already need +// to access the queue, so they already need a lock anyway. 
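
A recurring pattern in this patch is separating lock acquisition from the work itself: the exported Reserve*/Expire*/Deliver* methods take q.lock, while the shared unexported helpers (reserveHashes, reserveHeaders, expire, deliver) document that they expect the lock to be held already. Sketched in isolation, with illustrative names only:

package main

import "sync"

type queue struct {
	lock  sync.Mutex
	tasks []string
}

// Reserve is the exported entry point: it owns the locking.
func (q *queue) Reserve(count int) []string {
	q.lock.Lock()
	defer q.lock.Unlock()
	return q.reserve(count)
}

// reserve assumes q.lock is already held for writing; it never locks itself,
// so several exported wrappers can share it without double-locking.
func (q *queue) reserve(count int) []string {
	if count > len(q.tasks) {
		count = len(q.tasks)
	}
	out := q.tasks[:count]
	q.tasks = q.tasks[count:]
	return out
}

func main() {
	q := &queue{tasks: []string{"a", "b", "c"}}
	_ = q.Reserve(2)
}
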
func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, - pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, noop func(*types.Header) bool) (*fetchRequest, bool, error) { - q.lock.Lock() - defer q.lock.Unlock() - + pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) { // Short circuit if the pool has been depleted, or if the peer's already // downloading something (sanity check not to corrupt state) if taskQueue.Empty() { @@ -537,7 +581,7 @@ func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*typ for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ { header := taskQueue.PopItem().(*types.Header) - // If we're the first to request this task, initialize the result container + // If we're the first to request this task, initialise the result container index := int(header.Number.Int64() - int64(q.resultOffset)) if index >= len(q.resultCache) || index < 0 { return nil, false, errInvalidChain @@ -553,7 +597,7 @@ func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*typ } } // If this fetch task is a noop, skip this fetch operation - if noop(header) { + if isNoop(header) { donePool[header.Hash()] = struct{}{} delete(taskPool, header.Hash()) @@ -562,7 +606,7 @@ func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*typ progress = true continue } - // Otherwise if not a known unknown block, add to the retrieve list + // Otherwise unless the peer is known not to have the data, add to the retrieve list if p.ignored.Has(header.Hash()) { skip = append(skip, header) } else { @@ -655,35 +699,48 @@ func (q *queue) Revoke(peerId string) { } // ExpireBlocks checks for in flight requests that exceeded a timeout allowance, -// canceling them and returning the responsible peers for penalization. +// canceling them and returning the responsible peers for penalisation. func (q *queue) ExpireBlocks(timeout time.Duration) []string { + q.lock.Lock() + defer q.lock.Unlock() + return q.expire(timeout, q.blockPendPool, q.hashQueue, blockTimeoutMeter) } // ExpireBodies checks for in flight block body requests that exceeded a timeout -// allowance, canceling them and returning the responsible peers for penalization. +// allowance, canceling them and returning the responsible peers for penalisation. func (q *queue) ExpireBodies(timeout time.Duration) []string { + q.lock.Lock() + defer q.lock.Unlock() + return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter) } // ExpireReceipts checks for in flight receipt requests that exceeded a timeout -// allowance, canceling them and returning the responsible peers for penalization. +// allowance, canceling them and returning the responsible peers for penalisation. func (q *queue) ExpireReceipts(timeout time.Duration) []string { + q.lock.Lock() + defer q.lock.Unlock() + return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter) } // ExpireNodeData checks for in flight node data requests that exceeded a timeout -// allowance, canceling them and returning the responsible peers for penalization. +// allowance, canceling them and returning the responsible peers for penalisation. 
func (q *queue) ExpireNodeData(timeout time.Duration) []string { + q.lock.Lock() + defer q.lock.Unlock() + return q.expire(timeout, q.statePendPool, q.stateTaskQueue, stateTimeoutMeter) } // expire is the generic check that move expired tasks from a pending pool back // into a task pool, returning all entities caught with expired tasks. +// +// Note, this method expects the queue lock to be already held for writing. The +// reason the lock is not obtained in here is because the parameters already need +// to access the queue, so they already need a lock anyway. func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) []string { - q.lock.Lock() - defer q.lock.Unlock() - // Iterate over the expired requests and return each to the queue peers := []string{} for id, request := range pendPool { @@ -764,7 +821,7 @@ func (q *queue) DeliverBlocks(id string, blocks []*types.Block) error { case len(errs) == 1 && (errs[0] == errInvalidChain || errs[0] == errInvalidBlock): return errs[0] - case len(errs) == len(request.Headers): + case len(errs) == len(blocks): return errStaleDelivery default: @@ -774,6 +831,9 @@ func (q *queue) DeliverBlocks(id string, blocks []*types.Block) error { // DeliverBodies injects a block body retrieval response into the results queue. func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) error { + q.lock.Lock() + defer q.lock.Unlock() + reconstruct := func(header *types.Header, index int, result *fetchResult) error { if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash { return errInvalidBody @@ -787,6 +847,9 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi // DeliverReceipts injects a receipt retrieval response into the results queue. func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) error { + q.lock.Lock() + defer q.lock.Unlock() + reconstruct := func(header *types.Header, index int, result *fetchResult) error { if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash { return errInvalidReceipt @@ -798,11 +861,12 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) error } // deliver injects a data retrieval response into the results queue. +// +// Note, this method expects the queue lock to be already held for writing. The +// reason the lock is not obtained in here is because the parameters already need +// to access the queue, so they already need a lock anyway. 
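
DeliverBodies and DeliverReceipts above both funnel into the generic deliver with a small reconstruct callback that cross-checks the downloaded payload against the header it was requested for (transaction trie root and uncle hash for bodies, receipt trie root for receipts). Detached from the queue plumbing, the body-side validation looks roughly like this (a sketch reusing the same types package calls shown in the diff):

package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

var errInvalidBody = errors.New("retrieved block body is invalid")

// validateBody checks that a downloaded body really belongs to the header it
// was requested for by recomputing the transaction and uncle hashes -- the same
// check the reconstruct callback in DeliverBodies performs.
func validateBody(header *types.Header, txs []*types.Transaction, uncles []*types.Header) error {
	if types.DeriveSha(types.Transactions(txs)) != header.TxHash ||
		types.CalcUncleHash(uncles) != header.UncleHash {
		return errInvalidBody
	}
	return nil
}

func main() {
	// An "empty" header matches an empty body; any mismatch yields errInvalidBody.
	header := &types.Header{
		TxHash:    types.DeriveSha(types.Transactions(nil)),
		UncleHash: types.CalcUncleHash(nil),
	}
	fmt.Println(validateBody(header, nil, nil)) // <nil>
}
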
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer, results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) error { - q.lock.Lock() - defer q.lock.Unlock() - // Short circuit if the data was never requested request := pendPool[id] if request == nil { @@ -818,7 +882,10 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ } } // Assemble each of the results with their headers and retrieved data parts - errs := make([]error, 0) + var ( + failure error + useful bool + ) for i, header := range request.Headers { // Short circuit assembly if no more fetch results are found if i >= results { @@ -827,15 +894,16 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ // Reconstruct the next result if contents match up index := int(header.Number.Int64() - int64(q.resultOffset)) if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil { - errs = []error{errInvalidChain} + failure = errInvalidChain break } if err := reconstruct(header, i, q.resultCache[index]); err != nil { - errs = []error{err} + failure = err break } donePool[header.Hash()] = struct{}{} q.resultCache[index].Pending-- + useful = true // Clean up a successful fetch request.Headers[i] = nil @@ -847,19 +915,16 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ taskQueue.Push(header, -float32(header.Number.Uint64())) } } - // If none of the blocks were good, it's a stale delivery + // If none of the data was good, it's a stale delivery switch { - case len(errs) == 0: - return nil - - case len(errs) == 1 && (errs[0] == errInvalidChain || errs[0] == errInvalidBody || errs[0] == errInvalidReceipt): - return errs[0] + case failure == nil || failure == errInvalidChain: + return failure - case len(errs) == len(request.Headers): - return errStaleDelivery + case useful: + return fmt.Errorf("partial failure: %v", failure) default: - return fmt.Errorf("multiple failures: %v", errs) + return errStaleDelivery } } @@ -876,7 +941,7 @@ func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, i stateReqTimer.UpdateSince(request.Time) delete(q.statePendPool, id) - // If no data was retrieved, mark them as unavailable for the origin peer + // If no data was retrieved, mark their hashes as unavailable for the origin peer if len(data) == 0 { for hash, _ := range request.Hashes { request.Peer.ignored.Add(hash) @@ -955,9 +1020,6 @@ func (q *queue) Prepare(offset uint64, mode SyncMode, pivot uint64) { if q.resultOffset < offset { q.resultOffset = offset } - q.fastSyncPivot = 0 - if mode == FastSync { - q.fastSyncPivot = pivot - } + q.fastSyncPivot = pivot q.mode = mode } diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go index b8ec1fc55..d88d91982 100644 --- a/eth/fetcher/fetcher.go +++ b/eth/fetcher/fetcher.go @@ -142,9 +142,11 @@ type Fetcher struct { dropPeer peerDropFn // Drops a peer for misbehaving // Testing hooks - fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch - completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62) - importedHook func(*types.Block) // Method to call upon successful block import (both eth/61 and eth/62) + announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list + 
queueChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue + fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch + completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62) + importedHook func(*types.Block) // Method to call upon successful block import (both eth/61 and eth/62) } // New creates a block fetcher to retrieve blocks based on hash announcements. @@ -324,11 +326,16 @@ func (f *Fetcher) loop() { height := f.chainHeight() for !f.queue.Empty() { op := f.queue.PopItem().(*inject) - + if f.queueChangeHook != nil { + f.queueChangeHook(op.block.Hash(), false) + } // If too high up the chain or phase, continue later number := op.block.NumberU64() if number > height+1 { f.queue.Push(op, -float32(op.block.NumberU64())) + if f.queueChangeHook != nil { + f.queueChangeHook(op.block.Hash(), true) + } break } // Otherwise if fresh and still unknown, try and import @@ -372,6 +379,9 @@ func (f *Fetcher) loop() { } f.announces[notification.origin] = count f.announced[notification.hash] = append(f.announced[notification.hash], notification) + if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 { + f.announceChangeHook(notification.hash, true) + } if len(f.announced) == 1 { f.rescheduleFetch(fetchTimer) } @@ -714,7 +724,9 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) { f.queues[peer] = count f.queued[hash] = op f.queue.Push(op, -float32(block.NumberU64())) - + if f.queueChangeHook != nil { + f.queueChangeHook(op.block.Hash(), true) + } if glog.V(logger.Debug) { glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size()) } @@ -781,7 +793,9 @@ func (f *Fetcher) forgetHash(hash common.Hash) { } } delete(f.announced, hash) - + if f.announceChangeHook != nil { + f.announceChangeHook(hash, false) + } // Remove any pending fetches and decrement the DOS counters if announce := f.fetching[hash]; announce != nil { f.announces[announce.origin]-- diff --git a/eth/fetcher/fetcher_test.go b/eth/fetcher/fetcher_test.go index 170a80aba..2404c8cfa 100644 --- a/eth/fetcher/fetcher_test.go +++ b/eth/fetcher/fetcher_test.go @@ -145,6 +145,9 @@ func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) { // dropPeer is an emulator for the peer removal, simply accumulating the various // peers dropped by the fetcher. 
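
The announceChangeHook/queueChangeHook fields added above exist purely for the tests further down: instead of reaching into the fetcher's internal maps from another goroutine (a data race), the tests register hooks that bump an atomic counter whenever a hash enters or leaves the announce list or the import queue. The pattern in miniature, using a cut-down hypothetical Fetcher rather than the real one:

package main

import (
	"fmt"
	"sync/atomic"
)

// Fetcher is a stand-in exposing only the test hook.
type Fetcher struct {
	queueChangeHook func(hash string, added bool)
}

func (f *Fetcher) enqueue(hash string) {
	// ... real queueing work would happen here ...
	if f.queueChangeHook != nil {
		f.queueChangeHook(hash, true)
	}
}

func main() {
	var queued int32
	f := &Fetcher{}
	f.queueChangeHook = func(hash string, added bool) {
		if added {
			atomic.AddInt32(&queued, 1)
		} else {
			atomic.AddInt32(&queued, -1)
		}
	}
	f.enqueue("0xabc")
	fmt.Println(atomic.LoadInt32(&queued)) // 1, observed without touching fetcher internals
}
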
func (f *fetcherTester) dropPeer(peer string) { + f.lock.Lock() + defer f.lock.Unlock() + f.drops[peer] = true } @@ -608,8 +611,11 @@ func TestDistantPropagationDiscarding(t *testing.T) { // Create a tester and simulate a head block being the middle of the above chain tester := newTester() + + tester.lock.Lock() tester.hashes = []common.Hash{head} tester.blocks = map[common.Hash]*types.Block{head: blocks[head]} + tester.lock.Unlock() // Ensure that a block with a lower number than the threshold is discarded tester.fetcher.Enqueue("lower", blocks[hashes[low]]) @@ -641,8 +647,11 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) { // Create a tester and simulate a head block being the middle of the above chain tester := newTester() + + tester.lock.Lock() tester.hashes = []common.Hash{head} tester.blocks = map[common.Hash]*types.Block{head: blocks[head]} + tester.lock.Unlock() headerFetcher := tester.makeHeaderFetcher(blocks, -gatherSlack) bodyFetcher := tester.makeBodyFetcher(blocks, 0) @@ -687,14 +696,22 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) { tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher) verifyImportEvent(t, imported, false) - if !tester.drops["bad"] { + tester.lock.RLock() + dropped := tester.drops["bad"] + tester.lock.RUnlock() + + if !dropped { t.Fatalf("peer with invalid numbered announcement not dropped") } // Make sure a good announcement passes without a drop tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), nil, headerFetcher, bodyFetcher) verifyImportEvent(t, imported, true) - if tester.drops["good"] { + tester.lock.RLock() + dropped = tester.drops["good"] + tester.lock.RUnlock() + + if dropped { t.Fatalf("peer with valid numbered announcement dropped") } verifyImportDone(t, imported) @@ -752,9 +769,15 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) { // Create a tester with instrumented import hooks tester := newTester() - imported := make(chan *types.Block) + imported, announces := make(chan *types.Block), int32(0) tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - + tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) { + if added { + atomic.AddInt32(&announces, 1) + } else { + atomic.AddInt32(&announces, -1) + } + } // Create a valid chain and an infinite junk chain targetBlocks := hashLimit + 2*maxQueueDist hashes, blocks := makeChain(targetBlocks, 0, genesis) @@ -782,8 +805,8 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) { tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), nil, attackerHeaderFetcher, attackerBodyFetcher) } } - if len(tester.fetcher.announced) != hashLimit+maxQueueDist { - t.Fatalf("queued announce count mismatch: have %d, want %d", len(tester.fetcher.announced), hashLimit+maxQueueDist) + if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist { + t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist) } // Wait for fetches to complete verifyImportCount(t, imported, maxQueueDist) @@ -807,9 +830,15 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) { // Create a tester with instrumented import hooks tester := newTester() - imported := make(chan *types.Block) + imported, enqueued := make(chan *types.Block), int32(0) tester.fetcher.importedHook = func(block *types.Block) { imported <- block } - + tester.fetcher.queueChangeHook = func(hash common.Hash, 
added bool) { + if added { + atomic.AddInt32(&enqueued, 1) + } else { + atomic.AddInt32(&enqueued, -1) + } + } // Create a valid chain and a batch of dangling (but in range) blocks targetBlocks := hashLimit + 2*maxQueueDist hashes, blocks := makeChain(targetBlocks, 0, genesis) @@ -825,7 +854,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) { tester.fetcher.Enqueue("attacker", block) } time.Sleep(200 * time.Millisecond) - if queued := tester.fetcher.queue.Size(); queued != blockLimit { + if queued := atomic.LoadInt32(&enqueued); queued != blockLimit { t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit) } // Queue up a batch of valid blocks, and check that a new peer is allowed to do so @@ -833,7 +862,7 @@ func TestBlockMemoryExhaustionAttack(t *testing.T) { tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]]) } time.Sleep(100 * time.Millisecond) - if queued := tester.fetcher.queue.Size(); queued != blockLimit+maxQueueDist-1 { + if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 { t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1) } // Insert the missing piece (and sanity check the import) diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index 9e7538fac..a5418e2e7 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -16,9 +16,9 @@ import ( func makeReceipt(addr common.Address) *types.Receipt { receipt := types.NewReceipt(nil, new(big.Int)) - receipt.SetLogs(vm.Logs{ + receipt.Logs = vm.Logs{ &vm.Log{Address: addr}, - }) + } receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) return receipt } @@ -41,7 +41,7 @@ func BenchmarkMipmaps(b *testing.B) { defer db.Close() genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr1, big.NewInt(1000000)}) - chain := core.GenerateChain(genesis, db, 100010, func(i int, gen *core.BlockGen) { + chain, receipts := core.GenerateChain(genesis, db, 100010, func(i int, gen *core.BlockGen) { var receipts types.Receipts switch i { case 2403: @@ -70,7 +70,7 @@ func BenchmarkMipmaps(b *testing.B) { } core.WriteMipmapBloom(db, uint64(i+1), receipts) }) - for _, block := range chain { + for i, block := range chain { core.WriteBlock(db, block) if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { b.Fatalf("failed to insert block number: %v", err) @@ -78,11 +78,10 @@ func BenchmarkMipmaps(b *testing.B) { if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { b.Fatalf("failed to insert block number: %v", err) } - if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil { + if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil { b.Fatal("error writing block receipts:", err) } } - b.ResetTimer() filter := New(db) @@ -118,47 +117,47 @@ func TestFilters(t *testing.T) { defer db.Close() genesis := core.WriteGenesisBlockForTesting(db, core.GenesisAccount{addr, big.NewInt(1000000)}) - chain := core.GenerateChain(genesis, db, 1000, func(i int, gen *core.BlockGen) { + chain, receipts := core.GenerateChain(genesis, db, 1000, func(i int, gen *core.BlockGen) { var receipts types.Receipts switch i { case 1: receipt := types.NewReceipt(nil, new(big.Int)) - receipt.SetLogs(vm.Logs{ + receipt.Logs = vm.Logs{ &vm.Log{ Address: addr, Topics: []common.Hash{hash1}, }, - }) + } gen.AddUncheckedReceipt(receipt) receipts = types.Receipts{receipt} case 2: receipt := types.NewReceipt(nil, new(big.Int)) - receipt.SetLogs(vm.Logs{ + 
receipt.Logs = vm.Logs{ &vm.Log{ Address: addr, Topics: []common.Hash{hash2}, }, - }) + } gen.AddUncheckedReceipt(receipt) receipts = types.Receipts{receipt} case 998: receipt := types.NewReceipt(nil, new(big.Int)) - receipt.SetLogs(vm.Logs{ + receipt.Logs = vm.Logs{ &vm.Log{ Address: addr, Topics: []common.Hash{hash3}, }, - }) + } gen.AddUncheckedReceipt(receipt) receipts = types.Receipts{receipt} case 999: receipt := types.NewReceipt(nil, new(big.Int)) - receipt.SetLogs(vm.Logs{ + receipt.Logs = vm.Logs{ &vm.Log{ Address: addr, Topics: []common.Hash{hash4}, }, - }) + } gen.AddUncheckedReceipt(receipt) receipts = types.Receipts{receipt} } @@ -173,7 +172,7 @@ func TestFilters(t *testing.T) { // by one core.WriteMipmapBloom(db, uint64(i+1), receipts) }) - for _, block := range chain { + for i, block := range chain { core.WriteBlock(db, block) if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { t.Fatalf("failed to insert block number: %v", err) @@ -181,7 +180,7 @@ func TestFilters(t *testing.T) { if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { t.Fatalf("failed to insert block number: %v", err) } - if err := core.PutBlockReceipts(db, block, block.Receipts()); err != nil { + if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil { t.Fatal("error writing block receipts:", err) } } diff --git a/eth/handler.go b/eth/handler.go index 725178035..7dc7de80e 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -84,6 +84,11 @@ type ProtocolManager struct { // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // with the ethereum network. func NewProtocolManager(fastSync bool, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) { + // Figure out whether to allow fast sync or not + if fastSync && blockchain.CurrentBlock().NumberU64() > 0 { + glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled") + fastSync = false + } // Create the protocol manager with the base fields manager := &ProtocolManager{ fastSync: fastSync, @@ -103,7 +108,7 @@ func NewProtocolManager(fastSync bool, networkId int, mux *event.TypeMux, txpool if fastSync && version < eth63 { continue } - // Compatible, initialize the sub-protocol + // Compatible; initialise the sub-protocol version := version // Closure for the run manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{ Name: "eth", @@ -120,13 +125,9 @@ func NewProtocolManager(fastSync bool, networkId int, mux *event.TypeMux, txpool return nil, errIncompatibleConfig } // Construct the different synchronisation mechanisms - syncMode := downloader.FullSync - if fastSync { - syncMode = downloader.FastSync - } - manager.downloader = downloader.New(syncMode, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, - blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, - blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback, manager.removePeer) + manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock, + blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, blockchain.GetTd, + blockchain.InsertHeaderChain, 
blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback, manager.removePeer) validator := func(block *types.Block, parent *types.Block) error { return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false) diff --git a/eth/handler_test.go b/eth/handler_test.go index 843b02fd4..ab2ce54b1 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -443,7 +443,9 @@ func testGetNodeData(t *testing.T, protocol int) { // Fetch for now the entire chain db hashes := []common.Hash{} for _, key := range pm.chaindb.(*ethdb.MemDatabase).Keys() { - hashes = append(hashes, common.BytesToHash(key)) + if len(key) == len(common.Hash{}) { + hashes = append(hashes, common.BytesToHash(key)) + } } p2p.Send(peer.app, 0x0d, hashes) msg, err := peer.app.ReadMsg() diff --git a/eth/metrics.go b/eth/metrics.go index cfab3bcb3..8231a06ff 100644 --- a/eth/metrics.go +++ b/eth/metrics.go @@ -101,7 +101,7 @@ func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) { packets, traffic = reqBlockInPacketsMeter, reqBlockInTrafficMeter case rw.version >= eth62 && msg.Code == BlockHeadersMsg: - packets, traffic = reqBlockInPacketsMeter, reqBlockInTrafficMeter + packets, traffic = reqHeaderInPacketsMeter, reqHeaderInTrafficMeter case rw.version >= eth62 && msg.Code == BlockBodiesMsg: packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter diff --git a/eth/sync.go b/eth/sync.go index 6295083e2..b69a24556 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/p2p/discover" @@ -165,5 +166,20 @@ func (pm *ProtocolManager) synchronise(peer *peer) { return } // Otherwise try to sync with the downloader - pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td()) + mode := downloader.FullSync + if pm.fastSync { + mode = downloader.FastSync + } + pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode) + + // If fast sync was enabled, and we synced up, disable it + if pm.fastSync { + for pm.downloader.Synchronising() { + time.Sleep(100 * time.Millisecond) + } + if pm.blockchain.CurrentBlock().NumberU64() > 0 { + glog.V(logger.Info).Infof("fast sync complete, auto disabling") + pm.fastSync = false + } + } } diff --git a/eth/sync_test.go b/eth/sync_test.go new file mode 100644 index 000000000..f3a6718ab --- /dev/null +++ b/eth/sync_test.go @@ -0,0 +1,53 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package eth + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/discover" +) + +// Tests that fast sync gets disabled as soon as a real block is successfully +// imported into the blockchain. +func TestFastSyncDisabling(t *testing.T) { + // Create a pristine protocol manager, check that fast sync is left enabled + pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil) + if !pmEmpty.fastSync { + t.Fatalf("fast sync disabled on pristine blockchain") + } + // Create a full protocol manager, check that fast sync gets disabled + pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil) + if pmFull.fastSync { + t.Fatalf("fast sync not disabled on non-empty blockchain") + } + // Sync up the two peers + io1, io2 := p2p.MsgPipe() + + go pmFull.handle(pmFull.newPeer(63, NetworkId, p2p.NewPeer(discover.NodeID{}, "empty", nil), io2)) + go pmEmpty.handle(pmEmpty.newPeer(63, NetworkId, p2p.NewPeer(discover.NodeID{}, "full", nil), io1)) + + time.Sleep(250 * time.Millisecond) + pmEmpty.synchronise(pmEmpty.peers.BestPeer()) + + // Check that fast sync was disabled + if pmEmpty.fastSync { + t.Fatalf("fast sync not disabled after successful synchronisation") + } +} -- cgit v1.2.3 From 8b81ad1fc40080af441c0c6df94f0b2ea46e320b Mon Sep 17 00:00:00 2001 From: zelig Date: Tue, 22 Sep 2015 10:34:58 +0200 Subject: console: * lines with leading space are ommitted from history * exit processed even with whitespace around * all whitespace lines (not only empty ones) are ignored add 7 missing commands to admin api autocomplete registrar: methods now return proper error if reg addresses are not set. fixes #1457 rpc/console: fix personal.newAccount() regression. Now all comms accept interactive password registrar: add registrar tests for errors crypto: catch AES decryption error on presale wallet import + fix error msg format. fixes #1580 CLI: improve error message when starting a second instance of geth. fixes #1564 cli/accounts: unlock multiple accounts. fixes #1785 * make unlocking multiple accounts work with inline <() fd * passwdfile now correctly read only once * improve logs * fix CLI help text for unlocking fix regression with docRoot / admin API * docRoot/jspath passed to rpc/api ParseApis, which passes onto adminApi * docRoot field for JS console in order to pass when RPC is (re)started * improve flag desc for jspath common/docserver: catch http errors from response fix rpc/api tests common/natspec: fix end to end test (skipped because takes 8s) registrar: fix major regression: * deploy registrars on frontier * register HashsReg and UrlHint in GlobalRegistrar. 
* set all 3 contract addresses in code * zero out addresses first in tests --- eth/backend.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index a4f656ecd..6ce0d0eb0 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -26,7 +26,9 @@ import ( "math/big" "os" "path/filepath" + "regexp" "strings" + "syscall" "time" "github.com/ethereum/ethash" @@ -62,6 +64,9 @@ const ( var ( jsonlogger = logger.NewJsonLogger() + datadirInUseErrNos = []uint{11, 32, 35} + portInUseErrRE = regexp.MustCompile("address already in use") + defaultBootNodes = []*discover.Node{ // ETH/DEV Go Bootnodes discover.MustParseNode("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"), // IE @@ -282,6 +287,17 @@ func New(config *Config) (*Ethereum, error) { // Open the chain database and perform any upgrades needed chainDb, err := newdb(filepath.Join(config.DataDir, "chaindata")) if err != nil { + var ok bool + errno := uint(err.(syscall.Errno)) + for _, no := range datadirInUseErrNos { + if errno == no { + ok = true + break + } + } + if ok { + err = fmt.Errorf("%v (check if another instance of geth is already running with the same data directory '%s')", err, config.DataDir) + } return nil, fmt.Errorf("blockchain db err: %v", err) } if db, ok := chainDb.(*ethdb.LDBDatabase); ok { @@ -296,6 +312,16 @@ func New(config *Config) (*Ethereum, error) { dappDb, err := newdb(filepath.Join(config.DataDir, "dapp")) if err != nil { + var ok bool + for _, no := range datadirInUseErrNos { + if uint(err.(syscall.Errno)) == no { + ok = true + break + } + } + if ok { + err = fmt.Errorf("%v (check if another instance of geth is already running with the same data directory '%s')", err, config.DataDir) + } return nil, fmt.Errorf("dapp db err: %v", err) } if db, ok := dappDb.(*ethdb.LDBDatabase); ok { @@ -553,6 +579,9 @@ func (s *Ethereum) Start() error { }) err := s.net.Start() if err != nil { + if portInUseErrRE.MatchString(err.Error()) { + err = fmt.Errorf("%v (possibly another instance of geth is using the same port)", err) + } return err } -- cgit v1.2.3 From 3cf74336c99a8a0ee18291edd61be9c9587d3c6a Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 22 Oct 2015 22:22:04 +0200 Subject: eth: time out status message exchange after 5s --- eth/peer.go | 36 +++++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 9 deletions(-) (limited to 'eth') diff --git a/eth/peer.go b/eth/peer.go index 68ce903a6..695e910f6 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -21,6 +21,7 @@ import ( "fmt" "math/big" "sync" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -38,8 +39,9 @@ var ( ) const ( - maxKnownTxs = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS) - maxKnownBlocks = 1024 // Maximum block hashes to keep in the known list (prevent DOS) + maxKnownTxs = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS) + maxKnownBlocks = 1024 // Maximum block hashes to keep in the known list (prevent DOS) + handshakeTimeout = 5 * time.Second ) type peer struct { @@ -267,8 +269,8 @@ func (p *peer) RequestReceipts(hashes []common.Hash) error { // Handshake executes the eth protocol handshake, negotiating version number, // network IDs, difficulties, head and genesis blocks. 
func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash) error { - // Send out own handshake in a new thread - errc := make(chan error, 1) + errc := make(chan error, 2) + var status statusData // safe to read after two values have been received from errc go func() { errc <- p2p.Send(p.rw, StatusMsg, &statusData{ ProtocolVersion: uint32(p.version), @@ -278,7 +280,26 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash) err GenesisBlock: genesis, }) }() - // In the mean time retrieve the remote status message + go func() { + errc <- p.readStatus(&status, genesis) + }() + timeout := time.NewTimer(handshakeTimeout) + defer timeout.Stop() + for i := 0; i < 2; i++ { + select { + case err := <-errc: + if err != nil { + return err + } + case <-timeout.C: + return p2p.DiscReadTimeout + } + } + p.td, p.head = status.TD, status.CurrentBlock + return nil +} + +func (p *peer) readStatus(status *statusData, genesis common.Hash) (err error) { msg, err := p.rw.ReadMsg() if err != nil { return err @@ -290,7 +311,6 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash) err return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) } // Decode the handshake and make sure everything matches - var status statusData if err := msg.Decode(&status); err != nil { return errResp(ErrDecode, "msg %v: %v", msg, err) } @@ -303,9 +323,7 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash) err if int(status.ProtocolVersion) != p.version { return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version) } - // Configure the remote peer, and sanity check out handshake too - p.td, p.head = status.TD, status.CurrentBlock - return <-errc + return nil } // String implements fmt.Stringer. 
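
The reworked Handshake above runs the outbound status send and the inbound status read in two goroutines feeding one buffered error channel, then waits for exactly two results under a shared timer, so a peer that never answers can no longer hang the handshake. The shape of that pattern, reduced to its essentials with placeholder send/read functions:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("handshake timed out")

// handshake runs send and read concurrently and fails if either errors or if
// both have not completed within timeout. errc is buffered so neither goroutine
// blocks after the function returns.
func handshake(send, read func() error, timeout time.Duration) error {
	errc := make(chan error, 2)
	go func() { errc <- send() }()
	go func() { errc <- read() }()

	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for i := 0; i < 2; i++ {
		select {
		case err := <-errc:
			if err != nil {
				return err
			}
		case <-timer.C:
			return errTimeout
		}
	}
	return nil
}

func main() {
	ok := func() error { return nil }
	slow := func() error { time.Sleep(time.Second); return nil }
	fmt.Println(handshake(ok, ok, 100*time.Millisecond))   // <nil>
	fmt.Println(handshake(ok, slow, 100*time.Millisecond)) // handshake timed out
}
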
-- cgit v1.2.3 From 4d005a2c1d2929dc770acd3a2bfed59495c70557 Mon Sep 17 00:00:00 2001 From: zelig Date: Mon, 26 Oct 2015 22:24:09 +0100 Subject: rpc api: eth_getNatSpec * xeth, rpc: implement eth_getNatSpec for tx confirmations * rename silly docserver -> httpclient * eth/backend: httpclient now accessible via eth.Ethereum init-d via config.DocRoot * cmd: introduce separate CLI flag for DocRoot (defaults to homedir) * common/path: delete unused assetpath func, separate HomeDir func --- eth/backend.go | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index 6ce0d0eb0..ee857e146 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/compiler" + "github.com/ethereum/go-ethereum/common/httpclient" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -106,6 +107,7 @@ type Config struct { LogJSON string VmDebug bool NatSpec bool + DocRoot string AutoDAG bool PowTest bool ExtraData []byte @@ -249,6 +251,8 @@ type Ethereum struct { GpobaseStepUp int GpobaseCorrectionFactor int + httpclient *httpclient.HTTPClient + net *p2p.Server eventMux *event.TypeMux miner *miner.Miner @@ -400,6 +404,7 @@ func New(config *Config) (*Ethereum, error) { GpobaseStepDown: config.GpobaseStepDown, GpobaseStepUp: config.GpobaseStepUp, GpobaseCorrectionFactor: config.GpobaseCorrectionFactor, + httpclient: httpclient.New(config.DocRoot), } if config.PowTest { @@ -702,6 +707,12 @@ func (self *Ethereum) StopAutoDAG() { glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir) } +// HTTPClient returns the light http client used for fetching offchain docs +// (natspec, source for verification) +func (self *Ethereum) HTTPClient() *httpclient.HTTPClient { + return self.httpclient +} + func (self *Ethereum) Solc() (*compiler.Solidity, error) { var err error if self.solc == nil { -- cgit v1.2.3 From ae1b5b3ff2611af1232643d38e13a77d704dae28 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 26 Oct 2015 21:42:24 +0100 Subject: eth, xeth: fix GasPriceOracle goroutine leak XEth.gpo was being initialized as needed. WithState copies the XEth struct including the gpo field. If gpo was nil at the time of the copy and Call or Transact were invoked on it, an additional GPO listenLoop would be spawned. Move the lazy initialization to GasPriceOracle instead so the same GPO instance is shared among all created XEths. Fixes #1317 Might help with #1930 --- eth/gasprice.go | 107 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 58 insertions(+), 49 deletions(-) (limited to 'eth') diff --git a/eth/gasprice.go b/eth/gasprice.go index b4409f346..b752c22dd 100644 --- a/eth/gasprice.go +++ b/eth/gasprice.go @@ -23,49 +23,66 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" ) -const gpoProcessPastBlocks = 100 +const ( + gpoProcessPastBlocks = 100 + + // for testing + gpoDefaultBaseCorrectionFactor = 110 + gpoDefaultMinGasPrice = 10000000000000 +) type blockPriceInfo struct { baseGasPrice *big.Int } +// GasPriceOracle recommends gas prices based on the content of recent +// blocks. 
type GasPriceOracle struct { - eth *Ethereum - chain *core.BlockChain - events event.Subscription + eth *Ethereum + initOnce sync.Once + minPrice *big.Int + lastBaseMutex sync.Mutex + lastBase *big.Int + + // state of listenLoop blocks map[uint64]*blockPriceInfo firstProcessed, lastProcessed uint64 - lastBaseMutex sync.Mutex - lastBase, minBase *big.Int + minBase *big.Int +} + +// NewGasPriceOracle returns a new oracle. +func NewGasPriceOracle(eth *Ethereum) *GasPriceOracle { + minprice := eth.GpoMinGasPrice + if minprice == nil { + minprice = big.NewInt(gpoDefaultMinGasPrice) + } + minbase := new(big.Int).Mul(minprice, big.NewInt(100)) + if eth.GpobaseCorrectionFactor > 0 { + minbase = minbase.Div(minbase, big.NewInt(int64(eth.GpobaseCorrectionFactor))) + } + return &GasPriceOracle{ + eth: eth, + blocks: make(map[uint64]*blockPriceInfo), + minBase: minbase, + minPrice: minprice, + lastBase: minprice, + } } -func NewGasPriceOracle(eth *Ethereum) (self *GasPriceOracle) { - self = &GasPriceOracle{} - self.blocks = make(map[uint64]*blockPriceInfo) - self.eth = eth - self.chain = eth.blockchain - self.events = eth.EventMux().Subscribe( - core.ChainEvent{}, - core.ChainSplitEvent{}, - ) - - minbase := new(big.Int).Mul(self.eth.GpoMinGasPrice, big.NewInt(100)) - minbase = minbase.Div(minbase, big.NewInt(int64(self.eth.GpobaseCorrectionFactor))) - self.minBase = minbase - - self.processPastBlocks() - go self.listenLoop() - return +func (gpo *GasPriceOracle) init() { + gpo.initOnce.Do(func() { + gpo.processPastBlocks(gpo.eth.BlockChain()) + go gpo.listenLoop() + }) } -func (self *GasPriceOracle) processPastBlocks() { +func (self *GasPriceOracle) processPastBlocks(chain *core.BlockChain) { last := int64(-1) - cblock := self.chain.CurrentBlock() + cblock := chain.CurrentBlock() if cblock != nil { last = int64(cblock.NumberU64()) } @@ -75,7 +92,7 @@ func (self *GasPriceOracle) processPastBlocks() { } self.firstProcessed = uint64(first) for i := first; i <= last; i++ { - block := self.chain.GetBlockByNumber(uint64(i)) + block := chain.GetBlockByNumber(uint64(i)) if block != nil { self.processBlock(block) } @@ -84,9 +101,10 @@ func (self *GasPriceOracle) processPastBlocks() { } func (self *GasPriceOracle) listenLoop() { - defer self.events.Unsubscribe() + events := self.eth.EventMux().Subscribe(core.ChainEvent{}, core.ChainSplitEvent{}) + defer events.Unsubscribe() - for event := range self.events.Chan() { + for event := range events.Chan() { switch event := event.Data.(type) { case core.ChainEvent: self.processBlock(event.Block) @@ -102,7 +120,7 @@ func (self *GasPriceOracle) processBlock(block *types.Block) { self.lastProcessed = i } - lastBase := self.eth.GpoMinGasPrice + lastBase := self.minPrice bpl := self.blocks[i-1] if bpl != nil { lastBase = bpl.baseGasPrice @@ -176,28 +194,19 @@ func (self *GasPriceOracle) lowestPrice(block *types.Block) *big.Int { return minPrice } +// SuggestPrice returns the recommended gas price. 
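
The goroutine-leak fix hinges on making the oracle a single shared instance whose background work starts exactly once, on first use, instead of whenever a copied struct happens to need it. sync.Once is what enforces that; a minimal sketch of the same shape, with an illustrative Oracle type rather than the real GasPriceOracle:

package main

import (
	"fmt"
	"sync"
)

type Oracle struct {
	initOnce sync.Once
	started  int // how many background loops were ever spawned
}

// init lazily starts the background listener; no matter how many callers race
// into SuggestPrice, the loop is spawned at most once for this Oracle value.
func (o *Oracle) init() {
	o.initOnce.Do(func() {
		o.started++
		// go o.listenLoop() would be started here in the real code
	})
}

func (o *Oracle) SuggestPrice() int {
	o.init()
	return 42 // placeholder recommendation
}

func main() {
	o := &Oracle{}
	for i := 0; i < 3; i++ {
		o.SuggestPrice()
	}
	fmt.Println(o.started) // 1
}
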
func (self *GasPriceOracle) SuggestPrice() *big.Int { + self.init() self.lastBaseMutex.Lock() - base := self.lastBase + price := new(big.Int).Set(self.lastBase) self.lastBaseMutex.Unlock() - if base == nil { - base = self.eth.GpoMinGasPrice + price.Mul(price, big.NewInt(int64(self.eth.GpobaseCorrectionFactor))) + price.Div(price, big.NewInt(100)) + if price.Cmp(self.minPrice) < 0 { + price.Set(self.minPrice) + } else if self.eth.GpoMaxGasPrice != nil && price.Cmp(self.eth.GpoMaxGasPrice) > 0 { + price.Set(self.eth.GpoMaxGasPrice) } - if base == nil { - return big.NewInt(10000000000000) // apparently MinGasPrice is not initialized during some tests - } - - baseCorr := new(big.Int).Mul(base, big.NewInt(int64(self.eth.GpobaseCorrectionFactor))) - baseCorr.Div(baseCorr, big.NewInt(100)) - - if baseCorr.Cmp(self.eth.GpoMinGasPrice) < 0 { - return self.eth.GpoMinGasPrice - } - - if baseCorr.Cmp(self.eth.GpoMaxGasPrice) > 0 { - return self.eth.GpoMaxGasPrice - } - - return baseCorr + return price } -- cgit v1.2.3 From 2019ed71b4758956a8bd9671c73ba98a244f4a7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 28 Oct 2015 16:41:01 +0200 Subject: eth: don't block sync goroutines that short circuit --- eth/downloader/downloader.go | 6 ++++-- eth/sync.go | 7 +++++-- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'eth') diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 4bcbd8557..153427ee4 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -248,10 +248,11 @@ func (d *Downloader) UnregisterPeer(id string) error { // Synchronise tries to sync up our local block chain with a remote peer, both // adding various sanity checks as well as wrapping it with various log entries. -func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) { +func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error { glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td) - switch err := d.synchronise(id, head, td, mode); err { + err := d.synchronise(id, head, td, mode) + switch err { case nil: glog.V(logger.Detail).Infof("Synchronisation completed") @@ -268,6 +269,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode default: glog.V(logger.Warn).Infof("Synchronisation failed: %v", err) } + return err } // synchronise will select the peer and use it for synchronising. 
If an empty string is given diff --git a/eth/sync.go b/eth/sync.go index b69a24556..bbf2abc04 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -170,13 +170,16 @@ func (pm *ProtocolManager) synchronise(peer *peer) { if pm.fastSync { mode = downloader.FastSync } - pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode) - + if err := pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), mode); err != nil { + return + } // If fast sync was enabled, and we synced up, disable it if pm.fastSync { + // Wait until all pending imports finish processing for pm.downloader.Synchronising() { time.Sleep(100 * time.Millisecond) } + // Disable fast sync if we indeed have something in our chain if pm.blockchain.CurrentBlock().NumberU64() > 0 { glog.V(logger.Info).Infof("fast sync complete, auto disabling") pm.fastSync = false -- cgit v1.2.3 From fbdb44dcc17240a01b45e55d3aa4e4b8db0868cd Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Thu, 29 Oct 2015 13:28:00 +0100 Subject: cmd/utils, rpc/comms: stop XEth when IPC connection ends There are a bunch of changes required to make this work: - in miner: allow unregistering agents, fix RemoteAgent.Stop - in eth/filters: make FilterSystem.Stop not crash - in rpc/comms: move listen loop to platform-independent code Fixes #1930. I ran the shell loop there for a few minutes and didn't see any changes in the memory profile. --- eth/filters/filter_system.go | 88 ++++++++++++++++++-------------------------- 1 file changed, 35 insertions(+), 53 deletions(-) (limited to 'eth') diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index ae6093525..df3ce90c6 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -31,30 +31,32 @@ import ( // block, transaction and log events. The Filtering system can be used to listen // for specific LOG events fired by the EVM (Ethereum Virtual Machine). type FilterSystem struct { - eventMux *event.TypeMux - filterMu sync.RWMutex filterId int filters map[int]*Filter created map[int]time.Time - - quit chan struct{} + sub event.Subscription } // NewFilterSystem returns a newly allocated filter manager func NewFilterSystem(mux *event.TypeMux) *FilterSystem { fs := &FilterSystem{ - eventMux: mux, - filters: make(map[int]*Filter), - created: make(map[int]time.Time), + filters: make(map[int]*Filter), + created: make(map[int]time.Time), } + fs.sub = mux.Subscribe( + //core.PendingBlockEvent{}, + core.ChainEvent{}, + core.TxPreEvent{}, + vm.Logs(nil), + ) go fs.filterLoop() return fs } // Stop quits the filter loop required for polling events func (fs *FilterSystem) Stop() { - close(fs.quit) + fs.sub.Unsubscribe() } // Add adds a filter to the filter manager @@ -89,57 +91,37 @@ func (fs *FilterSystem) Get(id int) *Filter { // filterLoop waits for specific events from ethereum and fires their handlers // when the filter matches the requirements. 
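
The filter system refactor replaces the explicit quit channel with the event subscription itself: the loop below simply ranges over sub.Chan(), and Stop only has to call Unsubscribe, which closes that channel and lets the loop drain and exit naturally. In isolation the lifecycle looks like this (generic event type and a stand-in subscription, not the go-ethereum event mux):

package main

import (
	"fmt"
	"sync"
)

// subscription is a minimal stand-in for event.Subscription: a channel of
// events plus an Unsubscribe that closes it exactly once.
type subscription struct {
	ch   chan string
	once sync.Once
}

func (s *subscription) Chan() <-chan string { return s.ch }
func (s *subscription) Unsubscribe()        { s.once.Do(func() { close(s.ch) }) }

func main() {
	sub := &subscription{ch: make(chan string, 4)}

	done := make(chan struct{})
	go func() {
		defer close(done)
		for ev := range sub.Chan() { // loop exits once Unsubscribe closes the channel
			fmt.Println("handled", ev)
		}
	}()

	sub.ch <- "ChainEvent"
	sub.Unsubscribe() // this is all Stop() has to do now
	<-done
}
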
func (fs *FilterSystem) filterLoop() { - // Subscribe to events - eventCh := fs.eventMux.Subscribe( - //core.PendingBlockEvent{}, - core.ChainEvent{}, - core.TxPreEvent{}, - vm.Logs(nil), - ).Chan() - -out: - for { - select { - case <-fs.quit: - break out - case event, ok := <-eventCh: - if !ok { - // Event subscription closed, set the channel to nil to stop spinning - eventCh = nil - continue - } - // A real event arrived, notify the registered filters - switch ev := event.Data.(type) { - case core.ChainEvent: - fs.filterMu.RLock() - for id, filter := range fs.filters { - if filter.BlockCallback != nil && fs.created[id].Before(event.Time) { - filter.BlockCallback(ev.Block, ev.Logs) - } + for event := range fs.sub.Chan() { + switch ev := event.Data.(type) { + case core.ChainEvent: + fs.filterMu.RLock() + for id, filter := range fs.filters { + if filter.BlockCallback != nil && fs.created[id].Before(event.Time) { + filter.BlockCallback(ev.Block, ev.Logs) } - fs.filterMu.RUnlock() + } + fs.filterMu.RUnlock() - case core.TxPreEvent: - fs.filterMu.RLock() - for id, filter := range fs.filters { - if filter.TransactionCallback != nil && fs.created[id].Before(event.Time) { - filter.TransactionCallback(ev.Tx) - } + case core.TxPreEvent: + fs.filterMu.RLock() + for id, filter := range fs.filters { + if filter.TransactionCallback != nil && fs.created[id].Before(event.Time) { + filter.TransactionCallback(ev.Tx) } - fs.filterMu.RUnlock() - - case vm.Logs: - fs.filterMu.RLock() - for id, filter := range fs.filters { - if filter.LogsCallback != nil && fs.created[id].Before(event.Time) { - msgs := filter.FilterLogs(ev) - if len(msgs) > 0 { - filter.LogsCallback(msgs) - } + } + fs.filterMu.RUnlock() + + case vm.Logs: + fs.filterMu.RLock() + for id, filter := range fs.filters { + if filter.LogsCallback != nil && fs.created[id].Before(event.Time) { + msgs := filter.FilterLogs(ev) + if len(msgs) > 0 { + filter.LogsCallback(msgs) } } - fs.filterMu.RUnlock() } + fs.filterMu.RUnlock() } } } -- cgit v1.2.3 From 1bc789553aae0b269c5be38df83950fada53237b Mon Sep 17 00:00:00 2001 From: Jeffrey Wilcke Date: Fri, 30 Oct 2015 10:01:19 +0100 Subject: eth: added new testnet peers --- eth/backend.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index ee857e146..4bd0eb371 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -78,7 +78,8 @@ var ( } defaultTestNetBootNodes = []*discover.Node{ - discover.MustParseNode("enode://5374c1bff8df923d3706357eeb4983cd29a63be40a269aaa2296ee5f3b2119a8978c0ed68b8f6fc84aad0df18790417daadf91a4bfbb786a16c9b0a199fa254a@92.51.165.126:30303"), + discover.MustParseNode("enode://e4533109cc9bd7604e4ff6c095f7a1d807e15b38e9bfeb05d3b7c423ba86af0a9e89abbf40bd9dde4250fef114cd09270fa4e224cbeef8b7bf05a51e8260d6b8@94.242.229.4:40404"), + discover.MustParseNode("enode://8c336ee6f03e99613ad21274f269479bf4413fb294d697ef15ab897598afb931f56beb8e97af530aee20ce2bcba5776f4a312bc168545de4d43736992c814592@94.242.229.203:30303"), } staticNodes = "static-nodes.json" // Path within to search for the static node list -- cgit v1.2.3 From 3c6e285d3bfa03935f4f346a2cb32236143e2fca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 29 Oct 2015 19:53:24 +0200 Subject: cmd/geth, cmd/utils, eth: group CLI flags by purpose --- eth/backend.go | 6 ------ 1 file changed, 6 deletions(-) (limited to 'eth') diff --git a/eth/backend.go b/eth/backend.go index ee857e146..72487457a 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -91,7 
+91,6 @@ type Config struct { Name string NetworkId int - GenesisNonce int GenesisFile string GenesisBlock *types.Block // used by block tests FastSync bool @@ -104,7 +103,6 @@ type Config struct { DataDir string LogFile string Verbosity int - LogJSON string VmDebug bool NatSpec bool DocRoot string @@ -273,11 +271,7 @@ type Ethereum struct { } func New(config *Config) (*Ethereum, error) { - // Bootstrap database logger.New(config.DataDir, config.LogFile, config.Verbosity) - if len(config.LogJSON) > 0 { - logger.NewJSONsystem(config.DataDir, config.LogJSON) - } // Let the database take 3/4 of the max open files (TODO figure out a way to get the actual limit of the open files) const dbCount = 3 -- cgit v1.2.3