 .gitignore                     |   2
 containers/vagrant/Vagrantfile |  29
 core/state/iterator.go         | 160
 core/state/iterator_test.go    |  57
 core/state/sync_test.go        | 100
 eth/api.go                     |  18
 eth/downloader/downloader.go   |  30
 ethdb/database.go              |   4
 p2p/discover/ntp.go            | 127
 p2p/discover/udp.go            |  26
 trie/iterator.go               | 141
 trie/iterator_test.go          |  32
 trie/sync_test.go              |  95
 13 files changed, 795 insertions(+), 26 deletions(-)
diff --git a/.gitignore b/.gitignore
index 3b34d32c2..e8e10db2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,3 +35,5 @@ cmd/mist/assets/ext/ethereum.js/
profile.tmp
profile.cov
+# vagrant
+.vagrant
diff --git a/containers/vagrant/Vagrantfile b/containers/vagrant/Vagrantfile
new file mode 100644
index 000000000..5d263eb76
--- /dev/null
+++ b/containers/vagrant/Vagrantfile
@@ -0,0 +1,29 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+ config.vm.box = "ubuntu/trusty64"
+
+ config.vm.provider "virtualbox" do |vb|
+ vb.memory = "2048"
+ end
+
+ config.vm.synced_folder "../../", "/home/vagrant/go/src/github.com/ethereum/go-ethereum"
+ config.vm.synced_folder ".", "/vagrant", disabled: true
+
+ config.vm.provision "shell", inline: <<-SHELL
+ sudo apt-get install software-properties-common
+ sudo add-apt-repository -y ppa:ethereum/ethereum
+ sudo add-apt-repository -y ppa:ethereum/ethereum-dev
+ sudo apt-get update
+
+ sudo apt-get install -y build-essential golang git-all
+
+ GOPATH=/home/vagrant/go go get github.com/tools/godep
+
+ sudo chown -R vagrant:vagrant ~vagrant/go
+
+ echo "export GOPATH=/home/vagrant/go" >> ~vagrant/.bashrc
+ echo "export PATH=\\\$PATH:\\\$GOPATH/bin:/usr/local/go/bin" >> ~vagrant/.bashrc
+ SHELL
+end
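Usage, inferred from the file itself rather than stated anywhere in this diff: running "vagrant up" from containers/vagrant should provision the Ubuntu 14.04 box with Go, godep and the ethereum/ethereum PPAs, with the repository mounted inside the VM at /home/vagrant/go/src/github.com/ethereum/go-ethereum.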
diff --git a/core/state/iterator.go b/core/state/iterator.go
new file mode 100644
index 000000000..9d8a69b7c
--- /dev/null
+++ b/core/state/iterator.go
@@ -0,0 +1,160 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+// NodeIterator is an iterator to traverse the entire state trie post-order,
+// including all of the contract code and contract state tries.
+type NodeIterator struct {
+ state *StateDB // State being iterated
+
+ stateIt *trie.NodeIterator // Primary iterator for the global state trie
+ dataIt *trie.NodeIterator // Secondary iterator for the data trie of a contract
+
+ accountHash common.Hash // Hash of the node containing the account
+ codeHash common.Hash // Hash of the contract source code
+ code []byte // Source code associated with a contract
+
+ Hash common.Hash // Hash of the current entry being iterated (nil if not standalone)
+ Entry interface{} // Current state entry being iterated (internal representation)
+ Parent common.Hash // Hash of the first full ancestor node (nil if current is the root)
+
+ Error error // Failure set in case of an internal error in the iterator
+}
+
+// NewNodeIterator creates a post-order state node iterator.
+func NewNodeIterator(state *StateDB) *NodeIterator {
+ return &NodeIterator{
+ state: state,
+ }
+}
+
+// Next moves the iterator to the next node, returning whether there are any
+// further nodes. In case of an internal error this method returns false and
+// sets the Error field to the encountered failure.
+func (it *NodeIterator) Next() bool {
+ // If the iterator failed previously, don't do anything
+ if it.Error != nil {
+ return false
+ }
+ // Otherwise step forward with the iterator and report any errors
+ if err := it.step(); err != nil {
+ it.Error = err
+ return false
+ }
+ return it.retrieve()
+}
+
+// step moves the iterator to the next entry of the state trie.
+func (it *NodeIterator) step() error {
+ // Abort if we reached the end of the iteration
+ if it.state == nil {
+ return nil
+ }
+ // Initialize the iterator if we've just started
+ if it.stateIt == nil {
+ it.stateIt = trie.NewNodeIterator(it.state.trie.Trie)
+ }
+ // If we had data nodes previously, we surely have at least state nodes
+ if it.dataIt != nil {
+ if cont := it.dataIt.Next(); !cont {
+ if it.dataIt.Error != nil {
+ return it.dataIt.Error
+ }
+ it.dataIt = nil
+ }
+ return nil
+ }
+ // If we had source code previously, discard that
+ if it.code != nil {
+ it.code = nil
+ return nil
+ }
+ // Step to the next state trie node, terminating if we're out of nodes
+ if cont := it.stateIt.Next(); !cont {
+ if it.stateIt.Error != nil {
+ return it.stateIt.Error
+ }
+ it.state, it.stateIt = nil, nil
+ return nil
+ }
+ // If the state trie node is an internal entry, leave as is
+ if !it.stateIt.Leaf {
+ return nil
+ }
+ // Otherwise we've reached an account node, initiate data iteration
+ var account struct {
+ Nonce uint64
+ Balance *big.Int
+ Root common.Hash
+ CodeHash []byte
+ }
+ if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob), &account); err != nil {
+ return err
+ }
+ dataTrie, err := trie.New(account.Root, it.state.db)
+ if err != nil {
+ return err
+ }
+ it.dataIt = trie.NewNodeIterator(dataTrie)
+ if !it.dataIt.Next() {
+ it.dataIt = nil
+ }
+ if bytes.Compare(account.CodeHash, emptyCodeHash) != 0 {
+ it.codeHash = common.BytesToHash(account.CodeHash)
+ it.code, err = it.state.db.Get(account.CodeHash)
+ if err != nil {
+ return fmt.Errorf("code %x: %v", account.CodeHash, err)
+ }
+ }
+ it.accountHash = it.stateIt.Parent
+ return nil
+}
+
+// retrieve pulls and caches the current state entry the iterator is traversing.
+// The method returns whether there is any data left for inspection.
+func (it *NodeIterator) retrieve() bool {
+ // Clear out any previously set values
+ it.Hash, it.Entry = common.Hash{}, nil
+
+ // If the iteration's done, return no available data
+ if it.state == nil {
+ return false
+ }
+ // Otherwise retrieve the current entry
+ switch {
+ case it.dataIt != nil:
+ it.Hash, it.Entry, it.Parent = it.dataIt.Hash, it.dataIt.Node, it.dataIt.Parent
+ if it.Parent == (common.Hash{}) {
+ it.Parent = it.accountHash
+ }
+ case it.code != nil:
+ it.Hash, it.Entry, it.Parent = it.codeHash, it.code, it.accountHash
+ case it.stateIt != nil:
+ it.Hash, it.Entry, it.Parent = it.stateIt.Hash, it.stateIt.Node, it.stateIt.Parent
+ }
+ return true
+}
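To illustrate the intended calling convention (a minimal sketch with a hypothetical helper name, mirroring the coverage test below rather than code from this change): iterate with Next until exhaustion, then consult the Error field.

// stateNodeHashes is a hypothetical package-level helper sketching how the
// iterator is driven; embedded entries report an empty hash and are skipped.
func stateNodeHashes(state *StateDB) ([]common.Hash, error) {
	var hashes []common.Hash
	it := NewNodeIterator(state)
	for it.Next() {
		if it.Hash != (common.Hash{}) {
			hashes = append(hashes, it.Hash)
		}
	}
	return hashes, it.Error
}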
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
new file mode 100644
index 000000000..8b68870c6
--- /dev/null
+++ b/core/state/iterator_test.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// Tests that the node iterator indeed walks over the entire database contents.
+func TestNodeIteratorCoverage(t *testing.T) {
+ // Create some arbitrary test state to iterate
+ db, root, _ := makeTestState()
+
+ state, err := New(root, db)
+ if err != nil {
+ t.Fatalf("failed to create state trie at %x: %v", root, err)
+ }
+ // Gather all the node hashes found by the iterator
+ hashes := make(map[common.Hash]struct{})
+ for it := NewNodeIterator(state); it.Next(); {
+ if it.Hash != (common.Hash{}) {
+ hashes[it.Hash] = struct{}{}
+ }
+ }
+ // Cross check the hashes and the database itself
+ for hash := range hashes {
+ if _, err := db.Get(hash.Bytes()); err != nil {
+ t.Errorf("failed to retrieve reported node %x: %v", hash, err)
+ }
+ }
+ for _, key := range db.(*ethdb.MemDatabase).Keys() {
+ if bytes.HasPrefix(key, []byte("secure-key-")) {
+ continue
+ }
+ if _, ok := hashes[common.BytesToHash(key)]; !ok {
+ t.Errorf("state entry not reported %x", key)
+ }
+ }
+}
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index 0dab372ba..a2a1edbdb 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -22,6 +22,7 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
)
@@ -42,7 +43,7 @@ func makeTestState() (ethdb.Database, common.Hash, []*testAccount) {
// Fill it with some arbitrary data
accounts := []*testAccount{}
- for i := byte(0); i < 255; i++ {
+ for i := byte(0); i < 96; i++ {
obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
acc := &testAccount{address: common.BytesToAddress([]byte{i})}
@@ -61,6 +62,9 @@ func makeTestState() (ethdb.Database, common.Hash, []*testAccount) {
}
root, _ := state.Commit()
+ // Remove any potentially cached data from the test state creation
+ trie.ClearGlobalCache()
+
// Return the generated state
return db, root, accounts
}
@@ -68,9 +72,18 @@ func makeTestState() (ethdb.Database, common.Hash, []*testAccount) {
// checkStateAccounts cross references a reconstructed state with an expected
// account array.
func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) {
- state, _ := New(root, db)
- for i, acc := range accounts {
+ // Remove any potentially cached data from the state synchronisation
+ trie.ClearGlobalCache()
+ // Check root availability and state contents
+ state, err := New(root, db)
+ if err != nil {
+ t.Fatalf("failed to create state trie at %x: %v", root, err)
+ }
+ if err := checkStateConsistency(db, root); err != nil {
+ t.Fatalf("inconsistent state trie at %x: %v", root, err)
+ }
+ for i, acc := range accounts {
if balance := state.GetBalance(acc.address); balance.Cmp(acc.balance) != 0 {
t.Errorf("account %d: balance mismatch: have %v, want %v", i, balance, acc.balance)
}
@@ -83,6 +96,25 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accou
}
}
+// checkStateConsistency checks that all nodes in a state trie are indeed present.
+func checkStateConsistency(db ethdb.Database, root common.Hash) error {
+ // Remove any potentially cached data from the test state creation or previous checks
+ trie.ClearGlobalCache()
+
+ // Create and iterate a state trie rooted in a sub-node
+ if _, err := db.Get(root.Bytes()); err != nil {
+ return nil // Consider a non-existent state consistent
+ }
+ state, err := New(root, db)
+ if err != nil {
+ return err
+ }
+ it := NewNodeIterator(state)
+ for it.Next() {
+ }
+ return it.Error
+}
+
// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
@@ -236,3 +268,65 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
// Cross check that the two states are in sync
checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
}
+
+// Tests that at any point in time during a sync, only complete sub-tries are in
+// the database.
+func TestIncompleteStateSync(t *testing.T) {
+ // Create a random state to copy
+ srcDb, srcRoot, srcAccounts := makeTestState()
+
+ // Create a destination state and sync with the scheduler
+ dstDb, _ := ethdb.NewMemDatabase()
+ sched := NewStateSync(srcRoot, dstDb)
+
+ added := []common.Hash{}
+ queue := append([]common.Hash{}, sched.Missing(1)...)
+ for len(queue) > 0 {
+ // Fetch a batch of state nodes
+ results := make([]trie.SyncResult, len(queue))
+ for i, hash := range queue {
+ data, err := srcDb.Get(hash.Bytes())
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+ }
+ results[i] = trie.SyncResult{hash, data}
+ }
+ // Process each of the state nodes
+ if index, err := sched.Process(results); err != nil {
+ t.Fatalf("failed to process result #%d: %v", index, err)
+ }
+ for _, result := range results {
+ added = append(added, result.Hash)
+ }
+ // Check that all known sub-tries in the synced state are complete
+ for _, root := range added {
+ // Skim through the accounts and make sure the root hash is not a code node
+ codeHash := false
+ for _, acc := range srcAccounts {
+ if bytes.Compare(root.Bytes(), crypto.Sha3(acc.code)) == 0 {
+ codeHash = true
+ break
+ }
+ }
+ // If the root is a real trie node, check consistency
+ if !codeHash {
+ if err := checkStateConsistency(dstDb, root); err != nil {
+ t.Fatalf("state inconsistent: %v", err)
+ }
+ }
+ }
+ // Fetch the next batch to retrieve
+ queue = append(queue[:0], sched.Missing(1)...)
+ }
+ // Sanity check that removing any node from the database is detected
+ for _, node := range added[1:] {
+ key := node.Bytes()
+ value, _ := dstDb.Get(key)
+
+ dstDb.Delete(key)
+ if err := checkStateConsistency(dstDb, added[0]); err == nil {
+ t.Fatalf("trie inconsistency not caught, missing: %x", key)
+ }
+ dstDb.Put(key, value)
+ }
+}
diff --git a/eth/api.go b/eth/api.go
index b4815caae..cfbafd79f 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -762,7 +762,7 @@ type RPCTransaction struct {
// newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation
func newRPCPendingTransaction(tx *types.Transaction) *RPCTransaction {
- from, _ := tx.From()
+ from, _ := tx.FromFrontier()
return &RPCTransaction{
From: from,
@@ -780,7 +780,7 @@ func newRPCPendingTransaction(tx *types.Transaction) *RPCTransaction {
func newRPCTransactionFromBlockIndex(b *types.Block, txIndex int) (*RPCTransaction, error) {
if txIndex >= 0 && txIndex < len(b.Transactions()) {
tx := b.Transactions()[txIndex]
- from, err := tx.From()
+ from, err := tx.FromFrontier()
if err != nil {
return nil, err
}
@@ -970,7 +970,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (ma
return nil, nil
}
- from, err := tx.From()
+ from, err := tx.FromFrontier()
if err != nil {
glog.V(logger.Debug).Infof("%v\n", err)
return nil, nil
@@ -1084,7 +1084,7 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(encodedTx string) (string,
}
if tx.To() == nil {
- from, err := tx.From()
+ from, err := tx.FromFrontier()
if err != nil {
return "", err
}
@@ -1190,7 +1190,7 @@ type SignTransactionResult struct {
}
func newTx(t *types.Transaction) *Tx {
- from, _ := t.From()
+ from, _ := t.FromFrontier()
return &Tx{
tx: t,
To: t.To(),
@@ -1263,7 +1263,7 @@ func (s *PublicTransactionPoolAPI) PendingTransactions() ([]*RPCTransaction, err
pending := s.txPool.GetTransactions()
transactions := make([]*RPCTransaction, 0)
for _, tx := range pending {
- if from, _ := tx.From(); accountSet.Has(from) {
+ if from, _ := tx.FromFrontier(); accountSet.Has(from) {
transactions = append(transactions, newRPCPendingTransaction(tx))
}
}
@@ -1298,7 +1298,7 @@ func (s *PublicTransactionPoolAPI) NewPendingTransactions() (rpc.Subscription, e
}
tx := transaction.(core.TxPreEvent)
- if from, err := tx.Tx.From(); err == nil {
+ if from, err := tx.Tx.FromFrontier(); err == nil {
if accountSet.Has(from) {
return tx.Tx.Hash()
}
@@ -1315,7 +1315,7 @@ func (s *PublicTransactionPoolAPI) Resend(tx *Tx, gasPrice, gasLimit *rpc.HexNum
pending := s.txPool.GetTransactions()
for _, p := range pending {
- if pFrom, err := p.From(); err == nil && pFrom == tx.From && p.SigHash() == tx.tx.SigHash() {
+ if pFrom, err := p.FromFrontier(); err == nil && pFrom == tx.From && p.SigHash() == tx.tx.SigHash() {
if gasPrice == nil {
gasPrice = rpc.NewHexNumber(tx.tx.GasPrice())
}
@@ -1589,7 +1589,7 @@ func (s *PrivateDebugAPI) doReplayTransaction(txHash common.Hash) ([]vm.StructLo
return nil, nil, nil, err
}
- txFrom, err := tx.From()
+ txFrom, err := tx.FromFrontier()
if err != nil {
return nil, nil, nil, fmt.Errorf("Unable to create transaction sender")
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 6dad6a2cd..de54bd859 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -571,8 +571,14 @@ func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
// Check if a common ancestor was found
finished = true
for i := len(hashes) - 1; i >= 0; i-- {
+ // Skip any headers that underflow/overflow our requested set
+ header := d.getHeader(hashes[i])
+ if header == nil || header.Number.Int64() < from || header.Number.Uint64() > head {
+ continue
+ }
+ // Otherwise check if we already know the header or not
if d.hasBlockAndState(hashes[i]) {
- number, hash = uint64(from)+uint64(i), hashes[i]
+ number, hash = header.Number.Uint64(), header.Hash()
break
}
}
@@ -990,12 +996,28 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
// Make sure the peer actually gave something valid
headers := packet.(*headerPack).headers
if len(headers) == 0 {
- glog.V(logger.Debug).Infof("%v: empty head header set", p)
+ glog.V(logger.Warn).Infof("%v: empty head header set", p)
return 0, errEmptyHeaderSet
}
+ // Make sure the peer's reply conforms to the request
+ for i := 0; i < len(headers); i++ {
+ if number := headers[i].Number.Int64(); number != from+int64(i) {
+ glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i), number)
+ return 0, errInvalidChain
+ }
+ if i > 0 && headers[i-1].Hash() != headers[i].ParentHash {
+ glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ancestry: expected [%x], got [%x]", p, i, headers[i-1].Hash().Bytes()[:4], headers[i].ParentHash[:4])
+ return 0, errInvalidChain
+ }
+ }
// Check if a common ancestor was found
finished = true
for i := len(headers) - 1; i >= 0; i-- {
+ // Skip any headers that underflow/overflow our requested set
+ if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > head {
+ continue
+ }
+ // Otherwise check if we already know the header or not
if (d.mode != LightSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode == LightSync && d.hasHeader(headers[i].Hash())) {
number, hash = headers[i].Number.Uint64(), headers[i].Hash()
break
@@ -1206,6 +1228,10 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
frequency = 1
}
if n, err := d.insertHeaders(headers, frequency); err != nil {
+ // If some headers were inserted, add them too to the rollback list
+ if n > 0 {
+ rollback = append(rollback, headers[:n]...)
+ }
glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headers[n].Number, headers[n].Hash().Bytes()[:4], err)
return errInvalidChain
}
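Read in isolation, the reply-conformance check added to findAncestor amounts to the following predicate (a sketch under the assumption of the usual types.Header fields, not code from this change):

// checkHeaderChain is a hypothetical standalone version of the validation
// above: headers must be numbered contiguously from the requested origin and
// each must reference its predecessor by parent hash.
func checkHeaderChain(headers []*types.Header, from int64) error {
	for i, header := range headers {
		if number := header.Number.Int64(); number != from+int64(i) {
			return fmt.Errorf("item %d broke chain ordering: requested %d, got %d", i, from+int64(i), number)
		}
		if i > 0 && headers[i-1].Hash() != header.ParentHash {
			return fmt.Errorf("item %d broke chain ancestry", i)
		}
	}
	return nil
}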
diff --git a/ethdb/database.go b/ethdb/database.go
index 047821c30..10dc018b0 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -168,6 +168,10 @@ func (self *LDBDatabase) LDB() *leveldb.DB {
// Meter configures the database metrics collectors.
func (self *LDBDatabase) Meter(prefix string) {
+ // Short circuit metering if the metrics system is disabled
+ if !metrics.Enabled {
+ return
+ }
// Initialize all the metrics collectors at the requested prefix
self.getTimer = metrics.NewTimer(prefix + "user/gets")
self.putTimer = metrics.NewTimer(prefix + "user/puts")
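For context, a hedged sketch of the call shape (the constructor signature and metrics prefix are illustrative assumptions, not shown in this diff); with the new guard, the Meter call becomes a cheap no-op when metrics collection is globally disabled:

// Hypothetical call site: open a database handle and opt into metering.
db, err := ethdb.NewLDBDatabase("/tmp/chaindata", 16) // path and cache size are placeholders
if err != nil {
	return err
}
db.Meter("eth/db/chaindata/") // returns immediately unless metrics.Enabled is set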
diff --git a/p2p/discover/ntp.go b/p2p/discover/ntp.go
new file mode 100644
index 000000000..c1a4b3af1
--- /dev/null
+++ b/p2p/discover/ntp.go
@@ -0,0 +1,127 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Contains the NTP time drift detection via the SNTP protocol:
+// https://tools.ietf.org/html/rfc4330
+
+package discover
+
+import (
+ "fmt"
+ "net"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/ethereum/go-ethereum/logger"
+ "github.com/ethereum/go-ethereum/logger/glog"
+)
+
+const (
+ ntpPool = "pool.ntp.org" // ntpPool is the NTP server to query for the current time
+ ntpChecks = 3 // Number of measurements to do against the NTP server
+)
+
+// durationSlice attaches the methods of sort.Interface to []time.Duration,
+// sorting in increasing order.
+type durationSlice []time.Duration
+
+func (s durationSlice) Len() int { return len(s) }
+func (s durationSlice) Less(i, j int) bool { return s[i] < s[j] }
+func (s durationSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// checkClockDrift queries an NTP server for clock drifts and warns the user if
+// one large enough is detected.
+func checkClockDrift() {
+ drift, err := sntpDrift(ntpChecks)
+ if err != nil {
+ return
+ }
+ if drift < -driftThreshold || drift > driftThreshold {
+ warning := fmt.Sprintf("System clock seems off by %v, which can prevent network connectivity", drift)
+ howtofix := "Please enable network time synchronisation in system settings"
+ separator := strings.Repeat("-", len(warning))
+
+ glog.V(logger.Warn).Info(separator)
+ glog.V(logger.Warn).Info(warning)
+ glog.V(logger.Warn).Info(howtofix)
+ glog.V(logger.Warn).Info(separator)
+ } else {
+ glog.V(logger.Debug).Infof("Sanity NTP check reported %v drift, all ok", drift)
+ }
+}
+
+// sntpDrift does a naive time resolution against an NTP server and returns the
+// measured drift. This method uses the simple version of NTP. It's not precise
+// but should be fine for these purposes.
+//
+// Note, it executes two extra measurements compared to the number of requested
+// ones to be able to discard the two extremes as outliers.
+func sntpDrift(measurements int) (time.Duration, error) {
+ // Resolve the address of the NTP server
+ addr, err := net.ResolveUDPAddr("udp", ntpPool+":123")
+ if err != nil {
+ return 0, err
+ }
+ // Construct the time request (empty packet with only 2 fields set):
+ // Bits 3-5: Protocol version, 3
+ // Bits 6-8: Mode of operation, client, 3
+ request := make([]byte, 48)
+ request[0] = 3<<3 | 3
+
+ // Execute each of the measurements
+ drifts := []time.Duration{}
+ for i := 0; i < measurements+2; i++ {
+ // Dial the NTP server and send the time retrieval request
+ conn, err := net.DialUDP("udp", nil, addr)
+ if err != nil {
+ return 0, err
+ }
+ defer conn.Close()
+
+ sent := time.Now()
+ if _, err = conn.Write(request); err != nil {
+ return 0, err
+ }
+ // Retrieve the reply and calculate the elapsed time
+ conn.SetDeadline(time.Now().Add(5 * time.Second))
+
+ reply := make([]byte, 48)
+ if _, err = conn.Read(reply); err != nil {
+ return 0, err
+ }
+ elapsed := time.Since(sent)
+
+ // Reconstruct the time from the reply data
+ sec := uint64(reply[43]) | uint64(reply[42])<<8 | uint64(reply[41])<<16 | uint64(reply[40])<<24
+ frac := uint64(reply[47]) | uint64(reply[46])<<8 | uint64(reply[45])<<16 | uint64(reply[44])<<24
+
+ nanosec := sec*1e9 + (frac*1e9)>>32
+
+ t := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC).Add(time.Duration(nanosec)).Local()
+
+ // Calculate the drift based on an assumed answer time of RRT/2
+ drifts = append(drifts, sent.Sub(t)+elapsed/2)
+ }
+ // Calculate the average drift (drop the two extremes to avoid outliers)
+ sort.Sort(durationSlice(drifts))
+
+ drift := time.Duration(0)
+ for i := 1; i < len(drifts)-1; i++ {
+ drift += drifts[i]
+ }
+ return drift / time.Duration(measurements), nil
+}
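To make the fixed-point arithmetic above concrete: the reply's transmit timestamp is 32 bits of seconds since 1900-01-01 plus a 32-bit binary fraction of a second. A self-contained example with illustrative values (0x83AA7E80 is the well-known NTP-to-Unix epoch offset in seconds):

package main

import (
	"fmt"
	"time"
)

func main() {
	sec := uint64(0x83AA7E80)  // seconds field: 2208988800s after 1900-01-01
	frac := uint64(0x80000000) // fraction field: half a second in 1/2^32 units

	// Same conversion as sntpDrift: scale both fields to nanoseconds.
	nanosec := sec*1e9 + (frac*1e9)>>32

	t := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC).Add(time.Duration(nanosec))
	fmt.Println(t) // 1970-01-01 00:00:00.5 +0000 UTC
}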
diff --git a/p2p/discover/udp.go b/p2p/discover/udp.go
index 92b37b1f3..74758b6fd 100644
--- a/p2p/discover/udp.go
+++ b/p2p/discover/udp.go
@@ -51,6 +51,10 @@ const (
respTimeout = 500 * time.Millisecond
sendTimeout = 500 * time.Millisecond
expiration = 20 * time.Second
+
+ ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP
+ ntpWarningCooldown = 10 * time.Minute // Minimum amount of time to pass before repeating NTP warning
+ driftThreshold = 10 * time.Second // Allowed clock drift before warning user
)
// RPC packet types
@@ -316,13 +320,15 @@ func (t *udp) handleReply(from NodeID, ptype byte, req packet) bool {
}
}
-// loop runs in its own goroutin. it keeps track of
+// loop runs in its own goroutine. it keeps track of
// the refresh timer and the pending reply queue.
func (t *udp) loop() {
var (
- plist = list.New()
- timeout = time.NewTimer(0)
- nextTimeout *pending // head of plist when timeout was last reset
+ plist = list.New()
+ timeout = time.NewTimer(0)
+ nextTimeout *pending // head of plist when timeout was last reset
+ contTimeouts = 0 // number of continuous timeouts to do NTP checks
+ ntpWarnTime = time.Unix(0, 0)
)
<-timeout.C // ignore first timeout
defer timeout.Stop()
@@ -377,19 +383,31 @@ func (t *udp) loop() {
p.errc <- nil
plist.Remove(el)
}
+ // Reset the continuous timeout counter (time drift detection)
+ contTimeouts = 0
}
}
r.matched <- matched
case now := <-timeout.C:
nextTimeout = nil
+
// Notify and remove callbacks whose deadline is in the past.
for el := plist.Front(); el != nil; el = el.Next() {
p := el.Value.(*pending)
if now.After(p.deadline) || now.Equal(p.deadline) {
p.errc <- errTimeout
plist.Remove(el)
+ contTimeouts++
+ }
+ }
+ // If we've accumulated too many timeouts, do an NTP time sync check
+ if contTimeouts > ntpFailureThreshold {
+ if time.Since(ntpWarnTime) >= ntpWarningCooldown {
+ ntpWarnTime = time.Now()
+ go checkClockDrift()
}
+ contTimeouts = 0
}
}
}
diff --git a/trie/iterator.go b/trie/iterator.go
index 5f205e081..ceef52ec8 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -18,22 +18,27 @@ package trie
import (
"bytes"
+ "fmt"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
)
+// Iterator is a key-value trie iterator to traverse the data contents.
type Iterator struct {
trie *Trie
- Key []byte
- Value []byte
+ Key []byte // Current data key on which the iterator is positioned on
+ Value []byte // Current data value on which the iterator is positioned on
}
+// NewIterator creates a new key-value iterator.
func NewIterator(trie *Trie) *Iterator {
return &Iterator{trie: trie, Key: nil}
}
+// Next moves the iterator forward with one key-value entry.
func (self *Iterator) Next() bool {
isIterStart := false
if self.Key == nil {
@@ -142,6 +147,138 @@ func (self *Iterator) key(node interface{}) []byte {
}
return self.key(rn)
}
+ return nil
+}
+
+// nodeIteratorState represents the iteration state at one particular node of the
+// trie, which can be resumed at a later invocation.
+type nodeIteratorState struct {
+ hash common.Hash // Hash of the node being iterated (nil if not standalone)
+ node node // Trie node being iterated
+ parent common.Hash // Hash of the first full ancestor node (nil if current is the root)
+ child int // Child to be processed next
+}
+
+// NodeIterator is an iterator to traverse the trie post-order.
+type NodeIterator struct {
+ trie *Trie // Trie being iterated
+ stack []*nodeIteratorState // Hierarchy of trie nodes persisting the iteration state
+
+ Hash common.Hash // Hash of the current node being iterated (nil if not standalone)
+ Node node // Current node being iterated (internal representation)
+ Parent common.Hash // Hash of the first full ancestor node (nil if current is the root)
+ Leaf bool // Flag whether the current node is a value (data) node
+ LeafBlob []byte // Data blob contained within a leaf (otherwise nil)
+
+ Error error // Failure set in case of an internal error in the iterator
+}
+
+// NewNodeIterator creates a post-order trie iterator.
+func NewNodeIterator(trie *Trie) *NodeIterator {
+ if bytes.Compare(trie.Root(), emptyRoot.Bytes()) == 0 {
+ return new(NodeIterator)
+ }
+ return &NodeIterator{trie: trie}
+}
+
+// Next moves the iterator to the next node, returning whether there are any
+// further nodes. In case of an internal error this method returns false and
+// sets the Error field to the encountered failure.
+func (it *NodeIterator) Next() bool {
+ // If the iterator failed previously, don't do anything
+ if it.Error != nil {
+ return false
+ }
+ // Otherwise step forward with the iterator and report any errors
+ if err := it.step(); err != nil {
+ it.Error = err
+ return false
+ }
+ return it.retrieve()
+}
+
+// step moves the iterator to the next node of the trie.
+func (it *NodeIterator) step() error {
+ // Abort if we reached the end of the iteration
+ if it.trie == nil {
+ return nil
+ }
+ // Initialize the iterator if we've just started, or pop off the old node otherwise
+ if len(it.stack) == 0 {
+ it.stack = append(it.stack, &nodeIteratorState{node: it.trie.root, child: -1})
+ if it.stack[0].node == nil {
+ return fmt.Errorf("root node missing: %x", it.trie.Root())
+ }
+ } else {
+ it.stack = it.stack[:len(it.stack)-1]
+ if len(it.stack) == 0 {
+ it.trie = nil
+ return nil
+ }
+ }
+ // Continue iteration to the next child
+ for {
+ parent := it.stack[len(it.stack)-1]
+ ancestor := parent.hash
+ if (ancestor == common.Hash{}) {
+ ancestor = parent.parent
+ }
+ if node, ok := parent.node.(fullNode); ok {
+ // Full node, traverse all children, then the node itself
+ if parent.child >= len(node) {
+ break
+ }
+ for parent.child++; parent.child < len(node); parent.child++ {
+ if current := node[parent.child]; current != nil {
+ it.stack = append(it.stack, &nodeIteratorState{node: current, parent: ancestor, child: -1})
+ break
+ }
+ }
+ } else if node, ok := parent.node.(shortNode); ok {
+ // Short node, traverse the pointer singleton child, then the node itself
+ if parent.child >= 0 {
+ break
+ }
+ parent.child++
+ it.stack = append(it.stack, &nodeIteratorState{node: node.Val, parent: ancestor, child: -1})
+ } else if hash, ok := parent.node.(hashNode); ok {
+ // Hash node, resolve the hash child from the database, then the node itself
+ if parent.child >= 0 {
+ break
+ }
+ parent.child++
+ node, err := it.trie.resolveHash(hash, nil, nil)
+ if err != nil {
+ return err
+ }
+ it.stack = append(it.stack, &nodeIteratorState{hash: common.BytesToHash(hash), node: node, parent: ancestor, child: -1})
+ } else {
+ break
+ }
+ }
return nil
}
+
+// retrieve pulls and caches the current trie node the iterator is traversing.
+// In case of a value node, the additional leaf blob is also populated with the
+// data contents for external interpretation.
+//
+// The method returns whether there is any data left for inspection.
+func (it *NodeIterator) retrieve() bool {
+ // Clear out any previously set values
+ it.Hash, it.Node, it.Parent, it.Leaf, it.LeafBlob = common.Hash{}, nil, common.Hash{}, false, nil
+
+ // If the iteration's done, return no available data
+ if it.trie == nil {
+ return false
+ }
+ // Otherwise retrieve the current node and resolve leaf accessors
+ state := it.stack[len(it.stack)-1]
+
+ it.Hash, it.Node, it.Parent = state.hash, state.node, state.parent
+ if value, ok := it.Node.(valueNode); ok {
+ it.Leaf, it.LeafBlob = true, []byte(value)
+ }
+ return true
+}
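As a usage sketch for the leaf accessors (a hypothetical helper in package trie, not part of this change): a post-order walk surfaces every value node via Leaf/LeafBlob, which is exactly what the state iterator above builds on.

// trieLeafBlobs is a hypothetical helper collecting the raw contents of all
// value nodes encountered during a post-order walk.
func trieLeafBlobs(t *Trie) ([][]byte, error) {
	var blobs [][]byte
	it := NewNodeIterator(t)
	for it.Next() {
		if it.Leaf {
			blobs = append(blobs, it.LeafBlob)
		}
	}
	return blobs, it.Error
}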
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index fdc60b412..dc8276116 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -16,7 +16,12 @@
package trie
-import "testing"
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
func TestIterator(t *testing.T) {
trie := newEmpty()
@@ -47,3 +52,28 @@ func TestIterator(t *testing.T) {
}
}
}
+
+// Tests that the node iterator indeed walks over the entire database contents.
+func TestNodeIteratorCoverage(t *testing.T) {
+ // Create some arbitrary test trie to iterate
+ db, trie, _ := makeTestTrie()
+
+ // Gather all the node hashes found by the iterator
+ hashes := make(map[common.Hash]struct{})
+ for it := NewNodeIterator(trie); it.Next(); {
+ if it.Hash != (common.Hash{}) {
+ hashes[it.Hash] = struct{}{}
+ }
+ }
+ // Cross check the hashes and the database itself
+ for hash := range hashes {
+ if _, err := db.Get(hash.Bytes()); err != nil {
+ t.Errorf("failed to retrieve reported node %x: %v", hash, err)
+ }
+ }
+ for _, key := range db.(*ethdb.MemDatabase).Keys() {
+ if _, ok := hashes[common.BytesToHash(key)]; !ok {
+ t.Errorf("state entry not reported %x", key)
+ }
+ }
+}
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 9c036a3a9..a81f7650e 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -33,6 +33,7 @@ func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) {
// Fill it with some arbitrary data
content := make(map[string][]byte)
for i := byte(0); i < 255; i++ {
+ // Map the same data under multiple keys
key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
content[string(key)] = val
trie.Update(key, val)
@@ -40,9 +41,19 @@ func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) {
key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
content[string(key)] = val
trie.Update(key, val)
+
+ // Add some other data to inflate the trie
+ for j := byte(3); j < 13; j++ {
+ key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
+ content[string(key)] = val
+ trie.Update(key, val)
+ }
}
trie.Commit()
+ // Remove any potentially cached data from the test trie creation
+ globalCache.Clear()
+
// Return the generated trie
return db, trie, content
}
@@ -50,10 +61,17 @@ func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) {
// checkTrieContents cross references a reconstructed trie with an expected data
// content map.
func checkTrieContents(t *testing.T, db Database, root []byte, content map[string][]byte) {
+ // Remove any potentially cached data from the trie synchronisation
+ globalCache.Clear()
+
+ // Check root availability and trie contents
trie, err := New(common.BytesToHash(root), db)
if err != nil {
t.Fatalf("failed to create trie at %x: %v", root, err)
}
+ if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
+ t.Fatalf("inconsistent trie at %x: %v", root, err)
+ }
for key, val := range content {
if have := trie.Get([]byte(key)); bytes.Compare(have, val) != 0 {
t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
@@ -61,6 +79,22 @@ func checkTrieContents(t *testing.T, db Database, root []byte, content map[strin
}
}
+// checkTrieConsistency checks that all nodes in a trie are indeed present.
+func checkTrieConsistency(db Database, root common.Hash) error {
+ // Remove any potentially cached data from the test trie creation or previous checks
+ globalCache.Clear()
+
+ // Create and iterate a trie rooted in a subnode
+ trie, err := New(root, db)
+ if err != nil {
+ return nil // Consider a non-existent trie consistent
+ }
+ it := NewNodeIterator(trie)
+ for it.Next() {
+ }
+ return it.Error
+}
+
// Tests that an empty trie is not scheduled for syncing.
func TestEmptyTrieSync(t *testing.T) {
emptyA, _ := New(common.Hash{}, nil)
@@ -102,7 +136,7 @@ func testIterativeTrieSync(t *testing.T, batch int) {
}
queue = append(queue[:0], sched.Missing(batch)...)
}
- // Cross check that the two tries re in sync
+ // Cross check that the two tries are in sync
checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
@@ -132,7 +166,7 @@ func TestIterativeDelayedTrieSync(t *testing.T) {
}
queue = append(queue[len(results):], sched.Missing(10000)...)
}
- // Cross check that the two tries re in sync
+ // Cross check that the two tries are in sync
checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
@@ -173,7 +207,7 @@ func testIterativeRandomTrieSync(t *testing.T, batch int) {
queue[hash] = struct{}{}
}
}
- // Cross check that the two tries re in sync
+ // Cross check that the two tries are in sync
checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
@@ -216,7 +250,7 @@ func TestIterativeRandomDelayedTrieSync(t *testing.T) {
queue[hash] = struct{}{}
}
}
- // Cross check that the two tries re in sync
+ // Cross check that the two tries are in sync
checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
@@ -252,6 +286,57 @@ func TestDuplicateAvoidanceTrieSync(t *testing.T) {
}
queue = append(queue[:0], sched.Missing(0)...)
}
- // Cross check that the two tries re in sync
+ // Cross check that the two tries are in sync
checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
}
+
+// Tests that at any point in time during a sync, only complete sub-tries are in
+// the database.
+func TestIncompleteTrieSync(t *testing.T) {
+ // Create a random trie to copy
+ srcDb, srcTrie, _ := makeTestTrie()
+
+ // Create a destination trie and sync with the scheduler
+ dstDb, _ := ethdb.NewMemDatabase()
+ sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+ added := []common.Hash{}
+ queue := append([]common.Hash{}, sched.Missing(1)...)
+ for len(queue) > 0 {
+ // Fetch a batch of trie nodes
+ results := make([]SyncResult, len(queue))
+ for i, hash := range queue {
+ data, err := srcDb.Get(hash.Bytes())
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+ }
+ results[i] = SyncResult{hash, data}
+ }
+ // Process each of the trie nodes
+ if index, err := sched.Process(results); err != nil {
+ t.Fatalf("failed to process result #%d: %v", index, err)
+ }
+ for _, result := range results {
+ added = append(added, result.Hash)
+ }
+ // Check that all known sub-tries in the synced trie are complete
+ for _, root := range added {
+ if err := checkTrieConsistency(dstDb, root); err != nil {
+ t.Fatalf("trie inconsistent: %v", err)
+ }
+ }
+ // Fetch the next batch to retrieve
+ queue = append(queue[:0], sched.Missing(1)...)
+ }
+ // Sanity check that removing any node from the database is detected
+ for _, node := range added[1:] {
+ key := node.Bytes()
+ value, _ := dstDb.Get(key)
+
+ dstDb.Delete(key)
+ if err := checkTrieConsistency(dstDb, added[0]); err == nil {
+ t.Fatalf("trie inconsistency not caught, missing: %x", key)
+ }
+ dstDb.Put(key, value)
+ }
+}