-rw-r--r-- | cmd/geth/admin.go            | 13
-rw-r--r-- | core/helper_test.go          |  5
-rw-r--r-- | core/transaction_pool.go     | 63
-rw-r--r-- | eth/backend.go               |  5
-rw-r--r-- | eth/downloader/downloader.go | 46
-rw-r--r-- | eth/sync.go                  | 55
-rw-r--r-- | p2p/discover/node_test.go    | 25
-rw-r--r-- | p2p/discover/table.go        |  8
-rw-r--r-- | p2p/discover/table_test.go   |  9
-rw-r--r-- | p2p/discover/udp_test.go     | 55
-rw-r--r-- | p2p/server_test.go           |  3
-rw-r--r-- | rpc/args_test.go             |  8
-rw-r--r-- | tests/helper/vm.go           |  2
13 files changed, 180 insertions, 117 deletions
diff --git a/cmd/geth/admin.go b/cmd/geth/admin.go
index b0b4e0954..33ef69792 100644
--- a/cmd/geth/admin.go
+++ b/cmd/geth/admin.go
@@ -51,7 +51,7 @@ func (js *jsre) adminBindings() {
 	admin.Set("import", js.importChain)
 	admin.Set("export", js.exportChain)
 	admin.Set("verbosity", js.verbosity)
-	admin.Set("progress", js.downloadProgress)
+	admin.Set("progress", js.syncProgress)
 	admin.Set("setSolc", js.setSolc)
 	admin.Set("contractInfo", struct{}{})

@@ -327,9 +327,14 @@ func (js *jsre) setHead(call otto.FunctionCall) otto.Value {
 	return otto.UndefinedValue()
 }

-func (js *jsre) downloadProgress(call otto.FunctionCall) otto.Value {
-	pending, cached := js.ethereum.Downloader().Stats()
-	v, _ := call.Otto.ToValue(map[string]interface{}{"pending": pending, "cached": cached})
+func (js *jsre) syncProgress(call otto.FunctionCall) otto.Value {
+	pending, cached, importing, eta := js.ethereum.Downloader().Stats()
+	v, _ := call.Otto.ToValue(map[string]interface{}{
+		"pending":   pending,
+		"cached":    cached,
+		"importing": importing,
+		"estimate":  (eta / time.Second * time.Second).String(),
+	})
 	return v
 }

diff --git a/core/helper_test.go b/core/helper_test.go
index 1e0ed178b..a308153aa 100644
--- a/core/helper_test.go
+++ b/core/helper_test.go
@@ -6,8 +6,8 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	// "github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 )

@@ -76,8 +76,5 @@ func NewTestManager() *TestManager {
 	// testManager.blockChain = NewChainManager(testManager)
 	// testManager.stateManager = NewStateManager(testManager)

-	// Start the tx pool
-	testManager.txPool.Start()
-
 	return testManager
 }

diff --git a/core/transaction_pool.go b/core/transaction_pool.go
index a2f970195..4a0594228 100644
--- a/core/transaction_pool.go
+++ b/core/transaction_pool.go
@@ -50,7 +50,7 @@ type TxPool struct {
 }

 func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
-	return &TxPool{
+	pool := &TxPool{
 		pending:      make(map[common.Hash]*types.Transaction),
 		queue:        make(map[common.Address]map[common.Hash]*types.Transaction),
 		quit:         make(chan bool),
@@ -58,14 +58,17 @@ func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func(
 		currentState: currentStateFn,
 		gasLimit:     gasLimitFn,
 		pendingState: state.ManageState(currentStateFn()),
+		events:       eventMux.Subscribe(ChainEvent{}),
 	}
+	go pool.eventLoop()
+
+	return pool
 }

-func (pool *TxPool) Start() {
+func (pool *TxPool) eventLoop() {
 	// Track chain events. When a chain events occurs (new chain canon block)
 	// we need to know the new state. The new state will help us determine
 	// the nonces in the managed state
-	pool.events = pool.eventMux.Subscribe(ChainEvent{})

 	for _ = range pool.events.Chan() {
 		pool.mu.Lock()
@@ -100,7 +103,6 @@ func (pool *TxPool) resetState() {
 }

 func (pool *TxPool) Stop() {
-	pool.pending = make(map[common.Hash]*types.Transaction)
 	close(pool.quit)
 	pool.events.Unsubscribe()
 	glog.V(logger.Info).Infoln("TX Pool stopped")
@@ -169,15 +171,10 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
 	return nil
 }

+// validate and queue transactions.
 func (self *TxPool) add(tx *types.Transaction) error {
 	hash := tx.Hash()

-	/* XXX I'm unsure about this. This is extremely dangerous and may result
-	in total black listing of certain transactions
-	if self.invalidHashes.Has(hash) {
-		return fmt.Errorf("Invalid transaction (%x)", hash[:4])
-	}
-	*/
 	if self.pending[hash] != nil {
 		return fmt.Errorf("Known transaction (%x)", hash[:4])
 	}
@@ -207,6 +204,30 @@ func (self *TxPool) add(tx *types.Transaction) error {
 	return nil
 }

+// queueTx will queue an unknown transaction
+func (self *TxPool) queueTx(hash common.Hash, tx *types.Transaction) {
+	from, _ := tx.From() // already validated
+	if self.queue[from] == nil {
+		self.queue[from] = make(map[common.Hash]*types.Transaction)
+	}
+	self.queue[from][hash] = tx
+}
+
+// addTx will add a transaction to the pending (processable queue) list of transactions
+func (pool *TxPool) addTx(hash common.Hash, addr common.Address, tx *types.Transaction) {
+	if _, ok := pool.pending[hash]; !ok {
+		pool.pending[hash] = tx
+
+		// Increment the nonce on the pending state. This can only happen if
+		// the nonce is +1 to the previous one.
+		pool.pendingState.SetNonce(addr, tx.AccountNonce+1)
+		// Notify the subscribers. This event is posted in a goroutine
+		// because it's possible that somewhere during the post "Remove transaction"
+		// gets called which will then wait for the global tx pool lock and deadlock.
+		go pool.eventMux.Post(TxPreEvent{tx})
+	}
+}
+
 // Add queues a single transaction in the pool if it is valid.
 func (self *TxPool) Add(tx *types.Transaction) error {
 	self.mu.Lock()
@@ -290,28 +311,6 @@ func (self *TxPool) RemoveTransactions(txs types.Transactions) {
 	}
 }

-func (self *TxPool) queueTx(hash common.Hash, tx *types.Transaction) {
-	from, _ := tx.From() // already validated
-	if self.queue[from] == nil {
-		self.queue[from] = make(map[common.Hash]*types.Transaction)
-	}
-	self.queue[from][hash] = tx
-}
-
-func (pool *TxPool) addTx(hash common.Hash, addr common.Address, tx *types.Transaction) {
-	if _, ok := pool.pending[hash]; !ok {
-		pool.pending[hash] = tx
-
-		// Increment the nonce on the pending state. This can only happen if
-		// the nonce is +1 to the previous one.
-		pool.pendingState.SetNonce(addr, tx.AccountNonce+1)
-		// Notify the subscribers. This event is posted in a goroutine
-		// because it's possible that somewhere during the post "Remove transaction"
-		// gets called which will then wait for the global tx pool lock and deadlock.
-		go pool.eventMux.Post(TxPreEvent{tx})
-	}
-}
-
 // checkQueue moves transactions that have become processable to main pool.
 func (pool *TxPool) checkQueue() {
 	state := pool.pendingState

diff --git a/eth/backend.go b/eth/backend.go
index fcbea04a2..60e9359dc 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -466,8 +466,6 @@ func (s *Ethereum) Start() error {
 		s.StartAutoDAG()
 	}

-	// Start services
-	go s.txPool.Start()
 	s.protocolManager.Start()

 	if s.whisper != nil {
@@ -513,9 +511,6 @@ func (s *Ethereum) StartForTest() {
 		ClientString:    s.net.Name,
 		ProtocolVersion: ProtocolVersion,
 	})
-
-	// Start services
-	s.txPool.Start()
 }

 // AddPeer connects to the given node and maintains the connection until the

diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 29b627771..f0a515d12 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -78,6 +78,12 @@ type Downloader struct {
 	checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain
 	banned *set.Set                    // Set of hashes we've received and banned

+	// Statistics
+	importStart time.Time // Instance when the last blocks were taken from the cache
+	importQueue []*Block  // Previously taken blocks to check import progress
+	importDone  int       // Number of taken blocks already imported from the last batch
+	importLock  sync.Mutex
+
 	// Callbacks
 	hasBlock hashCheckFn
 	getBlock getBlockFn
@@ -121,8 +127,27 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloa
 	return downloader
 }

-func (d *Downloader) Stats() (current int, max int) {
-	return d.queue.Size()
+// Stats retrieves the current status of the downloader.
+func (d *Downloader) Stats() (pending int, cached int, importing int, estimate time.Duration) {
+	// Fetch the download status
+	pending, cached = d.queue.Size()
+
+	// Figure out the import progress
+	d.importLock.Lock()
+	defer d.importLock.Unlock()
+
+	for len(d.importQueue) > 0 && d.hasBlock(d.importQueue[0].RawBlock.Hash()) {
+		d.importQueue = d.importQueue[1:]
+		d.importDone++
+	}
+	importing = len(d.importQueue)
+
+	// Make an estimate on the total sync
+	estimate = 0
+	if d.importDone > 0 {
+		estimate = time.Since(d.importStart) / time.Duration(d.importDone) * time.Duration(pending+cached+importing)
+	}
+	return
 }

 // Synchronising returns the state of the downloader
@@ -202,7 +227,15 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {

 // TakeBlocks takes blocks from the queue and yields them to the caller.
 func (d *Downloader) TakeBlocks() []*Block {
-	return d.queue.TakeBlocks()
+	blocks := d.queue.TakeBlocks()
+	if len(blocks) > 0 {
+		d.importLock.Lock()
+		d.importStart = time.Now()
+		d.importQueue = blocks
+		d.importDone = 0
+		d.importLock.Unlock()
+	}
+	return blocks
 }

 // Has checks if the downloader knows about a particular hash, meaning that its
@@ -255,9 +288,14 @@ func (d *Downloader) Cancel() bool {
 	}
 	d.cancelLock.Unlock()

-	// reset the queue
+	// Reset the queue and import statistics
 	d.queue.Reset()

+	d.importLock.Lock()
+	d.importQueue = nil
+	d.importDone = 0
+	d.importLock.Unlock()
+
 	return true
 }

diff --git a/eth/sync.go b/eth/sync.go
index a25d4d4fd..8fee21d7b 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -127,10 +127,11 @@ func (pm *ProtocolManager) txsyncLoop() {
 // fetcher is responsible for collecting hash notifications, and periodically
 // checking all unknown ones and individually fetching them.
 func (pm *ProtocolManager) fetcher() {
-	announces := make(map[common.Hash]*blockAnnounce)
+	announces := make(map[common.Hash][]*blockAnnounce)
 	request := make(map[*peer][]common.Hash)
 	pending := make(map[common.Hash]*blockAnnounce)
 	cycle := time.Tick(notifyCheckCycle)
+	done := make(chan common.Hash)

 	// Iterate the block fetching until a quit is requested
 	for {
@@ -139,9 +140,18 @@
 			// A batch of hashes the notified, schedule them for retrieval
 			glog.V(logger.Debug).Infof("Scheduling %d hash announcements from %s", len(notifications), notifications[0].peer.id)
 			for _, announce := range notifications {
-				announces[announce.hash] = announce
+				// Skip if it's already pending fetch
+				if _, ok := pending[announce.hash]; ok {
+					continue
+				}
+				// Otherwise queue up the peer as a potential source
+				announces[announce.hash] = append(announces[announce.hash], announce)
 			}

+		case hash := <-done:
+			// A pending import finished, remove all traces
+			delete(pending, hash)
+
 		case <-cycle:
 			// Clean up any expired block fetches
 			for hash, announce := range pending {
@@ -150,8 +160,9 @@
 				}
 			}
 			// Check if any notified blocks failed to arrive
-			for hash, announce := range announces {
-				if time.Since(announce.time) > notifyArriveTimeout {
+			for hash, all := range announces {
+				if time.Since(all[0].time) > notifyArriveTimeout {
+					announce := all[rand.Intn(len(all))]
 					if !pm.chainman.HasBlock(hash) {
 						request[announce.peer] = append(request[announce.peer], hash)
 						pending[hash] = announce
@@ -200,24 +211,32 @@
 			case <-pm.quitSync:
 				return
 			}
-			// If any explicit fetches were replied to, import them
-			if count := len(explicit); count > 0 {
-				glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", count)
-
-				// Create a closure with the retrieved blocks and origin peers
-				peers := make([]*peer, 0, count)
-				blocks := make([]*types.Block, 0, count)
-				for _, block := range explicit {
-					hash := block.Hash()
-					if announce := pending[hash]; announce != nil {
-						peers = append(peers, announce.peer)
-						blocks = append(blocks, block)
-
+			// Create a closure with the retrieved blocks and origin peers
+			peers := make([]*peer, 0, len(explicit))
+			blocks = make([]*types.Block, 0, len(explicit))
+			for _, block := range explicit {
+				hash := block.Hash()
+				if announce := pending[hash]; announce != nil {
+					// Drop the block if it surely cannot fit
+					if pm.chainman.HasBlock(hash) || !pm.chainman.HasBlock(block.ParentHash()) {
 						delete(pending, hash)
+						continue
 					}
+					// Otherwise accumulate for import
+					peers = append(peers, announce.peer)
+					blocks = append(blocks, block)
 				}
-				// Run the importer on a new thread
+			}
+			// If any explicit fetches were replied to, import them
+			if count := len(blocks); count > 0 {
+				glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", len(blocks))
 				go func() {
+					// Make sure all hashes are cleaned up
+					for _, block := range blocks {
+						hash := block.Hash()
+						defer func() { done <- hash }()
+					}
+					// Try and actually import the blocks
 					for i := 0; i < len(blocks); i++ {
 						if err := pm.importBlock(peers[i], blocks[i], nil); err != nil {
 							glog.V(logger.Detail).Infof("Failed to import explicitly fetched block: %v", err)

diff --git a/p2p/discover/node_test.go b/p2p/discover/node_test.go
index b1babd989..795460c49 100644
--- a/p2p/discover/node_test.go
+++ b/p2p/discover/node_test.go
@@ -13,11 +13,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 )

-var (
-	quickrand = rand.New(rand.NewSource(time.Now().Unix()))
-	quickcfg  = &quick.Config{MaxCount: 5000, Rand: quickrand}
-)
-
 var parseNodeTests = []struct {
 	rawurl    string
 	wantError string
@@ -176,7 +171,7 @@ func TestNodeID_distcmp(t *testing.T) {
 		bbig := new(big.Int).SetBytes(b[:])
 		return new(big.Int).Xor(tbig, abig).Cmp(new(big.Int).Xor(tbig, bbig))
 	}
-	if err := quick.CheckEqual(distcmp, distcmpBig, quickcfg); err != nil {
+	if err := quick.CheckEqual(distcmp, distcmpBig, quickcfg()); err != nil {
 		t.Error(err)
 	}
 }
@@ -195,7 +190,7 @@ func TestNodeID_logdist(t *testing.T) {
 		abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:])
 		return new(big.Int).Xor(abig, bbig).BitLen()
 	}
-	if err := quick.CheckEqual(logdist, logdistBig, quickcfg); err != nil {
+	if err := quick.CheckEqual(logdist, logdistBig, quickcfg()); err != nil {
 		t.Error(err)
 	}
 }
@@ -211,9 +206,10 @@ func TestNodeID_logdistEqual(t *testing.T) {
 func TestNodeID_hashAtDistance(t *testing.T) {
 	// we don't use quick.Check here because its output isn't
 	// very helpful when the test fails.
-	for i := 0; i < quickcfg.MaxCount; i++ {
-		a := gen(common.Hash{}, quickrand).(common.Hash)
-		dist := quickrand.Intn(len(common.Hash{}) * 8)
+	cfg := quickcfg()
+	for i := 0; i < cfg.MaxCount; i++ {
+		a := gen(common.Hash{}, cfg.Rand).(common.Hash)
+		dist := cfg.Rand.Intn(len(common.Hash{}) * 8)
 		result := hashAtDistance(a, dist)
 		actualdist := logdist(result, a)

@@ -225,7 +221,14 @@ func TestNodeID_hashAtDistance(t *testing.T) {
 	}
 }

-// TODO: this can be dropped when we require Go >= 1.5
+func quickcfg() *quick.Config {
+	return &quick.Config{
+		MaxCount: 5000,
+		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
+	}
+}
+
+// TODO: The Generate method can be dropped when we require Go >= 1.5
 // because testing/quick learned to generate arrays in 1.5.
 func (NodeID) Generate(rand *rand.Rand, size int) reflect.Value {

diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index 4b7ddb775..f71320425 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -40,6 +40,8 @@ type Table struct {
 	bonding   map[NodeID]*bondproc
 	bondslots chan struct{} // limits total number of active bonding processes

+	nodeAddedHook func(*Node) // for testing
+
 	net  transport
 	self *Node // metadata of the local node
 }
@@ -431,6 +433,9 @@ func (tab *Table) pingreplace(new *Node, b *bucket) {
 	}
 	copy(b.entries[1:], b.entries)
 	b.entries[0] = new
+	if tab.nodeAddedHook != nil {
+		tab.nodeAddedHook(new)
+	}
 }

 // ping a remote endpoint and wait for a reply, also updating the node database
@@ -466,6 +471,9 @@ outer:
 		}
 		if len(bucket.entries) < bucketSize {
 			bucket.entries = append(bucket.entries, n)
+			if tab.nodeAddedHook != nil {
+				tab.nodeAddedHook(n)
+			}
 		}
 	}
 }

diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go
index da398d137..829899916 100644
--- a/p2p/discover/table_test.go
+++ b/p2p/discover/table_test.go
@@ -9,6 +9,7 @@ import (
 	"reflect"
 	"testing"
 	"testing/quick"
+	"time"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -74,7 +75,7 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
 	t.Parallel()
 	cfg := &quick.Config{
 		MaxCount: 1000,
-		Rand:     quickrand,
+		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
 		Values: func(args []reflect.Value, rand *rand.Rand) {
 			// generate a random list of nodes. this will be the content of the bucket.
 			n := rand.Intn(bucketSize-1) + 1
@@ -205,7 +206,7 @@ func TestTable_closest(t *testing.T) {
 		}
 		return true
 	}
-	if err := quick.Check(test, quickcfg); err != nil {
+	if err := quick.Check(test, quickcfg()); err != nil {
 		t.Error(err)
 	}
 }
@@ -213,7 +214,7 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) {
 	cfg := &quick.Config{
 		MaxCount: 200,
-		Rand:     quickrand,
+		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
 		Values: func(args []reflect.Value, rand *rand.Rand) {
 			args[0] = reflect.ValueOf(make([]*Node, rand.Intn(1000)))
 		},
@@ -221,7 +222,7 @@
 	test := func(buf []*Node) bool {
 		tab := newTable(nil, NodeID{}, &net.UDPAddr{}, "")
 		for i := 0; i < len(buf); i++ {
-			ld := quickrand.Intn(len(tab.buckets))
+			ld := cfg.Rand.Intn(len(tab.buckets))
 			tab.add([]*Node{nodeAtDistance(tab.self.sha, ld)})
 		}
 		gotN := tab.ReadRandomNodes(buf)

diff --git a/p2p/discover/udp_test.go b/p2p/discover/udp_test.go
index 11fa31d7c..b5d035a98 100644
--- a/p2p/discover/udp_test.go
+++ b/p2p/discover/udp_test.go
@@ -234,14 +234,12 @@ func TestUDP_findnodeMultiReply(t *testing.T) {

 func TestUDP_successfulPing(t *testing.T) {
 	test := newUDPTest(t)
+	added := make(chan *Node, 1)
+	test.table.nodeAddedHook = func(n *Node) { added <- n }
 	defer test.table.Close()

-	done := make(chan struct{})
-	go func() {
-		// The remote side sends a ping packet to initiate the exchange.
-		test.packetIn(nil, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: Version, Expiration: futureExp})
-		close(done)
-	}()
+	// The remote side sends a ping packet to initiate the exchange.
+	go test.packetIn(nil, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: Version, Expiration: futureExp})

 	// the ping is replied to.
 	test.waitPacketOut(func(p *pong) {
@@ -277,35 +275,26 @@ func TestUDP_successfulPing(t *testing.T) {
 	})
 	test.packetIn(nil, pongPacket, &pong{Expiration: futureExp})

-	// ping should return shortly after getting the pong packet.
-	<-done
-
-	// check that the node was added.
-	rid := PubkeyID(&test.remotekey.PublicKey)
-	rnode := find(test.table, rid)
-	if rnode == nil {
-		t.Fatalf("node %v not found in table", rid)
-	}
-	if !bytes.Equal(rnode.IP, test.remoteaddr.IP) {
-		t.Errorf("node has wrong IP: got %v, want: %v", rnode.IP, test.remoteaddr.IP)
-	}
-	if int(rnode.UDP) != test.remoteaddr.Port {
-		t.Errorf("node has wrong UDP port: got %v, want: %v", rnode.UDP, test.remoteaddr.Port)
-	}
-	if rnode.TCP != testRemote.TCP {
-		t.Errorf("node has wrong TCP port: got %v, want: %v", rnode.TCP, testRemote.TCP)
-	}
-}
-
-func find(tab *Table, id NodeID) *Node {
-	for _, b := range tab.buckets {
-		for _, e := range b.entries {
-			if e.ID == id {
-				return e
-			}
+	// the node should be added to the table shortly after getting the
+	// pong packet.
+	select {
+	case n := <-added:
+		rid := PubkeyID(&test.remotekey.PublicKey)
+		if n.ID != rid {
+			t.Errorf("node has wrong ID: got %v, want %v", n.ID, rid)
 		}
+		if !bytes.Equal(n.IP, test.remoteaddr.IP) {
+			t.Errorf("node has wrong IP: got %v, want: %v", n.IP, test.remoteaddr.IP)
+		}
+		if int(n.UDP) != test.remoteaddr.Port {
+			t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP, test.remoteaddr.Port)
+		}
+		if n.TCP != testRemote.TCP {
+			t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP, testRemote.TCP)
+		}
+	case <-time.After(2 * time.Second):
+		t.Errorf("node was not added within 2 seconds")
 	}
-	return nil
 }

 // dgramPipe is a fake UDP socket. It queues all sent datagrams.

diff --git a/p2p/server_test.go b/p2p/server_test.go
index 01448cc7b..e8d21a188 100644
--- a/p2p/server_test.go
+++ b/p2p/server_test.go
@@ -117,7 +117,6 @@ func TestServerDial(t *testing.T) {
 			t.Error("accept error:", err)
 			return
 		}
-		conn.Close()
 		accepted <- conn
 	}()

@@ -134,6 +133,8 @@ func TestServerDial(t *testing.T) {
 	select {
 	case conn := <-accepted:
+		defer conn.Close()
+
 		select {
 		case peer := <-connected:
 			if peer.ID() != remid {

diff --git a/rpc/args_test.go b/rpc/args_test.go
index fc10d68cf..81a2972cd 100644
--- a/rpc/args_test.go
+++ b/rpc/args_test.go
@@ -2519,6 +2519,14 @@ func TestSigArgs(t *testing.T) {
 	if err := json.Unmarshal([]byte(input), &args); err != nil {
 		t.Error(err)
 	}
+
+	if expected.From != args.From {
+		t.Errorf("From should be %v but is %v", expected.From, args.From)
+	}
+
+	if expected.Data != args.Data {
+		t.Errorf("Data should be %v but is %v", expected.Data, args.Data)
+	}
 }

 func TestSigArgsEmptyData(t *testing.T) {

diff --git a/tests/helper/vm.go b/tests/helper/vm.go
index 87b2e070d..e29a2d8ee 100644
--- a/tests/helper/vm.go
+++ b/tests/helper/vm.go
@@ -190,7 +190,7 @@ func RunState(statedb *state.StateDB, env, tx map[string]string) ([]byte, state.
 	vmenv := NewEnvFromMap(statedb, env, tx)
 	vmenv.origin = common.BytesToAddress(keyPair.Address())
 	ret, _, err := core.ApplyMessage(vmenv, message, coinbase)
-	if core.IsNonceErr(err) || core.IsInvalidTxErr(err) {
+	if core.IsNonceErr(err) || core.IsInvalidTxErr(err) || state.IsGasLimitErr(err) {
 		statedb.Set(snapshot)
 	}
 	statedb.Update()
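
The new Downloader.Stats above derives its estimate as elapsed import time divided by blocks imported, multiplied by everything still outstanding, and the admin.progress binding rounds that value to whole seconds before printing it. Below is a minimal, standalone Go sketch of just that arithmetic; the numbers are invented and none of the real Downloader types are used, only the formula and the rounding mirror the patch.

// eta_sketch.go — standalone illustration of the ETA arithmetic in Stats.
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical snapshot of a sync in progress (all values made up).
	var (
		importStart = time.Now().Add(-90 * time.Second) // when the last batch was taken from the cache
		importDone  = 256                               // blocks from that batch already imported
		pending     = 1024                              // hashes still being downloaded
		cached      = 512                               // blocks downloaded but not yet taken
		importing   = 128                               // blocks taken but not yet imported
	)
	// Average time per imported block times everything still outstanding,
	// the same shape as the estimate computed in Stats.
	estimate := time.Since(importStart) / time.Duration(importDone) * time.Duration(pending+cached+importing)

	// admin.progress truncates the estimate to whole seconds before display.
	fmt.Printf("pending=%d cached=%d importing=%d estimate=%s\n",
		pending, cached, importing, (estimate/time.Second*time.Second).String())
}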
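
The fetcher change above replaces the single announcement per hash with a slice of announcements and, once notifyArriveTimeout passes (judged on the first announcement), picks one announcing peer at random as the fetch target. The following standalone sketch shows that bookkeeping pattern with placeholder types; it is not the real eth package code.

// announce_sketch.go — standalone sketch of the multi-source announce bookkeeping.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// announce is a stand-in for the real *blockAnnounce.
type announce struct {
	peer string    // placeholder for the announcing peer
	time time.Time // when the hash was announced
}

func main() {
	notifyArriveTimeout := 500 * time.Millisecond

	// Several peers may announce the same block hash.
	announces := map[string][]announce{
		"0xabcd": {
			{peer: "peer-1", time: time.Now().Add(-time.Second)},
			{peer: "peer-2", time: time.Now().Add(-time.Second)},
		},
	}
	for hash, all := range announces {
		// Timeout is measured against the first announcement, as in the patch.
		if time.Since(all[0].time) > notifyArriveTimeout {
			// Pick a random source so a single bad peer cannot stall the fetch.
			picked := all[rand.Intn(len(all))]
			fmt.Printf("fetching %s from %s\n", hash, picked.peer)
		}
	}
}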