aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--cmd/geth/chaincmd.go71
-rw-r--r--core/blockchain.go17
-rw-r--r--core/blockchain_test.go2
-rw-r--r--core/vm/vm.go4
-rw-r--r--eth/downloader/downloader.go3
-rw-r--r--trie/sync.go32
6 files changed, 83 insertions, 46 deletions
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 20c7dc74c..c1bbbd8dc 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -20,7 +20,9 @@ import (
"fmt"
"os"
"path/filepath"
+ "runtime"
"strconv"
+ "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
@@ -85,37 +87,62 @@ func importChain(ctx *cli.Context) error {
chain, chainDb := utils.MakeChain(ctx, stack)
defer chainDb.Close()
+ // Start periodically gathering memory statistics (runtime.ReadMemStats), tracking peaks
+ var peakMemAlloc, peakMemSys uint64
+ go func() {
+ stats := new(runtime.MemStats)
+ for {
+ runtime.ReadMemStats(stats)
+ if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
+ atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
+ }
+ if atomic.LoadUint64(&peakMemSys) < stats.Sys {
+ atomic.StoreUint64(&peakMemSys, stats.Sys)
+ }
+ time.Sleep(5 * time.Second)
+ }
+ }()
// Import the chain
start := time.Now()
if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
utils.Fatalf("Import error: %v", err)
}
- fmt.Printf("Import done in %v.\n", time.Since(start))
+ fmt.Printf("Import done in %v.\n\n", time.Since(start))
- if db, ok := chainDb.(*ethdb.LDBDatabase); ok {
- // Output pre-compaction stats mostly to see the import trashing
- stats, err := db.LDB().GetProperty("leveldb.stats")
- if err != nil {
- utils.Fatalf("Failed to read database stats: %v", err)
- }
- fmt.Println(stats)
- fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses())
- fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())
+ // Output pre-compaction stats mostly to see the import thrashing
+ db := chainDb.(*ethdb.LDBDatabase)
- // Compact the entire database to more accurately measure disk io and print the stats
- start = time.Now()
- fmt.Println("Compacting entire database...")
- if err = db.LDB().CompactRange(util.Range{}); err != nil {
- utils.Fatalf("Compaction failed: %v", err)
- }
- fmt.Printf("Compaction done in %v.\n", time.Since(start))
+ stats, err := db.LDB().GetProperty("leveldb.stats")
+ if err != nil {
+ utils.Fatalf("Failed to read database stats: %v", err)
+ }
+ fmt.Println(stats)
+ fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses())
+ fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())
- stats, err = db.LDB().GetProperty("leveldb.stats")
- if err != nil {
- utils.Fatalf("Failed to read database stats: %v", err)
- }
- fmt.Println(stats)
+ // Print the memory statistics used by the importing
+ mem := new(runtime.MemStats)
+ runtime.ReadMemStats(mem)
+
+ fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
+ fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
+ fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
+ fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))
+
+ // Compact the entire database to more accurately measure disk io and print the stats
+ start = time.Now()
+ fmt.Println("Compacting entire database...")
+ if err = db.LDB().CompactRange(util.Range{}); err != nil {
+ utils.Fatalf("Compaction failed: %v", err)
}
+ fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
+
+ stats, err = db.LDB().GetProperty("leveldb.stats")
+ if err != nil {
+ utils.Fatalf("Failed to read database stats: %v", err)
+ }
+ fmt.Println(stats)
+
return nil
}
diff --git a/core/blockchain.go b/core/blockchain.go
index 5cf99cd8c..d806c143d 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -774,7 +774,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
if stats.ignored > 0 {
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
}
- glog.V(logger.Info).Infof("imported %d receipts%s in %9v. #%d [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
+ glog.V(logger.Info).Infof("imported %d receipts in %9v. #%d [%x… / %x…]%s", stats.processed, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4], ignored)
return 0, nil
}
@@ -981,6 +981,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
stats.processed++
if glog.V(logger.Info) {
+ stats.usedGas += usedGas.Uint64()
stats.report(chain, i)
}
}
@@ -993,6 +994,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// insertStats tracks and reports on block insertion.
type insertStats struct {
queued, processed, ignored int
+ usedGas uint64
lastIndex int
startTime time.Time
}
@@ -1004,10 +1006,15 @@ const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int) {
+ // Fetch the timings for the batch
var (
now = time.Now()
elapsed = now.Sub(st.startTime)
)
+ if elapsed == 0 { // Yes Windows, I'm looking at you
+ elapsed = 1
+ }
+ // If we're at the last block of the batch or report period reached, log
if index == len(chain)-1 || elapsed >= statsReportLimit {
start, end := chain[st.lastIndex], chain[index]
txcount := countTransactions(chain[st.lastIndex : index+1])
@@ -1016,7 +1023,13 @@ func (st *insertStats) report(chain []*types.Block, index int) {
if st.queued > 0 || st.ignored > 0 {
extra = fmt.Sprintf(" (%d queued %d ignored)", st.queued, st.ignored)
}
- glog.Infof("imported %d blocks%s, %5d txs in %9v. #%v [%x… / %x…]\n", st.processed, extra, txcount, common.PrettyDuration(elapsed), end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
+ hashes := ""
+ if st.processed > 1 {
+ hashes = fmt.Sprintf("%x… / %x…", start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
+ } else {
+ hashes = fmt.Sprintf("%x…", end.Hash().Bytes()[:4])
+ }
+ glog.Infof("imported %d blocks, %5d txs (%7.3f Mg) in %9v (%6.3f Mg/s). #%v [%s]%s", st.processed, txcount, float64(st.usedGas)/1000000, common.PrettyDuration(elapsed), float64(st.usedGas)*1000/float64(elapsed), end.Number(), hashes, extra)
*st = insertStats{startTime: now, lastIndex: index}
}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index de3ef0a9c..3c47b2fd9 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -436,7 +436,7 @@ func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, rec
return nil
}
func (bproc) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, vm.Logs, *big.Int, error) {
- return nil, nil, nil, nil
+ return nil, nil, new(big.Int), nil
}
func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
diff --git a/core/vm/vm.go b/core/vm/vm.go
index fcffcf317..205934822 100644
--- a/core/vm/vm.go
+++ b/core/vm/vm.go
@@ -78,7 +78,9 @@ func (evm *EVM) Run(contract *Contract, input []byte) (ret []byte, err error) {
codehash = crypto.Keccak256Hash(contract.Code)
}
var program *Program
- if evm.cfg.EnableJit {
+ if false {
+ // JIT disabled because it is not yet ready for the Homestead gas repricing.
+
// If the JIT is enabled check the status of the JIT program,
// if it doesn't exist compile a new program in a separate
// goroutine or wait for compilation to finish if the JIT is
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 526ecbeca..987be2b7a 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -948,11 +948,12 @@ func (d *Downloader) fetchNodeData() error {
}
d.syncStatsLock.Lock()
d.syncStatsStateDone += uint64(delivered)
+ syncStatsStateDone := d.syncStatsStateDone // Thread safe copy for the log below
d.syncStatsLock.Unlock()
// Log a message to the user and return
if delivered > 0 {
- glog.V(logger.Info).Infof("imported %d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), d.syncStatsStateDone, pending)
+ glog.V(logger.Info).Infof("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending)
}
})
}
diff --git a/trie/sync.go b/trie/sync.go
index 30caf6980..58b8a1fb6 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -31,9 +31,9 @@ var ErrNotRequested = errors.New("not requested")
// request represents a scheduled or already in-flight state retrieval request.
type request struct {
- hash common.Hash // Hash of the node data content to retrieve
- data []byte // Data content of the node, cached until all subtrees complete
- object *node // Target node to populate with retrieved data (hashnode originally)
+ hash common.Hash // Hash of the node data content to retrieve
+ data []byte // Data content of the node, cached until all subtrees complete
+ raw bool // Whether this is a raw entry (code) or a trie node
parents []*request // Parent state nodes referencing this entry (notify all upon completion)
depth int // Depth level within the trie the node is located to prioritise DFS
@@ -86,9 +86,7 @@ func (s *TrieSync) AddSubTrie(root common.Hash, depth int, parent common.Hash, c
return
}
// Assemble the new sub-trie sync request
- node := node(hashNode(root.Bytes()))
req := &request{
- object: &node,
hash: root,
depth: depth,
callback: callback,
@@ -120,6 +118,7 @@ func (s *TrieSync) AddRawEntry(hash common.Hash, depth int, parent common.Hash)
// Assemble the new sub-trie sync request
req := &request{
hash: hash,
+ raw: true,
depth: depth,
}
// If this sub-trie has a designated parent, link them together
@@ -152,7 +151,7 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) {
return i, ErrNotRequested
}
// If the item is a raw entry request, commit directly
- if request.object == nil {
+ if request.raw {
request.data = item.Data
s.commit(request, nil)
continue
@@ -162,11 +161,10 @@ func (s *TrieSync) Process(results []SyncResult) (int, error) {
if err != nil {
return i, err
}
- *request.object = node
request.data = item.Data
// Create and schedule a request for all the children nodes
- requests, err := s.children(request)
+ requests, err := s.children(request, node)
if err != nil {
return i, err
}
@@ -203,27 +201,25 @@ func (s *TrieSync) schedule(req *request) {
// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
-func (s *TrieSync) children(req *request) ([]*request, error) {
+func (s *TrieSync) children(req *request, object node) ([]*request, error) {
// Gather all the children of the node, irrelevant whether known or not
type child struct {
- node *node
+ node node
depth int
}
children := []child{}
- switch node := (*req.object).(type) {
+ switch node := (object).(type) {
case *shortNode:
- node = node.copy() // Prevents linking all downloaded nodes together.
children = []child{{
- node: &node.Val,
+ node: node.Val,
depth: req.depth + len(node.Key),
}}
case *fullNode:
- node = node.copy()
for i := 0; i < 17; i++ {
if node.Children[i] != nil {
children = append(children, child{
- node: &node.Children[i],
+ node: node.Children[i],
depth: req.depth + 1,
})
}
@@ -236,23 +232,21 @@ func (s *TrieSync) children(req *request) ([]*request, error) {
for _, child := range children {
// Notify any external watcher of a new key/value node
if req.callback != nil {
- if node, ok := (*child.node).(valueNode); ok {
+ if node, ok := (child.node).(valueNode); ok {
if err := req.callback(node, req.hash); err != nil {
return nil, err
}
}
}
// If the child references another node, resolve or schedule
- if node, ok := (*child.node).(hashNode); ok {
+ if node, ok := (child.node).(hashNode); ok {
// Try to resolve the node from the local database
blob, _ := s.database.Get(node)
if local, err := decodeNode(node[:], blob, 0); local != nil && err == nil {
- *child.node = local
continue
}
// Locally unknown node, schedule for retrieval
requests = append(requests, &request{
- object: child.node,
hash: common.BytesToHash(node),
parents: []*request{req},
depth: child.depth,