author     Felix Lange <fjl@users.noreply.github.com>   2017-09-10 00:03:07 +0800
committer  Péter Szilágyi <peterke@gmail.com>           2017-09-10 00:03:07 +0800
commit     10181b57a9fb648f5fd424ca611820a3cf42c42b (patch)
tree       8508c139bb867a6d2126fcbe6500cb08025ddbc1 /trie
parent     ac193e36ce4bce752717124433a8ce84c347dbf7 (diff)
core, eth/downloader: commit block data using batches (#15115)
* ethdb: add Putter interface and Has method
* ethdb: improve docs and add IdealBatchSize
* ethdb: remove memory batch lock
Batches are not safe for concurrent use.
* core: use ethdb.Putter for Write* functions
This covers the easy cases.
* core/state: simplify StateSync
* trie: optimize local node check
* ethdb: add ValueSize to Batch
* core: optimize HasHeader check
This avoids one random database read to get the block number. For many uses
of HasHeader, the expectation is that the header is actually there. Using Has
avoids a load + decode of the value.
* core: write fast sync block data in batches
Collect writes into batches up to the ideal size instead of issuing many
small, concurrent writes (a sketch of this pattern follows the commit message).
* eth/downloader: commit larger state batches
Collect nodes into a batch up to the ideal size instead of committing
whenever a node is received.
* core: optimize HasBlock check
This avoids a random database read to get the number.
* core: use numberCache in HasHeader
numberCache has higher capacity, increasing the odds of finding the
header without a database lookup.
* core: write imported block data using a batch
Restore batch writes of state, and add blocks, tx entries, and receipts to
the same batch. The change also simplifies the miner.
This commit also removes posting of logs when a forked block is imported.
* core: fix DB write error handling
* ethdb: use RLock for Has
* core: fix HasBlock comment
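
The batching items above rest on two small ethdb pieces introduced by this change: the Putter interface (the write half shared by the database and its batches) and Batch.ValueSize, which lets callers flush once a batch reaches IdealBatchSize. Below is a minimal, self-contained sketch of that pattern, referenced from the fast-sync item above; Putter, Batch and IdealBatchSize mirror the shapes described in this commit, while Database, memDB, memBatch and the writeBlock* helpers are illustrative stand-ins rather than the real ethdb or core code.

```go
package main

import "fmt"

// IdealBatchSize is the value-size threshold at which a batch is flushed.
// ethdb uses 100 KiB; the exact number does not matter for the pattern.
const IdealBatchSize = 100 * 1024

// Putter wraps the write operation supported by both batches and regular
// databases, so the same Write*-style helper can target either one.
type Putter interface {
	Put(key []byte, value []byte) error
}

// Batch queues writes and commits them to its host database when Write is
// called. Batches are not safe for concurrent use.
type Batch interface {
	Putter
	ValueSize() int // amount of data queued in the batch
	Write() error   // commit the queued writes
}

// Database is the minimal surface this example needs from a key-value store.
type Database interface {
	Putter
	NewBatch() Batch
}

// writeBlockBody is a Write*-style helper: because it only needs Put, the
// caller can hand it the database itself or a pending batch.
func writeBlockBody(db Putter, hash, encodedBody []byte) error {
	return db.Put(append([]byte("body-"), hash...), encodedBody)
}

// writeBlockData collects many small writes into batches and commits each
// batch once it crosses the ideal size, instead of issuing one database
// write per entry.
func writeBlockData(db Database, bodies map[string][]byte) error {
	batch := db.NewBatch()
	for hash, body := range bodies {
		if err := writeBlockBody(batch, []byte(hash), body); err != nil {
			return err
		}
		if batch.ValueSize() >= IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch = db.NewBatch() // start a fresh batch after each commit
		}
	}
	return batch.Write() // flush the remainder
}

// memDB and memBatch are toy implementations used only to run the example.
type memDB struct{ data map[string][]byte }

func (d *memDB) Put(key, value []byte) error { d.data[string(key)] = value; return nil }
func (d *memDB) NewBatch() Batch             { return &memBatch{db: d} }

type kv struct{ k, v []byte }

type memBatch struct {
	db     *memDB
	writes []kv
	size   int
}

func (b *memBatch) Put(key, value []byte) error {
	b.writes = append(b.writes, kv{key, value})
	b.size += len(value)
	return nil
}

func (b *memBatch) ValueSize() int { return b.size }

func (b *memBatch) Write() error {
	for _, w := range b.writes {
		b.db.data[string(w.k)] = w.v
	}
	return nil
}

func main() {
	db := &memDB{data: make(map[string][]byte)}
	bodies := map[string][]byte{"h1": []byte("body one"), "h2": []byte("body two")}
	if err := writeBlockData(db, bodies); err != nil {
		panic(err)
	}
	fmt.Println(len(db.data), "bodies written") // 2 bodies written
}
```

In the change itself, core's Write* functions take ethdb.Putter so block import and fast sync can route them through one batch, and eth/downloader applies the same ValueSize check against IdealBatchSize when committing state nodes.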
Diffstat (limited to 'trie')
-rw-r--r--   trie/sync.go | 5 ++---
-rw-r--r--   trie/trie.go | 1 +
2 files changed, 3 insertions(+), 3 deletions(-)
```diff
diff --git a/trie/sync.go b/trie/sync.go
index 1e4f8d87c..fea10051f 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -138,7 +138,7 @@ func (s *TrieSync) AddRawEntry(hash common.Hash, depth int, parent common.Hash)
 	if _, ok := s.membatch.batch[hash]; ok {
 		return
 	}
-	if blob, _ := s.database.Get(hash.Bytes()); blob != nil {
+	if ok, _ := s.database.Has(hash.Bytes()); ok {
 		return
 	}
 	// Assemble the new sub-trie sync request
@@ -296,8 +296,7 @@ func (s *TrieSync) children(req *request, object node) ([]*request, error) {
 		if _, ok := s.membatch.batch[hash]; ok {
 			continue
 		}
-		blob, _ := s.database.Get(node)
-		if local, err := decodeNode(node[:], blob, 0); local != nil && err == nil {
+		if ok, _ := s.database.Has(node); ok {
 			continue
 		}
 		// Locally unknown node, schedule for retrieval
diff --git a/trie/trie.go b/trie/trie.go
index a3151b1ce..7f69a3d1d 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -66,6 +66,7 @@ type Database interface {
 // DatabaseReader wraps the Get method of a backing store for the trie.
 type DatabaseReader interface {
 	Get(key []byte) (value []byte, err error)
+	Has(key []byte) (bool, error)
 }
 
 // DatabaseWriter wraps the Put method of a backing store for the trie.
```
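
The trie diff above is one instance of the pattern behind the HasHeader and HasBlock items in the commit message: asking the store whether a key exists with Has skips the value load (and, in the children check, the decode) that the old Get-based test paid for. A minimal, self-contained sketch of that check follows; the DatabaseReader shape matches trie/trie.go after this change, while memReader and hasNode are illustrative only.

```go
package main

import "fmt"

// DatabaseReader wraps the read methods of a backing store for the trie,
// matching trie/trie.go after this change.
type DatabaseReader interface {
	Get(key []byte) (value []byte, err error)
	Has(key []byte) (bool, error)
}

// memReader is a toy DatabaseReader backed by a map.
type memReader map[string][]byte

func (m memReader) Get(key []byte) ([]byte, error) { return m[string(key)], nil }

func (m memReader) Has(key []byte) (bool, error) {
	_, ok := m[string(key)]
	return ok, nil
}

// hasNode reports whether a trie node is already stored locally. Before this
// change the equivalent check fetched the value with Get (and, for children,
// decoded it); a plain Has answers the same question without either step.
func hasNode(db DatabaseReader, hash []byte) bool {
	ok, _ := db.Has(hash)
	return ok
}

func main() {
	db := memReader{"\x01": []byte("node rlp")}
	fmt.Println(hasNode(db, []byte{0x01})) // true: key present, value never loaded
	fmt.Println(hasNode(db, []byte{0x02})) // false: unknown node, schedule retrieval
}
```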