path: root/core/database_util.go
author     Felix Lange <fjl@users.noreply.github.com>   2017-09-10 00:03:07 +0800
committer  Péter Szilágyi <peterke@gmail.com>            2017-09-10 00:03:07 +0800
commit     10181b57a9fb648f5fd424ca611820a3cf42c42b (patch)
tree       8508c139bb867a6d2126fcbe6500cb08025ddbc1 /core/database_util.go
parent     ac193e36ce4bce752717124433a8ce84c347dbf7 (diff)
core, eth/downloader: commit block data using batches (#15115)
* ethdb: add Putter interface and Has method
* ethdb: improve docs and add IdealBatchSize
* ethdb: remove memory batch lock
  Batches are not safe for concurrent use.
* core: use ethdb.Putter for Write* functions
  This covers the easy cases.
* core/state: simplify StateSync
* trie: optimize local node check
* ethdb: add ValueSize to Batch
* core: optimize HasHeader check
  This avoids one random database read to get the block number. For many
  uses of HasHeader, the expectation is that it's actually there. Using Has
  avoids a load + decode of the value.
* core: write fast sync block data in batches
  Collect writes into batches up to the ideal size instead of issuing many
  small, concurrent writes.
* eth/downloader: commit larger state batches
  Collect nodes into a batch up to the ideal size instead of committing
  whenever a node is received.
* core: optimize HasBlock check
  This avoids a random database read to get the number.
* core: use numberCache in HasHeader
  numberCache has higher capacity, increasing the odds of finding the header
  without a database lookup.
* core: write imported block data using a batch
  Restore batch writes of state and add blocks, tx entries, receipts to the
  same batch. The change also simplifies the miner.
  This commit also removes posting of logs when a forked block is imported.
* core: fix DB write error handling
* ethdb: use RLock for Has
* core: fix HasBlock comment
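
The heart of the change is the new ethdb.Putter abstraction: the Write* helpers in this file now accept anything with a Put method, so the same code can target either the database directly or an in-memory batch whose flushing the caller controls (which is why WriteTxLookupEntries below no longer builds and writes its own batch). The following is a minimal sketch of the batching pattern the message describes, with interface shapes paraphrased from the commit message; the real definitions and the real IdealBatchSize live in the ethdb package, and writeAll is a hypothetical helper, not code from this change:

    package example

    // Putter is the minimal write interface the commit introduces: anything
    // with a Put method. Both the database and its batches satisfy it.
    type Putter interface {
        Put(key, value []byte) error
    }

    // Batch queues writes in memory; ValueSize reports how much data is
    // queued and Write flushes it (shape paraphrased from ethdb).
    type Batch interface {
        Putter
        ValueSize() int
        Write() error
    }

    // Batcher stands in for the database type that can hand out batches.
    type Batcher interface {
        NewBatch() Batch
    }

    // IdealBatchSize is assumed here purely for illustration; the real
    // constant is defined in the ethdb package.
    const IdealBatchSize = 100 * 1024

    // writeAll is a hypothetical helper showing the batching pattern from
    // the commit message: collect puts, commit once the batch approaches
    // the ideal size, then continue with a fresh batch.
    func writeAll(db Batcher, items map[string][]byte) error {
        batch := db.NewBatch()
        for k, v := range items {
            if err := batch.Put([]byte(k), v); err != nil {
                return err
            }
            if batch.ValueSize() >= IdealBatchSize {
                if err := batch.Write(); err != nil {
                    return err
                }
                batch = db.NewBatch() // start over once committed
            }
        }
        return batch.Write() // flush whatever is left
    }

With that shape in mind, the diff below is largely mechanical: DatabaseWriter disappears in favour of ethdb.Putter, the header and body keys move into small helpers, and WriteTxLookupEntries leaves batching to its caller.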
Diffstat (limited to 'core/database_util.go')
-rw-r--r--  core/database_util.go  |  53
1 file changed, 25 insertions, 28 deletions
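
One more optimization from the message is worth sketching before the diff itself: the HasHeader/HasBlock checks now rely on the new Has method, so an existence test no longer loads and RLP-decodes a value it is about to discard. Continuing the example package above, this is a hypothetical, simplified stand-in (the real checks live on the chain types, not in database_util.go), assuming the Has(key) (bool, error) shape the commit adds to ethdb:

    // Haser is assumed to match the Has method this commit adds to ethdb.
    type Haser interface {
        Has(key []byte) (bool, error)
    }

    // hasEntry is a hypothetical existence check: with Has, presence of the
    // key is enough, and the value (e.g. a full RLP-encoded header) is never
    // read or decoded. The key would be built by a helper such as headerKey
    // from the diff below.
    func hasEntry(db Haser, key []byte) bool {
        ok, _ := db.Has(key)
        return ok
    }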
diff --git a/core/database_util.go b/core/database_util.go
index 989071104..1730a048e 100644
--- a/core/database_util.go
+++ b/core/database_util.go
@@ -38,11 +38,6 @@ type DatabaseReader interface {
Get(key []byte) (value []byte, err error)
}
-// DatabaseWriter wraps the Put method of a backing data store.
-type DatabaseWriter interface {
- Put(key, value []byte) error
-}
-
// DatabaseDeleter wraps the Delete method of a backing data store.
type DatabaseDeleter interface {
Delete(key []byte) error
@@ -154,7 +149,7 @@ func GetHeadFastBlockHash(db DatabaseReader) common.Hash {
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found.
func GetHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
- data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
+ data, _ := db.Get(headerKey(hash, number))
return data
}
@@ -175,10 +170,18 @@ func GetHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Header
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
- data, _ := db.Get(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
+ data, _ := db.Get(blockBodyKey(hash, number))
return data
}
+func headerKey(hash common.Hash, number uint64) []byte {
+ return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
+func blockBodyKey(hash common.Hash, number uint64) []byte {
+ return append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
// GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash, nil if none found.
func GetBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
@@ -340,7 +343,7 @@ func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash)
}
// WriteCanonicalHash stores the canonical hash for the given block number.
-func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) error {
+func WriteCanonicalHash(db ethdb.Putter, hash common.Hash, number uint64) error {
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
if err := db.Put(key, hash.Bytes()); err != nil {
log.Crit("Failed to store number to hash mapping", "err", err)
@@ -349,7 +352,7 @@ func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) erro
}
// WriteHeadHeaderHash stores the head header's hash.
-func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) error {
+func WriteHeadHeaderHash(db ethdb.Putter, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last header's hash", "err", err)
}
@@ -357,7 +360,7 @@ func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) error {
}
// WriteHeadBlockHash stores the head block's hash.
-func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) error {
+func WriteHeadBlockHash(db ethdb.Putter, hash common.Hash) error {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last block's hash", "err", err)
}
@@ -365,7 +368,7 @@ func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) error {
}
// WriteHeadFastBlockHash stores the fast head block's hash.
-func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) error {
+func WriteHeadFastBlockHash(db ethdb.Putter, hash common.Hash) error {
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last fast block's hash", "err", err)
}
@@ -373,7 +376,7 @@ func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) error {
}
// WriteHeader serializes a block header into the database.
-func WriteHeader(db DatabaseWriter, header *types.Header) error {
+func WriteHeader(db ethdb.Putter, header *types.Header) error {
data, err := rlp.EncodeToBytes(header)
if err != nil {
return err
@@ -393,7 +396,7 @@ func WriteHeader(db DatabaseWriter, header *types.Header) error {
}
// WriteBody serializes the body of a block into the database.
-func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.Body) error {
+func WriteBody(db ethdb.Putter, hash common.Hash, number uint64, body *types.Body) error {
data, err := rlp.EncodeToBytes(body)
if err != nil {
return err
@@ -402,7 +405,7 @@ func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.B
}
// WriteBodyRLP writes a serialized body of a block into the database.
-func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) error {
+func WriteBodyRLP(db ethdb.Putter, hash common.Hash, number uint64, rlp rlp.RawValue) error {
key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
if err := db.Put(key, rlp); err != nil {
log.Crit("Failed to store block body", "err", err)
@@ -411,7 +414,7 @@ func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.Ra
}
// WriteTd serializes the total difficulty of a block into the database.
-func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) error {
+func WriteTd(db ethdb.Putter, hash common.Hash, number uint64, td *big.Int) error {
data, err := rlp.EncodeToBytes(td)
if err != nil {
return err
@@ -424,7 +427,7 @@ func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) er
}
// WriteBlock serializes a block into the database, header and body separately.
-func WriteBlock(db DatabaseWriter, block *types.Block) error {
+func WriteBlock(db ethdb.Putter, block *types.Block) error {
// Store the body first to retain database consistency
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return err
@@ -439,7 +442,7 @@ func WriteBlock(db DatabaseWriter, block *types.Block) error {
// WriteBlockReceipts stores all the transaction receipts belonging to a block
// as a single receipt slice. This is used during chain reorganisations for
// rescheduling dropped transactions.
-func WriteBlockReceipts(db DatabaseWriter, hash common.Hash, number uint64, receipts types.Receipts) error {
+func WriteBlockReceipts(db ethdb.Putter, hash common.Hash, number uint64, receipts types.Receipts) error {
// Convert the receipts into their storage form and serialize them
storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts {
@@ -459,9 +462,7 @@ func WriteBlockReceipts(db DatabaseWriter, hash common.Hash, number uint64, rece
// WriteTxLookupEntries stores a positional metadata for every transaction from
// a block, enabling hash based transaction and receipt lookups.
-func WriteTxLookupEntries(db ethdb.Database, block *types.Block) error {
- batch := db.NewBatch()
-
+func WriteTxLookupEntries(db ethdb.Putter, block *types.Block) error {
// Iterate over each transaction and encode its metadata
for i, tx := range block.Transactions() {
entry := txLookupEntry{
@@ -473,20 +474,16 @@ func WriteTxLookupEntries(db ethdb.Database, block *types.Block) error {
if err != nil {
return err
}
- if err := batch.Put(append(lookupPrefix, tx.Hash().Bytes()...), data); err != nil {
+ if err := db.Put(append(lookupPrefix, tx.Hash().Bytes()...), data); err != nil {
return err
}
}
- // Write the scheduled data into the database
- if err := batch.Write(); err != nil {
- log.Crit("Failed to store lookup entries", "err", err)
- }
return nil
}
// WriteBloomBits writes the compressed bloom bits vector belonging to the given
// section and bit index.
-func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Hash, bits []byte) {
+func WriteBloomBits(db ethdb.Putter, bit uint, section uint64, head common.Hash, bits []byte) {
key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
binary.BigEndian.PutUint16(key[1:], uint16(bit))
@@ -572,13 +569,13 @@ func GetBlockChainVersion(db DatabaseReader) int {
}
// WriteBlockChainVersion writes vsn as the version number to db.
-func WriteBlockChainVersion(db DatabaseWriter, vsn int) {
+func WriteBlockChainVersion(db ethdb.Putter, vsn int) {
enc, _ := rlp.EncodeToBytes(uint(vsn))
db.Put([]byte("BlockchainVersion"), enc)
}
// WriteChainConfig writes the chain config settings to the database.
-func WriteChainConfig(db DatabaseWriter, hash common.Hash, cfg *params.ChainConfig) error {
+func WriteChainConfig(db ethdb.Putter, hash common.Hash, cfg *params.ChainConfig) error {
// short circuit and ignore if nil config. GetChainConfig
// will return a default.
if cfg == nil {