From 6cf0ab38bd0af77d81aad4c104979cebee9e3e63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 7 May 2018 14:35:06 +0300 Subject: core/rawdb: separate raw database access to own package (#16666) --- core/bench_test.go | 18 +- core/blockchain.go | 139 ++++---- core/blockchain_test.go | 31 +- core/chain_indexer.go | 7 +- core/chain_indexer_test.go | 7 +- core/database_util.go | 652 ----------------------------------- core/database_util_test.go | 388 --------------------- core/genesis.go | 55 ++- core/genesis_test.go | 3 +- core/headerchain.go | 77 +++-- core/rawdb/accessors_chain.go | 381 ++++++++++++++++++++ core/rawdb/accessors_chain_test.go | 319 +++++++++++++++++ core/rawdb/accessors_indexes.go | 119 +++++++ core/rawdb/accessors_indexes_test.go | 68 ++++ core/rawdb/accessors_metadata.go | 90 +++++ core/rawdb/interfaces.go | 33 ++ core/rawdb/schema.go | 79 +++++ 17 files changed, 1255 insertions(+), 1211 deletions(-) delete mode 100644 core/database_util.go delete mode 100644 core/database_util_test.go create mode 100644 core/rawdb/accessors_chain.go create mode 100644 core/rawdb/accessors_chain_test.go create mode 100644 core/rawdb/accessors_indexes.go create mode 100644 core/rawdb/accessors_indexes_test.go create mode 100644 core/rawdb/accessors_metadata.go create mode 100644 core/rawdb/interfaces.go create mode 100644 core/rawdb/schema.go (limited to 'core') diff --git a/core/bench_test.go b/core/bench_test.go index e23f0d19d..ee30cfed0 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -234,13 +235,15 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) { ReceiptHash: types.EmptyRootHash, } hash = header.Hash() - WriteHeader(db, header) - WriteCanonicalHash(db, hash, n) - WriteTd(db, hash, n, big.NewInt(int64(n+1))) + + rawdb.WriteHeader(db, header) + rawdb.WriteCanonicalHash(db, hash, n) + rawdb.WriteTd(db, hash, n, big.NewInt(int64(n+1))) + if full || n == 0 { block := types.NewBlockWithHeader(header) - WriteBody(db, hash, n, block.Body()) - WriteBlockReceipts(db, hash, n, nil) + rawdb.WriteBody(db, hash, n, block.Body()) + rawdb.WriteReceipts(db, hash, n, nil) } } } @@ -292,11 +295,10 @@ func benchReadChain(b *testing.B, full bool, count uint64) { header := chain.GetHeaderByNumber(n) if full { hash := header.Hash() - GetBody(db, hash, n) - GetBlockReceipts(db, hash, n) + rawdb.ReadBody(db, hash, n) + rawdb.ReadReceipts(db, hash, n) } } - chain.Stop() db.Close() } diff --git a/core/blockchain.go b/core/blockchain.go index b33eb85a4..f74a0f5b2 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -202,7 +203,7 @@ func (bc *BlockChain) getProcInterrupt() bool { // assumes that the chain manager mutex is held. 
func (bc *BlockChain) loadLastState() error { // Restore the last known head block - head := GetHeadBlockHash(bc.db) + head := rawdb.ReadHeadBlockHash(bc.db) if head == (common.Hash{}) { // Corrupt or empty database, init from scratch log.Warn("Empty database, resetting chain") @@ -228,7 +229,7 @@ func (bc *BlockChain) loadLastState() error { // Restore the last known head header currentHeader := currentBlock.Header() - if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) { + if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { if header := bc.GetHeaderByHash(head); header != nil { currentHeader = header } @@ -237,7 +238,7 @@ func (bc *BlockChain) loadLastState() error { // Restore the last known head fast block bc.currentFastBlock.Store(currentBlock) - if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) { + if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { if block := bc.GetBlockByHash(head); block != nil { bc.currentFastBlock.Store(block) } @@ -269,7 +270,7 @@ func (bc *BlockChain) SetHead(head uint64) error { // Rewind the header chain, deleting all block bodies until then delFn := func(hash common.Hash, num uint64) { - DeleteBody(bc.db, hash, num) + rawdb.DeleteBody(bc.db, hash, num) } bc.hc.SetHead(head, delFn) currentHeader := bc.hc.CurrentHeader() @@ -303,12 +304,10 @@ func (bc *BlockChain) SetHead(head uint64) error { } currentBlock := bc.CurrentBlock() currentFastBlock := bc.CurrentFastBlock() - if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil { - log.Crit("Failed to reset head full block", "err", err) - } - if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil { - log.Crit("Failed to reset head fast block", "err", err) - } + + rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash()) + rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()) + return bc.loadLastState() } @@ -406,9 +405,8 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { log.Crit("Failed to write genesis block TD", "err", err) } - if err := WriteBlock(bc.db, genesis); err != nil { - log.Crit("Failed to write genesis block", "err", err) - } + rawdb.WriteBlock(bc.db, genesis) + bc.genesisBlock = genesis bc.insert(bc.genesisBlock) bc.currentBlock.Store(bc.genesisBlock) @@ -474,24 +472,19 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { // Note, this function assumes that the `mu` mutex is held! 
func (bc *BlockChain) insert(block *types.Block) { // If the block is on a side chain or an unknown one, force other heads onto it too - updateHeads := GetCanonicalHash(bc.db, block.NumberU64()) != block.Hash() + updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() // Add the block to the canonical chain number scheme and mark as the head - if err := WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()); err != nil { - log.Crit("Failed to insert block number", "err", err) - } - if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil { - log.Crit("Failed to insert head block hash", "err", err) - } + rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(bc.db, block.Hash()) + bc.currentBlock.Store(block) // If the block is better than our head or is on a different chain, force update heads if updateHeads { bc.hc.SetCurrentHeader(block.Header()) + rawdb.WriteHeadFastBlockHash(bc.db, block.Hash()) - if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil { - log.Crit("Failed to insert head fast block hash", "err", err) - } bc.currentFastBlock.Store(block) } } @@ -509,7 +502,11 @@ func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { body := cached.(*types.Body) return body } - body := GetBody(bc.db, hash, bc.hc.GetBlockNumber(hash)) + number := bc.hc.GetBlockNumber(hash) + if number == nil { + return nil + } + body := rawdb.ReadBody(bc.db, hash, *number) if body == nil { return nil } @@ -525,7 +522,11 @@ func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { if cached, ok := bc.bodyRLPCache.Get(hash); ok { return cached.(rlp.RawValue) } - body := GetBodyRLP(bc.db, hash, bc.hc.GetBlockNumber(hash)) + number := bc.hc.GetBlockNumber(hash) + if number == nil { + return nil + } + body := rawdb.ReadBodyRLP(bc.db, hash, *number) if len(body) == 0 { return nil } @@ -539,8 +540,7 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { if bc.blockCache.Contains(hash) { return true } - ok, _ := bc.db.Has(blockBodyKey(hash, number)) - return ok + return rawdb.HasBody(bc.db, hash, number) } // HasState checks if state trie is fully present in the database or not. @@ -567,7 +567,7 @@ func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { if block, ok := bc.blockCache.Get(hash); ok { return block.(*types.Block) } - block := GetBlock(bc.db, hash, number) + block := rawdb.ReadBlock(bc.db, hash, number) if block == nil { return nil } @@ -578,13 +578,17 @@ func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { // GetBlockByHash retrieves a block from the database by hash, caching it if found. func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block { - return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash)) + number := bc.hc.GetBlockNumber(hash) + if number == nil { + return nil + } + return bc.GetBlock(hash, *number) } // GetBlockByNumber retrieves a block from the database by number, caching it // (associated with its hash) if found. func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { - hash := GetCanonicalHash(bc.db, number) + hash := rawdb.ReadCanonicalHash(bc.db, number) if hash == (common.Hash{}) { return nil } @@ -593,21 +597,28 @@ func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block { // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { - return GetBlockReceipts(bc.db, hash, GetBlockNumber(bc.db, hash)) + number := rawdb.ReadHeaderNumber(bc.db, hash) + if number == nil { + return nil + } + return rawdb.ReadReceipts(bc.db, hash, *number) } // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. // [deprecated by eth/62] func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { number := bc.hc.GetBlockNumber(hash) + if number == nil { + return nil + } for i := 0; i < n; i++ { - block := bc.GetBlock(hash, number) + block := bc.GetBlock(hash, *number) if block == nil { break } blocks = append(blocks, block) hash = block.ParentHash() - number-- + *number-- } return } @@ -712,12 +723,12 @@ func (bc *BlockChain) Rollback(chain []common.Hash) { if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash { newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1) bc.currentFastBlock.Store(newFastBlock) - WriteHeadFastBlockHash(bc.db, newFastBlock.Hash()) + rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash()) } if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash { newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1) bc.currentBlock.Store(newBlock) - WriteHeadBlockHash(bc.db, newBlock.Hash()) + rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash()) } } } @@ -802,15 +813,10 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ return i, fmt.Errorf("failed to set receipts data: %v", err) } // Write all the data out into the database - if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil { - return i, fmt.Errorf("failed to write block body: %v", err) - } - if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { - return i, fmt.Errorf("failed to write block receipts: %v", err) - } - if err := WriteTxLookupEntries(batch, block); err != nil { - return i, fmt.Errorf("failed to write lookup metadata: %v", err) - } + rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()) + rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) + rawdb.WriteTxLookupEntries(batch, block) + stats.processed++ if batch.ValueSize() >= ethdb.IdealBatchSize { @@ -834,9 +840,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case currentFastBlock := bc.CurrentFastBlock() if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 { - if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil { - log.Crit("Failed to update head fast block hash", "err", err) - } + rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) bc.currentFastBlock.Store(head) } } @@ -864,9 +868,8 @@ func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (e if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil { return err } - if err := WriteBlock(bc.db, block); err != nil { - return err - } + rawdb.WriteBlock(bc.db, block) + return nil } @@ -894,9 +897,8 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. } // Write other block data using a batch. 
batch := bc.db.NewBatch() - if err := WriteBlock(batch, block); err != nil { - return NonStatTy, err - } + rawdb.WriteBlock(batch, block) + root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) if err != nil { return NonStatTy, err @@ -953,9 +955,8 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. } } } - if err := WriteBlockReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { - return NonStatTy, err - } + rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts) + // If the total difficulty is higher than our known, add it to the canonical chain // Second clause in the if statement reduces the vulnerability to selfish mining. // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf @@ -972,14 +973,10 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. return NonStatTy, err } } - // Write the positional metadata for transaction and receipt lookups - if err := WriteTxLookupEntries(batch, block); err != nil { - return NonStatTy, err - } - // Write hash preimages - if err := WritePreimages(bc.db, block.NumberU64(), state.Preimages()); err != nil { - return NonStatTy, err - } + // Write the positional metadata for transaction/receipt lookups and preimages + rawdb.WriteTxLookupEntries(batch, block) + rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages()) + status = CanonStatTy } else { status = SideStatTy @@ -1256,9 +1253,13 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { // collectLogs collects the logs that were generated during the // processing of the block that corresponds with the given hash. // These logs are later announced as deleted. - collectLogs = func(h common.Hash) { + collectLogs = func(hash common.Hash) { // Coalesce logs and set 'Removed'. - receipts := GetBlockReceipts(bc.db, h, bc.hc.GetBlockNumber(h)) + number := bc.hc.GetBlockNumber(hash) + if number == nil { + return + } + receipts := rawdb.ReadReceipts(bc.db, hash, *number) for _, receipt := range receipts { for _, log := range receipt.Logs { del := *log @@ -1327,9 +1328,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { // insert the block in the canonical way, re-writing history bc.insert(newChain[i]) // write lookup entries for hash based transaction/receipt searches - if err := WriteTxLookupEntries(bc.db, newChain[i]); err != nil { - return err - } + rawdb.WriteTxLookupEntries(bc.db, newChain[i]) addedTxs = append(addedTxs, newChain[i].Transactions()...) 
} // calculate the difference between deleted and added transactions @@ -1337,7 +1336,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { // When transactions get deleted from the database that means the // receipts that were created in the fork must also be deleted for _, tx := range diff { - DeleteTxLookupEntry(bc.db, tx.Hash()) + rawdb.DeleteTxLookupEntry(bc.db, tx.Hash()) } if len(deletedLogs) > 0 { go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 375823b57..c1638c31f 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -128,8 +129,8 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error { return err } blockchain.mu.Lock() - WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash()))) - WriteBlock(blockchain.db, block) + rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTdByHash(block.ParentHash()))) + rawdb.WriteBlock(blockchain.db, block) statedb.Commit(false) blockchain.mu.Unlock() } @@ -146,8 +147,8 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error } // Manually insert the header into the database, but don't reorganise (allows subsequent testing) blockchain.mu.Lock() - WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTdByHash(header.ParentHash))) - WriteHeader(blockchain.db, header) + rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTdByHash(header.ParentHash))) + rawdb.WriteHeader(blockchain.db, header) blockchain.mu.Unlock() } return nil @@ -173,7 +174,7 @@ func TestLastBlock(t *testing.T) { if _, err := blockchain.InsertChain(blocks); err != nil { t.Fatalf("Failed to insert block: %v", err) } - if blocks[len(blocks)-1].Hash() != GetHeadBlockHash(blockchain.db) { + if blocks[len(blocks)-1].Hash() != rawdb.ReadHeadBlockHash(blockchain.db) { t.Fatalf("Write/Get HeadBlockHash failed") } } @@ -639,13 +640,13 @@ func TestFastVsFullChains(t *testing.T) { } else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(ablock.Uncles()) { t.Errorf("block #%d [%x]: uncles mismatch: have %v, want %v", num, hash, fblock.Uncles(), ablock.Uncles()) } - if freceipts, areceipts := GetBlockReceipts(fastDb, hash, GetBlockNumber(fastDb, hash)), GetBlockReceipts(archiveDb, hash, GetBlockNumber(archiveDb, hash)); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) { + if freceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash)), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash)); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) { t.Errorf("block #%d [%x]: receipts mismatch: have %v, want %v", num, hash, freceipts, areceipts) } } // Check that the canonical chains are the same between the databases for i := 0; i < len(blocks)+1; i++ { - if fhash, ahash := GetCanonicalHash(fastDb, uint64(i)), GetCanonicalHash(archiveDb, uint64(i)); fhash != ahash { + if fhash, ahash := 
rawdb.ReadCanonicalHash(fastDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); fhash != ahash { t.Errorf("block #%d: canonical hash mismatch: have %v, want %v", i, fhash, ahash) } } @@ -821,28 +822,28 @@ func TestChainTxReorgs(t *testing.T) { // removed tx for i, tx := range (types.Transactions{pastDrop, freshDrop}) { - if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil { + if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn != nil { t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn) } - if rcpt, _, _, _ := GetReceipt(db, tx.Hash()); rcpt != nil { + if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash()); rcpt != nil { t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt) } } // added tx for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) { - if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil { + if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil { t.Errorf("add %d: expected tx to be found", i) } - if rcpt, _, _, _ := GetReceipt(db, tx.Hash()); rcpt == nil { + if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash()); rcpt == nil { t.Errorf("add %d: expected receipt to be found", i) } } // shared tx for i, tx := range (types.Transactions{postponed, swapped}) { - if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil { + if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil { t.Errorf("share %d: expected tx to be found", i) } - if rcpt, _, _, _ := GetReceipt(db, tx.Hash()); rcpt == nil { + if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash()); rcpt == nil { t.Errorf("share %d: expected receipt to be found", i) } } @@ -997,14 +998,14 @@ func TestCanonicalBlockRetrieval(t *testing.T) { // try to retrieve a block by its canonical hash and see if the block data can be retrieved. for { - ch := GetCanonicalHash(blockchain.db, block.NumberU64()) + ch := rawdb.ReadCanonicalHash(blockchain.db, block.NumberU64()) if ch == (common.Hash{}) { continue // busy wait for canonical hash to be written } if ch != block.Hash() { t.Fatalf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex()) } - fb := GetBlock(blockchain.db, ch, block.NumberU64()) + fb := rawdb.ReadBlock(blockchain.db, ch, block.NumberU64()) if fb == nil { t.Fatalf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex()) } diff --git a/core/chain_indexer.go b/core/chain_indexer.go index 158ed8324..0b927116d 100644 --- a/core/chain_indexer.go +++ b/core/chain_indexer.go @@ -24,6 +24,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" @@ -206,7 +207,7 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainE // TODO(karalabe): This operation is expensive and might block, causing the event system to // potentially also lock up. We need to do with on a different thread somehow. 
- if h := FindCommonAncestor(c.chainDb, prevHeader, header); h != nil { + if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil { c.newHead(h.Number.Uint64(), true) } } @@ -349,11 +350,11 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com } for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ { - hash := GetCanonicalHash(c.chainDb, number) + hash := rawdb.ReadCanonicalHash(c.chainDb, number) if hash == (common.Hash{}) { return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number) } - header := GetHeader(c.chainDb, hash, number) + header := rawdb.ReadHeader(c.chainDb, hash, number) if header == nil { return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4]) } else if header.ParentHash != lastHead { diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go index 9fc09eda5..3205616e8 100644 --- a/core/chain_indexer_test.go +++ b/core/chain_indexer_test.go @@ -24,6 +24,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" ) @@ -92,10 +93,10 @@ func testChainIndexer(t *testing.T, count int) { inject := func(number uint64) { header := &types.Header{Number: big.NewInt(int64(number)), Extra: big.NewInt(rand.Int63()).Bytes()} if number > 0 { - header.ParentHash = GetCanonicalHash(db, number-1) + header.ParentHash = rawdb.ReadCanonicalHash(db, number-1) } - WriteHeader(db, header) - WriteCanonicalHash(db, header.Hash(), number) + rawdb.WriteHeader(db, header) + rawdb.WriteCanonicalHash(db, header.Hash(), number) } // Start indexer with an already existing chain for i := uint64(0); i <= 100; i++ { diff --git a/core/database_util.go b/core/database_util.go deleted file mode 100644 index 8c4698985..000000000 --- a/core/database_util.go +++ /dev/null @@ -1,652 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" -) - -// DatabaseReader wraps the Get method of a backing data store. -type DatabaseReader interface { - Get(key []byte) (value []byte, err error) -} - -// DatabaseDeleter wraps the Delete method of a backing data store. 
-type DatabaseDeleter interface { - Delete(key []byte) error -} - -var ( - headHeaderKey = []byte("LastHeader") - headBlockKey = []byte("LastBlock") - headFastKey = []byte("LastFast") - trieSyncKey = []byte("TrieSync") - - // Data item prefixes (use single byte to avoid mixing data types, avoid `i`). - headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header - tdSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + tdSuffix -> td - numSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + numSuffix -> hash - blockHashPrefix = []byte("H") // blockHashPrefix + hash -> num (uint64 big endian) - bodyPrefix = []byte("b") // bodyPrefix + num (uint64 big endian) + hash -> block body - blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts - lookupPrefix = []byte("l") // lookupPrefix + hash -> transaction/receipt lookup metadata - bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits - - preimagePrefix = "secure-key-" // preimagePrefix + hash -> preimage - configPrefix = []byte("ethereum-config-") // config prefix for the db - - // Chain index prefixes (use `i` + single byte to avoid mixing data types). - BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress - - // used by old db, now only used for conversion - oldReceiptsPrefix = []byte("receipts-") - oldTxMetaSuffix = []byte{0x01} - - ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error - - preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) - preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) -) - -// TxLookupEntry is a positional metadata to help looking up the data content of -// a transaction or receipt given only its hash. -type TxLookupEntry struct { - BlockHash common.Hash - BlockIndex uint64 - Index uint64 -} - -// encodeBlockNumber encodes a block number as big endian uint64 -func encodeBlockNumber(number uint64) []byte { - enc := make([]byte, 8) - binary.BigEndian.PutUint64(enc, number) - return enc -} - -// GetCanonicalHash retrieves a hash assigned to a canonical block number. -func GetCanonicalHash(db DatabaseReader, number uint64) common.Hash { - data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)) - if len(data) == 0 { - return common.Hash{} - } - return common.BytesToHash(data) -} - -// missingNumber is returned by GetBlockNumber if no header with the -// given block hash has been stored in the database -const missingNumber = uint64(0xffffffffffffffff) - -// GetBlockNumber returns the block number assigned to a block hash -// if the corresponding header is present in the database -func GetBlockNumber(db DatabaseReader, hash common.Hash) uint64 { - data, _ := db.Get(append(blockHashPrefix, hash.Bytes()...)) - if len(data) != 8 { - return missingNumber - } - return binary.BigEndian.Uint64(data) -} - -// GetHeadHeaderHash retrieves the hash of the current canonical head block's -// header. The difference between this and GetHeadBlockHash is that whereas the -// last block hash is only updated upon a full block import, the last header -// hash is updated already at header import, allowing head tracking for the -// light synchronization mechanism. 
-func GetHeadHeaderHash(db DatabaseReader) common.Hash { - data, _ := db.Get(headHeaderKey) - if len(data) == 0 { - return common.Hash{} - } - return common.BytesToHash(data) -} - -// GetHeadBlockHash retrieves the hash of the current canonical head block. -func GetHeadBlockHash(db DatabaseReader) common.Hash { - data, _ := db.Get(headBlockKey) - if len(data) == 0 { - return common.Hash{} - } - return common.BytesToHash(data) -} - -// GetHeadFastBlockHash retrieves the hash of the current canonical head block during -// fast synchronization. The difference between this and GetHeadBlockHash is that -// whereas the last block hash is only updated upon a full block import, the last -// fast hash is updated when importing pre-processed blocks. -func GetHeadFastBlockHash(db DatabaseReader) common.Hash { - data, _ := db.Get(headFastKey) - if len(data) == 0 { - return common.Hash{} - } - return common.BytesToHash(data) -} - -// GetTrieSyncProgress retrieves the number of tries nodes fast synced to allow -// reportinc correct numbers across restarts. -func GetTrieSyncProgress(db DatabaseReader) uint64 { - data, _ := db.Get(trieSyncKey) - if len(data) == 0 { - return 0 - } - return new(big.Int).SetBytes(data).Uint64() -} - -// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil -// if the header's not found. -func GetHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue { - data, _ := db.Get(headerKey(hash, number)) - return data -} - -// GetHeader retrieves the block header corresponding to the hash, nil if none -// found. -func GetHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Header { - data := GetHeaderRLP(db, hash, number) - if len(data) == 0 { - return nil - } - header := new(types.Header) - if err := rlp.Decode(bytes.NewReader(data), header); err != nil { - log.Error("Invalid block header RLP", "hash", hash, "err", err) - return nil - } - return header -} - -// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. -func GetBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue { - data, _ := db.Get(blockBodyKey(hash, number)) - return data -} - -func headerKey(hash common.Hash, number uint64) []byte { - return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...) -} - -func blockBodyKey(hash common.Hash, number uint64) []byte { - return append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) -} - -// GetBody retrieves the block body (transactons, uncles) corresponding to the -// hash, nil if none found. -func GetBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body { - data := GetBodyRLP(db, hash, number) - if len(data) == 0 { - return nil - } - body := new(types.Body) - if err := rlp.Decode(bytes.NewReader(data), body); err != nil { - log.Error("Invalid block body RLP", "hash", hash, "err", err) - return nil - } - return body -} - -// GetTd retrieves a block's total difficulty corresponding to the hash, nil if -// none found. 
-func GetTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int { - data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), tdSuffix...)) - if len(data) == 0 { - return nil - } - td := new(big.Int) - if err := rlp.Decode(bytes.NewReader(data), td); err != nil { - log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err) - return nil - } - return td -} - -// GetBlock retrieves an entire block corresponding to the hash, assembling it -// back from the stored header and body. If either the header or body could not -// be retrieved nil is returned. -// -// Note, due to concurrent download of header and block body the header and thus -// canonical hash can be stored in the database but the body data not (yet). -func GetBlock(db DatabaseReader, hash common.Hash, number uint64) *types.Block { - // Retrieve the block header and body contents - header := GetHeader(db, hash, number) - if header == nil { - return nil - } - body := GetBody(db, hash, number) - if body == nil { - return nil - } - // Reassemble the block and return - return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles) -} - -// GetBlockReceipts retrieves the receipts generated by the transactions included -// in a block given by its hash. -func GetBlockReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts { - data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...)) - if len(data) == 0 { - return nil - } - storageReceipts := []*types.ReceiptForStorage{} - if err := rlp.DecodeBytes(data, &storageReceipts); err != nil { - log.Error("Invalid receipt array RLP", "hash", hash, "err", err) - return nil - } - receipts := make(types.Receipts, len(storageReceipts)) - for i, receipt := range storageReceipts { - receipts[i] = (*types.Receipt)(receipt) - } - return receipts -} - -// GetTxLookupEntry retrieves the positional metadata associated with a transaction -// hash to allow retrieving the transaction or receipt by hash. -func GetTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) { - // Load the positional metadata from disk and bail if it fails - data, _ := db.Get(append(lookupPrefix, hash.Bytes()...)) - if len(data) == 0 { - return common.Hash{}, 0, 0 - } - // Parse and return the contents of the lookup entry - var entry TxLookupEntry - if err := rlp.DecodeBytes(data, &entry); err != nil { - log.Error("Invalid lookup entry RLP", "hash", hash, "err", err) - return common.Hash{}, 0, 0 - } - return entry.BlockHash, entry.BlockIndex, entry.Index -} - -// GetTransaction retrieves a specific transaction from the database, along with -// its added positional metadata. 
-func GetTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) { - // Retrieve the lookup metadata and resolve the transaction from the body - blockHash, blockNumber, txIndex := GetTxLookupEntry(db, hash) - - if blockHash != (common.Hash{}) { - body := GetBody(db, blockHash, blockNumber) - if body == nil || len(body.Transactions) <= int(txIndex) { - log.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex) - return nil, common.Hash{}, 0, 0 - } - return body.Transactions[txIndex], blockHash, blockNumber, txIndex - } - // Old transaction representation, load the transaction and it's metadata separately - data, _ := db.Get(hash.Bytes()) - if len(data) == 0 { - return nil, common.Hash{}, 0, 0 - } - var tx types.Transaction - if err := rlp.DecodeBytes(data, &tx); err != nil { - return nil, common.Hash{}, 0, 0 - } - // Retrieve the blockchain positional metadata - data, _ = db.Get(append(hash.Bytes(), oldTxMetaSuffix...)) - if len(data) == 0 { - return nil, common.Hash{}, 0, 0 - } - var entry TxLookupEntry - if err := rlp.DecodeBytes(data, &entry); err != nil { - return nil, common.Hash{}, 0, 0 - } - return &tx, entry.BlockHash, entry.BlockIndex, entry.Index -} - -// GetReceipt retrieves a specific transaction receipt from the database, along with -// its added positional metadata. -func GetReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) { - // Retrieve the lookup metadata and resolve the receipt from the receipts - blockHash, blockNumber, receiptIndex := GetTxLookupEntry(db, hash) - - if blockHash != (common.Hash{}) { - receipts := GetBlockReceipts(db, blockHash, blockNumber) - if len(receipts) <= int(receiptIndex) { - log.Error("Receipt refereced missing", "number", blockNumber, "hash", blockHash, "index", receiptIndex) - return nil, common.Hash{}, 0, 0 - } - return receipts[receiptIndex], blockHash, blockNumber, receiptIndex - } - // Old receipt representation, load the receipt and set an unknown metadata - data, _ := db.Get(append(oldReceiptsPrefix, hash[:]...)) - if len(data) == 0 { - return nil, common.Hash{}, 0, 0 - } - var receipt types.ReceiptForStorage - err := rlp.DecodeBytes(data, &receipt) - if err != nil { - log.Error("Invalid receipt RLP", "hash", hash, "err", err) - } - return (*types.Receipt)(&receipt), common.Hash{}, 0, 0 -} - -// GetBloomBits retrieves the compressed bloom bit vector belonging to the given -// section and bit index from the. -func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) ([]byte, error) { - key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...) - - binary.BigEndian.PutUint16(key[1:], uint16(bit)) - binary.BigEndian.PutUint64(key[3:], section) - - return db.Get(key) -} - -// WriteCanonicalHash stores the canonical hash for the given block number. -func WriteCanonicalHash(db ethdb.Putter, hash common.Hash, number uint64) error { - key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...) - if err := db.Put(key, hash.Bytes()); err != nil { - log.Crit("Failed to store number to hash mapping", "err", err) - } - return nil -} - -// WriteHeadHeaderHash stores the head header's hash. -func WriteHeadHeaderHash(db ethdb.Putter, hash common.Hash) error { - if err := db.Put(headHeaderKey, hash.Bytes()); err != nil { - log.Crit("Failed to store last header's hash", "err", err) - } - return nil -} - -// WriteHeadBlockHash stores the head block's hash. 
-func WriteHeadBlockHash(db ethdb.Putter, hash common.Hash) error { - if err := db.Put(headBlockKey, hash.Bytes()); err != nil { - log.Crit("Failed to store last block's hash", "err", err) - } - return nil -} - -// WriteHeadFastBlockHash stores the fast head block's hash. -func WriteHeadFastBlockHash(db ethdb.Putter, hash common.Hash) error { - if err := db.Put(headFastKey, hash.Bytes()); err != nil { - log.Crit("Failed to store last fast block's hash", "err", err) - } - return nil -} - -// WriteTrieSyncProgress stores the fast sync trie process counter to support -// retrieving it across restarts. -func WriteTrieSyncProgress(db ethdb.Putter, count uint64) error { - if err := db.Put(trieSyncKey, new(big.Int).SetUint64(count).Bytes()); err != nil { - log.Crit("Failed to store fast sync trie progress", "err", err) - } - return nil -} - -// WriteHeader serializes a block header into the database. -func WriteHeader(db ethdb.Putter, header *types.Header) error { - data, err := rlp.EncodeToBytes(header) - if err != nil { - return err - } - hash := header.Hash().Bytes() - num := header.Number.Uint64() - encNum := encodeBlockNumber(num) - key := append(blockHashPrefix, hash...) - if err := db.Put(key, encNum); err != nil { - log.Crit("Failed to store hash to number mapping", "err", err) - } - key = append(append(headerPrefix, encNum...), hash...) - if err := db.Put(key, data); err != nil { - log.Crit("Failed to store header", "err", err) - } - return nil -} - -// WriteBody serializes the body of a block into the database. -func WriteBody(db ethdb.Putter, hash common.Hash, number uint64, body *types.Body) error { - data, err := rlp.EncodeToBytes(body) - if err != nil { - return err - } - return WriteBodyRLP(db, hash, number, data) -} - -// WriteBodyRLP writes a serialized body of a block into the database. -func WriteBodyRLP(db ethdb.Putter, hash common.Hash, number uint64, rlp rlp.RawValue) error { - key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) - if err := db.Put(key, rlp); err != nil { - log.Crit("Failed to store block body", "err", err) - } - return nil -} - -// WriteTd serializes the total difficulty of a block into the database. -func WriteTd(db ethdb.Putter, hash common.Hash, number uint64, td *big.Int) error { - data, err := rlp.EncodeToBytes(td) - if err != nil { - return err - } - key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...) - if err := db.Put(key, data); err != nil { - log.Crit("Failed to store block total difficulty", "err", err) - } - return nil -} - -// WriteBlock serializes a block into the database, header and body separately. -func WriteBlock(db ethdb.Putter, block *types.Block) error { - // Store the body first to retain database consistency - if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil { - return err - } - // Store the header too, signaling full block ownership - if err := WriteHeader(db, block.Header()); err != nil { - return err - } - return nil -} - -// WriteBlockReceipts stores all the transaction receipts belonging to a block -// as a single receipt slice. This is used during chain reorganisations for -// rescheduling dropped transactions. 
-func WriteBlockReceipts(db ethdb.Putter, hash common.Hash, number uint64, receipts types.Receipts) error { - // Convert the receipts into their storage form and serialize them - storageReceipts := make([]*types.ReceiptForStorage, len(receipts)) - for i, receipt := range receipts { - storageReceipts[i] = (*types.ReceiptForStorage)(receipt) - } - bytes, err := rlp.EncodeToBytes(storageReceipts) - if err != nil { - return err - } - // Store the flattened receipt slice - key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) - if err := db.Put(key, bytes); err != nil { - log.Crit("Failed to store block receipts", "err", err) - } - return nil -} - -// WriteTxLookupEntries stores a positional metadata for every transaction from -// a block, enabling hash based transaction and receipt lookups. -func WriteTxLookupEntries(db ethdb.Putter, block *types.Block) error { - // Iterate over each transaction and encode its metadata - for i, tx := range block.Transactions() { - entry := TxLookupEntry{ - BlockHash: block.Hash(), - BlockIndex: block.NumberU64(), - Index: uint64(i), - } - data, err := rlp.EncodeToBytes(entry) - if err != nil { - return err - } - if err := db.Put(append(lookupPrefix, tx.Hash().Bytes()...), data); err != nil { - return err - } - } - return nil -} - -// WriteBloomBits writes the compressed bloom bits vector belonging to the given -// section and bit index. -func WriteBloomBits(db ethdb.Putter, bit uint, section uint64, head common.Hash, bits []byte) { - key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...) - - binary.BigEndian.PutUint16(key[1:], uint16(bit)) - binary.BigEndian.PutUint64(key[3:], section) - - if err := db.Put(key, bits); err != nil { - log.Crit("Failed to store bloom bits", "err", err) - } -} - -// DeleteCanonicalHash removes the number to hash canonical mapping. -func DeleteCanonicalHash(db DatabaseDeleter, number uint64) { - db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)) -} - -// DeleteHeader removes all block header data associated with a hash. -func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) { - db.Delete(append(blockHashPrefix, hash.Bytes()...)) - db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) -} - -// DeleteBody removes all block body data associated with a hash. -func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) { - db.Delete(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) -} - -// DeleteTd removes all block total difficulty data associated with a hash. -func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) { - db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)) -} - -// DeleteBlock removes all block data associated with a hash. -func DeleteBlock(db DatabaseDeleter, hash common.Hash, number uint64) { - DeleteBlockReceipts(db, hash, number) - DeleteHeader(db, hash, number) - DeleteBody(db, hash, number) - DeleteTd(db, hash, number) -} - -// DeleteBlockReceipts removes all receipt data associated with a block hash. -func DeleteBlockReceipts(db DatabaseDeleter, hash common.Hash, number uint64) { - db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) -} - -// DeleteTxLookupEntry removes all transaction data associated with a hash. 
-func DeleteTxLookupEntry(db DatabaseDeleter, hash common.Hash) { - db.Delete(append(lookupPrefix, hash.Bytes()...)) -} - -// PreimageTable returns a Database instance with the key prefix for preimage entries. -func PreimageTable(db ethdb.Database) ethdb.Database { - return ethdb.NewTable(db, preimagePrefix) -} - -// WritePreimages writes the provided set of preimages to the database. `number` is the -// current block number, and is used for debug messages only. -func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash][]byte) error { - table := PreimageTable(db) - batch := table.NewBatch() - hitCount := 0 - for hash, preimage := range preimages { - if _, err := table.Get(hash.Bytes()); err != nil { - batch.Put(hash.Bytes(), preimage) - hitCount++ - } - } - preimageCounter.Inc(int64(len(preimages))) - preimageHitCounter.Inc(int64(hitCount)) - if hitCount > 0 { - if err := batch.Write(); err != nil { - return fmt.Errorf("preimage write fail for block %d: %v", number, err) - } - } - return nil -} - -// GetBlockChainVersion reads the version number from db. -func GetBlockChainVersion(db DatabaseReader) int { - var vsn uint - enc, _ := db.Get([]byte("BlockchainVersion")) - rlp.DecodeBytes(enc, &vsn) - return int(vsn) -} - -// WriteBlockChainVersion writes vsn as the version number to db. -func WriteBlockChainVersion(db ethdb.Putter, vsn int) { - enc, _ := rlp.EncodeToBytes(uint(vsn)) - db.Put([]byte("BlockchainVersion"), enc) -} - -// WriteChainConfig writes the chain config settings to the database. -func WriteChainConfig(db ethdb.Putter, hash common.Hash, cfg *params.ChainConfig) error { - // short circuit and ignore if nil config. GetChainConfig - // will return a default. - if cfg == nil { - return nil - } - - jsonChainConfig, err := json.Marshal(cfg) - if err != nil { - return err - } - - return db.Put(append(configPrefix, hash[:]...), jsonChainConfig) -} - -// GetChainConfig will fetch the network settings based on the given hash. -func GetChainConfig(db DatabaseReader, hash common.Hash) (*params.ChainConfig, error) { - jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...)) - if len(jsonChainConfig) == 0 { - return nil, ErrChainConfigNotFound - } - - var config params.ChainConfig - if err := json.Unmarshal(jsonChainConfig, &config); err != nil { - return nil, err - } - - return &config, nil -} - -// FindCommonAncestor returns the last common ancestor of two block headers -func FindCommonAncestor(db DatabaseReader, a, b *types.Header) *types.Header { - for bn := b.Number.Uint64(); a.Number.Uint64() > bn; { - a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1) - if a == nil { - return nil - } - } - for an := a.Number.Uint64(); an < b.Number.Uint64(); { - b = GetHeader(db, b.ParentHash, b.Number.Uint64()-1) - if b == nil { - return nil - } - } - for a.Hash() != b.Hash() { - a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1) - if a == nil { - return nil - } - b = GetHeader(db, b.ParentHash, b.Number.Uint64()-1) - if b == nil { - return nil - } - } - return a -} diff --git a/core/database_util_test.go b/core/database_util_test.go deleted file mode 100644 index aa87fa6f8..000000000 --- a/core/database_util_test.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "bytes" - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/sha3" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/rlp" -) - -// Tests block header storage and retrieval operations. -func TestHeaderStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - // Create a test header to move around the database and make sure it's really new - header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")} - if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry != nil { - t.Fatalf("Non existent header returned: %v", entry) - } - // Write and verify the header in the database - if err := WriteHeader(db, header); err != nil { - t.Fatalf("Failed to write header into database: %v", err) - } - if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry == nil { - t.Fatalf("Stored header not found") - } else if entry.Hash() != header.Hash() { - t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header) - } - if entry := GetHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil { - t.Fatalf("Stored header RLP not found") - } else { - hasher := sha3.NewKeccak256() - hasher.Write(entry) - - if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() { - t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header) - } - } - // Delete the header and verify the execution - DeleteHeader(db, header.Hash(), header.Number.Uint64()) - if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry != nil { - t.Fatalf("Deleted header returned: %v", entry) - } -} - -// Tests block body storage and retrieval operations. 
-func TestBodyStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - // Create a test body to move around the database and make sure it's really new - body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}} - - hasher := sha3.NewKeccak256() - rlp.Encode(hasher, body) - hash := common.BytesToHash(hasher.Sum(nil)) - - if entry := GetBody(db, hash, 0); entry != nil { - t.Fatalf("Non existent body returned: %v", entry) - } - // Write and verify the body in the database - if err := WriteBody(db, hash, 0, body); err != nil { - t.Fatalf("Failed to write body into database: %v", err) - } - if entry := GetBody(db, hash, 0); entry == nil { - t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { - t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body) - } - if entry := GetBodyRLP(db, hash, 0); entry == nil { - t.Fatalf("Stored body RLP not found") - } else { - hasher := sha3.NewKeccak256() - hasher.Write(entry) - - if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash { - t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body) - } - } - // Delete the body and verify the execution - DeleteBody(db, hash, 0) - if entry := GetBody(db, hash, 0); entry != nil { - t.Fatalf("Deleted body returned: %v", entry) - } -} - -// Tests block storage and retrieval operations. -func TestBlockStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - // Create a test block to move around the database and make sure it's really new - block := types.NewBlockWithHeader(&types.Header{ - Extra: []byte("test block"), - UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, - }) - if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil { - t.Fatalf("Non existent block returned: %v", entry) - } - if entry := GetHeader(db, block.Hash(), block.NumberU64()); entry != nil { - t.Fatalf("Non existent header returned: %v", entry) - } - if entry := GetBody(db, block.Hash(), block.NumberU64()); entry != nil { - t.Fatalf("Non existent body returned: %v", entry) - } - // Write and verify the block in the database - if err := WriteBlock(db, block); err != nil { - t.Fatalf("Failed to write block into database: %v", err) - } - if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry == nil { - t.Fatalf("Stored block not found") - } else if entry.Hash() != block.Hash() { - t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) - } - if entry := GetHeader(db, block.Hash(), block.NumberU64()); entry == nil { - t.Fatalf("Stored header not found") - } else if entry.Hash() != block.Header().Hash() { - t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header()) - } - if entry := GetBody(db, block.Hash(), block.NumberU64()); entry == nil { - t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { - t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body()) - } - // Delete the block and verify the execution - DeleteBlock(db, block.Hash(), block.NumberU64()) - if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil { - t.Fatalf("Deleted block returned: %v", entry) - } - if entry := GetHeader(db, block.Hash(), 
block.NumberU64()); entry != nil { - t.Fatalf("Deleted header returned: %v", entry) - } - if entry := GetBody(db, block.Hash(), block.NumberU64()); entry != nil { - t.Fatalf("Deleted body returned: %v", entry) - } -} - -// Tests that partial block contents don't get reassembled into full blocks. -func TestPartialBlockStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - block := types.NewBlockWithHeader(&types.Header{ - Extra: []byte("test block"), - UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, - }) - // Store a header and check that it's not recognized as a block - if err := WriteHeader(db, block.Header()); err != nil { - t.Fatalf("Failed to write header into database: %v", err) - } - if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil { - t.Fatalf("Non existent block returned: %v", entry) - } - DeleteHeader(db, block.Hash(), block.NumberU64()) - - // Store a body and check that it's not recognized as a block - if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil { - t.Fatalf("Failed to write body into database: %v", err) - } - if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil { - t.Fatalf("Non existent block returned: %v", entry) - } - DeleteBody(db, block.Hash(), block.NumberU64()) - - // Store a header and a body separately and check reassembly - if err := WriteHeader(db, block.Header()); err != nil { - t.Fatalf("Failed to write header into database: %v", err) - } - if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil { - t.Fatalf("Failed to write body into database: %v", err) - } - if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry == nil { - t.Fatalf("Stored block not found") - } else if entry.Hash() != block.Hash() { - t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) - } -} - -// Tests block total difficulty storage and retrieval operations. -func TestTdStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - // Create a test TD to move around the database and make sure it's really new - hash, td := common.Hash{}, big.NewInt(314) - if entry := GetTd(db, hash, 0); entry != nil { - t.Fatalf("Non existent TD returned: %v", entry) - } - // Write and verify the TD in the database - if err := WriteTd(db, hash, 0, td); err != nil { - t.Fatalf("Failed to write TD into database: %v", err) - } - if entry := GetTd(db, hash, 0); entry == nil { - t.Fatalf("Stored TD not found") - } else if entry.Cmp(td) != 0 { - t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td) - } - // Delete the TD and verify the execution - DeleteTd(db, hash, 0) - if entry := GetTd(db, hash, 0); entry != nil { - t.Fatalf("Deleted TD returned: %v", entry) - } -} - -// Tests that canonical numbers can be mapped to hashes and retrieved. 
-func TestCanonicalMappingStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - // Create a test canonical number and assinged hash to move around - hash, number := common.Hash{0: 0xff}, uint64(314) - if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) { - t.Fatalf("Non existent canonical mapping returned: %v", entry) - } - // Write and verify the TD in the database - if err := WriteCanonicalHash(db, hash, number); err != nil { - t.Fatalf("Failed to write canonical mapping into database: %v", err) - } - if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) { - t.Fatalf("Stored canonical mapping not found") - } else if entry != hash { - t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash) - } - // Delete the TD and verify the execution - DeleteCanonicalHash(db, number) - if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) { - t.Fatalf("Deleted canonical mapping returned: %v", entry) - } -} - -// Tests that head headers and head blocks can be assigned, individually. -func TestHeadStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")}) - blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")}) - blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")}) - - // Check that no head entries are in a pristine database - if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) { - t.Fatalf("Non head header entry returned: %v", entry) - } - if entry := GetHeadBlockHash(db); entry != (common.Hash{}) { - t.Fatalf("Non head block entry returned: %v", entry) - } - if entry := GetHeadFastBlockHash(db); entry != (common.Hash{}) { - t.Fatalf("Non fast head block entry returned: %v", entry) - } - // Assign separate entries for the head header and block - if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil { - t.Fatalf("Failed to write head header hash: %v", err) - } - if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil { - t.Fatalf("Failed to write head block hash: %v", err) - } - if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil { - t.Fatalf("Failed to write fast head block hash: %v", err) - } - // Check that both heads are present, and different (i.e. two heads maintained) - if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() { - t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash()) - } - if entry := GetHeadBlockHash(db); entry != blockFull.Hash() { - t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash()) - } - if entry := GetHeadFastBlockHash(db); entry != blockFast.Hash() { - t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash()) - } -} - -// Tests that positional lookup metadata can be stored and retrieved. 
-func TestLookupStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11}) - tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), big.NewInt(222), 2222, big.NewInt(22222), []byte{0x22, 0x22, 0x22}) - tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) - txs := []*types.Transaction{tx1, tx2, tx3} - - block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil) - - // Check that no transactions entries are in a pristine database - for i, tx := range txs { - if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil { - t.Fatalf("tx #%d [%x]: non existent transaction returned: %v", i, tx.Hash(), txn) - } - } - // Insert all the transactions into the database, and verify contents - if err := WriteBlock(db, block); err != nil { - t.Fatalf("failed to write block contents: %v", err) - } - if err := WriteTxLookupEntries(db, block); err != nil { - t.Fatalf("failed to write transactions: %v", err) - } - for i, tx := range txs { - if txn, hash, number, index := GetTransaction(db, tx.Hash()); txn == nil { - t.Fatalf("tx #%d [%x]: transaction not found", i, tx.Hash()) - } else { - if hash != block.Hash() || number != block.NumberU64() || index != uint64(i) { - t.Fatalf("tx #%d [%x]: positional metadata mismatch: have %x/%d/%d, want %x/%v/%v", i, tx.Hash(), hash, number, index, block.Hash(), block.NumberU64(), i) - } - if tx.Hash() != txn.Hash() { - t.Fatalf("tx #%d [%x]: transaction mismatch: have %v, want %v", i, tx.Hash(), txn, tx) - } - } - } - // Delete the transactions and check purge - for i, tx := range txs { - DeleteTxLookupEntry(db, tx.Hash()) - if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil { - t.Fatalf("tx #%d [%x]: deleted transaction returned: %v", i, tx.Hash(), txn) - } - } -} - -// Tests that receipts associated with a single block can be stored and retrieved. 
-func TestBlockReceiptStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - receipt1 := &types.Receipt{ - Status: types.ReceiptStatusFailed, - CumulativeGasUsed: 1, - Logs: []*types.Log{ - {Address: common.BytesToAddress([]byte{0x11})}, - {Address: common.BytesToAddress([]byte{0x01, 0x11})}, - }, - TxHash: common.BytesToHash([]byte{0x11, 0x11}), - ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), - GasUsed: 111111, - } - receipt2 := &types.Receipt{ - PostState: common.Hash{2}.Bytes(), - CumulativeGasUsed: 2, - Logs: []*types.Log{ - {Address: common.BytesToAddress([]byte{0x22})}, - {Address: common.BytesToAddress([]byte{0x02, 0x22})}, - }, - TxHash: common.BytesToHash([]byte{0x22, 0x22}), - ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), - GasUsed: 222222, - } - receipts := []*types.Receipt{receipt1, receipt2} - - // Check that no receipt entries are in a pristine database - hash := common.BytesToHash([]byte{0x03, 0x14}) - if rs := GetBlockReceipts(db, hash, 0); len(rs) != 0 { - t.Fatalf("non existent receipts returned: %v", rs) - } - // Insert the receipt slice into the database and check presence - if err := WriteBlockReceipts(db, hash, 0, receipts); err != nil { - t.Fatalf("failed to write block receipts: %v", err) - } - if rs := GetBlockReceipts(db, hash, 0); len(rs) == 0 { - t.Fatalf("no receipts returned") - } else { - for i := 0; i < len(receipts); i++ { - rlpHave, _ := rlp.EncodeToBytes(rs[i]) - rlpWant, _ := rlp.EncodeToBytes(receipts[i]) - - if !bytes.Equal(rlpHave, rlpWant) { - t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i]) - } - } - } - // Delete the receipt slice and check purge - DeleteBlockReceipts(db, hash, 0) - if rs := GetBlockReceipts(db, hash, 0); len(rs) != 0 { - t.Fatalf("deleted receipts returned: %v", rs) - } -} diff --git a/core/genesis.go b/core/genesis.go index b6ead2250..c0a636ab2 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" @@ -155,7 +156,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig } // Just commit the new block if there is no stored genesis block. - stored := GetCanonicalHash(db, 0) + stored := rawdb.ReadCanonicalHash(db, 0) if (stored == common.Hash{}) { if genesis == nil { log.Info("Writing default main-net genesis block") @@ -177,14 +178,11 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig // Get the existing chain configuration. newcfg := genesis.configOrDefault(stored) - storedcfg, err := GetChainConfig(db, stored) - if err != nil { - if err == ErrChainConfigNotFound { - // This case happens if a genesis write was interrupted. - log.Warn("Found genesis block without chain config") - err = WriteChainConfig(db, stored, newcfg) - } - return newcfg, stored, err + storedcfg := rawdb.ReadChainConfig(db, stored) + if storedcfg == nil { + log.Warn("Found genesis block without chain config") + rawdb.WriteChainConfig(db, stored, newcfg) + return newcfg, stored, nil } // Special case: don't change the existing config of a non-mainnet chain if no new // config is supplied. 
These chains would get AllProtocolChanges (and a compat error) @@ -195,15 +193,16 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig // Check config compatibility and write the config. Compatibility errors // are returned to the caller unless we're already at block zero. - height := GetBlockNumber(db, GetHeadHeaderHash(db)) - if height == missingNumber { + height := rawdb.ReadHeaderNumber(db, rawdb.ReadHeadHeaderHash(db)) + if height == nil { return newcfg, stored, fmt.Errorf("missing block number for head header hash") } - compatErr := storedcfg.CheckCompatible(newcfg, height) - if compatErr != nil && height != 0 && compatErr.RewindTo != 0 { + compatErr := storedcfg.CheckCompatible(newcfg, *height) + if compatErr != nil && *height != 0 && compatErr.RewindTo != 0 { return newcfg, stored, compatErr } - return newcfg, stored, WriteChainConfig(db, stored, newcfg) + rawdb.WriteChainConfig(db, stored, newcfg) + return newcfg, stored, nil } func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { @@ -267,29 +266,19 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { if block.Number().Sign() != 0 { return nil, fmt.Errorf("can't commit genesis block with number > 0") } - if err := WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty); err != nil { - return nil, err - } - if err := WriteBlock(db, block); err != nil { - return nil, err - } - if err := WriteBlockReceipts(db, block.Hash(), block.NumberU64(), nil); err != nil { - return nil, err - } - if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { - return nil, err - } - if err := WriteHeadBlockHash(db, block.Hash()); err != nil { - return nil, err - } - if err := WriteHeadHeaderHash(db, block.Hash()); err != nil { - return nil, err - } + rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty) + rawdb.WriteBlock(db, block) + rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil) + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(db, block.Hash()) + rawdb.WriteHeadHeaderHash(db, block.Hash()) + config := g.Config if config == nil { config = params.AllEthashProtocolChanges } - return block, WriteChainConfig(db, block.Hash(), config) + rawdb.WriteChainConfig(db, block.Hash(), config) + return block, nil } // MustCommit writes the genesis block and state to db, panicking on error. diff --git a/core/genesis_test.go b/core/genesis_test.go index 052ded699..613434e20 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -24,6 +24,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" @@ -154,7 +155,7 @@ func TestSetupGenesis(t *testing.T) { t.Errorf("%s: returned hash %s, want %s", test.name, hash.Hex(), test.wantHash.Hex()) } else if err == nil { // Check database content. 
- stored := GetBlock(db, test.wantHash, 0) + stored := rawdb.ReadBlock(db, test.wantHash, 0) if stored.Hash() != test.wantHash { t.Errorf("%s: block in DB has hash %s, want %s", test.name, stored.Hash(), test.wantHash) } diff --git a/core/headerchain.go b/core/headerchain.go index 2d1b0a2a1..2ac0cccc7 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -97,7 +98,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c } hc.currentHeader.Store(hc.genesisHeader) - if head := GetHeadBlockHash(chainDb); head != (common.Hash{}) { + if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) { if chead := hc.GetHeaderByHash(head); chead != nil { hc.currentHeader.Store(chead) } @@ -109,13 +110,14 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c // GetBlockNumber retrieves the block number belonging to the given hash // from the cache or database -func (hc *HeaderChain) GetBlockNumber(hash common.Hash) uint64 { +func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 { if cached, ok := hc.numberCache.Get(hash); ok { - return cached.(uint64) + number := cached.(uint64) + return &number } - number := GetBlockNumber(hc.chainDb, hash) - if number != missingNumber { - hc.numberCache.Add(hash, number) + number := rawdb.ReadHeaderNumber(hc.chainDb, hash) + if number != nil { + hc.numberCache.Add(hash, *number) } return number } @@ -147,20 +149,19 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er if err := hc.WriteTd(hash, number, externTd); err != nil { log.Crit("Failed to write header total difficulty", "err", err) } - if err := WriteHeader(hc.chainDb, header); err != nil { - log.Crit("Failed to write header content", "err", err) - } + rawdb.WriteHeader(hc.chainDb, header) + // If the total difficulty is higher than our known, add it to the canonical chain // Second clause in the if statement reduces the vulnerability to selfish mining. 
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) { // Delete any canonical number assignments above the new head for i := number + 1; ; i++ { - hash := GetCanonicalHash(hc.chainDb, i) + hash := rawdb.ReadCanonicalHash(hc.chainDb, i) if hash == (common.Hash{}) { break } - DeleteCanonicalHash(hc.chainDb, i) + rawdb.DeleteCanonicalHash(hc.chainDb, i) } // Overwrite any stale canonical number assignments var ( @@ -168,20 +169,17 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er headNumber = header.Number.Uint64() - 1 headHeader = hc.GetHeader(headHash, headNumber) ) - for GetCanonicalHash(hc.chainDb, headNumber) != headHash { - WriteCanonicalHash(hc.chainDb, headHash, headNumber) + for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash { + rawdb.WriteCanonicalHash(hc.chainDb, headHash, headNumber) headHash = headHeader.ParentHash headNumber = headHeader.Number.Uint64() - 1 headHeader = hc.GetHeader(headHash, headNumber) } // Extend the canonical chain with the new header - if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil { - log.Crit("Failed to insert header number", "err", err) - } - if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil { - log.Crit("Failed to insert head header hash", "err", err) - } + rawdb.WriteCanonicalHash(hc.chainDb, hash, number) + rawdb.WriteHeadHeaderHash(hc.chainDb, hash) + hc.currentHeaderHash = hash hc.currentHeader.Store(types.CopyHeader(header)) @@ -316,7 +314,7 @@ func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int { if cached, ok := hc.tdCache.Get(hash); ok { return cached.(*big.Int) } - td := GetTd(hc.chainDb, hash, number) + td := rawdb.ReadTd(hc.chainDb, hash, number) if td == nil { return nil } @@ -328,15 +326,17 @@ func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int { // GetTdByHash retrieves a block's total difficulty in the canonical chain from the // database by hash, caching it if found. func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int { - return hc.GetTd(hash, hc.GetBlockNumber(hash)) + number := hc.GetBlockNumber(hash) + if number == nil { + return nil + } + return hc.GetTd(hash, *number) } // WriteTd stores a block's total difficulty into the database, also caching it // along the way. func (hc *HeaderChain) WriteTd(hash common.Hash, number uint64, td *big.Int) error { - if err := WriteTd(hc.chainDb, hash, number, td); err != nil { - return err - } + rawdb.WriteTd(hc.chainDb, hash, number, td) hc.tdCache.Add(hash, new(big.Int).Set(td)) return nil } @@ -348,7 +348,7 @@ func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header if header, ok := hc.headerCache.Get(hash); ok { return header.(*types.Header) } - header := GetHeader(hc.chainDb, hash, number) + header := rawdb.ReadHeader(hc.chainDb, hash, number) if header == nil { return nil } @@ -360,7 +360,11 @@ func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header // GetHeaderByHash retrieves a block header from the database by hash, caching it if // found. func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header { - return hc.GetHeader(hash, hc.GetBlockNumber(hash)) + number := hc.GetBlockNumber(hash) + if number == nil { + return nil + } + return hc.GetHeader(hash, *number) } // HasHeader checks if a block header is present in the database or not. 
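Editor's note: the headerchain.go hunks above and below replace the old missingNumber sentinel with a *uint64 return from GetBlockNumber (backed by rawdb.ReadHeaderNumber), so callers now nil-check before dereferencing. The following is a minimal sketch of that caller pattern, not part of the patch; the package name and the lookupHeader helper are invented for illustration only.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
)

// lookupHeader resolves a header from its hash alone: first the hash->number
// index, then the number+hash keyed header table. A nil number means the hash
// is unknown, so the caller returns nil instead of reading a sentinel value.
func lookupHeader(db rawdb.DatabaseReader, hash common.Hash) *types.Header {
	number := rawdb.ReadHeaderNumber(db, hash)
	if number == nil {
		return nil
	}
	return rawdb.ReadHeader(db, hash, *number)
}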
@@ -368,14 +372,13 @@ func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool { if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) { return true } - ok, _ := hc.chainDb.Has(headerKey(hash, number)) - return ok + return rawdb.HasHeader(hc.chainDb, hash, number) } // GetHeaderByNumber retrieves a block header from the database by number, // caching it (associated with its hash) if found. func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header { - hash := GetCanonicalHash(hc.chainDb, number) + hash := rawdb.ReadCanonicalHash(hc.chainDb, number) if hash == (common.Hash{}) { return nil } @@ -390,9 +393,8 @@ func (hc *HeaderChain) CurrentHeader() *types.Header { // SetCurrentHeader sets the current head header of the canonical chain. func (hc *HeaderChain) SetCurrentHeader(head *types.Header) { - if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil { - log.Crit("Failed to insert head header hash", "err", err) - } + rawdb.WriteHeadHeaderHash(hc.chainDb, head.Hash()) + hc.currentHeader.Store(head) hc.currentHeaderHash = head.Hash() } @@ -416,13 +418,14 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) { if delFn != nil { delFn(hash, num) } - DeleteHeader(hc.chainDb, hash, num) - DeleteTd(hc.chainDb, hash, num) + rawdb.DeleteHeader(hc.chainDb, hash, num) + rawdb.DeleteTd(hc.chainDb, hash, num) + hc.currentHeader.Store(hc.GetHeader(hdr.ParentHash, hdr.Number.Uint64()-1)) } // Roll back the canonical chain numbering for i := height; i > head; i-- { - DeleteCanonicalHash(hc.chainDb, i) + rawdb.DeleteCanonicalHash(hc.chainDb, i) } // Clear out any stale content from the caches hc.headerCache.Purge() @@ -434,9 +437,7 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) { } hc.currentHeaderHash = hc.CurrentHeader().Hash() - if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil { - log.Crit("Failed to reset head header hash", "err", err) - } + rawdb.WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash) } // SetGenesis sets a new genesis block header for the chain diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go new file mode 100644 index 000000000..a26a42ba7 --- /dev/null +++ b/core/rawdb/accessors_chain.go @@ -0,0 +1,381 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "bytes" + "encoding/binary" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +// ReadCanonicalHash retrieves the hash assigned to a canonical block number. 
+func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash { + data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// WriteCanonicalHash stores the hash assigned to a canonical block number. +func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) { + key := append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...) + if err := db.Put(key, hash.Bytes()); err != nil { + log.Crit("Failed to store number to hash mapping", "err", err) + } +} + +// DeleteCanonicalHash removes the number to hash canonical mapping. +func DeleteCanonicalHash(db DatabaseDeleter, number uint64) { + if err := db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)); err != nil { + log.Crit("Failed to delete number to hash mapping", "err", err) + } +} + +// ReadHeaderNumber returns the header number assigned to a hash. +func ReadHeaderNumber(db DatabaseReader, hash common.Hash) *uint64 { + data, _ := db.Get(append(headerNumberPrefix, hash.Bytes()...)) + if len(data) != 8 { + return nil + } + number := binary.BigEndian.Uint64(data) + return &number +} + +// ReadHeadHeaderHash retrieves the hash of the current canonical head header. +func ReadHeadHeaderHash(db DatabaseReader) common.Hash { + data, _ := db.Get(headHeaderKey) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// WriteHeadHeaderHash stores the hash of the current canonical head header. +func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) { + if err := db.Put(headHeaderKey, hash.Bytes()); err != nil { + log.Crit("Failed to store last header's hash", "err", err) + } +} + +// ReadHeadBlockHash retrieves the hash of the current canonical head block. +func ReadHeadBlockHash(db DatabaseReader) common.Hash { + data, _ := db.Get(headBlockKey) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// WriteHeadBlockHash stores the head block's hash. +func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) { + if err := db.Put(headBlockKey, hash.Bytes()); err != nil { + log.Crit("Failed to store last block's hash", "err", err) + } +} + +// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block. +func ReadHeadFastBlockHash(db DatabaseReader) common.Hash { + data, _ := db.Get(headFastBlockKey) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// WriteHeadFastBlockHash stores the hash of the current fast-sync head block. +func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) { + if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil { + log.Crit("Failed to store last fast block's hash", "err", err) + } +} + +// ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow +// reporting correct numbers across restarts. +func ReadFastTrieProgress(db DatabaseReader) uint64 { + data, _ := db.Get(fastTrieProgressKey) + if len(data) == 0 { + return 0 + } + return new(big.Int).SetBytes(data).Uint64() +} + +// WriteFastTrieProgress stores the fast sync trie process counter to support +// retrieving it across restarts. 
+func WriteFastTrieProgress(db DatabaseWriter, count uint64) { + if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil { + log.Crit("Failed to store fast sync trie progress", "err", err) + } +} + +// ReadHeaderRLP retrieves a block header in its raw RLP database encoding. +func ReadHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue { + data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) + return data +} + +// HasHeader verifies the existence of a block header corresponding to the hash. +func HasHeader(db DatabaseReader, hash common.Hash, number uint64) bool { + key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) + if has, err := db.Has(key); !has || err != nil { + return false + } + return true +} + +// ReadHeader retrieves the block header corresponding to the hash. +func ReadHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Header { + data := ReadHeaderRLP(db, hash, number) + if len(data) == 0 { + return nil + } + header := new(types.Header) + if err := rlp.Decode(bytes.NewReader(data), header); err != nil { + log.Error("Invalid block header RLP", "hash", hash, "err", err) + return nil + } + return header +} + +// WriteHeader stores a block header into the database and also stores the hash- +// to-number mapping. +func WriteHeader(db DatabaseWriter, header *types.Header) { + // Write the hash -> number mapping + var ( + hash = header.Hash().Bytes() + number = header.Number.Uint64() + encoded = encodeBlockNumber(number) + ) + key := append(headerNumberPrefix, hash...) + if err := db.Put(key, encoded); err != nil { + log.Crit("Failed to store hash to number mapping", "err", err) + } + // Write the encoded header + data, err := rlp.EncodeToBytes(header) + if err != nil { + log.Crit("Failed to RLP encode header", "err", err) + } + key = append(append(headerPrefix, encoded...), hash...) + if err := db.Put(key, data); err != nil { + log.Crit("Failed to store header", "err", err) + } +} + +// DeleteHeader removes all block header data associated with a hash. +func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) { + if err := db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)); err != nil { + log.Crit("Failed to delete header", "err", err) + } + if err := db.Delete(append(headerNumberPrefix, hash.Bytes()...)); err != nil { + log.Crit("Failed to delete hash to number mapping", "err", err) + } +} + +// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. +func ReadBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue { + data, _ := db.Get(append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) + return data +} + +// WriteBodyRLP stores an RLP encoded block body into the database. +func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) { + key := append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) + if err := db.Put(key, rlp); err != nil { + log.Crit("Failed to store block body", "err", err) + } +} + +// HasBody verifies the existence of a block body corresponding to the hash. +func HasBody(db DatabaseReader, hash common.Hash, number uint64) bool { + key := append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) 
+ if has, err := db.Has(key); !has || err != nil { + return false + } + return true +} + +// ReadBody retrieves the block body corresponding to the hash. +func ReadBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body { + data := ReadBodyRLP(db, hash, number) + if len(data) == 0 { + return nil + } + body := new(types.Body) + if err := rlp.Decode(bytes.NewReader(data), body); err != nil { + log.Error("Invalid block body RLP", "hash", hash, "err", err) + return nil + } + return body +} + +// WriteBody stores a block body into the database. +func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.Body) { + data, err := rlp.EncodeToBytes(body) + if err != nil { + log.Crit("Failed to RLP encode body", "err", err) + } + WriteBodyRLP(db, hash, number, data) +} + +// DeleteBody removes all block body data associated with a hash. +func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) { + if err := db.Delete(append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)); err != nil { + log.Crit("Failed to delete block body", "err", err) + } +} + +// ReadTd retrieves a block's total difficulty corresponding to the hash. +func ReadTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int { + data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), headerTDSuffix...)) + if len(data) == 0 { + return nil + } + td := new(big.Int) + if err := rlp.Decode(bytes.NewReader(data), td); err != nil { + log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err) + return nil + } + return td +} + +// WriteTd stores the total difficulty of a block into the database. +func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) { + data, err := rlp.EncodeToBytes(td) + if err != nil { + log.Crit("Failed to RLP encode block total difficulty", "err", err) + } + key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), headerTDSuffix...) + if err := db.Put(key, data); err != nil { + log.Crit("Failed to store block total difficulty", "err", err) + } +} + +// DeleteTd removes all block total difficulty data associated with a hash. +func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) { + if err := db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), headerTDSuffix...)); err != nil { + log.Crit("Failed to delete block total difficulty", "err", err) + } +} + +// ReadReceipts retrieves all the transaction receipts belonging to a block. +func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts { + // Retrieve the flattened receipt slice + data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...)) + if len(data) == 0 { + return nil + } + // Convert the receipts from their storage form to their internal representation + storageReceipts := []*types.ReceiptForStorage{} + if err := rlp.DecodeBytes(data, &storageReceipts); err != nil { + log.Error("Invalid receipt array RLP", "hash", hash, "err", err) + return nil + } + receipts := make(types.Receipts, len(storageReceipts)) + for i, receipt := range storageReceipts { + receipts[i] = (*types.Receipt)(receipt) + } + return receipts +} + +// WriteReceipts stores all the transaction receipts belonging to a block. 
+func WriteReceipts(db DatabaseWriter, hash common.Hash, number uint64, receipts types.Receipts) { + // Convert the receipts into their storage form and serialize them + storageReceipts := make([]*types.ReceiptForStorage, len(receipts)) + for i, receipt := range receipts { + storageReceipts[i] = (*types.ReceiptForStorage)(receipt) + } + bytes, err := rlp.EncodeToBytes(storageReceipts) + if err != nil { + log.Crit("Failed to encode block receipts", "err", err) + } + // Store the flattened receipt slice + key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) + if err := db.Put(key, bytes); err != nil { + log.Crit("Failed to store block receipts", "err", err) + } +} + +// DeleteReceipts removes all receipt data associated with a block hash. +func DeleteReceipts(db DatabaseDeleter, hash common.Hash, number uint64) { + if err := db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)); err != nil { + log.Crit("Failed to delete block receipts", "err", err) + } +} + +// ReadBlock retrieves an entire block corresponding to the hash, assembling it +// back from the stored header and body. If either the header or body could not +// be retrieved nil is returned. +// +// Note, due to concurrent download of header and block body the header and thus +// canonical hash can be stored in the database but the body data not (yet). +func ReadBlock(db DatabaseReader, hash common.Hash, number uint64) *types.Block { + header := ReadHeader(db, hash, number) + if header == nil { + return nil + } + body := ReadBody(db, hash, number) + if body == nil { + return nil + } + return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles) +} + +// WriteBlock serializes a block into the database, header and body separately. +func WriteBlock(db DatabaseWriter, block *types.Block) { + WriteBody(db, block.Hash(), block.NumberU64(), block.Body()) + WriteHeader(db, block.Header()) +} + +// DeleteBlock removes all block data associated with a hash. +func DeleteBlock(db DatabaseDeleter, hash common.Hash, number uint64) { + DeleteReceipts(db, hash, number) + DeleteHeader(db, hash, number) + DeleteBody(db, hash, number) + DeleteTd(db, hash, number) +} + +// FindCommonAncestor returns the last common ancestor of two block headers +func FindCommonAncestor(db DatabaseReader, a, b *types.Header) *types.Header { + for bn := b.Number.Uint64(); a.Number.Uint64() > bn; { + a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1) + if a == nil { + return nil + } + } + for an := a.Number.Uint64(); an < b.Number.Uint64(); { + b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1) + if b == nil { + return nil + } + } + for a.Hash() != b.Hash() { + a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1) + if a == nil { + return nil + } + b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1) + if b == nil { + return nil + } + } + return a +} diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go new file mode 100644 index 000000000..84c9c9aeb --- /dev/null +++ b/core/rawdb/accessors_chain_test.go @@ -0,0 +1,319 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "bytes" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/sha3" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" +) + +// Tests block header storage and retrieval operations. +func TestHeaderStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test header to move around the database and make sure it's really new + header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")} + if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil { + t.Fatalf("Non existent header returned: %v", entry) + } + // Write and verify the header in the database + WriteHeader(db, header) + if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry == nil { + t.Fatalf("Stored header not found") + } else if entry.Hash() != header.Hash() { + t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header) + } + if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil { + t.Fatalf("Stored header RLP not found") + } else { + hasher := sha3.NewKeccak256() + hasher.Write(entry) + + if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() { + t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header) + } + } + // Delete the header and verify the execution + DeleteHeader(db, header.Hash(), header.Number.Uint64()) + if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil { + t.Fatalf("Deleted header returned: %v", entry) + } +} + +// Tests block body storage and retrieval operations. +func TestBodyStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test body to move around the database and make sure it's really new + body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}} + + hasher := sha3.NewKeccak256() + rlp.Encode(hasher, body) + hash := common.BytesToHash(hasher.Sum(nil)) + + if entry := ReadBody(db, hash, 0); entry != nil { + t.Fatalf("Non existent body returned: %v", entry) + } + // Write and verify the body in the database + WriteBody(db, hash, 0, body) + if entry := ReadBody(db, hash, 0); entry == nil { + t.Fatalf("Stored body not found") + } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { + t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body) + } + if entry := ReadBodyRLP(db, hash, 0); entry == nil { + t.Fatalf("Stored body RLP not found") + } else { + hasher := sha3.NewKeccak256() + hasher.Write(entry) + + if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash { + t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body) + } + } + // Delete the body and verify the execution + DeleteBody(db, hash, 0) + if entry := ReadBody(db, hash, 0); entry != nil { + t.Fatalf("Deleted body returned: %v", entry) + } +} + +// Tests block storage and retrieval operations. 
+func TestBlockStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test block to move around the database and make sure it's really new + block := types.NewBlockWithHeader(&types.Header{ + Extra: []byte("test block"), + UncleHash: types.EmptyUncleHash, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + }) + if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil { + t.Fatalf("Non existent block returned: %v", entry) + } + if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil { + t.Fatalf("Non existent header returned: %v", entry) + } + if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil { + t.Fatalf("Non existent body returned: %v", entry) + } + // Write and verify the block in the database + WriteBlock(db, block) + if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil { + t.Fatalf("Stored block not found") + } else if entry.Hash() != block.Hash() { + t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) + } + if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry == nil { + t.Fatalf("Stored header not found") + } else if entry.Hash() != block.Header().Hash() { + t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header()) + } + if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil { + t.Fatalf("Stored body not found") + } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { + t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body()) + } + // Delete the block and verify the execution + DeleteBlock(db, block.Hash(), block.NumberU64()) + if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil { + t.Fatalf("Deleted block returned: %v", entry) + } + if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil { + t.Fatalf("Deleted header returned: %v", entry) + } + if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil { + t.Fatalf("Deleted body returned: %v", entry) + } +} + +// Tests that partial block contents don't get reassembled into full blocks. 
+func TestPartialBlockStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + block := types.NewBlockWithHeader(&types.Header{ + Extra: []byte("test block"), + UncleHash: types.EmptyUncleHash, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + }) + // Store a header and check that it's not recognized as a block + WriteHeader(db, block.Header()) + if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil { + t.Fatalf("Non existent block returned: %v", entry) + } + DeleteHeader(db, block.Hash(), block.NumberU64()) + + // Store a body and check that it's not recognized as a block + WriteBody(db, block.Hash(), block.NumberU64(), block.Body()) + if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil { + t.Fatalf("Non existent block returned: %v", entry) + } + DeleteBody(db, block.Hash(), block.NumberU64()) + + // Store a header and a body separately and check reassembly + WriteHeader(db, block.Header()) + WriteBody(db, block.Hash(), block.NumberU64(), block.Body()) + + if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil { + t.Fatalf("Stored block not found") + } else if entry.Hash() != block.Hash() { + t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) + } +} + +// Tests block total difficulty storage and retrieval operations. +func TestTdStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test TD to move around the database and make sure it's really new + hash, td := common.Hash{}, big.NewInt(314) + if entry := ReadTd(db, hash, 0); entry != nil { + t.Fatalf("Non existent TD returned: %v", entry) + } + // Write and verify the TD in the database + WriteTd(db, hash, 0, td) + if entry := ReadTd(db, hash, 0); entry == nil { + t.Fatalf("Stored TD not found") + } else if entry.Cmp(td) != 0 { + t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td) + } + // Delete the TD and verify the execution + DeleteTd(db, hash, 0) + if entry := ReadTd(db, hash, 0); entry != nil { + t.Fatalf("Deleted TD returned: %v", entry) + } +} + +// Tests that canonical numbers can be mapped to hashes and retrieved. +func TestCanonicalMappingStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test canonical number and assinged hash to move around + hash, number := common.Hash{0: 0xff}, uint64(314) + if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) { + t.Fatalf("Non existent canonical mapping returned: %v", entry) + } + // Write and verify the TD in the database + WriteCanonicalHash(db, hash, number) + if entry := ReadCanonicalHash(db, number); entry == (common.Hash{}) { + t.Fatalf("Stored canonical mapping not found") + } else if entry != hash { + t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash) + } + // Delete the TD and verify the execution + DeleteCanonicalHash(db, number) + if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) { + t.Fatalf("Deleted canonical mapping returned: %v", entry) + } +} + +// Tests that head headers and head blocks can be assigned, individually. 
+func TestHeadStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")}) + blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")}) + blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")}) + + // Check that no head entries are in a pristine database + if entry := ReadHeadHeaderHash(db); entry != (common.Hash{}) { + t.Fatalf("Non head header entry returned: %v", entry) + } + if entry := ReadHeadBlockHash(db); entry != (common.Hash{}) { + t.Fatalf("Non head block entry returned: %v", entry) + } + if entry := ReadHeadFastBlockHash(db); entry != (common.Hash{}) { + t.Fatalf("Non fast head block entry returned: %v", entry) + } + // Assign separate entries for the head header and block + WriteHeadHeaderHash(db, blockHead.Hash()) + WriteHeadBlockHash(db, blockFull.Hash()) + WriteHeadFastBlockHash(db, blockFast.Hash()) + + // Check that both heads are present, and different (i.e. two heads maintained) + if entry := ReadHeadHeaderHash(db); entry != blockHead.Hash() { + t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash()) + } + if entry := ReadHeadBlockHash(db); entry != blockFull.Hash() { + t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash()) + } + if entry := ReadHeadFastBlockHash(db); entry != blockFast.Hash() { + t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash()) + } +} + +// Tests that receipts associated with a single block can be stored and retrieved. +func TestBlockReceiptStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + receipt1 := &types.Receipt{ + Status: types.ReceiptStatusFailed, + CumulativeGasUsed: 1, + Logs: []*types.Log{ + {Address: common.BytesToAddress([]byte{0x11})}, + {Address: common.BytesToAddress([]byte{0x01, 0x11})}, + }, + TxHash: common.BytesToHash([]byte{0x11, 0x11}), + ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), + GasUsed: 111111, + } + receipt2 := &types.Receipt{ + PostState: common.Hash{2}.Bytes(), + CumulativeGasUsed: 2, + Logs: []*types.Log{ + {Address: common.BytesToAddress([]byte{0x22})}, + {Address: common.BytesToAddress([]byte{0x02, 0x22})}, + }, + TxHash: common.BytesToHash([]byte{0x22, 0x22}), + ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), + GasUsed: 222222, + } + receipts := []*types.Receipt{receipt1, receipt2} + + // Check that no receipt entries are in a pristine database + hash := common.BytesToHash([]byte{0x03, 0x14}) + if rs := ReadReceipts(db, hash, 0); len(rs) != 0 { + t.Fatalf("non existent receipts returned: %v", rs) + } + // Insert the receipt slice into the database and check presence + WriteReceipts(db, hash, 0, receipts) + if rs := ReadReceipts(db, hash, 0); len(rs) == 0 { + t.Fatalf("no receipts returned") + } else { + for i := 0; i < len(receipts); i++ { + rlpHave, _ := rlp.EncodeToBytes(rs[i]) + rlpWant, _ := rlp.EncodeToBytes(receipts[i]) + + if !bytes.Equal(rlpHave, rlpWant) { + t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i]) + } + } + } + // Delete the receipt slice and check purge + DeleteReceipts(db, hash, 0) + if rs := ReadReceipts(db, hash, 0); len(rs) != 0 { + t.Fatalf("deleted receipts returned: %v", rs) + } +} diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go new file mode 100644 index 000000000..9abad14e0 --- /dev/null +++ b/core/rawdb/accessors_indexes.go @@ -0,0 +1,119 @@ +// 
Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "encoding/binary" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +// ReadTxLookupEntry retrieves the positional metadata associated with a transaction +// hash to allow retrieving the transaction or receipt by hash. +func ReadTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) { + data, _ := db.Get(append(txLookupPrefix, hash.Bytes()...)) + if len(data) == 0 { + return common.Hash{}, 0, 0 + } + var entry TxLookupEntry + if err := rlp.DecodeBytes(data, &entry); err != nil { + log.Error("Invalid transaction lookup entry RLP", "hash", hash, "err", err) + return common.Hash{}, 0, 0 + } + return entry.BlockHash, entry.BlockIndex, entry.Index +} + +// WriteTxLookupEntries stores a positional metadata for every transaction from +// a block, enabling hash based transaction and receipt lookups. +func WriteTxLookupEntries(db DatabaseWriter, block *types.Block) { + for i, tx := range block.Transactions() { + entry := TxLookupEntry{ + BlockHash: block.Hash(), + BlockIndex: block.NumberU64(), + Index: uint64(i), + } + data, err := rlp.EncodeToBytes(entry) + if err != nil { + log.Crit("Failed to encode transaction lookup entry", "err", err) + } + if err := db.Put(append(txLookupPrefix, tx.Hash().Bytes()...), data); err != nil { + log.Crit("Failed to store transaction lookup entry", "err", err) + } + } +} + +// DeleteTxLookupEntry removes all transaction data associated with a hash. +func DeleteTxLookupEntry(db DatabaseDeleter, hash common.Hash) { + db.Delete(append(txLookupPrefix, hash.Bytes()...)) +} + +// ReadTransaction retrieves a specific transaction from the database, along with +// its added positional metadata. +func ReadTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) { + blockHash, blockNumber, txIndex := ReadTxLookupEntry(db, hash) + if blockHash == (common.Hash{}) { + return nil, common.Hash{}, 0, 0 + } + body := ReadBody(db, blockHash, blockNumber) + if body == nil || len(body.Transactions) <= int(txIndex) { + log.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex) + return nil, common.Hash{}, 0, 0 + } + return body.Transactions[txIndex], blockHash, blockNumber, txIndex +} + +// ReadReceipt retrieves a specific transaction receipt from the database, along with +// its added positional metadata. 
+func ReadReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) { + blockHash, blockNumber, receiptIndex := ReadTxLookupEntry(db, hash) + if blockHash == (common.Hash{}) { + return nil, common.Hash{}, 0, 0 + } + receipts := ReadReceipts(db, blockHash, blockNumber) + if len(receipts) <= int(receiptIndex) { + log.Error("Receipt referenced missing", "number", blockNumber, "hash", blockHash, "index", receiptIndex) + return nil, common.Hash{}, 0, 0 + } + return receipts[receiptIndex], blockHash, blockNumber, receiptIndex +} + +// ReadBloomBits retrieves the compressed bloom bit vector belonging to the given +// section and bit index from the database. +func ReadBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) ([]byte, error) { + key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...) + + binary.BigEndian.PutUint16(key[1:], uint16(bit)) + binary.BigEndian.PutUint64(key[3:], section) + + return db.Get(key) +} + +// WriteBloomBits stores the compressed bloom bits vector belonging to the given +// section and bit index. +func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Hash, bits []byte) { + key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...) + + binary.BigEndian.PutUint16(key[1:], uint16(bit)) + binary.BigEndian.PutUint64(key[3:], section) + + if err := db.Put(key, bits); err != nil { + log.Crit("Failed to store bloom bits", "err", err) + } +} diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go new file mode 100644 index 000000000..d9c129163 --- /dev/null +++ b/core/rawdb/accessors_indexes_test.go @@ -0,0 +1,68 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package rawdb + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" +) + +// Tests that positional lookup metadata can be stored and retrieved. 
+func TestLookupStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), big.NewInt(111), 1111, big.NewInt(11111), []byte{0x11, 0x11, 0x11}) + tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), big.NewInt(222), 2222, big.NewInt(22222), []byte{0x22, 0x22, 0x22}) + tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) + txs := []*types.Transaction{tx1, tx2, tx3} + + block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil) + + // Check that no transactions entries are in a pristine database + for i, tx := range txs { + if txn, _, _, _ := ReadTransaction(db, tx.Hash()); txn != nil { + t.Fatalf("tx #%d [%x]: non existent transaction returned: %v", i, tx.Hash(), txn) + } + } + // Insert all the transactions into the database, and verify contents + WriteBlock(db, block) + WriteTxLookupEntries(db, block) + + for i, tx := range txs { + if txn, hash, number, index := ReadTransaction(db, tx.Hash()); txn == nil { + t.Fatalf("tx #%d [%x]: transaction not found", i, tx.Hash()) + } else { + if hash != block.Hash() || number != block.NumberU64() || index != uint64(i) { + t.Fatalf("tx #%d [%x]: positional metadata mismatch: have %x/%d/%d, want %x/%v/%v", i, tx.Hash(), hash, number, index, block.Hash(), block.NumberU64(), i) + } + if tx.Hash() != txn.Hash() { + t.Fatalf("tx #%d [%x]: transaction mismatch: have %v, want %v", i, tx.Hash(), txn, tx) + } + } + } + // Delete the transactions and check purge + for i, tx := range txs { + DeleteTxLookupEntry(db, tx.Hash()) + if txn, _, _, _ := ReadTransaction(db, tx.Hash()); txn != nil { + t.Fatalf("tx #%d [%x]: deleted transaction returned: %v", i, tx.Hash(), txn) + } + } +} diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go new file mode 100644 index 000000000..73ab983f2 --- /dev/null +++ b/core/rawdb/accessors_metadata.go @@ -0,0 +1,90 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "encoding/json" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +// ReadDatabaseVersion retrieves the version number of the database. 
+func ReadDatabaseVersion(db DatabaseReader) int { + var version int + + enc, _ := db.Get(databaseVerisionKey) + rlp.DecodeBytes(enc, &version) + + return version +} + +// WriteDatabaseVersion stores the version number of the database +func WriteDatabaseVersion(db DatabaseWriter, version int) { + enc, _ := rlp.EncodeToBytes(version) + if err := db.Put(databaseVerisionKey, enc); err != nil { + log.Crit("Failed to store the database version", "err", err) + } +} + +// ReadChainConfig retrieves the consensus settings based on the given genesis hash. +func ReadChainConfig(db DatabaseReader, hash common.Hash) *params.ChainConfig { + data, _ := db.Get(append(configPrefix, hash[:]...)) + if len(data) == 0 { + return nil + } + var config params.ChainConfig + if err := json.Unmarshal(data, &config); err != nil { + log.Error("Invalid chain config JSON", "hash", hash, "err", err) + return nil + } + return &config +} + +// WriteChainConfig writes the chain config settings to the database. +func WriteChainConfig(db DatabaseWriter, hash common.Hash, cfg *params.ChainConfig) { + if cfg == nil { + return + } + data, err := json.Marshal(cfg) + if err != nil { + log.Crit("Failed to JSON encode chain config", "err", err) + } + if err := db.Put(append(configPrefix, hash[:]...), data); err != nil { + log.Crit("Failed to store chain config", "err", err) + } +} + +// ReadPreimage retrieves a single preimage of the provided hash. +func ReadPreimage(db DatabaseReader, hash common.Hash) []byte { + data, _ := db.Get(append(preimagePrefix, hash.Bytes()...)) + return data +} + +// WritePreimages writes the provided set of preimages to the database. `number` is the +// current block number, and is used for debug messages only. +func WritePreimages(db DatabaseWriter, number uint64, preimages map[common.Hash][]byte) { + for hash, preimage := range preimages { + if err := db.Put(append(preimagePrefix, hash.Bytes()...), preimage); err != nil { + log.Crit("Failed to store trie preimage", "err", err) + } + } + preimageCounter.Inc(int64(len(preimages))) + preimageHitCounter.Inc(int64(len(preimages))) +} diff --git a/core/rawdb/interfaces.go b/core/rawdb/interfaces.go new file mode 100644 index 000000000..3bdf55124 --- /dev/null +++ b/core/rawdb/interfaces.go @@ -0,0 +1,33 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +// DatabaseReader wraps the Has and Get method of a backing data store. +type DatabaseReader interface { + Has(key []byte) (bool, error) + Get(key []byte) ([]byte, error) +} + +// DatabaseWriter wraps the Put method of a backing data store. +type DatabaseWriter interface { + Put(key []byte, value []byte) error +} + +// DatabaseDeleter wraps the Delete method of a backing data store. 
+type DatabaseDeleter interface {
+	Delete(key []byte) error
+}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
new file mode 100644
index 000000000..a4b1596fd
--- /dev/null
+++ b/core/rawdb/schema.go
@@ -0,0 +1,79 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package rawdb contains a collection of low level database accessors.
+package rawdb
+
+import (
+	"encoding/binary"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/metrics"
+)
+
+// The fields below define the low level database schema prefixing.
+var (
+	// databaseVerisionKey tracks the current database version.
+	databaseVerisionKey = []byte("DatabaseVersion")
+
+	// headHeaderKey tracks the latest known header's hash.
+	headHeaderKey = []byte("LastHeader")
+
+	// headBlockKey tracks the latest known full block's hash.
+	headBlockKey = []byte("LastBlock")
+
+	// headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
+	headFastBlockKey = []byte("LastFast")
+
+	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
+	fastTrieProgressKey = []byte("TrieSync")
+
+	// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
+	headerPrefix       = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
+	headerTDSuffix     = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
+	headerHashSuffix   = []byte("n") // headerPrefix + num (uint64 big endian) + headerHashSuffix -> hash
+	headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian)
+
+	blockBodyPrefix     = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
+	blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
+
+	txLookupPrefix  = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
+	bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
+
+	preimagePrefix = []byte("secure-key-")      // preimagePrefix + hash -> preimage
+	configPrefix   = []byte("ethereum-config-") // config prefix for the db
+
+	// Chain index prefixes (use `i` + single byte to avoid mixing data types).
+	BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
+
+	preimageCounter    = metrics.NewRegisteredCounter("db/preimage/total", nil)
+	preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
+)
+
+// TxLookupEntry is positional metadata to help looking up the data content of
+// a transaction or receipt given only its hash.
+type TxLookupEntry struct {
+	BlockHash  common.Hash
+	BlockIndex uint64
+	Index      uint64
+}
+
+// encodeBlockNumber encodes a block number as big endian uint64
+func encodeBlockNumber(number uint64) []byte {
+	enc := make([]byte, 8)
+	binary.BigEndian.PutUint64(enc, number)
+	return enc
+}
-- cgit v1.2.3
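
The comments in schema.go above document how each key is composed, but the patch itself builds those keys inline inside the accessors. Below is a minimal sketch, written as if it lived inside package rawdb, spelling out that composition for header, total-difficulty and tx-lookup entries; the helper names headerKey, headerTDKey and txLookupKey are illustrative and are not part of this commit.

package rawdb

import "github.com/ethereum/go-ethereum/common"

// headerKey composes headerPrefix + num (uint64 big endian) + hash, the key
// under which a header is stored (illustrative helper, not in this patch).
func headerKey(number uint64, hash common.Hash) []byte {
	return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
}

// headerTDKey appends headerTDSuffix to the header key, yielding the key for
// the block's total difficulty.
func headerTDKey(number uint64, hash common.Hash) []byte {
	return append(headerKey(number, hash), headerTDSuffix...)
}

// txLookupKey composes txLookupPrefix + hash, the key under which the
// RLP-encoded TxLookupEntry of a transaction is stored.
func txLookupKey(hash common.Hash) []byte {
	return append(txLookupPrefix, hash.Bytes()...)
}

The single-byte prefixes keep the different data types from colliding in the flat key space (hence the note about reserving `i` for indexes), and encoding the block number as big-endian before the hash means numeric order matches lexicographic order, so a LevelDB-style iterator walks entries in block-number order.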
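
Putting the pieces together, here is a minimal usage sketch of the new package, assuming the accessor signatures introduced by this patch (WriteBlock and WriteTxLookupEntries from accessors_chain.go and accessors_indexes.go, plus the metadata helpers from accessors_metadata.go) and the in-memory ethdb backend used by the tests; the genesis hash and version number are placeholder values.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// In-memory database, the same backend the tests in this patch use.
	db, _ := ethdb.NewMemDatabase()

	// Store the chain config and database version, mirroring accessors_metadata.go.
	// The genesis hash and version are placeholders for illustration.
	genesisHash := common.HexToHash("0x01")
	rawdb.WriteChainConfig(db, genesisHash, params.MainnetChainConfig)
	rawdb.WriteDatabaseVersion(db, 3)

	// Build a block with one transaction and persist it together with its
	// transaction lookup entries.
	tx := types.NewTransaction(0, common.BytesToAddress([]byte{0x11}), big.NewInt(1), 21000, big.NewInt(1), nil)
	block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, []*types.Transaction{tx}, nil, nil)

	rawdb.WriteBlock(db, block)
	rawdb.WriteTxLookupEntries(db, block)

	// The lookup entry maps a transaction hash back to its containing block
	// and position, exactly as exercised by TestLookupStorage above.
	if txn, blockHash, blockNumber, index := rawdb.ReadTransaction(db, tx.Hash()); txn != nil {
		fmt.Println("found tx in block", blockHash.Hex(), "number", blockNumber, "index", index)
	}

	fmt.Println("database version:", rawdb.ReadDatabaseVersion(db))
	fmt.Println("chain config:", rawdb.ReadChainConfig(db, genesisHash))
}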