author    | gary rong <garyrong0905@gmail.com> | 2019-05-14 22:07:44 +0800
committer | Péter Szilágyi <peterke@gmail.com> | 2019-05-16 15:39:34 +0800
commit    | 37d280da411eb649ce22ab69827ac5aacd46534b (patch)
tree      | 8d19d2071c812575a14cea54a2de9efd2dd33157 /core
parent    | 42c746d6f405deb0c49d868dcc6e0afe279e19ab (diff)
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forcibly
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
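For context, the database inspector added below lives in core/rawdb and can be driven from any freezer-backed database handle. A minimal sketch of how one might invoke it, assuming the fork keeps this API; the paths and cache sizes are illustrative, not taken from the commit:

    package main

    import (
        "log"

        "github.com/ethereum/go-ethereum/core/rawdb"
    )

    func main() {
        // Open the key-value store together with its ancient freezer directory.
        // Both paths are hypothetical; a real node derives them from its datadir.
        db, err := rawdb.NewLevelDBDatabaseWithFreezer("/tmp/chaindata", 512, 512, "/tmp/chaindata/ancient", "")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // Walk every key-value entry plus the ancient tables, then print the size table.
        if err := rawdb.InspectDatabase(db); err != nil {
            log.Fatal(err)
        }
    }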
Diffstat (limited to 'core')
-rw-r--r-- | core/blockchain.go               | 105
-rw-r--r-- | core/blockchain_test.go          |  70
-rw-r--r-- | core/genesis.go                  |  17
-rw-r--r-- | core/headerchain.go              |  11
-rw-r--r-- | core/rawdb/accessors_chain.go    |  26
-rw-r--r-- | core/rawdb/accessors_metadata.go |  14
-rw-r--r-- | core/rawdb/database.go           | 134
-rw-r--r-- | core/rawdb/freezer.go            |  21
-rw-r--r-- | core/rawdb/freezer_table.go      |  13
-rw-r--r-- | core/rawdb/schema.go             |   3
-rw-r--r-- | core/rawdb/table.go              |   6
11 files changed, 353 insertions, 67 deletions
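Two themes recur throughout the diff below: the ancient store path is now pinned inside the key-value database so that restarts can detect a mismatched freezer location, and the freezer gains size accounting for the inspector. A rough round-trip sketch of the new path accessors, assuming the exported rawdb API shown in this diff; the path literal is hypothetical:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core/rawdb"
    )

    func main() {
        // On first setup the node records where its freezer lives; later runs
        // compare the stored path against the configured one and abort on mismatch.
        db := rawdb.NewMemoryDatabase()
        rawdb.WriteAncientPath(db, "/data/geth/chaindata/ancient") // hypothetical path
        fmt.Println(rawdb.ReadAncientPath(db))                     // -> /data/geth/chaindata/ancient
    }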
diff --git a/core/blockchain.go b/core/blockchain.go
index 4ac2c3a44..651c67c5d 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -93,7 +93,10 @@ const (
 	// - Version 6
 	//  The following incompatible database changes were added:
 	//    * Transaction lookup information stores the corresponding block number instead of block hash
-	BlockChainVersion uint64 = 6
+	// - Version 7
+	//  The following incompatible database changes were added:
+	//    * Use freezer as the ancient database to maintain all ancient data
+	BlockChainVersion uint64 = 7
 )

 // CacheConfig contains the configuration values for the trie caching/pruning
@@ -215,10 +218,35 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	if bc.genesisBlock == nil {
 		return nil, ErrNoGenesis
 	}
+	// Initialize the chain with ancient data if it isn't empty.
+	if bc.empty() {
+		if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
+			for i := uint64(0); i < frozen; i++ {
+				// Inject hash<->number mapping.
+				hash := rawdb.ReadCanonicalHash(bc.db, i)
+				if hash == (common.Hash{}) {
+					return nil, errors.New("broken ancient database")
+				}
+				rawdb.WriteHeaderNumber(bc.db, hash, i)
+
+				// Inject txlookup indexes.
+				block := rawdb.ReadBlock(bc.db, hash, i)
+				if block == nil {
+					return nil, errors.New("broken ancient database")
+				}
+				rawdb.WriteTxLookupEntries(bc.db, block)
+			}
+			hash := rawdb.ReadCanonicalHash(bc.db, frozen-1)
+			rawdb.WriteHeadHeaderHash(bc.db, hash)
+			rawdb.WriteHeadFastBlockHash(bc.db, hash)
+
+			log.Info("Initialized chain with ancients", "number", frozen-1, "hash", hash)
+		}
+	}
 	if err := bc.loadLastState(); err != nil {
 		return nil, err
 	}
-	if frozen, err := bc.db.Ancients(); err == nil && frozen >= 1 {
+	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
 		var (
 			needRewind bool
 			low        uint64
@@ -278,6 +306,20 @@ func (bc *BlockChain) GetVMConfig() *vm.Config {
 	return &bc.vmConfig
 }

+// empty returns an indicator whether the blockchain is empty.
+// Note, it's a special case that we connect a non-empty ancient
+// database with an empty node, so that we can plugin the ancient
+// into node seamlessly.
+func (bc *BlockChain) empty() bool {
+	genesis := bc.genesisBlock.Hash()
+	for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
+		if hash != genesis {
+			return false
+		}
+	}
+	return true
+}
+
 // loadLastState loads the last known chain state from the database. This method
 // assumes that the chain manager mutex is held.
 func (bc *BlockChain) loadLastState() error {
@@ -383,7 +425,9 @@ func (bc *BlockChain) SetHead(head uint64) error {
 		if num+1 <= frozen {
 			// Truncate all relative data(header, total difficulty, body, receipt
 			// and canonical hash) from ancient store.
-			bc.db.TruncateAncients(num + 1)
+			if err := bc.db.TruncateAncients(num + 1); err != nil {
+				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
+			}
 			// Remove the hash <-> number mapping from the active store.
 			rawdb.DeleteHeaderNumber(db, hash)
@@ -948,6 +992,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			}
 		}
 	}()
+	var deleted types.Blocks
 	for i, block := range blockChain {
 		// Short circuit insertion if shutting down or processing failed
 		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
@@ -961,16 +1006,38 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
 			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
 		}
-		// Compute all the non-consensus fields of the receipts
-		if err := receiptChain[i].DeriveFields(bc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
-			return i, fmt.Errorf("failed to derive receipts data: %v", err)
+		var (
+			start  = time.Now()
+			logged = time.Now()
+			count  int
+		)
+		// Migrate all ancient blocks. This can happen if someone upgrades from Geth
+		// 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
+		// long term.
+		for {
+			// We can ignore the error here since light client won't hit this code path.
+			frozen, _ := bc.db.Ancients()
+			if frozen >= block.NumberU64() {
+				break
+			}
+			h := rawdb.ReadCanonicalHash(bc.db, frozen)
+			b := rawdb.ReadBlock(bc.db, h, frozen)
+			size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
+			count += 1
+
+			// Always keep genesis block in active database.
+			if b.NumberU64() != 0 {
+				deleted = append(deleted, b)
+			}
+			if time.Since(logged) > 8*time.Second {
+				log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+				logged = time.Now()
+			}
 		}
-		// Initialize freezer with genesis block first
-		if frozen, err := bc.db.Ancients(); err == nil && frozen == 0 && block.NumberU64() == 1 {
-			genesisBlock := rawdb.ReadBlock(bc.db, rawdb.ReadCanonicalHash(bc.db, 0), 0)
-			size += rawdb.WriteAncientBlock(bc.db, genesisBlock, nil, genesisBlock.Difficulty())
+		if count > 0 {
+			log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
 		}
-		// Flush data into ancient store.
+		// Flush data into ancient database.
 		size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))

 		rawdb.WriteTxLookupEntries(batch, block)
@@ -992,15 +1059,8 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 	}
 	previous = nil // disable rollback explicitly

-	// Remove the ancient data from the active store
-	cleanGenesis := len(blockChain) > 0 && blockChain[0].NumberU64() == 1
-	if cleanGenesis {
-		// Migrate genesis block to ancient store too.
-		rawdb.DeleteBlockWithoutNumber(batch, rawdb.ReadCanonicalHash(bc.db, 0), 0)
-		rawdb.DeleteCanonicalHash(batch, 0)
-	}
 	// Wipe out canonical block data.
-	for _, block := range blockChain {
+	for _, block := range append(deleted, blockChain...) {
 		rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
 		rawdb.DeleteCanonicalHash(batch, block.NumberU64())
 	}
@@ -1008,8 +1068,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		return 0, err
 	}
 	batch.Reset()
+
 	// Wipe out side chain too.
-	for _, block := range blockChain {
+	for _, block := range append(deleted, blockChain...) {
 		for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
 			rawdb.DeleteBlock(batch, hash, block.NumberU64())
 		}
@@ -1035,10 +1096,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			stats.ignored++
 			continue
 		}
-		// Compute all the non-consensus fields of the receipts
-		if err := receiptChain[i].DeriveFields(bc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
-			return i, fmt.Errorf("failed to derive receipts data: %v", err)
-		}
 		// Write all the data out into the database
 		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
 		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 7b1a9a54f..09caf7e60 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -716,6 +716,20 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	height := uint64(1024)
 	blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil)

+	// makeDb creates a db instance for testing.
+	makeDb := func() (ethdb.Database, func()) {
+		dir, err := ioutil.TempDir("", "")
+		if err != nil {
+			t.Fatalf("failed to create temp freezer dir: %v", err)
+		}
+		defer os.Remove(dir)
+		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "")
+		if err != nil {
+			t.Fatalf("failed to create temp freezer db: %v", err)
+		}
+		gspec.MustCommit(db)
+		return db, func() { os.RemoveAll(dir) }
+	}
 	// Configure a subchain to roll back
 	remove := []common.Hash{}
 	for _, block := range blocks[height/2:] {
@@ -734,9 +748,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 		}
 	}
 	// Import the chain as an archive node and ensure all pointers are updated
-	archiveDb := rawdb.NewMemoryDatabase()
-	gspec.MustCommit(archiveDb)
-
+	archiveDb, delfn := makeDb()
+	defer delfn()
 	archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
 	if n, err := archive.InsertChain(blocks); err != nil {
 		t.Fatalf("failed to process block %d: %v", n, err)
@@ -748,8 +761,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	assert(t, "archive", archive, height/2, height/2, height/2)

 	// Import the chain as a non-archive node and ensure all pointers are updated
-	fastDb := rawdb.NewMemoryDatabase()
-	gspec.MustCommit(fastDb)
+	fastDb, delfn := makeDb()
+	defer delfn()
 	fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
 	defer fast.Stop()
@@ -768,16 +781,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	assert(t, "fast", fast, height/2, height/2, 0)

 	// Import the chain as a ancient-first node and ensure all pointers are updated
-	frdir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.Remove(frdir)
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer db: %v", err)
-	}
-	gspec.MustCommit(ancientDb)
+	ancientDb, delfn := makeDb()
+	defer delfn()
 	ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
 	defer ancient.Stop()
@@ -795,9 +800,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	}

 	// Import the chain as a light node and ensure all pointers are updated
-	lightDb := rawdb.NewMemoryDatabase()
-	gspec.MustCommit(lightDb)
-
+	lightDb, delfn := makeDb()
+	defer delfn()
 	light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
 	if n, err := light.InsertHeaderChain(headers, 1); err != nil {
 		t.Fatalf("failed to insert header %d: %v", n, err)
@@ -1892,10 +1896,18 @@ func testInsertKnownChainData(t *testing.T, typ string) {
 		b.SetCoinbase(common.Address{1})
 		b.OffsetTime(-9) // A higher difficulty
 	})
-	// Import the shared chain and the original canonical one
-	chaindb := rawdb.NewMemoryDatabase()
+	dir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("failed to create temp freezer dir: %v", err)
+	}
+	defer os.Remove(dir)
+	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "")
+	if err != nil {
+		t.Fatalf("failed to create temp freezer db: %v", err)
+	}
 	new(Genesis).MustCommit(chaindb)
+	defer os.RemoveAll(dir)

 	chain, err := NewBlockChain(chaindb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
 	if err != nil {
@@ -1992,18 +2004,16 @@ func testInsertKnownChainData(t *testing.T, typ string) {
 	// The head shouldn't change.
 	asserter(t, blocks3[len(blocks3)-1])

-	if typ != "headers" {
-		// Rollback the heavier chain and re-insert the longer chain again
-		for i := 0; i < len(blocks3); i++ {
-			rollback = append(rollback, blocks3[i].Hash())
-		}
-		chain.Rollback(rollback)
+	// Rollback the heavier chain and re-insert the longer chain again
+	for i := 0; i < len(blocks3); i++ {
+		rollback = append(rollback, blocks3[i].Hash())
+	}
+	chain.Rollback(rollback)

-		if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
-			t.Fatalf("failed to insert chain data: %v", err)
-		}
-		asserter(t, blocks2[len(blocks2)-1])
+	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
+		t.Fatalf("failed to insert chain data: %v", err)
 	}
+	asserter(t, blocks2[len(blocks2)-1])
 }

 // getLongAndShortChains returns two chains,
diff --git a/core/genesis.go b/core/genesis.go
index 1f34a3a9e..830fb033b 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -170,6 +170,22 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constant
 		return genesis.Config, block.Hash(), err
 	}

+	// We have the genesis block in database(perhaps in ancient database)
+	// but the corresponding state is missing.
+	header := rawdb.ReadHeader(db, stored, 0)
+	if _, err := state.New(header.Root, state.NewDatabaseWithCache(db, 0)); err != nil {
+		if genesis == nil {
+			genesis = DefaultGenesisBlock()
+		}
+		// Ensure the stored genesis matches with the given one.
+		hash := genesis.ToBlock(nil).Hash()
+		if hash != stored {
+			return genesis.Config, hash, &GenesisMismatchError{stored, hash}
+		}
+		block, err := genesis.Commit(db)
+		return genesis.Config, block.Hash(), err
+	}
+
 	// Check whether the genesis block is already written.
 	if genesis != nil {
 		hash := genesis.ToBlock(nil).Hash()
@@ -277,6 +293,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
 	rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
 	rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
 	rawdb.WriteHeadBlockHash(db, block.Hash())
+	rawdb.WriteHeadFastBlockHash(db, block.Hash())
 	rawdb.WriteHeadHeaderHash(db, block.Hash())

 	config := g.Config
diff --git a/core/headerchain.go b/core/headerchain.go
index 659141fd1..cdd64bb50 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -274,9 +274,14 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
 			return i, errors.New("aborted")
 		}
 		// If the header's already known, skip it, otherwise store
-		if hc.HasHeader(header.Hash(), header.Number.Uint64()) {
-			stats.ignored++
-			continue
+		hash := header.Hash()
+		if hc.HasHeader(hash, header.Number.Uint64()) {
+			externTd := hc.GetTd(hash, header.Number.Uint64())
+			localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
+			if externTd == nil || externTd.Cmp(localTd) <= 0 {
+				stats.ignored++
+				continue
+			}
 		}
 		if err := writeHeader(header); err != nil {
 			return i, err
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 681e6e917..fab7ca56c 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -89,7 +89,16 @@ func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
 	return &number
 }

-// DeleteHeaderNumber removes hash to number mapping.
+// WriteHeaderNumber stores the hash->number mapping.
+func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+	key := headerNumberKey(hash)
+	enc := encodeBlockNumber(number)
+	if err := db.Put(key, enc); err != nil {
+		log.Crit("Failed to store hash to number mapping", "err", err)
+	}
+}
+
+// DeleteHeaderNumber removes hash->number mapping.
 func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
 	if err := db.Delete(headerNumberKey(hash)); err != nil {
 		log.Crit("Failed to delete hash to number mapping", "err", err)
@@ -206,22 +215,19 @@ func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header
 // WriteHeader stores a block header into the database and also stores the hash-
 // to-number mapping.
 func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
-	// Write the hash -> number mapping
 	var (
-		hash    = header.Hash()
-		number  = header.Number.Uint64()
-		encoded = encodeBlockNumber(number)
+		hash   = header.Hash()
+		number = header.Number.Uint64()
 	)
-	key := headerNumberKey(hash)
-	if err := db.Put(key, encoded); err != nil {
-		log.Crit("Failed to store hash to number mapping", "err", err)
-	}
+	// Write the hash -> number mapping
+	WriteHeaderNumber(db, hash, number)
+
 	// Write the encoded header
 	data, err := rlp.EncodeToBytes(header)
 	if err != nil {
 		log.Crit("Failed to RLP encode header", "err", err)
 	}
-	key = headerKey(number, hash)
+	key := headerKey(number, hash)
 	if err := db.Put(key, data); err != nil {
 		log.Crit("Failed to store header", "err", err)
 	}
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
index f8d09fbdd..e6235f010 100644
--- a/core/rawdb/accessors_metadata.go
+++ b/core/rawdb/accessors_metadata.go
@@ -80,6 +80,20 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha
 	}
 }

+// ReadAncientPath retrieves ancient database path which is recorded during the
+// first node setup or forcibly changed by user.
+func ReadAncientPath(db ethdb.KeyValueReader) string {
+	data, _ := db.Get(ancientKey)
+	return string(data)
+}
+
+// WriteAncientPath writes ancient database path into the key-value database.
+func WriteAncientPath(db ethdb.KeyValueWriter, path string) {
+	if err := db.Put(ancientKey, []byte(path)); err != nil {
+		log.Crit("Failed to store ancient path", "err", err)
+	}
+}
+
 // ReadPreimage retrieves a single preimage of the provided hash.
 func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
 	data, _ := db.Get(preimageKey(hash))
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 5a3c7f94b..016c6c909 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -17,11 +17,17 @@
 package rawdb

 import (
+	"bytes"
 	"fmt"
+	"os"
+	"time"

+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/leveldb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/olekukonko/tablewriter"
 )

 // freezerdb is a database wrapper that enabled freezer data retrievals.
@@ -66,6 +72,11 @@ func (db *nofreezedb) Ancients() (uint64, error) {
 	return 0, errNotSupported
 }

+// AncientSize returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
+	return 0, errNotSupported
+}
+
 // AppendAncient returns an error as we don't have a backing chain freezer.
 func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
 	return errNotSupported
@@ -140,5 +151,128 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer
 		kvdb.Close()
 		return nil, err
 	}
+	// Make sure we always use the same ancient store.
+	//
+	//                 | stored == nil    | stored != nil
+	// ----------------+------------------+----------------------
+	// freezer == nil  | non-freezer mode | ancient store missing
+	// freezer != nil  | initialize       | ensure consistency
+	stored := ReadAncientPath(kvdb)
+	if stored == "" && freezer != "" {
+		WriteAncientPath(kvdb, freezer)
+	} else if stored != freezer {
+		log.Warn("Ancient path mismatch", "stored", stored, "given", freezer)
+		log.Crit("Please use a consistent ancient path or migrate it via the command line tool `geth migrate-ancient`")
+	}
 	return frdb, nil
 }
+
+// InspectDatabase traverses the entire database and checks the size
+// of all different categories of data.
+func InspectDatabase(db ethdb.Database) error {
+	it := db.NewIterator()
+	defer it.Release()
+
+	var (
+		count  int64
+		start  = time.Now()
+		logged = time.Now()
+
+		// Key-value store statistics
+		total          common.StorageSize
+		headerSize     common.StorageSize
+		bodySize       common.StorageSize
+		receiptSize    common.StorageSize
+		tdSize         common.StorageSize
+		numHashPairing common.StorageSize
+		hashNumPairing common.StorageSize
+		trieSize       common.StorageSize
+		txlookupSize   common.StorageSize
+		preimageSize   common.StorageSize
+		bloomBitsSize  common.StorageSize
+
+		// Ancient store statistics
+		ancientHeaders  common.StorageSize
+		ancientBodies   common.StorageSize
+		ancientReceipts common.StorageSize
+		ancientHashes   common.StorageSize
+		ancientTds      common.StorageSize
+
+		// Les statistic
+		ChtTrieNodes   common.StorageSize
+		BloomTrieNodes common.StorageSize
+	)
+	// Inspect key-value database first.
+	for it.Next() {
+		var (
+			key  = it.Key()
+			size = common.StorageSize(len(key) + len(it.Value()))
+		)
+		total += size
+		switch {
+		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
+			tdSize += size
+		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
+			numHashPairing += size
+		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
+			headerSize += size
+		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
+			hashNumPairing += size
+		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
+			bodySize += size
+		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
+			receiptSize += size
+		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
+			txlookupSize += size
+		case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength):
+			preimageSize += size
+		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
+			bloomBitsSize += size
+		case bytes.HasPrefix(key, []byte("cht-")) && len(key) == 4+common.HashLength:
+			ChtTrieNodes += size
+		case bytes.HasPrefix(key, []byte("blt-")) && len(key) == 4+common.HashLength:
+			BloomTrieNodes += size
+		case len(key) == common.HashLength:
+			trieSize += size
+		}
+		count += 1
+		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
+			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+			logged = time.Now()
+		}
+	}
+	// Inspect append-only file store then.
+	ancients := []*common.StorageSize{&ancientHeaders, &ancientBodies, &ancientReceipts, &ancientHashes, &ancientTds}
+	for i, category := range []string{freezerHeaderTable, freezerBodiesTable, freezerReceiptTable, freezerHashTable, freezerDifficultyTable} {
+		if size, err := db.AncientSize(category); err == nil {
+			*ancients[i] += common.StorageSize(size)
+			total += common.StorageSize(size)
+		}
+	}
+	// Display the database statistic.
+	stats := [][]string{
+		{"Key-Value store", "Headers", headerSize.String()},
+		{"Key-Value store", "Bodies", bodySize.String()},
+		{"Key-Value store", "Receipts", receiptSize.String()},
+		{"Key-Value store", "Difficulties", tdSize.String()},
+		{"Key-Value store", "Block number->hash", numHashPairing.String()},
+		{"Key-Value store", "Block hash->number", hashNumPairing.String()},
+		{"Key-Value store", "Transaction index", txlookupSize.String()},
+		{"Key-Value store", "Bloombit index", bloomBitsSize.String()},
+		{"Key-Value store", "Trie nodes", trieSize.String()},
+		{"Key-Value store", "Trie preimages", preimageSize.String()},
+		{"Ancient store", "Headers", ancientHeaders.String()},
+		{"Ancient store", "Bodies", ancientBodies.String()},
+		{"Ancient store", "Receipts", ancientReceipts.String()},
+		{"Ancient store", "Difficulties", ancientTds.String()},
+		{"Ancient store", "Block number->hash", ancientHashes.String()},
+		{"Light client", "CHT trie nodes", ChtTrieNodes.String()},
+		{"Light client", "Bloom trie nodes", BloomTrieNodes.String()},
+	}
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetHeader([]string{"Database", "Category", "Size"})
+	table.SetFooter([]string{"", "Total", total.String()})
+	table.AppendBulk(stats)
+	table.Render()
+	return nil
+}
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 21a6055cd..f3a6bbb8f 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"os"
 	"path/filepath"
 	"sync/atomic"
 	"time"
@@ -39,6 +40,10 @@ var (
 	// errOutOrderInsertion is returned if the user attempts to inject out-of-order
 	// binary blobs into the freezer.
 	errOutOrderInsertion = errors.New("the append operation is out-order")
+
+	// errSymlinkDatadir is returned if the ancient directory specified by user
+	// is a symbolic link.
+	errSymlinkDatadir = errors.New("symbolic link datadir is not supported")
 )

 const (
@@ -78,6 +83,13 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
 		readMeter  = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
 		writeMeter = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
 	)
+	// Ensure the datadir is not a symbolic link if it exists.
+	if info, err := os.Lstat(datadir); !os.IsNotExist(err) {
+		if info.Mode()&os.ModeSymlink != 0 {
+			log.Warn("Symbolic link ancient database is not supported", "path", datadir)
+			return nil, errSymlinkDatadir
+		}
+	}
 	// Leveldb uses LOCK as the filelock filename. To prevent the
 	// name collision, we use FLOCK as the lock name.
 	lock, _, err := fileutil.Flock(filepath.Join(datadir, "FLOCK"))
@@ -107,6 +119,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
 		lock.Release()
 		return nil, err
 	}
+	log.Info("Opened ancient database", "database", datadir)
 	return freezer, nil
 }
@@ -149,6 +162,14 @@ func (f *freezer) Ancients() (uint64, error) {
 	return atomic.LoadUint64(&f.frozen), nil
 }

+// AncientSize returns the ancient size of the specified category.
+func (f *freezer) AncientSize(kind string) (uint64, error) {
+	if table := f.tables[kind]; table != nil {
+		return table.size()
+	}
+	return 0, errUnknownTable
+}
+
 // AppendAncient injects all binary blobs belong to block at the end of the
 // append-only immutable table files.
 //
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index d46597f73..ebccf7816 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -515,6 +515,19 @@ func (t *freezerTable) has(number uint64) bool {
 	return atomic.LoadUint64(&t.items) > number
 }

+// size returns the total data size in the freezer table.
+func (t *freezerTable) size() (uint64, error) {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	stat, err := t.index.Stat()
+	if err != nil {
+		return 0, err
+	}
+	total := uint64(t.maxFileSize)*uint64(t.headId-t.tailId) + uint64(t.headBytes) + uint64(stat.Size())
+	return total, nil
+}
+
 // Sync pushes any pending data from memory out to disk. This is an expensive
 // operation, so use it with care.
 func (t *freezerTable) Sync() error {
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index a44a2c99f..0d54a3c8b 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -41,6 +41,9 @@ var (
 	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
 	fastTrieProgressKey = []byte("TrieSync")

+	// ancientKey tracks the absolute path of ancient database.
+	ancientKey = []byte("AncientPath")
+
 	// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
 	headerPrefix   = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
 	headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 124678959..6610b7f5a 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -68,6 +68,12 @@ func (t *table) Ancients() (uint64, error) {
 	return t.db.Ancients()
 }

+// AncientSize is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) AncientSize(kind string) (uint64, error) {
+	return t.db.AncientSize(kind)
+}
+
 // AppendAncient is a noop passthrough that just forwards the request to the underlying
 // database.
 func (t *table) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
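The new freezerTable.size() above estimates on-disk usage arithmetically instead of stat-ing every data file: each file between tailId and headId is counted at the full maxFileSize, the open head file contributes headBytes, and the index file adds its stat'd length. A standalone sketch of that arithmetic; the function and its arguments are illustrative, only the formula is taken from the diff:

    package main

    import "fmt"

    // tableSize mirrors freezerTable.size(): sealed data files are assumed to be
    // exactly maxFileSize bytes each, the open head file contributes headBytes,
    // and the index file adds its own length on top.
    func tableSize(maxFileSize, headID, tailID, headBytes, indexBytes uint64) uint64 {
        return maxFileSize*(headID-tailID) + headBytes + indexBytes
    }

    func main() {
        // E.g. three sealed 2 GiB files, a half-filled head file and a ~600 kB index.
        fmt.Println(tableSize(2<<30, 4, 1, 1<<30, 600000))
    }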