author | gary rong <garyrong0905@gmail.com> | 2019-05-14 22:07:44 +0800
committer | Péter Szilágyi <peterke@gmail.com> | 2019-05-16 15:39:34 +0800
commit | 37d280da411eb649ce22ab69827ac5aacd46534b (patch)
tree | 8d19d2071c812575a14cea54a2de9efd2dd33157 /core/rawdb
parent | 42c746d6f405deb0c49d868dcc6e0afe279e19ab (diff)
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forcibly
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
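
Usage note (not part of this commit's diff): the inspector added here is exposed programmatically as rawdb.InspectDatabase, so it can also be driven from Go rather than through the geth command. A minimal sketch, assuming placeholder paths, cache/handle counts and metrics namespace:

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Open the key-value store together with its ancient (freezer) store.
	// All arguments here are illustrative placeholders.
	db, err := rawdb.NewLevelDBDatabaseWithFreezer("chaindata", 512, 512, "chaindata/ancient", "eth/db/chaindata/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Walk every key-value entry plus the freezer tables and print the
	// per-category size table to stdout.
	if err := rawdb.InspectDatabase(db); err != nil {
		log.Fatal(err)
	}
}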
Diffstat (limited to 'core/rawdb')
-rw-r--r-- | core/rawdb/accessors_chain.go | 26
-rw-r--r-- | core/rawdb/accessors_metadata.go | 14
-rw-r--r-- | core/rawdb/database.go | 134
-rw-r--r-- | core/rawdb/freezer.go | 21
-rw-r--r-- | core/rawdb/freezer_table.go | 13
-rw-r--r-- | core/rawdb/schema.go | 3
-rw-r--r-- | core/rawdb/table.go | 6
7 files changed, 207 insertions, 10 deletions
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 681e6e917..fab7ca56c 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -89,7 +89,16 @@ func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
 	return &number
 }
 
-// DeleteHeaderNumber removes hash to number mapping.
+// WriteHeaderNumber stores the hash->number mapping.
+func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+	key := headerNumberKey(hash)
+	enc := encodeBlockNumber(number)
+	if err := db.Put(key, enc); err != nil {
+		log.Crit("Failed to store hash to number mapping", "err", err)
+	}
+}
+
+// DeleteHeaderNumber removes hash->number mapping.
 func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
 	if err := db.Delete(headerNumberKey(hash)); err != nil {
 		log.Crit("Failed to delete hash to number mapping", "err", err)
@@ -206,22 +215,19 @@ func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header
 // WriteHeader stores a block header into the database and also stores the hash-
 // to-number mapping.
 func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
-	// Write the hash -> number mapping
 	var (
-		hash    = header.Hash()
-		number  = header.Number.Uint64()
-		encoded = encodeBlockNumber(number)
+		hash   = header.Hash()
+		number = header.Number.Uint64()
 	)
-	key := headerNumberKey(hash)
-	if err := db.Put(key, encoded); err != nil {
-		log.Crit("Failed to store hash to number mapping", "err", err)
-	}
+	// Write the hash -> number mapping
+	WriteHeaderNumber(db, hash, number)
+
 	// Write the encoded header
 	data, err := rlp.EncodeToBytes(header)
 	if err != nil {
 		log.Crit("Failed to RLP encode header", "err", err)
 	}
-	key = headerKey(number, hash)
+	key := headerKey(number, hash)
 	if err := db.Put(key, data); err != nil {
 		log.Crit("Failed to store header", "err", err)
 	}
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
index f8d09fbdd..e6235f010 100644
--- a/core/rawdb/accessors_metadata.go
+++ b/core/rawdb/accessors_metadata.go
@@ -80,6 +80,20 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha
 	}
 }
 
+// ReadAncientPath retrieves ancient database path which is recorded during the
+// first node setup or forcibly changed by user.
+func ReadAncientPath(db ethdb.KeyValueReader) string {
+	data, _ := db.Get(ancientKey)
+	return string(data)
+}
+
+// WriteAncientPath writes ancient database path into the key-value database.
+func WriteAncientPath(db ethdb.KeyValueWriter, path string) {
+	if err := db.Put(ancientKey, []byte(path)); err != nil {
+		log.Crit("Failed to store ancient path", "err", err)
+	}
+}
+
 // ReadPreimage retrieves a single preimage of the provided hash.
 func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
 	data, _ := db.Get(preimageKey(hash))
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 5a3c7f94b..016c6c909 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -17,11 +17,17 @@
 package rawdb
 
 import (
+	"bytes"
 	"fmt"
+	"os"
+	"time"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/leveldb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/olekukonko/tablewriter"
 )
 
 // freezerdb is a database wrapper that enabled freezer data retrievals.
@@ -66,6 +72,11 @@ func (db *nofreezedb) Ancients() (uint64, error) {
 	return 0, errNotSupported
 }
 
+// AncientSize returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
+	return 0, errNotSupported
+}
+
 // AppendAncient returns an error as we don't have a backing chain freezer.
 func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
 	return errNotSupported
@@ -140,5 +151,128 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer
 		kvdb.Close()
 		return nil, err
 	}
+	// Make sure we always use the same ancient store.
+	//
+	//                 | stored == nil    | stored != nil
+	// ----------------+------------------+----------------------
+	// freezer == nil  | non-freezer mode | ancient store missing
+	// freezer != nil  | initialize       | ensure consistency
+	stored := ReadAncientPath(kvdb)
+	if stored == "" && freezer != "" {
+		WriteAncientPath(kvdb, freezer)
+	} else if stored != freezer {
+		log.Warn("Ancient path mismatch", "stored", stored, "given", freezer)
+		log.Crit("Please use a consistent ancient path or migrate it via the command line tool `geth migrate-ancient`")
+	}
 	return frdb, nil
 }
+
+// InspectDatabase traverses the entire database and checks the size
+// of all different categories of data.
+func InspectDatabase(db ethdb.Database) error {
+	it := db.NewIterator()
+	defer it.Release()
+
+	var (
+		count  int64
+		start  = time.Now()
+		logged = time.Now()
+
+		// Key-value store statistics
+		total          common.StorageSize
+		headerSize     common.StorageSize
+		bodySize       common.StorageSize
+		receiptSize    common.StorageSize
+		tdSize         common.StorageSize
+		numHashPairing common.StorageSize
+		hashNumPairing common.StorageSize
+		trieSize       common.StorageSize
+		txlookupSize   common.StorageSize
+		preimageSize   common.StorageSize
+		bloomBitsSize  common.StorageSize
+
+		// Ancient store statistics
+		ancientHeaders  common.StorageSize
+		ancientBodies   common.StorageSize
+		ancientReceipts common.StorageSize
+		ancientHashes   common.StorageSize
+		ancientTds      common.StorageSize
+
+		// Les statistic
+		ChtTrieNodes   common.StorageSize
+		BloomTrieNodes common.StorageSize
+	)
+	// Inspect key-value database first.
+	for it.Next() {
+		var (
+			key  = it.Key()
+			size = common.StorageSize(len(key) + len(it.Value()))
+		)
+		total += size
+		switch {
+		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
+			tdSize += size
+		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
+			numHashPairing += size
+		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
+			headerSize += size
+		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
+			hashNumPairing += size
+		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
+			bodySize += size
+		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
+			receiptSize += size
+		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
+			txlookupSize += size
+		case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength):
+			preimageSize += size
+		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
+			bloomBitsSize += size
+		case bytes.HasPrefix(key, []byte("cht-")) && len(key) == 4+common.HashLength:
+			ChtTrieNodes += size
+		case bytes.HasPrefix(key, []byte("blt-")) && len(key) == 4+common.HashLength:
+			BloomTrieNodes += size
+		case len(key) == common.HashLength:
+			trieSize += size
+		}
+		count += 1
+		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
+			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+			logged = time.Now()
+		}
+	}
+	// Inspect append-only file store then.
+	ancients := []*common.StorageSize{&ancientHeaders, &ancientBodies, &ancientReceipts, &ancientHashes, &ancientTds}
+	for i, category := range []string{freezerHeaderTable, freezerBodiesTable, freezerReceiptTable, freezerHashTable, freezerDifficultyTable} {
+		if size, err := db.AncientSize(category); err == nil {
+			*ancients[i] += common.StorageSize(size)
+			total += common.StorageSize(size)
+		}
+	}
+	// Display the database statistic.
+	stats := [][]string{
+		{"Key-Value store", "Headers", headerSize.String()},
+		{"Key-Value store", "Bodies", bodySize.String()},
+		{"Key-Value store", "Receipts", receiptSize.String()},
+		{"Key-Value store", "Difficulties", tdSize.String()},
+		{"Key-Value store", "Block number->hash", numHashPairing.String()},
+		{"Key-Value store", "Block hash->number", hashNumPairing.String()},
+		{"Key-Value store", "Transaction index", txlookupSize.String()},
+		{"Key-Value store", "Bloombit index", bloomBitsSize.String()},
+		{"Key-Value store", "Trie nodes", trieSize.String()},
+		{"Key-Value store", "Trie preimages", preimageSize.String()},
+		{"Ancient store", "Headers", ancientHeaders.String()},
+		{"Ancient store", "Bodies", ancientBodies.String()},
+		{"Ancient store", "Receipts", ancientReceipts.String()},
+		{"Ancient store", "Difficulties", ancientTds.String()},
+		{"Ancient store", "Block number->hash", ancientHashes.String()},
+		{"Light client", "CHT trie nodes", ChtTrieNodes.String()},
+		{"Light client", "Bloom trie nodes", BloomTrieNodes.String()},
+	}
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetHeader([]string{"Database", "Category", "Size"})
+	table.SetFooter([]string{"", "Total", total.String()})
+	table.AppendBulk(stats)
+	table.Render()
+	return nil
+}
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 21a6055cd..f3a6bbb8f 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"os"
 	"path/filepath"
 	"sync/atomic"
 	"time"
@@ -39,6 +40,10 @@ var (
 	// errOutOrderInsertion is returned if the user attempts to inject out-of-order
 	// binary blobs into the freezer.
 	errOutOrderInsertion = errors.New("the append operation is out-order")
+
+	// errSymlinkDatadir is returned if the ancient directory specified by user
+	// is a symbolic link.
+	errSymlinkDatadir = errors.New("symbolic link datadir is not supported")
 )
 
 const (
@@ -78,6 +83,13 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
 		readMeter  = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
 		writeMeter = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
 	)
+	// Ensure the datadir is not a symbolic link if it exists.
+	if info, err := os.Lstat(datadir); !os.IsNotExist(err) {
+		if info.Mode()&os.ModeSymlink != 0 {
+			log.Warn("Symbolic link ancient database is not supported", "path", datadir)
+			return nil, errSymlinkDatadir
+		}
+	}
 	// Leveldb uses LOCK as the filelock filename. To prevent the
 	// name collision, we use FLOCK as the lock name.
 	lock, _, err := fileutil.Flock(filepath.Join(datadir, "FLOCK"))
@@ -107,6 +119,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
 		lock.Release()
 		return nil, err
 	}
+	log.Info("Opened ancient database", "database", datadir)
 	return freezer, nil
 }
 
@@ -149,6 +162,14 @@ func (f *freezer) Ancients() (uint64, error) {
 	return atomic.LoadUint64(&f.frozen), nil
 }
 
+// AncientSize returns the ancient size of the specified category.
+func (f *freezer) AncientSize(kind string) (uint64, error) {
+	if table := f.tables[kind]; table != nil {
+		return table.size()
+	}
+	return 0, errUnknownTable
+}
+
 // AppendAncient injects all binary blobs belong to block at the end of the
 // append-only immutable table files.
 //
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index d46597f73..ebccf7816 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -515,6 +515,19 @@ func (t *freezerTable) has(number uint64) bool {
 	return atomic.LoadUint64(&t.items) > number
 }
 
+// size returns the total data size in the freezer table.
+func (t *freezerTable) size() (uint64, error) {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	stat, err := t.index.Stat()
+	if err != nil {
+		return 0, err
+	}
+	total := uint64(t.maxFileSize)*uint64(t.headId-t.tailId) + uint64(t.headBytes) + uint64(stat.Size())
+	return total, nil
+}
+
 // Sync pushes any pending data from memory out to disk. This is an expensive
 // operation, so use it with care.
 func (t *freezerTable) Sync() error {
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index a44a2c99f..0d54a3c8b 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -41,6 +41,9 @@ var (
 	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
 	fastTrieProgressKey = []byte("TrieSync")
 
+	// ancientKey tracks the absolute path of ancient database.
+	ancientKey = []byte("AncientPath")
+
 	// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
 	headerPrefix   = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
 	headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 124678959..6610b7f5a 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -68,6 +68,12 @@ func (t *table) Ancients() (uint64, error) {
 	return t.db.Ancients()
 }
 
+// AncientSize is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) AncientSize(kind string) (uint64, error) {
+	return t.db.AncientSize(kind)
+}
+
 // AppendAncient is a noop passthrough that just forwards the request to the underlying
 // database.
 func (t *table) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
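
A note on the size formula added to freezerTable above: every sealed data file between tailId and headId is counted at the full maxFileSize cap, the current head file contributes only the bytes written so far (headBytes), and the on-disk size of the index file is added on top. A toy calculation with invented numbers, just to illustrate the arithmetic (not taken from the change):

package main

import "fmt"

func main() {
	// Hypothetical freezer table state: data files 0, 1 and 2 are sealed,
	// file 3 is the current head. All values are made up for illustration.
	var (
		maxFileSize uint32 = 2 * 1000 * 1000 // per-file cap on data file size
		tailId      uint32 = 0               // oldest data file
		headId      uint32 = 3               // current head data file
		headBytes   uint32 = 123456          // bytes written into the head file so far
		indexSize   uint64 = 24000           // on-disk size of the index file (stat.Size())
	)
	// Mirrors freezerTable.size(): sealed files at full cap + partial head + index.
	total := uint64(maxFileSize)*uint64(headId-tailId) + uint64(headBytes) + indexSize
	fmt.Println("freezer table size:", total, "bytes") // prints 6147456
}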