author    Péter Szilágyi <peterke@gmail.com>  2018-11-13 00:47:34 +0800
committer Péter Szilágyi <peterke@gmail.com>  2018-11-15 18:22:13 +0800
commit    434dd5bc0067cdf604d84426df9086015721dd36 (patch)
tree      279d85e32a36b8804d60c5a4b83b444514850782 /core
parent    9a000601c6c4e4f8134caedba1957ffe28d2b659 (diff)
cmd, core, eth, light, trie: add trie read caching layer
Diffstat (limited to 'core')
-rw-r--r--  core/blockchain.go      | 21
-rw-r--r--  core/state/database.go  | 14
2 files changed, 24 insertions(+), 11 deletions(-)
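For context, a minimal sketch of how the new split limits could be wired up when constructing a chain. It assumes the upstream go-ethereum import paths (the dexon fork's module path would differ) and an in-memory database purely for illustration; the 256 MB / 5 minute values mirror the defaults added by this commit.

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Throwaway in-memory database and genesis, just to have a chain to configure.
	db := ethdb.NewMemDatabase()
	gspec := &core.Genesis{Config: params.AllEthashProtocolChanges}
	gspec.MustCommit(db)

	// The old TrieNodeLimit is now split into a clean (read) and a dirty (write) allowance.
	cacheCfg := &core.CacheConfig{
		TrieCleanLimit: 256,             // MB of memory for caching clean trie nodes
		TrieDirtyLimit: 256,             // MB of dirty trie nodes held before flushing to disk
		TrieTimeLimit:  5 * time.Minute, // flush the in-memory trie after this much time
	}
	chain, err := core.NewBlockChain(db, cacheCfg, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
	if err != nil {
		panic(err)
	}
	defer chain.Stop()

	fmt.Println("chain started at block", chain.CurrentBlock().Number())
}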
diff --git a/core/blockchain.go b/core/blockchain.go
index 26ac75b8c..22f130ce6 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -68,9 +68,10 @@ const (
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
- Disabled bool // Whether to disable trie write caching (archive node)
- TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk
- TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
+ Disabled bool // Whether to disable trie write caching (archive node)
+ TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
+ TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
+ TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}
// BlockChain represents the canonical chain given a database with a genesis
@@ -140,8 +141,9 @@ type BlockChain struct {
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = &CacheConfig{
- TrieNodeLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
}
}
bodyCache, _ := lru.New(bodyCacheLimit)
@@ -156,7 +158,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
- stateCache: state.NewDatabase(db),
+ stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
@@ -393,6 +395,11 @@ func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
return state.New(root, bc.stateCache)
}
+// StateCache returns the caching database underpinning the blockchain instance.
+func (bc *BlockChain) StateCache() state.Database {
+ return bc.stateCache
+}
+
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
return bc.ResetWithGenesisBlock(bc.genesisBlock)
@@ -938,7 +945,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
// If we exceeded our memory allowance, flush matured singleton nodes to disk
var (
nodes, imgs = triedb.Size()
- limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
+ limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
)
if nodes > limit || imgs > 4*1024*1024 {
triedb.Cap(limit - ethdb.IdealBatchSize)
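To make the flush threshold above concrete, here is a tiny standalone sketch of the same arithmetic (again assuming upstream go-ethereum imports): TrieDirtyLimit is given in MB, converted to a common.StorageSize in bytes, and the cap target leaves one ideal write batch of headroom, as in the triedb.Cap call.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	// TrieDirtyLimit is configured in MB; WriteBlockWithState compares the
	// dirty node size reported by triedb.Size() against this byte limit.
	trieDirtyLimit := 256 // the default added by this commit
	limit := common.StorageSize(trieDirtyLimit) * 1024 * 1024

	fmt.Println("flush dirty nodes above:", limit)
	// Capping to limit - ethdb.IdealBatchSize leaves room for one more ideal
	// write batch before the threshold is reached again.
	fmt.Println("cap the dirty cache down to:", limit-ethdb.IdealBatchSize)
}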
diff --git a/core/state/database.go b/core/state/database.go
index c1b630991..f6ea144b9 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -72,13 +72,19 @@ type Trie interface {
}
// NewDatabase creates a backing store for state. The returned database is safe for
-// concurrent use and retains cached trie nodes in memory. The pool is an optional
-// intermediate trie-node memory pool between the low level storage layer and the
-// high level trie abstraction.
+// concurrent use and retains a few recent expanded trie nodes in memory. To keep
+// more historical state in memory, use the NewDatabaseWithCache constructor.
func NewDatabase(db ethdb.Database) Database {
+ return NewDatabaseWithCache(db, 0)
+}
+
+// NewDatabaseWithCache creates a backing store for state. The returned database
+// is safe for concurrent use and retains both a few recent expanded trie nodes in
+// memory and a lot of collapsed RLP trie nodes in a large memory cache.
+func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
csc, _ := lru.New(codeSizeCacheSize)
return &cachingDB{
- db: trie.NewDatabase(db),
+ db: trie.NewDatabaseWithCache(db, cache),
codeSizeCache: csc,
}
}
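Finally, a small sketch of the new constructor in isolation (assuming upstream go-ethereum import paths): NewDatabase(db) now simply delegates to NewDatabaseWithCache(db, 0), so the two calls below differ only in whether a large cache of collapsed RLP trie nodes is kept.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db := ethdb.NewMemDatabase() // illustrative in-memory backend

	// Equivalent to state.NewDatabaseWithCache(db, 0): only the small cache
	// of recently expanded trie nodes, no clean node cache.
	plain := state.NewDatabase(db)

	// Same backing store, but with a 256 MB cache of collapsed RLP trie nodes
	// shared by every trie opened through it.
	cached := state.NewDatabaseWithCache(db, 256)

	fmt.Printf("plain: %T, cached: %T\n", plain, cached)
}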