author    Péter Szilágyi <peterke@gmail.com>  2019-03-14 21:25:12 +0800
committer GitHub <noreply@github.com>         2019-03-14 21:25:12 +0800
commit    91eec1251c06727581063cd7e942ba913d806971
tree      e47da6be2a8b15116b773855cf06473d5b4b64ed /trie
parent    e270a753bec7e723e7909b55543a54e26210dd8a
downloadgo-tangerine-91eec1251c06727581063cd7e942ba913d806971.tar
go-tangerine-91eec1251c06727581063cd7e942ba913d806971.tar.gz
go-tangerine-91eec1251c06727581063cd7e942ba913d806971.tar.bz2
go-tangerine-91eec1251c06727581063cd7e942ba913d806971.tar.lz
go-tangerine-91eec1251c06727581063cd7e942ba913d806971.tar.xz
go-tangerine-91eec1251c06727581063cd7e942ba913d806971.tar.zst
go-tangerine-91eec1251c06727581063cd7e942ba913d806971.zip
cmd, core, eth, trie: get rid of trie cache generations (#19262)
* cmd, core, eth, trie: get rid of trie cache generations
* core, trie: get rid of remainder of cache gen boilerplate
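For code using the trie package, the visible surface of this change is small: NewSecure loses its cache-limit argument, SetCacheLimit disappears, and the deprecated Root() accessors go away in favour of Hash(). A minimal sketch of the updated call pattern, using the same in-memory backend as the tests below (the surrounding setup is illustrative, not part of this commit):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// Trie database backed by an in-memory key/value store, as in the tests.
	triedb := trie.NewDatabase(memorydb.New())

	// Before this change: trie.NewSecure(common.Hash{}, triedb, 0).
	// The cache-limit argument and SetCacheLimit are gone.
	tr, err := trie.NewSecure(common.Hash{}, triedb)
	if err != nil {
		panic(err)
	}
	tr.Update([]byte("foo"), []byte("bar"))

	// Before this change: root := tr.Root(). The deprecated accessor is
	// removed; use Hash() (or Hash().Bytes() where raw bytes are needed).
	root, err := tr.Commit(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("root: %x\n", root)
}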
Diffstat (limited to 'trie')
-rw-r--r--  trie/database.go            22
-rw-r--r--  trie/hasher.go              25
-rw-r--r--  trie/iterator.go             2
-rw-r--r--  trie/node.go                38
-rw-r--r--  trie/node_test.go           58
-rw-r--r--  trie/proof.go                4
-rw-r--r--  trie/secure_trie.go         11
-rw-r--r--  trie/secure_trie_test.go     5
-rw-r--r--  trie/sync.go                 4
-rw-r--r--  trie/sync_test.go           10
-rw-r--r--  trie/trie.go                47
-rw-r--r--  trie/trie_test.go           72
12 files changed, 52 insertions, 246 deletions
diff --git a/trie/database.go b/trie/database.go
index 73ba2e761..c853dfe51 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -154,11 +154,11 @@ func (n *cachedNode) rlp() []byte {
// obj returns the decoded and expanded trie node, either directly from the cache,
// or by regenerating it from the rlp encoded blob.
-func (n *cachedNode) obj(hash common.Hash, cachegen uint16) node {
+func (n *cachedNode) obj(hash common.Hash) node {
if node, ok := n.node.(rawNode); ok {
- return mustDecodeNode(hash[:], node, cachegen)
+ return mustDecodeNode(hash[:], node)
}
- return expandNode(hash[:], n.node, cachegen)
+ return expandNode(hash[:], n.node)
}
// childs returns all the tracked children of this node, both the implicit ones
@@ -223,16 +223,15 @@ func simplifyNode(n node) node {
// expandNode traverses the node hierarchy of a collapsed storage node and converts
// all fields and keys into expanded memory form.
-func expandNode(hash hashNode, n node, cachegen uint16) node {
+func expandNode(hash hashNode, n node) node {
switch n := n.(type) {
case *rawShortNode:
// Short nodes need key and child expansion
return &shortNode{
Key: compactToHex(n.Key),
- Val: expandNode(nil, n.Val, cachegen),
+ Val: expandNode(nil, n.Val),
flags: nodeFlag{
hash: hash,
- gen: cachegen,
},
}
@@ -241,12 +240,11 @@ func expandNode(hash hashNode, n node, cachegen uint16) node {
node := &fullNode{
flags: nodeFlag{
hash: hash,
- gen: cachegen,
},
}
for i := 0; i < len(node.Children); i++ {
if n[i] != nil {
- node.Children[i] = expandNode(nil, n[i], cachegen)
+ node.Children[i] = expandNode(nil, n[i])
}
}
return node
@@ -349,13 +347,13 @@ func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
-func (db *Database) node(hash common.Hash, cachegen uint16) node {
+func (db *Database) node(hash common.Hash) node {
// Retrieve the node from the clean cache if available
if db.cleans != nil {
if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {
memcacheCleanHitMeter.Mark(1)
memcacheCleanReadMeter.Mark(int64(len(enc)))
- return mustDecodeNode(hash[:], enc, cachegen)
+ return mustDecodeNode(hash[:], enc)
}
}
// Retrieve the node from the dirty cache if available
@@ -364,7 +362,7 @@ func (db *Database) node(hash common.Hash, cachegen uint16) node {
db.lock.RUnlock()
if dirty != nil {
- return dirty.obj(hash, cachegen)
+ return dirty.obj(hash)
}
// Content unavailable in memory, attempt to retrieve from disk
enc, err := db.diskdb.Get(hash[:])
@@ -376,7 +374,7 @@ func (db *Database) node(hash common.Hash, cachegen uint16) node {
memcacheCleanMissMeter.Mark(1)
memcacheCleanWriteMeter.Mark(int64(len(enc)))
}
- return mustDecodeNode(hash[:], enc, cachegen)
+ return mustDecodeNode(hash[:], enc)
}
// Node retrieves an encoded cached trie node from memory. If it cannot be found
diff --git a/trie/hasher.go b/trie/hasher.go
index 9d6756b6f..54f6a9de2 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -26,11 +26,9 @@ import (
)
type hasher struct {
- tmp sliceBuffer
- sha keccakState
- cachegen uint16
- cachelimit uint16
- onleaf LeafCallback
+ tmp sliceBuffer
+ sha keccakState
+ onleaf LeafCallback
}
// keccakState wraps sha3.state. In addition to the usual hash methods, it also supports
@@ -62,9 +60,9 @@ var hasherPool = sync.Pool{
},
}
-func newHasher(cachegen, cachelimit uint16, onleaf LeafCallback) *hasher {
+func newHasher(onleaf LeafCallback) *hasher {
h := hasherPool.Get().(*hasher)
- h.cachegen, h.cachelimit, h.onleaf = cachegen, cachelimit, onleaf
+ h.onleaf = onleaf
return h
}
@@ -80,14 +78,13 @@ func (h *hasher) hash(n node, db *Database, force bool) (node, node, error) {
if db == nil {
return hash, n, nil
}
- if n.canUnload(h.cachegen, h.cachelimit) {
- // Unload the node from cache. All of its subnodes will have a lower or equal
- // cache generation number.
- cacheUnloadCounter.Inc(1)
- return hash, hash, nil
- }
if !dirty {
- return hash, n, nil
+ switch n.(type) {
+ case *fullNode, *shortNode:
+ return hash, hash, nil
+ default:
+ return hash, n, nil
+ }
}
}
// Trie not processed yet or needs storage, walk the children
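The behavioural heart of the commit is the hunk above: rather than consulting cache generations via canUnload, the hasher now collapses every clean (non-dirty) full or short node down to its hash reference once it has been hashed, and relies on the trie Database caches to reload it on demand (which is why db.node lost its cachegen parameter earlier). A simplified, self-contained illustration of that decision, using toy stand-ins rather than the package's real node types:

package main

import "fmt"

// Toy stand-ins for the trie's node kinds; the real types live in the trie package.
type node interface{ kind() string }

type (
	fullNode  struct{}
	shortNode struct{}
	hashNode  []byte
)

func (fullNode) kind() string  { return "full" }
func (shortNode) kind() string { return "short" }
func (hashNode) kind() string  { return "hash" }

// collapseClean mirrors the new post-hash logic: a clean full or short node is
// replaced by its hash reference; everything else is kept as-is. Before this
// change, the replacement was gated on cachegen/cachelimit instead.
func collapseClean(n node, hash hashNode, dirty bool) node {
	if dirty {
		// Dirty nodes stay expanded until Commit writes them out.
		return n
	}
	switch n.(type) {
	case fullNode, shortNode:
		return hash
	default:
		return n
	}
}

func main() {
	h := hashNode{0xde, 0xad}
	fmt.Println(collapseClean(fullNode{}, h, false).kind()) // hash
	fmt.Println(collapseClean(shortNode{}, h, true).kind()) // short
}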
diff --git a/trie/iterator.go b/trie/iterator.go
index 77f168166..da93b2fad 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -180,7 +180,7 @@ func (it *nodeIterator) LeafBlob() []byte {
func (it *nodeIterator) LeafProof() [][]byte {
if len(it.stack) > 0 {
if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
- hasher := newHasher(0, 0, nil)
+ hasher := newHasher(nil)
defer returnHasherToPool(hasher)
proofs := make([][]byte, 0, len(it.stack))
diff --git a/trie/node.go b/trie/node.go
index 1fafb7a53..f4055e779 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -30,7 +30,6 @@ var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b
type node interface {
fstring(string) string
cache() (hashNode, bool)
- canUnload(cachegen, cachelimit uint16) bool
}
type (
@@ -71,20 +70,9 @@ func (n *shortNode) copy() *shortNode { copy := *n; return &copy }
// nodeFlag contains caching-related metadata about a node.
type nodeFlag struct {
hash hashNode // cached hash of the node (may be nil)
- gen uint16 // cache generation counter
dirty bool // whether the node has changes that must be written to the database
}
-// canUnload tells whether a node can be unloaded.
-func (n *nodeFlag) canUnload(cachegen, cachelimit uint16) bool {
- return !n.dirty && cachegen-n.gen >= cachelimit
-}
-
-func (n *fullNode) canUnload(gen, limit uint16) bool { return n.flags.canUnload(gen, limit) }
-func (n *shortNode) canUnload(gen, limit uint16) bool { return n.flags.canUnload(gen, limit) }
-func (n hashNode) canUnload(uint16, uint16) bool { return false }
-func (n valueNode) canUnload(uint16, uint16) bool { return false }
-
func (n *fullNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
func (n *shortNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
func (n hashNode) cache() (hashNode, bool) { return nil, true }
@@ -117,8 +105,8 @@ func (n valueNode) fstring(ind string) string {
return fmt.Sprintf("%x ", []byte(n))
}
-func mustDecodeNode(hash, buf []byte, cachegen uint16) node {
- n, err := decodeNode(hash, buf, cachegen)
+func mustDecodeNode(hash, buf []byte) node {
+ n, err := decodeNode(hash, buf)
if err != nil {
panic(fmt.Sprintf("node %x: %v", hash, err))
}
@@ -126,7 +114,7 @@ func mustDecodeNode(hash, buf []byte, cachegen uint16) node {
}
// decodeNode parses the RLP encoding of a trie node.
-func decodeNode(hash, buf []byte, cachegen uint16) (node, error) {
+func decodeNode(hash, buf []byte) (node, error) {
if len(buf) == 0 {
return nil, io.ErrUnexpectedEOF
}
@@ -136,22 +124,22 @@ func decodeNode(hash, buf []byte, cachegen uint16) (node, error) {
}
switch c, _ := rlp.CountValues(elems); c {
case 2:
- n, err := decodeShort(hash, elems, cachegen)
+ n, err := decodeShort(hash, elems)
return n, wrapError(err, "short")
case 17:
- n, err := decodeFull(hash, elems, cachegen)
+ n, err := decodeFull(hash, elems)
return n, wrapError(err, "full")
default:
return nil, fmt.Errorf("invalid number of list elements: %v", c)
}
}
-func decodeShort(hash, elems []byte, cachegen uint16) (node, error) {
+func decodeShort(hash, elems []byte) (node, error) {
kbuf, rest, err := rlp.SplitString(elems)
if err != nil {
return nil, err
}
- flag := nodeFlag{hash: hash, gen: cachegen}
+ flag := nodeFlag{hash: hash}
key := compactToHex(kbuf)
if hasTerm(key) {
// value node
@@ -161,17 +149,17 @@ func decodeShort(hash, elems []byte, cachegen uint16) (node, error) {
}
return &shortNode{key, append(valueNode{}, val...), flag}, nil
}
- r, _, err := decodeRef(rest, cachegen)
+ r, _, err := decodeRef(rest)
if err != nil {
return nil, wrapError(err, "val")
}
return &shortNode{key, r, flag}, nil
}
-func decodeFull(hash, elems []byte, cachegen uint16) (*fullNode, error) {
- n := &fullNode{flags: nodeFlag{hash: hash, gen: cachegen}}
+func decodeFull(hash, elems []byte) (*fullNode, error) {
+ n := &fullNode{flags: nodeFlag{hash: hash}}
for i := 0; i < 16; i++ {
- cld, rest, err := decodeRef(elems, cachegen)
+ cld, rest, err := decodeRef(elems)
if err != nil {
return n, wrapError(err, fmt.Sprintf("[%d]", i))
}
@@ -189,7 +177,7 @@ func decodeFull(hash, elems []byte, cachegen uint16) (*fullNode, error) {
const hashLen = len(common.Hash{})
-func decodeRef(buf []byte, cachegen uint16) (node, []byte, error) {
+func decodeRef(buf []byte) (node, []byte, error) {
kind, val, rest, err := rlp.Split(buf)
if err != nil {
return nil, buf, err
@@ -202,7 +190,7 @@ func decodeRef(buf []byte, cachegen uint16) (node, []byte, error) {
err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen)
return nil, buf, err
}
- n, err := decodeNode(nil, buf, cachegen)
+ n, err := decodeNode(nil, buf)
return n, rest, err
case kind == rlp.String && len(val) == 0:
// empty node
diff --git a/trie/node_test.go b/trie/node_test.go
deleted file mode 100644
index 7ad1ff9e7..000000000
--- a/trie/node_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import "testing"
-
-func TestCanUnload(t *testing.T) {
- tests := []struct {
- flag nodeFlag
- cachegen, cachelimit uint16
- want bool
- }{
- {
- flag: nodeFlag{dirty: true, gen: 0},
- want: false,
- },
- {
- flag: nodeFlag{dirty: false, gen: 0},
- cachegen: 0, cachelimit: 0,
- want: true,
- },
- {
- flag: nodeFlag{dirty: false, gen: 65534},
- cachegen: 65535, cachelimit: 1,
- want: true,
- },
- {
- flag: nodeFlag{dirty: false, gen: 65534},
- cachegen: 0, cachelimit: 1,
- want: true,
- },
- {
- flag: nodeFlag{dirty: false, gen: 1},
- cachegen: 65535, cachelimit: 1,
- want: true,
- },
- }
-
- for _, test := range tests {
- if got := test.flag.canUnload(test.cachegen, test.cachelimit); got != test.want {
- t.Errorf("%+v\n got %t, want %t", test, got, test.want)
- }
- }
-}
diff --git a/trie/proof.go b/trie/proof.go
index 0f18dd26b..26a41ed27 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -65,7 +65,7 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error {
panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
}
}
- hasher := newHasher(0, 0, nil)
+ hasher := newHasher(nil)
defer returnHasherToPool(hasher)
for i, n := range nodes {
@@ -112,7 +112,7 @@ func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.Reader) (value
if buf == nil {
return nil, i, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash)
}
- n, err := decodeNode(wantHash[:], buf, 0)
+ n, err := decodeNode(wantHash[:], buf)
if err != nil {
return nil, i, fmt.Errorf("bad proof node %d: %v", i, err)
}
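Proof generation only needed the hasher for hashing, so it now grabs a pooled hasher with newHasher(nil); the exported Prove/VerifyProof API is untouched by this commit. A small round-trip sketch against an in-memory trie (the setup outside the signatures shown above is illustrative):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	tr, err := trie.New(common.Hash{}, trie.NewDatabase(memorydb.New()))
	if err != nil {
		panic(err)
	}
	tr.Update([]byte("k"), []byte("v"))

	// Collect the proof nodes for key "k" into a plain key/value store.
	proofDb := memorydb.New()
	if err := tr.Prove([]byte("k"), 0, proofDb); err != nil {
		panic(err)
	}
	// Verify against the root hash; the middle return value reports the
	// proof node index, per the VerifyProof signature in the hunk above.
	val, _, err := trie.VerifyProof(tr.Hash(), []byte("k"), proofDb)
	if err != nil {
		panic(err)
	}
	fmt.Printf("proved value: %s\n", val)
}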
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 6a50cfd5a..fbc591ed1 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -51,7 +51,7 @@ type SecureTrie struct {
// Loaded nodes are kept around until their 'cache generation' expires.
// A new cache generation is created by each call to Commit.
// cachelimit sets the number of past cache generations to keep.
-func NewSecure(root common.Hash, db *Database, cachelimit uint16) (*SecureTrie, error) {
+func NewSecure(root common.Hash, db *Database) (*SecureTrie, error) {
if db == nil {
panic("trie.NewSecure called without a database")
}
@@ -59,7 +59,6 @@ func NewSecure(root common.Hash, db *Database, cachelimit uint16) (*SecureTrie,
if err != nil {
return nil, err
}
- trie.SetCacheLimit(cachelimit)
return &SecureTrie{trie: *trie}, nil
}
@@ -161,12 +160,6 @@ func (t *SecureTrie) Hash() common.Hash {
return t.trie.Hash()
}
-// Root returns the root hash of SecureTrie.
-// Deprecated: use Hash instead.
-func (t *SecureTrie) Root() []byte {
- return t.trie.Root()
-}
-
// Copy returns a copy of SecureTrie.
func (t *SecureTrie) Copy() *SecureTrie {
cpy := *t
@@ -183,7 +176,7 @@ func (t *SecureTrie) NodeIterator(start []byte) NodeIterator {
// The caller must not hold onto the return value because it will become
// invalid on the next call to hashKey or secKey.
func (t *SecureTrie) hashKey(key []byte) []byte {
- h := newHasher(0, 0, nil)
+ h := newHasher(nil)
h.sha.Reset()
h.sha.Write(key)
buf := h.sha.Sum(t.hashKeyBuf[:0])
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index f0ca6c800..fb6c38ee2 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -28,7 +28,7 @@ import (
)
func newEmptySecure() *SecureTrie {
- trie, _ := NewSecure(common.Hash{}, NewDatabase(memorydb.New()), 0)
+ trie, _ := NewSecure(common.Hash{}, NewDatabase(memorydb.New()))
return trie
}
@@ -36,8 +36,7 @@ func newEmptySecure() *SecureTrie {
func makeTestSecureTrie() (*Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
triedb := NewDatabase(memorydb.New())
-
- trie, _ := NewSecure(common.Hash{}, triedb, 0)
+ trie, _ := NewSecure(common.Hash{}, triedb)
// Fill it with some arbitrary data
content := make(map[string][]byte)
diff --git a/trie/sync.go b/trie/sync.go
index ef931f633..85f1b0f85 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -101,7 +101,7 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb
}
key := root.Bytes()
blob, _ := s.database.Get(key)
- if local, err := decodeNode(key, blob, 0); local != nil && err == nil {
+ if local, err := decodeNode(key, blob); local != nil && err == nil {
return
}
// Assemble the new sub-trie sync request
@@ -187,7 +187,7 @@ func (s *Sync) Process(results []SyncResult) (bool, int, error) {
continue
}
// Decode the node data content and update the request
- node, err := decodeNode(item.Hash[:], item.Data, 0)
+ node, err := decodeNode(item.Hash[:], item.Data)
if err != nil {
return committed, i, err
}
diff --git a/trie/sync_test.go b/trie/sync_test.go
index d80070f3e..0d8c29cfe 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -133,7 +133,7 @@ func testIterativeSync(t *testing.T, batch int) {
queue = append(queue[:0], sched.Missing(batch)...)
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Root(), srcData)
+ checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
@@ -167,7 +167,7 @@ func TestIterativeDelayedSync(t *testing.T) {
queue = append(queue[len(results):], sched.Missing(10000)...)
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Root(), srcData)
+ checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}
// Tests that given a root hash, a trie can sync iteratively on a single thread,
@@ -212,7 +212,7 @@ func testIterativeRandomSync(t *testing.T, batch int) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Root(), srcData)
+ checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
@@ -259,7 +259,7 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Root(), srcData)
+ checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}
// Tests that a trie sync will not request nodes multiple times, even if they
@@ -299,7 +299,7 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
queue = append(queue[:0], sched.Missing(0)...)
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Root(), srcData)
+ checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
}
// Tests that at any point in time during a sync, only complete sub-tries are in
diff --git a/trie/trie.go b/trie/trie.go
index af424d4ac..920e331fd 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/metrics"
)
var (
@@ -35,25 +34,6 @@ var (
emptyState = crypto.Keccak256Hash(nil)
)
-var (
- cacheMissCounter = metrics.NewRegisteredCounter("trie/cachemiss", nil)
- cacheUnloadCounter = metrics.NewRegisteredCounter("trie/cacheunload", nil)
-)
-
-// CacheMisses retrieves a global counter measuring the number of cache misses
-// the trie had since process startup. This isn't useful for anything apart from
-// trie debugging purposes.
-func CacheMisses() int64 {
- return cacheMissCounter.Count()
-}
-
-// CacheUnloads retrieves a global counter measuring the number of cache unloads
-// the trie did since process startup. This isn't useful for anything apart from
-// trie debugging purposes.
-func CacheUnloads() int64 {
- return cacheUnloadCounter.Count()
-}
-
// LeafCallback is a callback type invoked when a trie operation reaches a leaf
// node. It's used by state sync and commit to allow handling external references
// between account and storage tries.
@@ -67,23 +47,11 @@ type LeafCallback func(leaf []byte, parent common.Hash) error
type Trie struct {
db *Database
root node
-
- // Cache generation values.
- // cachegen increases by one with each commit operation.
- // new nodes are tagged with the current generation and unloaded
- // when their generation is older than than cachegen-cachelimit.
- cachegen, cachelimit uint16
-}
-
-// SetCacheLimit sets the number of 'cache generations' to keep.
-// A cache generation is created by a call to Commit.
-func (t *Trie) SetCacheLimit(l uint16) {
- t.cachelimit = l
}
// newFlag returns the cache flag value for a newly created node.
func (t *Trie) newFlag() nodeFlag {
- return nodeFlag{dirty: true, gen: t.cachegen}
+ return nodeFlag{dirty: true}
}
// New creates a trie with an existing root node from db.
@@ -152,14 +120,12 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode
if err == nil && didResolve {
n = n.copy()
n.Val = newnode
- n.flags.gen = t.cachegen
}
return value, n, didResolve, err
case *fullNode:
value, newnode, didResolve, err = t.tryGet(n.Children[key[pos]], key, pos+1)
if err == nil && didResolve {
n = n.copy()
- n.flags.gen = t.cachegen
n.Children[key[pos]] = newnode
}
return value, n, didResolve, err
@@ -428,19 +394,13 @@ func (t *Trie) resolve(n node, prefix []byte) (node, error) {
}
func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
- cacheMissCounter.Inc(1)
-
hash := common.BytesToHash(n)
- if node := t.db.node(hash, t.cachegen); node != nil {
+ if node := t.db.node(hash); node != nil {
return node, nil
}
return nil, &MissingNodeError{NodeHash: hash, Path: prefix}
}
-// Root returns the root hash of the trie.
-// Deprecated: use Hash instead.
-func (t *Trie) Root() []byte { return t.Hash().Bytes() }
-
// Hash returns the root hash of the trie. It does not write to the
// database and can be used even if the trie doesn't have one.
func (t *Trie) Hash() common.Hash {
@@ -460,7 +420,6 @@ func (t *Trie) Commit(onleaf LeafCallback) (root common.Hash, err error) {
return common.Hash{}, err
}
t.root = cached
- t.cachegen++
return common.BytesToHash(hash.(hashNode)), nil
}
@@ -468,7 +427,7 @@ func (t *Trie) hashRoot(db *Database, onleaf LeafCallback) (node, node, error) {
if t.root == nil {
return hashNode(emptyRoot.Bytes()), nil, nil
}
- h := newHasher(t.cachegen, t.cachelimit, onleaf)
+ h := newHasher(onleaf)
defer returnHasherToPool(h)
return h.hash(t.root, db, true)
}
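With the generation counters, SetCacheLimit and the Root()/CacheMisses()/CacheUnloads() helpers removed from trie.go, committing and reopening a trie reduces to the flow below, essentially what the deleted TestCacheUnload exercised minus the cache-limit tuning; cache behaviour is now tracked by the Database-level meters visible in the database.go hunk. A sketch under the same assumptions as the earlier examples:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	triedb := trie.NewDatabase(memorydb.New())

	tr, _ := trie.New(common.Hash{}, triedb)
	tr.Update([]byte("some key"), []byte("some value"))

	// Commit the in-memory nodes into the trie database, then flush them to
	// the backing key/value store. No cache generation is bumped any more;
	// unloading is handled by the Database caches.
	root, err := tr.Commit(nil)
	if err != nil {
		panic(err)
	}
	if err := triedb.Commit(root, true); err != nil {
		panic(err)
	}

	// Reopen the trie from its root; nodes are resolved on demand through
	// the trie database (db.node no longer takes a cachegen argument).
	reopened, err := trie.New(root, triedb)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", reopened.Get([]byte("some key")))
}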
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 1c874370c..ea0b3cbdd 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -19,7 +19,6 @@ package trie
import (
"bytes"
"encoding/binary"
- "errors"
"fmt"
"io/ioutil"
"math/big"
@@ -328,38 +327,6 @@ func (db *countingDB) Get(key []byte) ([]byte, error) {
return db.KeyValueStore.Get(key)
}
-// TestCacheUnload checks that decoded nodes are unloaded after a
-// certain number of commit operations.
-func TestCacheUnload(t *testing.T) {
- // Create test trie with two branches.
- trie := newEmpty()
- key1 := "---------------------------------"
- key2 := "---some other branch"
- updateString(trie, key1, "this is the branch of key1.")
- updateString(trie, key2, "this is the branch of key2.")
-
- root, _ := trie.Commit(nil)
- trie.db.Commit(root, true)
-
- // Commit the trie repeatedly and access key1.
- // The branch containing it is loaded from DB exactly two times:
- // in the 0th and 6th iteration.
- diskdb := &countingDB{KeyValueStore: trie.db.diskdb, gets: make(map[string]int)}
- triedb := NewDatabase(diskdb)
- trie, _ = New(root, triedb)
- trie.SetCacheLimit(5)
- for i := 0; i < 12; i++ {
- getString(trie, key1)
- trie.Commit(nil)
- }
- // Check that it got loaded two times.
- for dbkey, count := range diskdb.gets {
- if count != 2 {
- t.Errorf("db key %x loaded %d times, want %d times", []byte(dbkey), count, 2)
- }
- }
-}
-
// randTest performs random trie operations.
// Instances of this test are created by Generate.
type randTest []randTestStep
@@ -379,7 +346,6 @@ const (
opHash
opReset
opItercheckhash
- opCheckCacheInvariant
opMax // boundary value, not an actual op
)
@@ -458,8 +424,6 @@ func runRandTest(rt randTest) bool {
if tr.Hash() != checktr.Hash() {
rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash")
}
- case opCheckCacheInvariant:
- rt[i].err = checkCacheInvariant(tr.root, nil, tr.cachegen, false, 0)
}
// Abort the test on error.
if rt[i].err != nil {
@@ -469,40 +433,6 @@ func runRandTest(rt randTest) bool {
return true
}
-func checkCacheInvariant(n, parent node, parentCachegen uint16, parentDirty bool, depth int) error {
- var children []node
- var flag nodeFlag
- switch n := n.(type) {
- case *shortNode:
- flag = n.flags
- children = []node{n.Val}
- case *fullNode:
- flag = n.flags
- children = n.Children[:]
- default:
- return nil
- }
-
- errorf := func(format string, args ...interface{}) error {
- msg := fmt.Sprintf(format, args...)
- msg += fmt.Sprintf("\nat depth %d node %s", depth, spew.Sdump(n))
- msg += fmt.Sprintf("parent: %s", spew.Sdump(parent))
- return errors.New(msg)
- }
- if flag.gen > parentCachegen {
- return errorf("cache invariant violation: %d > %d\n", flag.gen, parentCachegen)
- }
- if depth > 0 && !parentDirty && flag.dirty {
- return errorf("cache invariant violation: %d > %d\n", flag.gen, parentCachegen)
- }
- for _, child := range children {
- if err := checkCacheInvariant(child, n, flag.gen, flag.dirty, depth+1); err != nil {
- return err
- }
- }
- return nil
-}
-
func TestRandom(t *testing.T) {
if err := quick.Check(runRandTest, nil); err != nil {
if cerr, ok := err.(*quick.CheckError); ok {
@@ -626,6 +556,6 @@ func TestDecodeNode(t *testing.T) {
for i := 0; i < 5000000; i++ {
rand.Read(hash)
rand.Read(elems)
- decodeNode(hash, elems, 1)
+ decodeNode(hash, elems)
}
}