| | | |
|---|---|---|
| author | Felix Lange <fjl@twurst.com> | 2016-09-26 02:49:02 +0800 |
| committer | Péter Szilágyi <peterke@gmail.com> | 2016-09-28 16:27:28 +0800 |
| commit | cd791bd855b55b95afc8a5c8f56b8bf67863d099 (patch) | |
| tree | 3955fe3abf4079eee5412119043ade1b7001bb61 /trie | |
| parent | 863d166c7b0250cf2e99c8aad69578cdd144d386 (diff) | |
core, trie: replace state caches with trie journal
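The "trie journal" in the commit message refers to where resolved nodes now live: the package-global ARC cache (trie/arc.go, deleted below) is dropped, and nodes loaded from the database are instead grafted back into the trie itself by the new Trie.tryGet, so later reads hit memory rather than the database. Below is a minimal, self-contained sketch of that write-back pattern; the node types here are simplified stand-ins for the package's real ones, not the actual implementation.

```go
// Sketch of the "journal into the trie" idea: a resolved node replaces the
// hashNode that referenced it, instead of being kept in a global cache.
package main

import "fmt"

type node interface{}

type hashNode []byte  // reference to a node stored in the database
type valueNode []byte // leaf payload
type shortNode struct { // simplified short node: key fragment + one child
	Key []byte
	Val node
}

// resolve stands in for Trie.resolveHash: load a node from the database.
func resolve(db map[string]node, h hashNode) (node, error) {
	if n, ok := db[string(h)]; ok {
		return n, nil
	}
	return nil, fmt.Errorf("missing node %x", h)
}

// tryGet mirrors the shape of the new Trie.tryGet: it returns the value, a
// possibly-updated copy of the subtree, and whether a database resolution
// happened, so the caller can keep the resolved subtree in the trie.
func tryGet(db map[string]node, orig node, key []byte) (value []byte, newnode node, didResolve bool, err error) {
	switch n := orig.(type) {
	case nil:
		return nil, nil, false, nil
	case valueNode:
		return n, n, false, nil
	case shortNode:
		if len(key) < len(n.Key) || string(n.Key) != string(key[:len(n.Key)]) {
			return nil, orig, false, nil // key not found in trie
		}
		value, newChild, didResolve, err := tryGet(db, n.Val, key[len(n.Key):])
		if err == nil && didResolve {
			n.Val = newChild // journal the resolved child into the copy
			return value, n, true, nil
		}
		return value, orig, didResolve, err
	case hashNode:
		child, err := resolve(db, n)
		if err != nil {
			return nil, orig, true, err
		}
		value, newnode, _, err := tryGet(db, child, key)
		return value, newnode, true, err
	default:
		panic("invalid node type")
	}
}

func main() {
	db := map[string]node{"h1": shortNode{Key: []byte("k"), Val: valueNode("v")}}
	root := node(hashNode("h1"))

	v, newroot, resolved, _ := tryGet(db, root, []byte("k"))
	if resolved {
		root = newroot // as in TryGet: retain the resolved nodes in the trie
	}
	fmt.Printf("value=%s, root now held in memory as %T\n", v, root)
}
```

With the global cache gone, ClearGlobalCache and the globalCache.Clear() calls in the sync tests disappear as well, as the diff below shows.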
Diffstat (limited to 'trie')
-rw-r--r-- | trie/arc.go | 206 |
-rw-r--r-- | trie/hasher.go | 157 |
-rw-r--r-- | trie/iterator.go | 182 |
-rw-r--r-- | trie/iterator_test.go | 51 |
-rw-r--r-- | trie/proof.go | 8 |
-rw-r--r-- | trie/secure_trie.go | 70 |
-rw-r--r-- | trie/sync_test.go | 9 |
-rw-r--r-- | trie/trie.go | 212 |
-rw-r--r-- | trie/trie_test.go | 41 |
9 files changed, 350 insertions, 586 deletions
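trie/hasher.go is the one new file: the hashing state that previously lived on each Trie moves into a shared pool, so tries no longer carry their own Keccak instance and scratch buffer. A trimmed sketch of that pooling pattern follows, with golang.org/x/crypto/sha3 standing in here for go-ethereum's crypto/sha3 package:

```go
// Trimmed sketch of the pooling pattern added in trie/hasher.go: each hasher
// bundles a scratch buffer with a Keccak-256 state and is borrowed from a
// global sync.Pool instead of living on the Trie struct.
package main

import (
	"bytes"
	"fmt"
	"hash"
	"sync"

	"golang.org/x/crypto/sha3" // stand-in for go-ethereum's crypto/sha3
)

type hasher struct {
	tmp *bytes.Buffer
	sha hash.Hash
}

// hashers live in a global pool, as in the new file.
var hasherPool = sync.Pool{
	New: func() interface{} {
		return &hasher{tmp: new(bytes.Buffer), sha: sha3.NewLegacyKeccak256()}
	},
}

func newHasher() *hasher           { return hasherPool.Get().(*hasher) }
func returnHasherToPool(h *hasher) { hasherPool.Put(h) }

func main() {
	h := newHasher()
	defer returnHasherToPool(h)

	h.sha.Reset()
	h.sha.Write([]byte("some rlp-encoded node"))
	fmt.Printf("keccak256: %x\n", h.sha.Sum(nil))
}
```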
diff --git a/trie/arc.go b/trie/arc.go deleted file mode 100644 index fc7a3259f..000000000 --- a/trie/arc.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) 2015 Hans Alexander Gugel <alexander.gugel@gmail.com> -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -// This file contains a modified version of package arc from -// https://github.com/alexanderGugel/arc -// -// It implements the ARC (Adaptive Replacement Cache) algorithm as detailed in -// https://www.usenix.org/legacy/event/fast03/tech/full_papers/megiddo/megiddo.pdf - -package trie - -import ( - "container/list" - "sync" -) - -type arc struct { - p int - c int - t1 *list.List - b1 *list.List - t2 *list.List - b2 *list.List - cache map[string]*entry - mutex sync.Mutex -} - -type entry struct { - key hashNode - value node - ll *list.List - el *list.Element -} - -// newARC returns a new Adaptive Replacement Cache with the -// given capacity. -func newARC(c int) *arc { - return &arc{ - c: c, - t1: list.New(), - b1: list.New(), - t2: list.New(), - b2: list.New(), - cache: make(map[string]*entry, c), - } -} - -// Clear clears the cache -func (a *arc) Clear() { - a.mutex.Lock() - defer a.mutex.Unlock() - a.p = 0 - a.t1 = list.New() - a.b1 = list.New() - a.t2 = list.New() - a.b2 = list.New() - a.cache = make(map[string]*entry, a.c) -} - -// Put inserts a new key-value pair into the cache. -// This optimizes future access to this entry (side effect). -func (a *arc) Put(key hashNode, value node) bool { - a.mutex.Lock() - defer a.mutex.Unlock() - ent, ok := a.cache[string(key)] - if ok != true { - ent = &entry{key: key, value: value} - a.req(ent) - a.cache[string(key)] = ent - } else { - ent.value = value - a.req(ent) - } - return ok -} - -// Get retrieves a previously via Set inserted entry. -// This optimizes future access to this entry (side effect). 
-func (a *arc) Get(key hashNode) (value node, ok bool) { - a.mutex.Lock() - defer a.mutex.Unlock() - ent, ok := a.cache[string(key)] - if ok { - a.req(ent) - return ent.value, ent.value != nil - } - return nil, false -} - -func (a *arc) req(ent *entry) { - if ent.ll == a.t1 || ent.ll == a.t2 { - // Case I - ent.setMRU(a.t2) - } else if ent.ll == a.b1 { - // Case II - // Cache Miss in t1 and t2 - - // Adaptation - var d int - if a.b1.Len() >= a.b2.Len() { - d = 1 - } else { - d = a.b2.Len() / a.b1.Len() - } - a.p = a.p + d - if a.p > a.c { - a.p = a.c - } - - a.replace(ent) - ent.setMRU(a.t2) - } else if ent.ll == a.b2 { - // Case III - // Cache Miss in t1 and t2 - - // Adaptation - var d int - if a.b2.Len() >= a.b1.Len() { - d = 1 - } else { - d = a.b1.Len() / a.b2.Len() - } - a.p = a.p - d - if a.p < 0 { - a.p = 0 - } - - a.replace(ent) - ent.setMRU(a.t2) - } else if ent.ll == nil { - // Case IV - - if a.t1.Len()+a.b1.Len() == a.c { - // Case A - if a.t1.Len() < a.c { - a.delLRU(a.b1) - a.replace(ent) - } else { - a.delLRU(a.t1) - } - } else if a.t1.Len()+a.b1.Len() < a.c { - // Case B - if a.t1.Len()+a.t2.Len()+a.b1.Len()+a.b2.Len() >= a.c { - if a.t1.Len()+a.t2.Len()+a.b1.Len()+a.b2.Len() == 2*a.c { - a.delLRU(a.b2) - } - a.replace(ent) - } - } - - ent.setMRU(a.t1) - } -} - -func (a *arc) delLRU(list *list.List) { - lru := list.Back() - list.Remove(lru) - delete(a.cache, string(lru.Value.(*entry).key)) -} - -func (a *arc) replace(ent *entry) { - if a.t1.Len() > 0 && ((a.t1.Len() > a.p) || (ent.ll == a.b2 && a.t1.Len() == a.p)) { - lru := a.t1.Back().Value.(*entry) - lru.value = nil - lru.setMRU(a.b1) - } else { - lru := a.t2.Back().Value.(*entry) - lru.value = nil - lru.setMRU(a.b2) - } -} - -func (e *entry) setLRU(list *list.List) { - e.detach() - e.ll = list - e.el = e.ll.PushBack(e) -} - -func (e *entry) setMRU(list *list.List) { - e.detach() - e.ll = list - e.el = e.ll.PushFront(e) -} - -func (e *entry) detach() { - if e.ll != nil { - e.ll.Remove(e.el) - } -} diff --git a/trie/hasher.go b/trie/hasher.go new file mode 100644 index 000000000..87e02fb85 --- /dev/null +++ b/trie/hasher.go @@ -0,0 +1,157 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package trie + +import ( + "bytes" + "hash" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto/sha3" + "github.com/ethereum/go-ethereum/rlp" +) + +type hasher struct { + tmp *bytes.Buffer + sha hash.Hash +} + +// hashers live in a global pool. 
+var hasherPool = sync.Pool{ + New: func() interface{} { + return &hasher{tmp: new(bytes.Buffer), sha: sha3.NewKeccak256()} + }, +} + +func newHasher() *hasher { + return hasherPool.Get().(*hasher) +} + +func returnHasherToPool(h *hasher) { + hasherPool.Put(h) +} + +// hash collapses a node down into a hash node, also returning a copy of the +// original node initialzied with the computed hash to replace the original one. +func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error) { + // If we're not storing the node, just hashing, use avaialble cached data + if hash, dirty := n.cache(); hash != nil && (db == nil || !dirty) { + return hash, n, nil + } + // Trie not processed yet or needs storage, walk the children + collapsed, cached, err := h.hashChildren(n, db) + if err != nil { + return hashNode{}, n, err + } + hashed, err := h.store(collapsed, db, force) + if err != nil { + return hashNode{}, n, err + } + // Cache the hash and RLP blob of the ndoe for later reuse + if hash, ok := hashed.(hashNode); ok && !force { + switch cached := cached.(type) { + case shortNode: + cached.hash = hash + if db != nil { + cached.dirty = false + } + return hashed, cached, nil + case fullNode: + cached.hash = hash + if db != nil { + cached.dirty = false + } + return hashed, cached, nil + } + } + return hashed, cached, nil +} + +// hashChildren replaces the children of a node with their hashes if the encoded +// size of the child is larger than a hash, returning the collapsed node as well +// as a replacement for the original node with the child hashes cached in. +func (h *hasher) hashChildren(original node, db DatabaseWriter) (node, node, error) { + var err error + + switch n := original.(type) { + case shortNode: + // Hash the short node's child, caching the newly hashed subtree + cached := n + cached.Key = common.CopyBytes(cached.Key) + + n.Key = compactEncode(n.Key) + if _, ok := n.Val.(valueNode); !ok { + if n.Val, cached.Val, err = h.hash(n.Val, db, false); err != nil { + return n, original, err + } + } + if n.Val == nil { + n.Val = valueNode(nil) // Ensure that nil children are encoded as empty strings. + } + return n, cached, nil + + case fullNode: + // Hash the full node's children, caching the newly hashed subtrees + cached := fullNode{dirty: n.dirty} + + for i := 0; i < 16; i++ { + if n.Children[i] != nil { + if n.Children[i], cached.Children[i], err = h.hash(n.Children[i], db, false); err != nil { + return n, original, err + } + } else { + n.Children[i] = valueNode(nil) // Ensure that nil children are encoded as empty strings. + } + } + cached.Children[16] = n.Children[16] + if n.Children[16] == nil { + n.Children[16] = valueNode(nil) + } + return n, cached, nil + + default: + // Value and hash nodes don't have children so they're left as were + return n, original, nil + } +} + +func (h *hasher) store(n node, db DatabaseWriter, force bool) (node, error) { + // Don't store hashes or empty nodes. + if _, isHash := n.(hashNode); n == nil || isHash { + return n, nil + } + // Generate the RLP encoding of the node + h.tmp.Reset() + if err := rlp.Encode(h.tmp, n); err != nil { + panic("encode error: " + err.Error()) + } + if h.tmp.Len() < 32 && !force { + return n, nil // Nodes smaller than 32 bytes are stored inside their parent + } + // Larger nodes are replaced by their hash and stored in the database. 
+ hash, _ := n.cache() + if hash == nil { + h.sha.Reset() + h.sha.Write(h.tmp.Bytes()) + hash = hashNode(h.sha.Sum(nil)) + } + if db != nil { + return hash, db.Put(hash, h.tmp.Bytes()) + } + return hash, nil +} diff --git a/trie/iterator.go b/trie/iterator.go index 88c4cee7f..8cad51aff 100644 --- a/trie/iterator.go +++ b/trie/iterator.go @@ -16,18 +16,13 @@ package trie -import ( - "bytes" - "fmt" +import "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/logger/glog" -) - -// Iterator is a key-value trie iterator to traverse the data contents. +// Iterator is a key-value trie iterator that traverses a Trie. type Iterator struct { - trie *Trie + trie *Trie + nodeIt *NodeIterator + keyBuf []byte Key []byte // Current data key on which the iterator is positioned on Value []byte // Current data value on which the iterator is positioned on @@ -35,119 +30,45 @@ type Iterator struct { // NewIterator creates a new key-value iterator. func NewIterator(trie *Trie) *Iterator { - return &Iterator{trie: trie, Key: nil} -} - -// Next moves the iterator forward with one key-value entry. -func (self *Iterator) Next() bool { - isIterStart := false - if self.Key == nil { - isIterStart = true - self.Key = make([]byte, 32) + return &Iterator{ + trie: trie, + nodeIt: NewNodeIterator(trie), + keyBuf: make([]byte, 0, 64), + Key: nil, } - - key := remTerm(compactHexDecode(self.Key)) - k := self.next(self.trie.root, key, isIterStart) - - self.Key = []byte(decodeCompact(k)) - - return len(k) > 0 } -func (self *Iterator) next(node interface{}, key []byte, isIterStart bool) []byte { - if node == nil { - return nil - } - - switch node := node.(type) { - case fullNode: - if len(key) > 0 { - k := self.next(node.Children[key[0]], key[1:], isIterStart) - if k != nil { - return append([]byte{key[0]}, k...) - } - } - - var r byte - if len(key) > 0 { - r = key[0] + 1 - } - - for i := r; i < 16; i++ { - k := self.key(node.Children[i]) - if k != nil { - return append([]byte{i}, k...) - } +// Next moves the iterator forward one key-value entry. +func (it *Iterator) Next() bool { + for it.nodeIt.Next() { + if it.nodeIt.Leaf { + it.Key = it.makeKey() + it.Value = it.nodeIt.LeafBlob + return true } - - case shortNode: - k := remTerm(node.Key) - if vnode, ok := node.Val.(valueNode); ok { - switch bytes.Compare([]byte(k), key) { - case 0: - if isIterStart { - self.Value = vnode - return k - } - case 1: - self.Value = vnode - return k - } - } else { - cnode := node.Val - - var ret []byte - skey := key[len(k):] - if bytes.HasPrefix(key, k) { - ret = self.next(cnode, skey, isIterStart) - } else if bytes.Compare(k, key[:len(k)]) > 0 { - return self.key(node) - } - - if ret != nil { - return append(k, ret...) - } - } - - case hashNode: - rn, err := self.trie.resolveHash(node, nil, nil) - if err != nil && glog.V(logger.Error) { - glog.Errorf("Unhandled trie error: %v", err) - } - return self.next(rn, key, isIterStart) } - return nil + it.Key = nil + it.Value = nil + return false } -func (self *Iterator) key(node interface{}) []byte { - switch node := node.(type) { - case shortNode: - // Leaf node - k := remTerm(node.Key) - if vnode, ok := node.Val.(valueNode); ok { - self.Value = vnode - return k - } - return append(k, self.key(node.Val)...) 
- case fullNode: - if node.Children[16] != nil { - self.Value = node.Children[16].(valueNode) - return []byte{16} - } - for i := 0; i < 16; i++ { - k := self.key(node.Children[i]) - if k != nil { - return append([]byte{byte(i)}, k...) +func (it *Iterator) makeKey() []byte { + key := it.keyBuf[:0] + for _, se := range it.nodeIt.stack { + switch node := se.node.(type) { + case fullNode: + if se.child <= 16 { + key = append(key, byte(se.child)) + } + case shortNode: + if hasTerm(node.Key) { + key = append(key, node.Key[:len(node.Key)-1]...) + } else { + key = append(key, node.Key...) } } - case hashNode: - rn, err := self.trie.resolveHash(node, nil, nil) - if err != nil && glog.V(logger.Error) { - glog.Errorf("Unhandled trie error: %v", err) - } - return self.key(rn) } - return nil + return decodeCompact(key) } // nodeIteratorState represents the iteration state at one particular node of the @@ -199,25 +120,27 @@ func (it *NodeIterator) Next() bool { // step moves the iterator to the next node of the trie. func (it *NodeIterator) step() error { - // Abort if we reached the end of the iteration if it.trie == nil { + // Abort if we reached the end of the iteration return nil } - // Initialize the iterator if we've just started, or pop off the old node otherwise if len(it.stack) == 0 { - // Always start with a collapsed root + // Initialize the iterator if we've just started. root := it.trie.Hash() - it.stack = append(it.stack, &nodeIteratorState{node: hashNode(root[:]), child: -1}) - if it.stack[0].node == nil { - return fmt.Errorf("root node missing: %x", it.trie.Hash()) + state := &nodeIteratorState{node: it.trie.root, child: -1} + if root != emptyRoot { + state.hash = root } + it.stack = append(it.stack, state) } else { + // Continue iterating at the previous node otherwise. 
it.stack = it.stack[:len(it.stack)-1] if len(it.stack) == 0 { it.trie = nil return nil } } + // Continue iteration to the next child for { parent := it.stack[len(it.stack)-1] @@ -232,7 +155,12 @@ func (it *NodeIterator) step() error { } for parent.child++; parent.child < len(node.Children); parent.child++ { if current := node.Children[parent.child]; current != nil { - it.stack = append(it.stack, &nodeIteratorState{node: current, parent: ancestor, child: -1}) + it.stack = append(it.stack, &nodeIteratorState{ + hash: common.BytesToHash(node.hash), + node: current, + parent: ancestor, + child: -1, + }) break } } @@ -242,7 +170,12 @@ func (it *NodeIterator) step() error { break } parent.child++ - it.stack = append(it.stack, &nodeIteratorState{node: node.Val, parent: ancestor, child: -1}) + it.stack = append(it.stack, &nodeIteratorState{ + hash: common.BytesToHash(node.hash), + node: node.Val, + parent: ancestor, + child: -1, + }) } else if hash, ok := parent.node.(hashNode); ok { // Hash node, resolve the hash child from the database, then the node itself if parent.child >= 0 { @@ -254,7 +187,12 @@ func (it *NodeIterator) step() error { if err != nil { return err } - it.stack = append(it.stack, &nodeIteratorState{hash: common.BytesToHash(hash), node: node, parent: ancestor, child: -1}) + it.stack = append(it.stack, &nodeIteratorState{ + hash: common.BytesToHash(hash), + node: node, + parent: ancestor, + child: -1, + }) } else { break } diff --git a/trie/iterator_test.go b/trie/iterator_test.go index dc8276116..2bcc3700e 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -34,21 +34,60 @@ func TestIterator(t *testing.T) { {"dog", "puppy"}, {"somethingveryoddindeedthis is", "myothernodedata"}, } - v := make(map[string]bool) + all := make(map[string]string) for _, val := range vals { - v[val.k] = false + all[val.k] = val.v trie.Update([]byte(val.k), []byte(val.v)) } trie.Commit() + found := make(map[string]string) it := NewIterator(trie) for it.Next() { - v[string(it.Key)] = true + found[string(it.Key)] = string(it.Value) } - for k, found := range v { - if !found { - t.Error("iterator didn't find", k) + for k, v := range all { + if found[k] != v { + t.Errorf("iterator value mismatch for %s: got %q want %q", k, found[k], v) + } + } +} + +type kv struct { + k, v []byte + t bool +} + +func TestIteratorLargeData(t *testing.T) { + trie := newEmpty() + vals := make(map[string]*kv) + + for i := byte(0); i < 255; i++ { + value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} + value2 := &kv{common.LeftPadBytes([]byte{10, i}, 32), []byte{i}, false} + trie.Update(value.k, value.v) + trie.Update(value2.k, value2.v) + vals[string(value.k)] = value + vals[string(value2.k)] = value2 + } + + it := NewIterator(trie) + for it.Next() { + vals[string(it.Key)].t = true + } + + var untouched []*kv + for _, value := range vals { + if !value.t { + untouched = append(untouched, value) + } + } + + if len(untouched) > 0 { + t.Errorf("Missed %d nodes", len(untouched)) + for _, value := range untouched { + t.Error(value) } } } diff --git a/trie/proof.go b/trie/proof.go index 5135de047..116c13a1b 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -70,15 +70,13 @@ func (t *Trie) Prove(key []byte) []rlp.RawValue { panic(fmt.Sprintf("%T: invalid node: %v", tn, tn)) } } - if t.hasher == nil { - t.hasher = newHasher() - } + hasher := newHasher() proof := make([]rlp.RawValue, 0, len(nodes)) for i, n := range nodes { // Don't bother checking for errors here since hasher panics // if encoding doesn't work 
and we're not writing to any database. - n, _, _ = t.hasher.hashChildren(n, nil) - hn, _ := t.hasher.store(n, nil, false) + n, _, _ = hasher.hashChildren(n, nil) + hn, _ := hasher.store(n, nil, false) if _, ok := hn.(hashNode); ok || i == 0 { // If the node's database encoding is a hash (or is the // root node), it becomes a proof element. diff --git a/trie/secure_trie.go b/trie/secure_trie.go index 1d027c102..efe875bc8 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -17,10 +17,7 @@ package trie import ( - "hash" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto/sha3" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" ) @@ -38,11 +35,9 @@ var secureKeyPrefix = []byte("secure-key-") // // SecureTrie is not safe for concurrent use. type SecureTrie struct { - *Trie - - hash hash.Hash + trie Trie hashKeyBuf []byte - secKeyBuf []byte + secKeyBuf [200]byte secKeyCache map[string][]byte } @@ -61,7 +56,7 @@ func NewSecure(root common.Hash, db Database) (*SecureTrie, error) { return nil, err } return &SecureTrie{ - Trie: trie, + trie: *trie, secKeyCache: make(map[string][]byte), }, nil } @@ -80,7 +75,7 @@ func (t *SecureTrie) Get(key []byte) []byte { // The value bytes must not be modified by the caller. // If a node was not found in the database, a MissingNodeError is returned. func (t *SecureTrie) TryGet(key []byte) ([]byte, error) { - return t.Trie.TryGet(t.hashKey(key)) + return t.trie.TryGet(t.hashKey(key)) } // Update associates key with value in the trie. Subsequent calls to @@ -105,7 +100,7 @@ func (t *SecureTrie) Update(key, value []byte) { // If a node was not found in the database, a MissingNodeError is returned. func (t *SecureTrie) TryUpdate(key, value []byte) error { hk := t.hashKey(key) - err := t.Trie.TryUpdate(hk, value) + err := t.trie.TryUpdate(hk, value) if err != nil { return err } @@ -125,7 +120,7 @@ func (t *SecureTrie) Delete(key []byte) { func (t *SecureTrie) TryDelete(key []byte) error { hk := t.hashKey(key) delete(t.secKeyCache, string(hk)) - return t.Trie.TryDelete(hk) + return t.trie.TryDelete(hk) } // GetKey returns the sha3 preimage of a hashed key that was @@ -134,7 +129,7 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte { if key, ok := t.secKeyCache[string(shaKey)]; ok { return key } - key, _ := t.Trie.db.Get(t.secKey(shaKey)) + key, _ := t.trie.db.Get(t.secKey(shaKey)) return key } @@ -144,7 +139,23 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte { // Committing flushes nodes from memory. Subsequent Get calls will load nodes // from the database. func (t *SecureTrie) Commit() (root common.Hash, err error) { - return t.CommitTo(t.db) + return t.CommitTo(t.trie.db) +} + +func (t *SecureTrie) Hash() common.Hash { + return t.trie.Hash() +} + +func (t *SecureTrie) Root() []byte { + return t.trie.Root() +} + +func (t *SecureTrie) Iterator() *Iterator { + return t.trie.Iterator() +} + +func (t *SecureTrie) NodeIterator() *NodeIterator { + return NewNodeIterator(&t.trie) } // CommitTo writes all nodes and the secure hash pre-images to the given database. @@ -162,27 +173,26 @@ func (t *SecureTrie) CommitTo(db DatabaseWriter) (root common.Hash, err error) { } t.secKeyCache = make(map[string][]byte) } - n, clean, err := t.hashRoot(db) - if err != nil { - return (common.Hash{}), err - } - t.root = clean - return common.BytesToHash(n.(hashNode)), nil + return t.trie.CommitTo(db) } +// secKey returns the database key for the preimage of key, as an ephemeral buffer. 
+// The caller must not hold onto the return value because it will become +// invalid on the next call to hashKey or secKey. func (t *SecureTrie) secKey(key []byte) []byte { - t.secKeyBuf = append(t.secKeyBuf[:0], secureKeyPrefix...) - t.secKeyBuf = append(t.secKeyBuf, key...) - return t.secKeyBuf + buf := append(t.secKeyBuf[:0], secureKeyPrefix...) + buf = append(buf, key...) + return buf } +// hashKey returns the hash of key as an ephemeral buffer. +// The caller must not hold onto the return value because it will become +// invalid on the next call to hashKey or secKey. func (t *SecureTrie) hashKey(key []byte) []byte { - if t.hash == nil { - t.hash = sha3.NewKeccak256() - t.hashKeyBuf = make([]byte, 32) - } - t.hash.Reset() - t.hash.Write(key) - t.hashKeyBuf = t.hash.Sum(t.hashKeyBuf[:0]) - return t.hashKeyBuf + h := newHasher() + h.sha.Reset() + h.sha.Write(key) + buf := h.sha.Sum(t.hashKeyBuf[:0]) + returnHasherToPool(h) + return buf } diff --git a/trie/sync_test.go b/trie/sync_test.go index a81f7650e..a763dc564 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -51,9 +51,6 @@ func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) { } trie.Commit() - // Remove any potentially cached data from the test trie creation - globalCache.Clear() - // Return the generated trie return db, trie, content } @@ -61,9 +58,6 @@ func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) { // checkTrieContents cross references a reconstructed trie with an expected data // content map. func checkTrieContents(t *testing.T, db Database, root []byte, content map[string][]byte) { - // Remove any potentially cached data from the trie synchronisation - globalCache.Clear() - // Check root availability and trie contents trie, err := New(common.BytesToHash(root), db) if err != nil { @@ -81,9 +75,6 @@ func checkTrieContents(t *testing.T, db Database, root []byte, content map[strin // checkTrieConsistency checks that all nodes in a trie are indeed present. func checkTrieConsistency(db Database, root common.Hash) error { - // Remove any potentially cached data from the test trie creation or previous checks - globalCache.Clear() - // Create and iterate a trie rooted in a subnode trie, err := New(root, db) if err != nil { diff --git a/trie/trie.go b/trie/trie.go index a530e7b2a..93e189e2e 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -20,22 +20,14 @@ package trie import ( "bytes" "fmt" - "hash" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/crypto/sha3" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" - "github.com/ethereum/go-ethereum/rlp" ) -const defaultCacheCapacity = 800 - var ( - // The global cache stores decoded trie nodes by hash as they get loaded. - globalCache = newARC(defaultCacheCapacity) - // This is the known root hash of an empty trie. emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") @@ -43,11 +35,6 @@ var ( emptyState = crypto.Keccak256Hash(nil) ) -// ClearGlobalCache clears the global trie cache -func ClearGlobalCache() { - globalCache.Clear() -} - // Database must be implemented by backing stores for the trie. type Database interface { DatabaseWriter @@ -72,7 +59,6 @@ type Trie struct { root node db Database originalRoot common.Hash - *hasher } // New creates a trie with an existing root node from db. 
@@ -118,32 +104,50 @@ func (t *Trie) Get(key []byte) []byte { // If a node was not found in the database, a MissingNodeError is returned. func (t *Trie) TryGet(key []byte) ([]byte, error) { key = compactHexDecode(key) - pos := 0 - tn := t.root - for pos < len(key) { - switch n := tn.(type) { - case shortNode: - if len(key)-pos < len(n.Key) || !bytes.Equal(n.Key, key[pos:pos+len(n.Key)]) { - return nil, nil - } - tn = n.Val - pos += len(n.Key) - case fullNode: - tn = n.Children[key[pos]] - pos++ - case nil: - return nil, nil - case hashNode: - var err error - tn, err = t.resolveHash(n, key[:pos], key[pos:]) - if err != nil { - return nil, err - } - default: - panic(fmt.Sprintf("%T: invalid node: %v", tn, tn)) + value, newroot, didResolve, err := t.tryGet(t.root, key, 0) + if err == nil && didResolve { + t.root = newroot + } + return value, err +} + +func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode node, didResolve bool, err error) { + switch n := (origNode).(type) { + case nil: + return nil, nil, false, nil + case valueNode: + return n, n, false, nil + case shortNode: + if len(key)-pos < len(n.Key) || !bytes.Equal(n.Key, key[pos:pos+len(n.Key)]) { + // key not found in trie + return nil, n, false, nil + } + value, newnode, didResolve, err = t.tryGet(n.Val, key, pos+len(n.Key)) + if err == nil && didResolve { + n.Val = newnode + return value, n, didResolve, err + } else { + return value, origNode, didResolve, err + } + case fullNode: + child := n.Children[key[pos]] + value, newnode, didResolve, err = t.tryGet(child, key, pos+1) + if err == nil && didResolve { + n.Children[key[pos]] = newnode + return value, n, didResolve, err + } else { + return value, origNode, didResolve, err + } + case hashNode: + child, err := t.resolveHash(n, key[:pos], key[pos:]) + if err != nil { + return nil, n, true, err } + value, newnode, _, err := t.tryGet(child, key, pos) + return value, newnode, true, err + default: + panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode)) } - return tn.(valueNode), nil } // Update associates key with value in the trie. Subsequent calls to @@ -410,9 +414,6 @@ func (t *Trie) resolve(n node, prefix, suffix []byte) (node, error) { } func (t *Trie) resolveHash(n hashNode, prefix, suffix []byte) (node, error) { - if v, ok := globalCache.Get(n); ok { - return v, nil - } enc, err := t.db.Get(n) if err != nil || enc == nil { return nil, &MissingNodeError{ @@ -424,9 +425,6 @@ func (t *Trie) resolveHash(n hashNode, prefix, suffix []byte) (node, error) { } } dec := mustDecodeNode(n, enc) - if dec != nil { - globalCache.Put(n, dec) - } return dec, nil } @@ -474,127 +472,7 @@ func (t *Trie) hashRoot(db DatabaseWriter) (node, node, error) { if t.root == nil { return hashNode(emptyRoot.Bytes()), nil, nil } - if t.hasher == nil { - t.hasher = newHasher() - } - return t.hasher.hash(t.root, db, true) -} - -type hasher struct { - tmp *bytes.Buffer - sha hash.Hash -} - -func newHasher() *hasher { - return &hasher{tmp: new(bytes.Buffer), sha: sha3.NewKeccak256()} -} - -// hash collapses a node down into a hash node, also returning a copy of the -// original node initialzied with the computed hash to replace the original one. 
-func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error) { - // If we're not storing the node, just hashing, use avaialble cached data - if hash, dirty := n.cache(); hash != nil && (db == nil || !dirty) { - return hash, n, nil - } - // Trie not processed yet or needs storage, walk the children - collapsed, cached, err := h.hashChildren(n, db) - if err != nil { - return hashNode{}, n, err - } - hashed, err := h.store(collapsed, db, force) - if err != nil { - return hashNode{}, n, err - } - // Cache the hash and RLP blob of the ndoe for later reuse - if hash, ok := hashed.(hashNode); ok && !force { - switch cached := cached.(type) { - case shortNode: - cached.hash = hash - if db != nil { - cached.dirty = false - } - return hashed, cached, nil - case fullNode: - cached.hash = hash - if db != nil { - cached.dirty = false - } - return hashed, cached, nil - } - } - return hashed, cached, nil -} - -// hashChildren replaces the children of a node with their hashes if the encoded -// size of the child is larger than a hash, returning the collapsed node as well -// as a replacement for the original node with the child hashes cached in. -func (h *hasher) hashChildren(original node, db DatabaseWriter) (node, node, error) { - var err error - - switch n := original.(type) { - case shortNode: - // Hash the short node's child, caching the newly hashed subtree - cached := n - cached.Key = common.CopyBytes(cached.Key) - - n.Key = compactEncode(n.Key) - if _, ok := n.Val.(valueNode); !ok { - if n.Val, cached.Val, err = h.hash(n.Val, db, false); err != nil { - return n, original, err - } - } - if n.Val == nil { - n.Val = valueNode(nil) // Ensure that nil children are encoded as empty strings. - } - return n, cached, nil - - case fullNode: - // Hash the full node's children, caching the newly hashed subtrees - cached := fullNode{dirty: n.dirty} - - for i := 0; i < 16; i++ { - if n.Children[i] != nil { - if n.Children[i], cached.Children[i], err = h.hash(n.Children[i], db, false); err != nil { - return n, original, err - } - } else { - n.Children[i] = valueNode(nil) // Ensure that nil children are encoded as empty strings. - } - } - cached.Children[16] = n.Children[16] - if n.Children[16] == nil { - n.Children[16] = valueNode(nil) - } - return n, cached, nil - - default: - // Value and hash nodes don't have children so they're left as were - return n, original, nil - } -} - -func (h *hasher) store(n node, db DatabaseWriter, force bool) (node, error) { - // Don't store hashes or empty nodes. - if _, isHash := n.(hashNode); n == nil || isHash { - return n, nil - } - // Generate the RLP encoding of the node - h.tmp.Reset() - if err := rlp.Encode(h.tmp, n); err != nil { - panic("encode error: " + err.Error()) - } - if h.tmp.Len() < 32 && !force { - return n, nil // Nodes smaller than 32 bytes are stored inside their parent - } - // Larger nodes are replaced by their hash and stored in the database. 
- hash, _ := n.cache() - if hash == nil { - h.sha.Reset() - h.sha.Write(h.tmp.Bytes()) - hash = hashNode(h.sha.Sum(nil)) - } - if db != nil { - return hash, db.Put(hash, h.tmp.Bytes()) - } - return hash, nil + h := newHasher() + defer returnHasherToPool(h) + return h.hash(t.root, db, true) } diff --git a/trie/trie_test.go b/trie/trie_test.go index 121ba24c1..5a3ea1be9 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -76,8 +76,6 @@ func TestMissingNode(t *testing.T) { updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf") root, _ := trie.Commit() - ClearGlobalCache() - trie, _ = New(root, db) _, err := trie.TryGet([]byte("120000")) if err != nil { @@ -109,7 +107,6 @@ func TestMissingNode(t *testing.T) { } db.Delete(common.FromHex("e1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9")) - ClearGlobalCache() trie, _ = New(root, db) _, err = trie.TryGet([]byte("120000")) @@ -362,44 +359,6 @@ func TestLargeValue(t *testing.T) { } -type kv struct { - k, v []byte - t bool -} - -func TestLargeData(t *testing.T) { - trie := newEmpty() - vals := make(map[string]*kv) - - for i := byte(0); i < 255; i++ { - value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} - value2 := &kv{common.LeftPadBytes([]byte{10, i}, 32), []byte{i}, false} - trie.Update(value.k, value.v) - trie.Update(value2.k, value2.v) - vals[string(value.k)] = value - vals[string(value2.k)] = value2 - } - - it := NewIterator(trie) - for it.Next() { - vals[string(it.Key)].t = true - } - - var untouched []*kv - for _, value := range vals { - if !value.t { - untouched = append(untouched, value) - } - } - - if len(untouched) > 0 { - t.Errorf("Missed %d nodes", len(untouched)) - for _, value := range untouched { - t.Error(value) - } - } -} - func BenchmarkGet(b *testing.B) { benchGet(b, false) } func BenchmarkGetDB(b *testing.B) { benchGet(b, true) } func BenchmarkUpdateBE(b *testing.B) { benchUpdate(b, binary.BigEndian) } |
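For reference, here is a minimal usage sketch of the rewritten key-value Iterator, which now drives NodeIterator internally and reassembles each key from the node path instead of re-walking the trie from the root for every entry. Import paths follow the files touched in this diff; the ethdb constructor is assumed from the test setup of this era.

```go
// Minimal usage sketch of the post-change key-value iterator.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// NewMemDatabase is assumed from the ethdb package of this era.
	db, _ := ethdb.NewMemDatabase()
	tr, _ := trie.New(common.Hash{}, db) // empty root -> fresh trie

	tr.Update([]byte("doe"), []byte("reindeer"))
	tr.Update([]byte("dog"), []byte("puppy"))
	tr.Commit()

	// The Iterator wraps NodeIterator and exposes Key/Value per leaf.
	it := trie.NewIterator(tr)
	for it.Next() {
		fmt.Printf("%s => %s\n", it.Key, it.Value)
	}
}
```

Note also the SecureTrie changes above: hashKey and secKey now return ephemeral buffers that become invalid on the next call, so callers must copy the result if they keep it.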