path: root/trie/hasher.go
author    Péter Szilágyi <peterke@gmail.com>  2018-02-06 00:40:32 +0800
committer Felix Lange <fjl@users.noreply.github.com>  2018-02-06 00:40:32 +0800
commit    55599ee95d4151a2502465e0afc7c47bd1acba77 (patch)
tree      4165e73ae852db4f025a5ed57f0bc499e87cb8b9 /trie/hasher.go
parent    59336283c0dbeb1d0a74ff7a8b717b2b3bb0cf40 (diff)
core, trie: intermediate mempool between trie and database (#15857)
This commit reduces database I/O by not writing every state trie to disk.
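The gist of the change visible in this file: instead of writing every hashed node straight to the backing store via db.Put, the hasher now inserts nodes into an in-memory database and records parent->child references, so whole tries can be pooled in memory and flushed in one go. Below is a minimal, self-contained Go sketch of that pooling pattern; the nodePool type, its methods, and the sha256 hashing are assumptions made for illustration, not the go-ethereum trie.Database API.

package main

import (
	"crypto/sha256"
	"fmt"
)

// nodePool sketches an intermediate memory layer between trie hashing and
// persistent storage: hashed nodes are pooled in memory together with their
// parent->child references and only written out on an explicit commit.
type nodePool struct {
	nodes    map[[32]byte][]byte     // collapsed node blob, keyed by its hash
	children map[[32]byte][][32]byte // child hashes referenced by each node
	disk     map[[32]byte][]byte     // stand-in for the persistent key/value store
}

func newNodePool() *nodePool {
	return &nodePool{
		nodes:    make(map[[32]byte][]byte),
		children: make(map[[32]byte][][32]byte),
		disk:     make(map[[32]byte][]byte),
	}
}

// insert caches a hashed node in memory instead of writing it straight to disk.
func (p *nodePool) insert(blob []byte) [32]byte {
	hash := sha256.Sum256(blob)
	p.nodes[hash] = blob
	return hash
}

// reference records a parent->child link so commit can later walk the trie.
func (p *nodePool) reference(child, parent [32]byte) {
	p.children[parent] = append(p.children[parent], child)
}

// commit flushes the node at root, and everything reachable from it, to "disk".
func (p *nodePool) commit(root [32]byte) {
	blob, ok := p.nodes[root]
	if !ok {
		return // already flushed or embedded in its parent
	}
	p.disk[root] = blob
	delete(p.nodes, root)
	for _, child := range p.children[root] {
		p.commit(child)
	}
}

func main() {
	pool := newNodePool()
	leaf := pool.insert([]byte("leaf node rlp"))
	root := pool.insert([]byte("root node rlp"))
	pool.reference(leaf, root)

	pool.commit(root)
	fmt.Printf("nodes flushed to disk: %d\n", len(pool.disk)) // 2
}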
Diffstat (limited to 'trie/hasher.go')
-rw-r--r--  trie/hasher.go  61
1 file changed, 50 insertions(+), 11 deletions(-)
diff --git a/trie/hasher.go b/trie/hasher.go
index 4719aabf6..2fc44787a 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -27,21 +27,23 @@ import (
)
type hasher struct {
- tmp *bytes.Buffer
- sha hash.Hash
- cachegen, cachelimit uint16
+ tmp *bytes.Buffer
+ sha hash.Hash
+ cachegen uint16
+ cachelimit uint16
+ onleaf LeafCallback
}
-// hashers live in a global pool.
+// hashers live in a global db.
var hasherPool = sync.Pool{
New: func() interface{} {
return &hasher{tmp: new(bytes.Buffer), sha: sha3.NewKeccak256()}
},
}
-func newHasher(cachegen, cachelimit uint16) *hasher {
+func newHasher(cachegen, cachelimit uint16, onleaf LeafCallback) *hasher {
h := hasherPool.Get().(*hasher)
- h.cachegen, h.cachelimit = cachegen, cachelimit
+ h.cachegen, h.cachelimit, h.onleaf = cachegen, cachelimit, onleaf
return h
}
@@ -51,7 +53,7 @@ func returnHasherToPool(h *hasher) {
// hash collapses a node down into a hash node, also returning a copy of the
// original node initialized with the computed hash to replace the original one.
-func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error) {
+func (h *hasher) hash(n node, db *Database, force bool) (node, node, error) {
// If we're not storing the node, just hashing, use available cached data
if hash, dirty := n.cache(); hash != nil {
if db == nil {
@@ -98,7 +100,7 @@ func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error)
// hashChildren replaces the children of a node with their hashes if the encoded
// size of the child is larger than a hash, returning the collapsed node as well
// as a replacement for the original node with the child hashes cached in.
-func (h *hasher) hashChildren(original node, db DatabaseWriter) (node, node, error) {
+func (h *hasher) hashChildren(original node, db *Database) (node, node, error) {
var err error
switch n := original.(type) {
@@ -145,7 +147,10 @@ func (h *hasher) hashChildren(original node, db DatabaseWriter) (node, node, err
}
}
-func (h *hasher) store(n node, db DatabaseWriter, force bool) (node, error) {
+// store hashes the node n and if we have a storage layer specified, it writes
+// the key/value pair to it and tracks any node->child references as well as any
+// node->external trie references.
+func (h *hasher) store(n node, db *Database, force bool) (node, error) {
// Don't store hashes or empty nodes.
if _, isHash := n.(hashNode); n == nil || isHash {
return n, nil
@@ -155,7 +160,6 @@ func (h *hasher) store(n node, db DatabaseWriter, force bool) (node, error) {
if err := rlp.Encode(h.tmp, n); err != nil {
panic("encode error: " + err.Error())
}
-
if h.tmp.Len() < 32 && !force {
return n, nil // Nodes smaller than 32 bytes are stored inside their parent
}
@@ -167,7 +171,42 @@ func (h *hasher) store(n node, db DatabaseWriter, force bool) (node, error) {
hash = hashNode(h.sha.Sum(nil))
}
if db != nil {
- return hash, db.Put(hash, h.tmp.Bytes())
+ // We are pooling the trie nodes into an intermediate memory cache
+ db.lock.Lock()
+
+ hash := common.BytesToHash(hash)
+ db.insert(hash, h.tmp.Bytes())
+
+ // Track all direct parent->child node references
+ switch n := n.(type) {
+ case *shortNode:
+ if child, ok := n.Val.(hashNode); ok {
+ db.reference(common.BytesToHash(child), hash)
+ }
+ case *fullNode:
+ for i := 0; i < 16; i++ {
+ if child, ok := n.Children[i].(hashNode); ok {
+ db.reference(common.BytesToHash(child), hash)
+ }
+ }
+ }
+ db.lock.Unlock()
+
+ // Track external references from account->storage trie
+ if h.onleaf != nil {
+ switch n := n.(type) {
+ case *shortNode:
+ if child, ok := n.Val.(valueNode); ok {
+ h.onleaf(child, hash)
+ }
+ case *fullNode:
+ for i := 0; i < 16; i++ {
+ if child, ok := n.Children[i].(valueNode); ok {
+ h.onleaf(child, hash)
+ }
+ }
+ }
+ }
}
return hash, nil
}
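The new onleaf hook is what lets a trie report its leaves to the outside world: whenever store meets a value node, it hands the leaf and the hash of the enclosing node to the callback, and the caller (e.g. the state trie) can then reference external data such as storage trie roots from that node. The following is a hedged, self-contained illustration of the callback shape; the exact signature, names, and the toy storeLeaves helper are assumptions for the example, not the actual go-ethereum code.

package main

import "fmt"

// LeafCallback mirrors the shape of the hook added in this commit: it is
// invoked once per value (leaf) node met during hashing, together with the
// hash of the node containing that leaf.
type LeafCallback func(leaf []byte, parent [32]byte) error

// storeLeaves is a toy stand-in for hasher.store: it fires the callback for
// every leaf it "stores", letting the caller track external references.
func storeLeaves(leaves map[[32]byte][]byte, onleaf LeafCallback) error {
	for parent, leaf := range leaves {
		if onleaf != nil {
			if err := onleaf(leaf, parent); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	// Pretend these are account leaves discovered while hashing a state trie.
	leaves := map[[32]byte][]byte{
		{0x01}: []byte("account A"),
		{0x02}: []byte("account B"),
	}
	// A real callback would decode each account and reference its storage
	// trie root against the containing node; here we only print the pair.
	err := storeLeaves(leaves, func(leaf []byte, parent [32]byte) error {
		fmt.Printf("leaf %q referenced from node %x...\n", leaf, parent[:4])
		return nil
	})
	if err != nil {
		fmt.Println("hashing aborted:", err)
	}
}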