From ab27bee25a845be90bd60e774ff68d2ea1501772 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Mon, 5 Oct 2015 19:37:56 +0300
Subject: core, eth, trie: direct state trie synchronization

---
 trie/sync.go      | 233 +++++++++++++++++++++++++++++++++++++++++++++++++
 trie/sync_test.go | 257 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 490 insertions(+)
 create mode 100644 trie/sync.go
 create mode 100644 trie/sync_test.go

diff --git a/trie/sync.go b/trie/sync.go
new file mode 100644
index 000000000..65cfd6ed8
--- /dev/null
+++ b/trie/sync.go
@@ -0,0 +1,233 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
+)
+
+// request represents a scheduled or already in-flight state retrieval request.
+type request struct {
+	hash   common.Hash // Hash of the node data content to retrieve
+	data   []byte      // Data content of the node, cached until all subtrees complete
+	object *node       // Target node to populate with retrieved data (hashNode originally)
+
+	parents []*request // Parent state nodes referencing this entry (notify all upon completion)
+	depth   int        // Depth level within the trie at which the node is located (to prioritize DFS)
+	deps    int        // Number of dependencies before this node is allowed to commit
+
+	callback TrieSyncLeafCallback // Callback to invoke if a leaf node is reached on this branch
+}
+
+// SyncResult is a response entry, returning the data content of a retrieved
+// trie node along with the hash it was requested by.
+type SyncResult struct {
+	Hash common.Hash // Hash of the originally unknown trie node
+	Data []byte      // Data content of the retrieved node
+}
+
+// TrieSyncLeafCallback is a callback type invoked when a trie sync reaches a
+// leaf node. It's used by state syncing to check if the leaf node requires some
+// further data syncing.
+type TrieSyncLeafCallback func(leaf []byte, parent common.Hash) error
+
+// TrieSync is the main state trie synchronisation scheduler, which provides yet
+// unknown trie hashes to retrieve, accepts node data associated with said hashes
+// and reconstructs the trie step by step until all is done.
+type TrieSync struct {
+	database Database                 // State database for storing all the assembled node data
+	requests map[common.Hash]*request // Pending requests pertaining to a key hash
+	queue    *prque.Prque             // Priority queue with the pending requests
+}
+
+// NewTrieSync creates a new trie data download scheduler.
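+// The scheduler is driven by alternating calls to Missing, which returns the
+// hashes currently wanted, and Process, which feeds back the retrieved data.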
+func NewTrieSync(root common.Hash, database Database, callback TrieSyncLeafCallback) *TrieSync {
+	ts := &TrieSync{
+		database: database,
+		requests: make(map[common.Hash]*request),
+		queue:    prque.New(),
+	}
+	ts.AddSubTrie(root, 0, common.Hash{}, callback)
+	return ts
+}
+
+// AddSubTrie registers a new trie to the sync scheduler, rooted at the
+// designated parent.
+func (s *TrieSync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback TrieSyncLeafCallback) {
+	// Short circuit if the trie is empty
+	if root == emptyRoot {
+		return
+	}
+	// Assemble the new sub-trie sync request
+	node := node(hashNode(root.Bytes()))
+	req := &request{
+		object:   &node,
+		hash:     root,
+		depth:    depth,
+		callback: callback,
+	}
+	// If this sub-trie has a designated parent, link them together
+	if parent != (common.Hash{}) {
+		ancestor := s.requests[parent]
+		if ancestor == nil {
+			panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
+		}
+		ancestor.deps++
+		req.parents = append(req.parents, ancestor)
+	}
+	s.schedule(req)
+}
+
+// Missing retrieves the known missing nodes from the trie for retrieval. A
+// max of zero means no limit on the number of returned hashes.
+func (s *TrieSync) Missing(max int) []common.Hash {
+	requests := []common.Hash{}
+	for !s.queue.Empty() && (max == 0 || len(requests) < max) {
+		requests = append(requests, s.queue.PopItem().(common.Hash))
+	}
+	return requests
+}
+
+// Process injects a batch of retrieved trie node data, returning the index of
+// the first failing item if an error occurs.
+func (s *TrieSync) Process(results []SyncResult) (int, error) {
+	for i, item := range results {
+		// If the item was not requested, bail out
+		request := s.requests[item.Hash]
+		if request == nil {
+			return i, fmt.Errorf("not requested: %x", item.Hash)
+		}
+		// Decode the node data content and update the request
+		node, err := decodeNode(item.Data)
+		if err != nil {
+			return i, err
+		}
+		*request.object = node
+		request.data = item.Data
+
+		// Create and schedule a request for all the child nodes
+		requests, err := s.children(request)
+		if err != nil {
+			return i, err
+		}
+		if len(requests) == 0 && request.deps == 0 {
+			s.commit(request)
+			continue
+		}
+		request.deps += len(requests)
+		for _, child := range requests {
+			s.schedule(child)
+		}
+	}
+	return 0, nil
+}
+
+// schedule inserts a new state retrieval request into the fetch queue. If there
+// is already a pending request for this node, the new request will be discarded
+// and only a parent reference added to the old one.
+func (s *TrieSync) schedule(req *request) {
+	// If we're already requesting this node, add a new reference and stop
+	if old, ok := s.requests[req.hash]; ok {
+		old.parents = append(old.parents, req.parents...)
+		return
+	}
+	// Schedule the request for future retrieval
+	s.queue.Push(req.hash, float32(req.depth))
+	s.requests[req.hash] = req
+}
+
+// children retrieves all the missing children of a state trie entry for future
+// retrieval scheduling.
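+// Children already present in the local database are resolved in place, while
+// unknown hash references are turned into new retrieval requests.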
+func (s *TrieSync) children(req *request) ([]*request, error) {
+	// Gather all the children of the node, irrespective of whether they are known
+	type child struct {
+		node  *node
+		depth int
+	}
+	children := []child{}
+
+	switch node := (*req.object).(type) {
+	case shortNode:
+		children = []child{{
+			node:  &node.Val,
+			depth: req.depth + len(node.Key),
+		}}
+	case fullNode:
+		for i := 0; i < 17; i++ {
+			if node[i] != nil {
+				children = append(children, child{
+					node:  &node[i],
+					depth: req.depth + 1,
+				})
+			}
+		}
+	default:
+		panic(fmt.Sprintf("unknown node: %+v", node))
+	}
+	// Iterate over the children, and request all unknown ones
+	requests := make([]*request, 0, len(children))
+	for _, child := range children {
+		// Notify any external watcher of a new key/value node
+		if req.callback != nil {
+			if node, ok := (*child.node).(valueNode); ok {
+				if err := req.callback(node, req.hash); err != nil {
+					return nil, err
+				}
+			}
+		}
+		// If the child references another node, resolve or schedule
+		if node, ok := (*child.node).(hashNode); ok {
+			// Try to resolve the node from the local database
+			blob, _ := s.database.Get(node)
+			if local, err := decodeNode(blob); local != nil && err == nil {
+				*child.node = local
+				continue
+			}
+			// Locally unknown node, schedule for retrieval
+			requests = append(requests, &request{
+				object:   child.node,
+				hash:     common.BytesToHash(node),
+				parents:  []*request{req},
+				depth:    child.depth,
+				callback: req.callback,
+			})
+		}
+	}
+	return requests, nil
+}
+
+// commit finalizes a retrieval request and stores it into the database. If any
+// of the referencing parent requests complete due to this commit, they are also
+// committed themselves.
+func (s *TrieSync) commit(req *request) error {
+	// Write the node content to disk
+	if err := s.database.Put(req.hash[:], req.data); err != nil {
+		return err
+	}
+	delete(s.requests, req.hash)
+
+	// Check all parents for completion
+	for _, parent := range req.parents {
+		parent.deps--
+		if parent.deps == 0 {
+			if err := s.commit(parent); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/trie/sync_test.go b/trie/sync_test.go
new file mode 100644
index 000000000..9c036a3a9
--- /dev/null
+++ b/trie/sync_test.go
@@ -0,0 +1,257 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethdb"
+)
+
+// makeTestTrie creates a sample test trie to test node-wise reconstruction.
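+// The trie holds 510 one-byte values under two distinct key prefixes, so the
+// reconstruction exercises short, full and value nodes alike.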
+func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) {
+	// Create an empty trie
+	db, _ := ethdb.NewMemDatabase()
+	trie, _ := New(common.Hash{}, db)
+
+	// Fill it with some arbitrary data
+	content := make(map[string][]byte)
+	for i := byte(0); i < 255; i++ {
+		key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
+		content[string(key)] = val
+		trie.Update(key, val)
+
+		key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
+		content[string(key)] = val
+		trie.Update(key, val)
+	}
+	trie.Commit()
+
+	// Return the generated trie
+	return db, trie, content
+}
+
+// checkTrieContents cross references a reconstructed trie with an expected data
+// content map.
+func checkTrieContents(t *testing.T, db Database, root []byte, content map[string][]byte) {
+	trie, err := New(common.BytesToHash(root), db)
+	if err != nil {
+		t.Fatalf("failed to create trie at %x: %v", root, err)
+	}
+	for key, val := range content {
+		if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
+			t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
+		}
+	}
+}
+
+// Tests that an empty trie is not scheduled for syncing.
+func TestEmptyTrieSync(t *testing.T) {
+	emptyA, _ := New(common.Hash{}, nil)
+	emptyB, _ := New(emptyRoot, nil)
+
+	for i, trie := range []*Trie{emptyA, emptyB} {
+		db, _ := ethdb.NewMemDatabase()
+		if req := NewTrieSync(common.BytesToHash(trie.Root()), db, nil).Missing(1); len(req) != 0 {
+			t.Errorf("test %d: content requested for empty trie: %v", i, req)
+		}
+	}
+}
+
+// Tests that given a root hash, a trie can sync iteratively on a single thread,
+// requesting retrieval tasks and returning all of them in one go.
+func TestIterativeTrieSyncIndividual(t *testing.T) { testIterativeTrieSync(t, 1) }
+func TestIterativeTrieSyncBatched(t *testing.T)    { testIterativeTrieSync(t, 100) }
+
+func testIterativeTrieSync(t *testing.T, batch int) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	queue := append([]common.Hash{}, sched.Missing(batch)...)
+	for len(queue) > 0 {
+		results := make([]SyncResult, len(queue))
+		for i, hash := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results[i] = SyncResult{hash, data}
+		}
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = append(queue[:0], sched.Missing(batch)...)
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
+}
+
+// Tests that the trie scheduler can correctly reconstruct the state even if only
+// partial results are returned, with the rest delivered only later.
+func TestIterativeDelayedTrieSync(t *testing.T) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	queue := append([]common.Hash{}, sched.Missing(10000)...)
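+	// Repeatedly deliver partial batches, verifying that the scheduler
+	// tolerates results arriving across multiple rounds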
+	for len(queue) > 0 {
+		// Sync only half of the scheduled nodes
+		results := make([]SyncResult, len(queue)/2+1)
+		for i, hash := range queue[:len(results)] {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results[i] = SyncResult{hash, data}
+		}
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = append(queue[len(results):], sched.Missing(10000)...)
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
+}
+
+// Tests that given a root hash, a trie can sync iteratively on a single thread,
+// requesting retrieval tasks and returning all of them in one go, though in a
+// random order.
+func TestIterativeRandomTrieSyncIndividual(t *testing.T) { testIterativeRandomTrieSync(t, 1) }
+func TestIterativeRandomTrieSyncBatched(t *testing.T)    { testIterativeRandomTrieSync(t, 100) }
+
+func testIterativeRandomTrieSync(t *testing.T, batch int) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	queue := make(map[common.Hash]struct{})
+	for _, hash := range sched.Missing(batch) {
+		queue[hash] = struct{}{}
+	}
+	for len(queue) > 0 {
+		// Fetch all the queued nodes in a random order
+		results := make([]SyncResult, 0, len(queue))
+		for hash := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results = append(results, SyncResult{hash, data})
+		}
+		// Feed the retrieved results back and queue new tasks
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = make(map[common.Hash]struct{})
+		for _, hash := range sched.Missing(batch) {
+			queue[hash] = struct{}{}
+		}
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
+}
+
+// Tests that the trie scheduler can correctly reconstruct the state even if only
+// partial results are returned (and in random order), with the rest delivered
+// only later.
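+// This combines the random-order and delayed-delivery scenarios above.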
+func TestIterativeRandomDelayedTrieSync(t *testing.T) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	queue := make(map[common.Hash]struct{})
+	for _, hash := range sched.Missing(10000) {
+		queue[hash] = struct{}{}
+	}
+	for len(queue) > 0 {
+		// Sync only half of the scheduled nodes, and in random order
+		results := make([]SyncResult, 0, len(queue)/2+1)
+		for hash := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results = append(results, SyncResult{hash, data})
+
+			if len(results) >= cap(results) {
+				break
+			}
+		}
+		// Feed the retrieved results back and queue new tasks
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		for _, result := range results {
+			delete(queue, result.Hash)
+		}
+		for _, hash := range sched.Missing(10000) {
+			queue[hash] = struct{}{}
+		}
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
+}
+
+// Tests that a trie sync will not request nodes multiple times, even if they
+// are referenced from multiple locations.
+func TestDuplicateAvoidanceTrieSync(t *testing.T) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler
+	dstDb, _ := ethdb.NewMemDatabase()
+	sched := NewTrieSync(common.BytesToHash(srcTrie.Root()), dstDb, nil)
+
+	queue := append([]common.Hash{}, sched.Missing(0)...)
+	requested := make(map[common.Hash]struct{})
+
+	for len(queue) > 0 {
+		results := make([]SyncResult, len(queue))
+		for i, hash := range queue {
+			data, err := srcDb.Get(hash.Bytes())
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			if _, ok := requested[hash]; ok {
+				t.Errorf("hash %x already requested once", hash)
+			}
+			requested[hash] = struct{}{}
+
+			results[i] = SyncResult{hash, data}
+		}
+		if index, err := sched.Process(results); err != nil {
+			t.Fatalf("failed to process result #%d: %v", index, err)
+		}
+		queue = append(queue[:0], sched.Missing(0)...)
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, dstDb, srcTrie.Root(), srcData)
+}
--
cgit v1.2.3
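For reference, a minimal sketch (not part of this commit) of how a caller might
drive the scheduler outside the tests. The fetch function and the syncTrie
helper are hypothetical stand-ins for whatever transport supplies node data (in
the full protocol, a network peer answering node-data requests); the batch size
of 256 is an arbitrary illustrative choice.

// Package example is an illustrative sketch of driving a TrieSync scheduler
// to completion against an arbitrary node-data source.
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// syncTrie reconstructs the trie rooted at root into db, retrieving unknown
// node data via the supplied fetch callback.
func syncTrie(root common.Hash, db ethdb.Database, fetch func(common.Hash) ([]byte, error)) error {
	// Create the scheduler, seeding it with the trie root
	sched := trie.NewTrieSync(root, db, nil)

	// Keep requesting batches of missing nodes until none remain
	for queue := sched.Missing(256); len(queue) > 0; queue = sched.Missing(256) {
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := fetch(hash)
			if err != nil {
				return fmt.Errorf("failed to retrieve node %x: %v", hash, err)
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Feed the batch back: completed nodes are committed to db and
		// their unknown children scheduled for the next round
		if index, err := sched.Process(results); err != nil {
			return fmt.Errorf("failed to process result #%d: %v", index, err)
		}
	}
	return nil
}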