about summary refs log tree commit diff stats
path: root/trie
diff options
context:
space:
mode:
author gary rong <garyrong0905@gmail.com> 2019-04-25 22:59:48 +0800
committer Péter Szilágyi <peterke@gmail.com> 2019-05-16 15:39:32 +0800
commit 80469bea0cc6dbfae749d944094a7c2357dc050d (patch)
tree fb70ce428df4a6b5b49dbd83cacca388555d51b5 /trie
parent b6cac42e9ffc0b19a1e70416db85593f1cb0d30c (diff)
download go-tangerine-80469bea0cc6dbfae749d944094a7c2357dc050d.tar
go-tangerine-80469bea0cc6dbfae749d944094a7c2357dc050d.tar.gz
go-tangerine-80469bea0cc6dbfae749d944094a7c2357dc050d.tar.bz2
go-tangerine-80469bea0cc6dbfae749d944094a7c2357dc050d.tar.lz
go-tangerine-80469bea0cc6dbfae749d944094a7c2357dc050d.tar.xz
go-tangerine-80469bea0cc6dbfae749d944094a7c2357dc050d.tar.zst
go-tangerine-80469bea0cc6dbfae749d944094a7c2357dc050d.zip
all: integrate the freezer with fast sync
* all: freezer style syncing
* core, eth, les, light: clean up freezer relative APIs
* core, eth, les, trie, ethdb, light: clean a bit
* core, eth, les, light: add unit tests
* core, light: rewrite setHead function
* core, eth: fix downloader unit tests
* core: add receipt chain insertion test
* core: use constant instead of hardcoding table name
* core: fix rollback
* core: fix setHead
* core/rawdb: remove canonical block first and then iterate side chain
* core/rawdb, ethdb: add hasAncient interface
* eth/downloader: calculate ancient limit via cht first
* core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
Diffstat (limited to 'trie')
-rw-r--r--  trie/database.go  2
-rw-r--r--  trie/proof.go     8
-rw-r--r--  trie/sync.go      6
3 files changed, 7 insertions, 9 deletions
diff --git a/trie/database.go b/trie/database.go
index 49a696bef..d8a0fa9c5 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -321,7 +321,7 @@ func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int) *Database {
}
// DiskDB retrieves the persistent storage backing the trie database.
-func (db *Database) DiskDB() ethdb.Reader {
+func (db *Database) DiskDB() ethdb.KeyValueReader {
return db.diskdb
}
diff --git a/trie/proof.go b/trie/proof.go
index 3c4b8d653..9985e730d 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -33,7 +33,7 @@ import (
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
-func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error {
+func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
// Collect all nodes on the path to key.
key = keybytesToHex(key)
var nodes []node
@@ -96,16 +96,14 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error {
// If the trie does not contain a value for key, the returned proof contains all
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
-func (t *SecureTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.Writer) error {
+func (t *SecureTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
return t.trie.Prove(key, fromLevel, proofDb)
}
// VerifyProof checks merkle proofs. The given proof must contain the value for
// key in a trie with the given root hash. VerifyProof returns an error if the
// proof contains invalid trie nodes or the wrong value.
-//
-// Note, the method assumes that all key-values in proofDb satisfy key = hash(value).
-func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.Reader) (value []byte, nodes int, err error) {
+func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, nodes int, err error) {
key = keybytesToHex(key)
wantHash := rootHash
for i := 0; ; i++ {
diff --git a/trie/sync.go b/trie/sync.go
index d9564d783..6f40b45a1 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -72,7 +72,7 @@ func newSyncMemBatch() *syncMemBatch {
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
- database ethdb.Reader // Persistent database to check for existing entries
+ database ethdb.KeyValueReader // Persistent database to check for existing entries
membatch *syncMemBatch // Memory buffer to avoid frequent database writes
requests map[common.Hash]*request // Pending requests pertaining to a key hash
queue *prque.Prque // Priority queue with the pending requests
@@ -80,7 +80,7 @@ type Sync struct {
}
// NewSync creates a new trie data download scheduler.
-func NewSync(root common.Hash, database ethdb.Reader, callback LeafCallback, bloom *SyncBloom) *Sync {
+func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom) *Sync {
ts := &Sync{
database: database,
membatch: newSyncMemBatch(),
@@ -224,7 +224,7 @@ func (s *Sync) Process(results []SyncResult) (bool, int, error) {
// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning the number of items written and any occurred error.
-func (s *Sync) Commit(dbw ethdb.Writer) (int, error) {
+func (s *Sync) Commit(dbw ethdb.KeyValueWriter) (int, error) {
// Dump the membatch into a database dbw
for i, key := range s.membatch.order {
if err := dbw.Put(key[:], s.membatch.batch[key]); err != nil {