author    Péter Szilágyi <peterke@gmail.com>  2018-02-11 20:57:46 +0800
committer GitHub <noreply@github.com>         2018-02-11 20:57:46 +0800
commit    7a0019c63b1297cb5c9a6fdfc4cb00fdae9b05aa (patch)
tree      98bc8333d5672cd4faa6a13c9a59f902c277955f /light
parent    5cf75a30c1ceb0ab35cd6b0532520d556996b21c (diff)
les, light: fix CHT trie retrievals (#16039)
* les, light: fix CHT trie retrievals
* les, light: minor polishes, test remote CHT retrievals
* les, light: deterministic nodeset rlp, bloombits test skeleton
* les: add an event emission to the les bloombits test
* les: drop dead tester code
Diffstat (limited to 'light')
-rw-r--r--  light/lightchain.go   |  6
-rw-r--r--  light/nodeset.go      | 27
-rw-r--r--  light/odr_util.go     |  4
-rw-r--r--  light/postprocess.go  | 23
4 files changed, 36 insertions, 24 deletions
diff --git a/light/lightchain.go b/light/lightchain.go
index bc88aeb48..181a1c2a6 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -127,7 +127,7 @@ func (self *LightChain) addTrustedCheckpoint(cp trustedCheckpoint) {
if self.odr.BloomIndexer() != nil {
self.odr.BloomIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead)
}
- log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.sectionIdx+1)*ChtFrequency-1, "hash", cp.sectionHead)
+ log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.sectionIdx+1)*CHTFrequencyClient-1, "hash", cp.sectionHead)
}
func (self *LightChain) getProcInterrupt() bool {
@@ -453,8 +453,8 @@ func (self *LightChain) SyncCht(ctx context.Context) bool {
}
headNum := self.CurrentHeader().Number.Uint64()
chtCount, _, _ := self.odr.ChtIndexer().Sections()
- if headNum+1 < chtCount*ChtFrequency {
- num := chtCount*ChtFrequency - 1
+ if headNum+1 < chtCount*CHTFrequencyClient {
+ num := chtCount*CHTFrequencyClient - 1
header, err := GetHeaderByNumber(ctx, self.odr, num)
if header != nil && err == nil {
self.mu.Lock()
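For context on the arithmetic in SyncCht above: with chtCount completed client-side sections of CHTFrequencyClient (32768) blocks each, the newest header provable through a CHT has number chtCount*CHTFrequencyClient - 1. A minimal standalone sketch of that check, with assumed example values that are not part of the patch:

package main

import "fmt"

const chtFrequencyClient = 32768 // client-side CHT section size, as in light/postprocess.go

func main() {
	chtCount := uint64(3)                            // assume three completed CHT sections
	headNum := uint64(70000)                         // assume the current local head
	latestCovered := chtCount*chtFrequencyClient - 1 // 98303
	if headNum+1 < chtCount*chtFrequencyClient {
		// the local chain lags behind the latest CHT, so a light client
		// following the same logic as SyncCht would fetch header 98303
		fmt.Println("sync target:", latestCovered)
	}
}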
diff --git a/light/nodeset.go b/light/nodeset.go
index ffdb71bb7..245b5eb76 100644
--- a/light/nodeset.go
+++ b/light/nodeset.go
@@ -29,7 +29,9 @@ import (
// NodeSet stores a set of trie nodes. It implements trie.Database and can also
// act as a cache for another trie.Database.
type NodeSet struct {
- db map[string][]byte
+ nodes map[string][]byte
+ order []string
+
dataSize int
lock sync.RWMutex
}
@@ -37,7 +39,7 @@ type NodeSet struct {
// NewNodeSet creates an empty node set
func NewNodeSet() *NodeSet {
return &NodeSet{
- db: make(map[string][]byte),
+ nodes: make(map[string][]byte),
}
}
@@ -46,10 +48,15 @@ func (db *NodeSet) Put(key []byte, value []byte) error {
db.lock.Lock()
defer db.lock.Unlock()
- if _, ok := db.db[string(key)]; !ok {
- db.db[string(key)] = common.CopyBytes(value)
- db.dataSize += len(value)
+ if _, ok := db.nodes[string(key)]; ok {
+ return nil
}
+ keystr := string(key)
+
+ db.nodes[keystr] = common.CopyBytes(value)
+ db.order = append(db.order, keystr)
+ db.dataSize += len(value)
+
return nil
}
@@ -58,7 +65,7 @@ func (db *NodeSet) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()
- if entry, ok := db.db[string(key)]; ok {
+ if entry, ok := db.nodes[string(key)]; ok {
return entry, nil
}
return nil, errors.New("not found")
@@ -75,7 +82,7 @@ func (db *NodeSet) KeyCount() int {
db.lock.RLock()
defer db.lock.RUnlock()
- return len(db.db)
+ return len(db.nodes)
}
// DataSize returns the aggregated data size of nodes in the set
@@ -92,8 +99,8 @@ func (db *NodeSet) NodeList() NodeList {
defer db.lock.RUnlock()
var values NodeList
- for _, value := range db.db {
- values = append(values, value)
+ for _, key := range db.order {
+ values = append(values, db.nodes[key])
}
return values
}
@@ -103,7 +110,7 @@ func (db *NodeSet) Store(target ethdb.Putter) {
db.lock.RLock()
defer db.lock.RUnlock()
- for key, value := range db.db {
+ for key, value := range db.nodes {
target.Put([]byte(key), value)
}
}
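The order slice added above exists because Go randomizes map iteration, so NodeList() used to serialize the same proof nodes in a different order on every call. A minimal sketch of the same insertion-order pattern, using hypothetical names rather than the actual light package types:

package main

import "fmt"

// orderedSet pairs a lookup map with a slice recording insertion order,
// so iteration (and any RLP encoding derived from it) is deterministic.
type orderedSet struct {
	nodes map[string][]byte
	order []string
}

func newOrderedSet() *orderedSet {
	return &orderedSet{nodes: make(map[string][]byte)}
}

// put stores a value once and remembers when its key first arrived.
func (s *orderedSet) put(key string, value []byte) {
	if _, ok := s.nodes[key]; ok {
		return
	}
	s.nodes[key] = append([]byte(nil), value...) // copy, like common.CopyBytes
	s.order = append(s.order, key)
}

// list returns the values in insertion order, never in map order.
func (s *orderedSet) list() [][]byte {
	out := make([][]byte, 0, len(s.order))
	for _, key := range s.order {
		out = append(out, s.nodes[key])
	}
	return out
}

func main() {
	s := newOrderedSet()
	s.put("b", []byte{2})
	s.put("a", []byte{1})
	fmt.Println(s.list()) // always [[2] [1]], regardless of map ordering
}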
diff --git a/light/odr_util.go b/light/odr_util.go
index 33a8e80ce..8f92d6442 100644
--- a/light/odr_util.go
+++ b/light/odr_util.go
@@ -52,13 +52,13 @@ func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*typ
for chtCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
chtCount--
if chtCount > 0 {
- sectionHeadNum = chtCount*ChtFrequency - 1
+ sectionHeadNum = chtCount*CHTFrequencyClient - 1
sectionHead = odr.ChtIndexer().SectionHead(chtCount - 1)
canonicalHash = core.GetCanonicalHash(db, sectionHeadNum)
}
}
}
- if number >= chtCount*ChtFrequency {
+ if number >= chtCount*CHTFrequencyClient {
return nil, ErrNoTrustedCht
}
r := &ChtRequest{ChtRoot: GetChtRoot(db, chtCount-1, sectionHead), ChtNum: chtCount - 1, BlockNum: number}
diff --git a/light/postprocess.go b/light/postprocess.go
index 160d07b17..b6756de51 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -19,7 +19,6 @@ package light
import (
"encoding/binary"
"errors"
- "fmt"
"math/big"
"time"
@@ -35,8 +34,14 @@ import (
)
const (
- ChtFrequency = 32768
- ChtV1Frequency = 4096 // as long as we want to retain LES/1 compatibility, servers generate CHTs with the old, higher frequency
+ // CHTFrequencyClient is the block frequency for creating CHTs on the client side.
+ CHTFrequencyClient = 32768
+
+ // CHTFrequencyServer is the block frequency for creating CHTs on the server side.
+ // Eventually this can be merged back with the client version, but that requires a
+ // full database upgrade, so that should be left for a suitable moment.
+ CHTFrequencyServer = 4096
+
HelperTrieConfirmations = 2048 // number of confirmations before a server is expected to have the given HelperTrie available
HelperTrieProcessConfirmations = 256 // number of confirmations before a HelperTrie is generated
)
@@ -100,7 +105,7 @@ func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) c
// GetChtV2Root reads the CHT root associated to the given section from the database
// Note that sectionIdx is specified according to LES/2 CHT section size
func GetChtV2Root(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
- return GetChtRoot(db, (sectionIdx+1)*(ChtFrequency/ChtV1Frequency)-1, sectionHead)
+ return GetChtRoot(db, (sectionIdx+1)*(CHTFrequencyClient/CHTFrequencyServer)-1, sectionHead)
}
// StoreChtRoot writes the CHT root associated to the given section into the database
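The renamed constants make the LES/1-vs-LES/2 ratio in GetChtV2Root easier to read: CHTFrequencyClient/CHTFrequencyServer is 32768/4096 = 8, so the root for client-side section idx is stored under server-side section (idx+1)*8-1, which ends at the same block. An illustrative sketch of that mapping, using an assumed helper that is not part of the patch:

package main

import "fmt"

const (
	chtFrequencyClient = 32768 // client-side CHT section size
	chtFrequencyServer = 4096  // server-side (LES/1 compatible) section size
)

// serverSectionForClient maps a client-side CHT section index to the
// server-side section whose head block coincides with it, mirroring the
// arithmetic in GetChtV2Root.
func serverSectionForClient(clientIdx uint64) uint64 {
	const ratio = chtFrequencyClient / chtFrequencyServer // 8
	return (clientIdx+1)*ratio - 1
}

func main() {
	// Client section 0 ends at block 32767, the same block that closes
	// server section 7 (blocks 28672..32767).
	fmt.Println(serverSectionForClient(0)) // 7
	fmt.Println(serverSectionForClient(1)) // 15
}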
@@ -124,10 +129,10 @@ type ChtIndexerBackend struct {
func NewChtIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer {
var sectionSize, confirmReq uint64
if clientMode {
- sectionSize = ChtFrequency
+ sectionSize = CHTFrequencyClient
confirmReq = HelperTrieConfirmations
} else {
- sectionSize = ChtV1Frequency
+ sectionSize = CHTFrequencyServer
confirmReq = HelperTrieProcessConfirmations
}
idb := ethdb.NewTable(db, "chtIndex-")
@@ -174,8 +179,8 @@ func (c *ChtIndexerBackend) Commit() error {
}
c.triedb.Commit(root, false)
- if ((c.section+1)*c.sectionSize)%ChtFrequency == 0 {
- log.Info("Storing CHT", "idx", c.section*c.sectionSize/ChtFrequency, "sectionHead", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
+ if ((c.section+1)*c.sectionSize)%CHTFrequencyClient == 0 {
+ log.Info("Storing CHT", "section", c.section*c.sectionSize/CHTFrequencyClient, "head", c.lastHash, "root", root)
}
StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
return nil
@@ -294,7 +299,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
b.triedb.Commit(root, false)
sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
- log.Info("Storing BloomTrie", "section", b.section, "sectionHead", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression ratio", float64(compSize)/float64(decompSize))
+ log.Info("Storing bloom trie", "section", b.section, "head", sectionHead, "root", root, "compression", float64(compSize)/float64(decompSize))
StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
return nil