-rw-r--r--  cmd/geth/main.go                    7
-rw-r--r--  cmd/geth/usage.go                   1
-rw-r--r--  cmd/utils/flags.go                  7
-rw-r--r--  eth/api_backend.go                  4
-rw-r--r--  eth/config.go                       3
-rw-r--r--  graphql/graphql.go                  8
-rw-r--r--  internal/ethapi/api.go             20
-rw-r--r--  internal/ethapi/backend.go          1
-rw-r--r--  les/api_backend.go                  4
-rw-r--r--  les/backend.go                      4
-rw-r--r--  les/benchmark.go                    3
-rw-r--r--  les/commons.go                     14
-rw-r--r--  les/costtracker.go                  9
-rw-r--r--  les/handler.go                    191
-rw-r--r--  les/handler_test.go                99
-rw-r--r--  les/odr.go                          2
-rw-r--r--  les/odr_requests.go               198
-rw-r--r--  les/odr_test.go                     8
-rw-r--r--  les/peer.go                        73
-rw-r--r--  les/protocol.go                    19
-rw-r--r--  les/request_test.go                 8
-rw-r--r--  les/server.go                      16
-rw-r--r--  light/odr_test.go                   8
-rw-r--r--  light/postprocess.go               23
-rw-r--r--  p2p/discover/table.go               2
-rw-r--r--  params/network_params.go            9
-rw-r--r--  signer/core/signed_data.go          6
-rw-r--r--  signer/core/signed_data_test.go    34
-rw-r--r--  swarm/version/version.go            2
29 files changed, 196 insertions(+), 587 deletions(-)
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 0e597f030..4f3849a41 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -57,7 +57,6 @@ var (
utils.IdentityFlag,
utils.UnlockedAccountFlag,
utils.PasswordFileFlag,
- utils.InsecureUnlockAllowedFlag,
utils.BootnodesFlag,
utils.BootnodesV4Flag,
utils.BootnodesV5Flag,
@@ -136,8 +135,6 @@ var (
utils.VMEnableDebugFlag,
utils.NetworkIdFlag,
utils.ConstantinopleOverrideFlag,
- utils.RPCCORSDomainFlag,
- utils.RPCVirtualHostsFlag,
utils.EthStatsURLFlag,
utils.FakePoWFlag,
utils.NoCompactionFlag,
@@ -152,6 +149,8 @@ var (
utils.RPCEnabledFlag,
utils.RPCListenAddrFlag,
utils.RPCPortFlag,
+ utils.RPCCORSDomainFlag,
+ utils.RPCVirtualHostsFlag,
utils.GraphQLEnabledFlag,
utils.GraphQLListenAddrFlag,
utils.GraphQLPortFlag,
@@ -165,6 +164,8 @@ var (
utils.WSAllowedOriginsFlag,
utils.IPCDisabledFlag,
utils.IPCPathFlag,
+ utils.InsecureUnlockAllowedFlag,
+ utils.RPCGlobalGasCap,
}
whisperFlags = []cli.Flag{
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 6d039ba04..7ec1ab03f 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -158,6 +158,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.RPCListenAddrFlag,
utils.RPCPortFlag,
utils.RPCApiFlag,
+ utils.RPCGlobalGasCap,
utils.WSEnabledFlag,
utils.WSListenAddrFlag,
utils.WSPortFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index f6e428869..f5f4cde5b 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -448,6 +448,10 @@ var (
Name: "allow-insecure-unlock",
Usage: "Allow insecure account unlocking when account-related RPCs are exposed by http",
}
+ RPCGlobalGasCap = cli.Uint64Flag{
+ Name: "rpc.gascap",
+ Usage: "Sets a cap on gas that can be used in eth_call/estimateGas",
+ }
// Logging and debug settings
EthStatsURLFlag = cli.StringFlag{
Name: "ethstats",
@@ -1400,6 +1404,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(EVMInterpreterFlag.Name) {
cfg.EVMInterpreter = ctx.GlobalString(EVMInterpreterFlag.Name)
}
+ if ctx.GlobalIsSet(RPCGlobalGasCap.Name) {
+ cfg.RPCGasCap = new(big.Int).SetUint64(ctx.GlobalUint64(RPCGlobalGasCap.Name))
+ }
// Override any default configs for hard coded networks.
switch {
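
As a usage sketch (nothing below is in the patch itself): the cap is strictly opt-in. If --rpc.gascap is never given, cfg.RPCGasCap stays nil and the call sites added in internal/ethapi skip the clamp entirely. The equivalent programmatic wiring, assuming the standard go-ethereum import path and the existing eth.DefaultConfig:

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/eth"
)

func main() {
	cfg := eth.DefaultConfig                         // value copy of the stock config
	cfg.RPCGasCap = new(big.Int).SetUint64(25000000) // same effect as --rpc.gascap 25000000
	_ = cfg
}
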
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 29ce19e28..00424caed 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -218,6 +218,10 @@ func (b *EthAPIBackend) ExtRPCEnabled() bool {
return b.extRPCEnabled
}
+func (b *EthAPIBackend) RPCGasCap() *big.Int {
+ return b.eth.config.RPCGasCap
+}
+
func (b *EthAPIBackend) BloomStatus() (uint64, uint64) {
sections, _, _ := b.eth.bloomIndexer.Sections()
return params.BloomBitsBlocks, sections
diff --git a/eth/config.go b/eth/config.go
index a98e69053..d97ae3070 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -151,6 +151,9 @@ type Config struct {
// Constantinople block override (TODO: remove after the fork)
ConstantinopleOverride *big.Int
+
+ // RPCGasCap is the global gas cap for eth-call variants.
+ RPCGasCap *big.Int `toml:",omitempty"`
}
type configMarshaling struct {
diff --git a/graphql/graphql.go b/graphql/graphql.go
index b3bcbd8a4..d22a3afb6 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -856,7 +856,7 @@ func (b *Block) Call(ctx context.Context, args struct {
}
}
- result, gas, failed, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.num, vm.Config{}, 5*time.Second)
+ result, gas, failed, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.num, vm.Config{}, 5*time.Second, b.backend.RPCGasCap())
status := hexutil.Uint64(1)
if failed {
status = 0
@@ -883,7 +883,7 @@ func (b *Block) EstimateGas(ctx context.Context, args struct {
}
}
- gas, err := ethapi.DoEstimateGas(ctx, b.backend, args.Data, *b.num)
+ gas, err := ethapi.DoEstimateGas(ctx, b.backend, args.Data, *b.num, b.backend.RPCGasCap())
return gas, err
}
@@ -927,7 +927,7 @@ func (p *Pending) Account(ctx context.Context, args struct {
func (p *Pending) Call(ctx context.Context, args struct {
Data ethapi.CallArgs
}) (*CallResult, error) {
- result, gas, failed, err := ethapi.DoCall(ctx, p.backend, args.Data, rpc.PendingBlockNumber, vm.Config{}, 5*time.Second)
+ result, gas, failed, err := ethapi.DoCall(ctx, p.backend, args.Data, rpc.PendingBlockNumber, vm.Config{}, 5*time.Second, p.backend.RPCGasCap())
status := hexutil.Uint64(1)
if failed {
status = 0
@@ -942,7 +942,7 @@ func (p *Pending) Call(ctx context.Context, args struct {
func (p *Pending) EstimateGas(ctx context.Context, args struct {
Data ethapi.CallArgs
}) (hexutil.Uint64, error) {
- return ethapi.DoEstimateGas(ctx, p.backend, args.Data, rpc.PendingBlockNumber)
+ return ethapi.DoEstimateGas(ctx, p.backend, args.Data, rpc.PendingBlockNumber, p.backend.RPCGasCap())
}
// Resolver is the top-level object in the GraphQL hierarchy.
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index acb3e7075..473026606 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -718,7 +718,7 @@ type CallArgs struct {
Data *hexutil.Bytes `json:"data"`
}
-func DoCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config, timeout time.Duration) ([]byte, uint64, bool, error) {
+func DoCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
state, header, err := b.StateAndHeaderByNumber(ctx, blockNr)
@@ -741,6 +741,10 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumb
if args.Gas != nil {
gas = uint64(*args.Gas)
}
+ if globalGasCap != nil && globalGasCap.Uint64() < gas {
+ log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
+ gas = globalGasCap.Uint64()
+ }
gasPrice := new(big.Int).SetUint64(defaultGasPrice)
if args.GasPrice != nil {
gasPrice = args.GasPrice.ToInt()
@@ -796,11 +800,11 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumb
// Call executes the given transaction on the state for the given block number.
// It doesn't make any changes in the state/blockchain and is useful to execute and retrieve values.
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) {
- result, _, _, err := DoCall(ctx, s.b, args, blockNr, vm.Config{}, 5*time.Second)
+ result, _, _, err := DoCall(ctx, s.b, args, blockNr, vm.Config{}, 5*time.Second, s.b.RPCGasCap())
return (hexutil.Bytes)(result), err
}
-func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Uint64, error) {
+func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumber, gasCap *big.Int) (hexutil.Uint64, error) {
// Binary search the gas requirement, as it may be higher than the amount used
var (
lo uint64 = params.TxGas - 1
@@ -817,13 +821,17 @@ func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNr rpc.Bl
}
hi = block.GasLimit()
}
+ if gasCap != nil && hi > gasCap.Uint64() {
+ log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap)
+ hi = gasCap.Uint64()
+ }
cap = hi
// Create a helper to check if a gas allowance results in an executable transaction
executable := func(gas uint64) bool {
args.Gas = (*hexutil.Uint64)(&gas)
- _, _, failed, err := DoCall(ctx, b, args, rpc.PendingBlockNumber, vm.Config{}, 0)
+ _, _, failed, err := DoCall(ctx, b, args, rpc.PendingBlockNumber, vm.Config{}, 0, gasCap)
if err != nil || failed {
return false
}
@@ -841,7 +849,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNr rpc.Bl
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
if !executable(hi) {
- return 0, fmt.Errorf("gas required exceeds allowance or always failing transaction")
+ return 0, fmt.Errorf("gas required exceeds allowance (%d) or always failing transaction", cap)
}
}
return hexutil.Uint64(hi), nil
@@ -850,7 +858,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNr rpc.Bl
// EstimateGas returns an estimate of the amount of gas needed to execute the
// given transaction against the current pending block.
func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (hexutil.Uint64, error) {
- return DoEstimateGas(ctx, s.b, args, rpc.PendingBlockNumber)
+ return DoEstimateGas(ctx, s.b, args, rpc.PendingBlockNumber, s.b.RPCGasCap())
}
// ExecutionResult groups all structured logs emitted by the EVM
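
For reference, a stripped-down sketch of the binary search DoEstimateGas performs, with hi already clamped to the global cap as in the new code above. The executable closure stands in for the real EVM call; this is illustrative, not geth code:

package main

import "fmt"

// estimateGas finds the smallest gas value for which the call succeeds,
// never probing above hi. If even the clamped maximum fails, the request
// is rejected with the allowance in the error, as in the hunk above.
func estimateGas(lo, hi uint64, executable func(uint64) bool) (uint64, error) {
	cap := hi
	for lo+1 < hi {
		mid := (lo + hi) / 2
		if executable(mid) {
			hi = mid
		} else {
			lo = mid
		}
	}
	if hi == cap && !executable(hi) {
		return 0, fmt.Errorf("gas required exceeds allowance (%d) or always failing transaction", cap)
	}
	return hi, nil
}

func main() {
	// Pretend the call needs exactly 53000 gas.
	gas, err := estimateGas(20999, 8000000, func(g uint64) bool { return g >= 53000 })
	fmt.Println(gas, err) // 53000 <nil>
}
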
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index e88207f87..0c6c7eace 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -45,6 +45,7 @@ type Backend interface {
EventMux() *event.TypeMux
AccountManager() *accounts.Manager
ExtRPCEnabled() bool
+ RPCGasCap() *big.Int // global gas cap for eth_call over rpc: DoS protection
// BlockChain API
SetHead(number uint64)
diff --git a/les/api_backend.go b/les/api_backend.go
index 8b03979a2..4fe352136 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -192,6 +192,10 @@ func (b *LesApiBackend) ExtRPCEnabled() bool {
return b.extRPCEnabled
}
+func (b *LesApiBackend) RPCGasCap() *big.Int {
+ return b.eth.config.RPCGasCap
+}
+
func (b *LesApiBackend) BloomStatus() (uint64, uint64) {
if b.eth.bloomIndexer == nil {
return 0, 0
diff --git a/les/backend.go b/les/backend.go
index 944e7695d..a50fe0ced 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -119,7 +119,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool)
leth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.retriever)
- leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequencyClient, params.HelperTrieConfirmations)
+ leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequency, params.HelperTrieConfirmations)
leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency)
leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)
@@ -179,8 +179,6 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
func lesTopic(genesisHash common.Hash, protocolVersion uint) discv5.Topic {
var name string
switch protocolVersion {
- case lpv1:
- name = "LES"
case lpv2:
name = "LES2"
default:
diff --git a/les/benchmark.go b/les/benchmark.go
index cb302c6ea..925d1d89e 100644
--- a/les/benchmark.go
+++ b/les/benchmark.go
@@ -135,8 +135,7 @@ func (b *benchmarkHelperTrie) init(pm *ProtocolManager, count int) error {
b.sectionCount, b.headNum, _ = pm.server.bloomTrieIndexer.Sections()
} else {
b.sectionCount, _, _ = pm.server.chtIndexer.Sections()
- b.sectionCount /= (params.CHTFrequencyClient / params.CHTFrequencyServer)
- b.headNum = b.sectionCount*params.CHTFrequencyClient - 1
+ b.headNum = b.sectionCount*params.CHTFrequency - 1
}
if b.sectionCount == 0 {
return fmt.Errorf("no processed sections available")
diff --git a/les/commons.go b/les/commons.go
index 21fb25714..32fd65449 100644
--- a/les/commons.go
+++ b/les/commons.go
@@ -80,28 +80,16 @@ func (c *lesCommons) nodeInfo() interface{} {
sections, _, _ := c.chtIndexer.Sections()
sections2, _, _ := c.bloomTrieIndexer.Sections()
- if !c.protocolManager.lightSync {
- // convert to client section size if running in server mode
- sections /= c.iConfig.PairChtSize / c.iConfig.ChtSize
- }
-
if sections2 < sections {
sections = sections2
}
if sections > 0 {
sectionIndex := sections - 1
sectionHead := c.bloomTrieIndexer.SectionHead(sectionIndex)
- var chtRoot common.Hash
- if c.protocolManager.lightSync {
- chtRoot = light.GetChtRoot(c.chainDb, sectionIndex, sectionHead)
- } else {
- idxV2 := (sectionIndex+1)*c.iConfig.PairChtSize/c.iConfig.ChtSize - 1
- chtRoot = light.GetChtRoot(c.chainDb, idxV2, sectionHead)
- }
cht = params.TrustedCheckpoint{
SectionIndex: sectionIndex,
SectionHead: sectionHead,
- CHTRoot: chtRoot,
+ CHTRoot: light.GetChtRoot(c.chainDb, sectionIndex, sectionHead),
BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead),
}
}
diff --git a/les/costtracker.go b/les/costtracker.go
index 69531937e..014b888c0 100644
--- a/les/costtracker.go
+++ b/les/costtracker.go
@@ -39,11 +39,8 @@ var (
GetBlockBodiesMsg: {0, 700000},
GetReceiptsMsg: {0, 1000000},
GetCodeMsg: {0, 450000},
- GetProofsV1Msg: {0, 600000},
GetProofsV2Msg: {0, 600000},
- GetHeaderProofsMsg: {0, 1000000},
GetHelperTrieProofsMsg: {0, 1000000},
- SendTxMsg: {0, 450000},
SendTxV2Msg: {0, 450000},
GetTxStatusMsg: {0, 250000},
}
@@ -53,11 +50,8 @@ var (
GetBlockBodiesMsg: {0, 40},
GetReceiptsMsg: {0, 40},
GetCodeMsg: {0, 80},
- GetProofsV1Msg: {0, 80},
GetProofsV2Msg: {0, 80},
- GetHeaderProofsMsg: {0, 20},
GetHelperTrieProofsMsg: {0, 20},
- SendTxMsg: {0, 66000},
SendTxV2Msg: {0, 66000},
GetTxStatusMsg: {0, 50},
}
@@ -67,11 +61,8 @@ var (
GetBlockBodiesMsg: {0, 100000},
GetReceiptsMsg: {0, 200000},
GetCodeMsg: {0, 50000},
- GetProofsV1Msg: {0, 4000},
GetProofsV2Msg: {0, 4000},
- GetHeaderProofsMsg: {0, 4000},
GetHelperTrieProofsMsg: {0, 4000},
- SendTxMsg: {0, 0},
SendTxV2Msg: {0, 100},
GetTxStatusMsg: {0, 100},
}
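
Each entry above is a {baseCost, reqCost} pair; a request for N items is charged baseCost + N*reqCost, as peer.GetRequestCost does later in this diff. A minimal sketch (struct and field names assumed from les/flowcontrol):

package main

import "fmt"

// requestCosts mirrors one row of the tables above.
type requestCosts struct {
	baseCost, reqCost uint64
}

func main() {
	getProofsV2 := requestCosts{baseCost: 0, reqCost: 80} // the GetProofsV2Msg row
	fmt.Println(getProofsV2.baseCost + 10*getProofsV2.reqCost) // cost of a 10-proof request: 800
}
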
diff --git a/les/handler.go b/les/handler.go
index 7c290b717..9c72c6b13 100644
--- a/les/handler.go
+++ b/les/handler.go
@@ -772,80 +772,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
Obj: resp.Receipts,
}
- case GetProofsV1Msg:
- p.Log().Trace("Received proofs request")
- // Decode the retrieval message
- var req struct {
- ReqID uint64
- Reqs []ProofReq
- }
- if err := msg.Decode(&req); err != nil {
- return errResp(ErrDecode, "msg %v: %v", msg, err)
- }
- // Gather state data until the fetch or network limits is reached
- var (
- bytes int
- proofs proofsData
- )
- reqCnt := len(req.Reqs)
- if !accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
- return errResp(ErrRequestRejected, "")
- }
- go func() {
- for i, req := range req.Reqs {
- if i != 0 && !task.waitOrStop() {
- return
- }
- // Look up the root hash belonging to the request
- number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash)
- if number == nil {
- p.Log().Warn("Failed to retrieve block num for proof", "hash", req.BHash)
- continue
- }
- header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number)
- if header == nil {
- p.Log().Warn("Failed to retrieve header for proof", "block", *number, "hash", req.BHash)
- continue
- }
- // Open the account or storage trie for the request
- statedb := pm.blockchain.StateCache()
-
- var trie state.Trie
- switch len(req.AccKey) {
- case 0:
- // No account key specified, open an account trie
- trie, err = statedb.OpenTrie(header.Root)
- if trie == nil || err != nil {
- p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", header.Root, "err", err)
- continue
- }
- default:
- // Account key specified, open a storage trie
- account, err := pm.getAccount(statedb.TrieDB(), header.Root, common.BytesToHash(req.AccKey))
- if err != nil {
- p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "err", err)
- continue
- }
- trie, err = statedb.OpenStorageTrie(common.BytesToHash(req.AccKey), account.Root)
- if trie == nil || err != nil {
- p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "root", account.Root, "err", err)
- continue
- }
- }
- // Prove the user's request from the account or storage trie
- var proof light.NodeList
- if err := trie.Prove(req.Key, 0, &proof); err != nil {
- p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
- continue
- }
- proofs = append(proofs, proof)
- if bytes += proof.DataSize(); bytes >= softResponseLimit {
- break
- }
- }
- sendResponse(req.ReqID, uint64(reqCnt), p.ReplyProofs(req.ReqID, proofs), task.done())
- }()
-
case GetProofsV2Msg:
p.Log().Trace("Received les/2 proofs request")
// Decode the retrieval message
@@ -927,27 +853,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyProofsV2(req.ReqID, nodes.NodeList()), task.done())
}()
- case ProofsV1Msg:
- if pm.odr == nil {
- return errResp(ErrUnexpectedResponse, "")
- }
-
- p.Log().Trace("Received proofs response")
- // A batch of merkle proofs arrived to one of our previous requests
- var resp struct {
- ReqID, BV uint64
- Data []light.NodeList
- }
- if err := msg.Decode(&resp); err != nil {
- return errResp(ErrDecode, "msg %v: %v", msg, err)
- }
- p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
- deliverMsg = &Msg{
- MsgType: MsgProofsV1,
- ReqID: resp.ReqID,
- Obj: resp.Data,
- }
-
case ProofsV2Msg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
@@ -969,54 +874,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
Obj: resp.Data,
}
- case GetHeaderProofsMsg:
- p.Log().Trace("Received headers proof request")
- // Decode the retrieval message
- var req struct {
- ReqID uint64
- Reqs []ChtReq
- }
- if err := msg.Decode(&req); err != nil {
- return errResp(ErrDecode, "msg %v: %v", msg, err)
- }
- // Gather state data until the fetch or network limits is reached
- var (
- bytes int
- proofs []ChtResp
- )
- reqCnt := len(req.Reqs)
- if !accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
- return errResp(ErrRequestRejected, "")
- }
- go func() {
- trieDb := trie.NewDatabase(rawdb.NewTable(pm.chainDb, light.ChtTablePrefix))
- for i, req := range req.Reqs {
- if i != 0 && !task.waitOrStop() {
- return
- }
- if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil {
- sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, req.ChtNum*pm.iConfig.ChtSize-1)
- if root := light.GetChtRoot(pm.chainDb, req.ChtNum-1, sectionHead); root != (common.Hash{}) {
- trie, err := trie.New(root, trieDb)
- if err != nil {
- continue
- }
- var encNumber [8]byte
- binary.BigEndian.PutUint64(encNumber[:], req.BlockNum)
-
- var proof light.NodeList
- trie.Prove(encNumber[:], 0, &proof)
-
- proofs = append(proofs, ChtResp{Header: header, Proof: proof})
- if bytes += proof.DataSize() + estHeaderRlpSize; bytes >= softResponseLimit {
- break
- }
- }
- }
- }
- sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHeaderProofs(req.ReqID, proofs), task.done())
- }()
-
case GetHelperTrieProofsMsg:
p.Log().Trace("Received helper trie proof request")
// Decode the retrieval message
@@ -1081,26 +938,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}), task.done())
}()
- case HeaderProofsMsg:
- if pm.odr == nil {
- return errResp(ErrUnexpectedResponse, "")
- }
-
- p.Log().Trace("Received headers proof response")
- var resp struct {
- ReqID, BV uint64
- Data []ChtResp
- }
- if err := msg.Decode(&resp); err != nil {
- return errResp(ErrDecode, "msg %v: %v", msg, err)
- }
- p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
- deliverMsg = &Msg{
- MsgType: MsgHeaderProofs,
- ReqID: resp.ReqID,
- Obj: resp.Data,
- }
-
case HelperTrieProofsMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
@@ -1122,29 +959,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
Obj: resp.Data,
}
- case SendTxMsg:
- if pm.txpool == nil {
- return errResp(ErrRequestRejected, "")
- }
- // Transactions arrived, parse all of them and deliver to the pool
- var txs []*types.Transaction
- if err := msg.Decode(&txs); err != nil {
- return errResp(ErrDecode, "msg %v: %v", msg, err)
- }
- reqCnt := len(txs)
- if !accept(0, uint64(reqCnt), MaxTxSend) {
- return errResp(ErrRequestRejected, "")
- }
- go func() {
- for i, tx := range txs {
- if i != 0 && !task.waitOrStop() {
- return
- }
- pm.txpool.AddRemotes([]*types.Transaction{tx})
- }
- sendResponse(0, uint64(reqCnt), nil, task.done())
- }()
-
case SendTxV2Msg:
if pm.txpool == nil {
return errResp(ErrRequestRejected, "")
@@ -1261,9 +1075,8 @@ func (pm *ProtocolManager) getAccount(triedb *trie.Database, root, hash common.H
func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) {
switch id {
case htCanonical:
- idxV1 := (idx+1)*(pm.iConfig.PairChtSize/pm.iConfig.ChtSize) - 1
- sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idxV1+1)*pm.iConfig.ChtSize-1)
- return light.GetChtRoot(pm.chainDb, idxV1, sectionHead), light.ChtTablePrefix
+ sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.ChtSize-1)
+ return light.GetChtRoot(pm.chainDb, idx, sectionHead), light.ChtTablePrefix
case htBloomBits:
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.BloomTrieSize-1)
return light.GetBloomTrieRoot(pm.chainDb, idx, sectionHead), light.BloomTrieTablePrefix
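
To see what the deleted translation in htCanonical was doing: with the old 4096-block server sections and 32768-block client sections, one client section spanned eight server sections, so client index idx had to map to server index (idx+1)*(PairChtSize/ChtSize)-1 before the root lookup. A worked example:

package main

import "fmt"

func main() {
	const chtSize, pairChtSize = 4096, 32768 // old server/client section sizes
	idx := uint64(0)                         // client-side (LES/2) CHT section index
	idxV1 := (idx+1)*(pairChtSize/chtSize) - 1
	fmt.Println(idxV1) // 7: the root of client section 0 lived at server index 7
}
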
diff --git a/les/handler_test.go b/les/handler_test.go
index 5cf31b8f5..c1db65cf3 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -46,7 +46,6 @@ func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}
}
// Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeadersLes1(t *testing.T) { testGetBlockHeaders(t, 1) }
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }
func testGetBlockHeaders(t *testing.T, protocol int) {
@@ -174,7 +173,6 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
}
// Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodiesLes1(t *testing.T) { testGetBlockBodies(t, 1) }
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }
func testGetBlockBodies(t *testing.T, protocol int) {
@@ -249,7 +247,6 @@ func testGetBlockBodies(t *testing.T, protocol int) {
}
// Tests that the contract codes can be retrieved based on account addresses.
-func TestGetCodeLes1(t *testing.T) { testGetCode(t, 1) }
func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }
func testGetCode(t *testing.T, protocol int) {
@@ -281,7 +278,6 @@ func testGetCode(t *testing.T, protocol int) {
}
// Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetReceiptLes1(t *testing.T) { testGetReceipt(t, 1) }
func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }
func testGetReceipt(t *testing.T, protocol int) {
@@ -307,7 +303,6 @@ func testGetReceipt(t *testing.T, protocol int) {
}
// Tests that trie merkle proofs can be retrieved
-func TestGetProofsLes1(t *testing.T) { testGetProofs(t, 1) }
func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }
func testGetProofs(t *testing.T, protocol int) {
@@ -316,10 +311,7 @@ func testGetProofs(t *testing.T, protocol int) {
defer tearDown()
bc := server.pm.blockchain.(*core.BlockChain)
- var (
- proofreqs []ProofReq
- proofsV1 [][]rlp.RawValue
- )
+ var proofreqs []ProofReq
proofsV2 := light.NewNodeSet()
accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, {}}
@@ -334,112 +326,61 @@ func testGetProofs(t *testing.T, protocol int) {
Key: crypto.Keccak256(acc[:]),
}
proofreqs = append(proofreqs, req)
-
- switch protocol {
- case 1:
- var proof light.NodeList
- trie.Prove(crypto.Keccak256(acc[:]), 0, &proof)
- proofsV1 = append(proofsV1, proof)
- case 2:
- trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
- }
+ trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
}
}
// Send the proof request and verify the response
- switch protocol {
- case 1:
- cost := server.tPeer.GetRequestCost(GetProofsV1Msg, len(proofreqs))
- sendRequest(server.tPeer.app, GetProofsV1Msg, 42, cost, proofreqs)
- if err := expectResponse(server.tPeer.app, ProofsV1Msg, 42, testBufLimit, proofsV1); err != nil {
- t.Errorf("proofs mismatch: %v", err)
- }
- case 2:
- cost := server.tPeer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
- sendRequest(server.tPeer.app, GetProofsV2Msg, 42, cost, proofreqs)
- if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
- t.Errorf("proofs mismatch: %v", err)
- }
+ cost := server.tPeer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
+ sendRequest(server.tPeer.app, GetProofsV2Msg, 42, cost, proofreqs)
+ if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
+ t.Errorf("proofs mismatch: %v", err)
}
}
// Tests that CHT proofs can be correctly retrieved.
-func TestGetCHTProofsLes1(t *testing.T) { testGetCHTProofs(t, 1) }
func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
func testGetCHTProofs(t *testing.T, protocol int) {
config := light.TestServerIndexerConfig
- frequency := config.ChtSize
- if protocol == 2 {
- frequency = config.PairChtSize
- }
waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
- expectSections := frequency / config.ChtSize
for {
cs, _, _ := cIndexer.Sections()
- bs, _, _ := bIndexer.Sections()
- if cs >= expectSections && bs >= expectSections {
+ if cs >= 1 {
break
}
time.Sleep(10 * time.Millisecond)
}
}
- server, tearDown := newServerEnv(t, int(frequency+config.ChtConfirms), protocol, waitIndexers)
+ server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers)
defer tearDown()
bc := server.pm.blockchain.(*core.BlockChain)
// Assemble the proofs from the different protocols
- header := bc.GetHeaderByNumber(frequency - 1)
+ header := bc.GetHeaderByNumber(config.ChtSize - 1)
rlp, _ := rlp.EncodeToBytes(header)
key := make([]byte, 8)
- binary.BigEndian.PutUint64(key, frequency-1)
+ binary.BigEndian.PutUint64(key, config.ChtSize-1)
- proofsV1 := []ChtResp{{
- Header: header,
- }}
proofsV2 := HelperTrieResps{
AuxData: [][]byte{rlp},
}
- switch protocol {
- case 1:
- root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(frequency-1).Hash())
- trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
-
- var proof light.NodeList
- trie.Prove(key, 0, &proof)
- proofsV1[0].Proof = proof
-
- case 2:
- root := light.GetChtRoot(server.db, (frequency/config.ChtSize)-1, bc.GetHeaderByNumber(frequency-1).Hash())
- trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
- trie.Prove(key, 0, &proofsV2.Proofs)
- }
+ root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
+ trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
+ trie.Prove(key, 0, &proofsV2.Proofs)
// Assemble the requests for the different protocols
- requestsV1 := []ChtReq{{
- ChtNum: frequency / config.ChtSize,
- BlockNum: frequency - 1,
- }}
requestsV2 := []HelperTrieReq{{
Type: htCanonical,
- TrieIdx: frequency/config.PairChtSize - 1,
+ TrieIdx: 0,
Key: key,
AuxReq: auxHeader,
}}
// Send the proof request and verify the response
- switch protocol {
- case 1:
- cost := server.tPeer.GetRequestCost(GetHeaderProofsMsg, len(requestsV1))
- sendRequest(server.tPeer.app, GetHeaderProofsMsg, 42, cost, requestsV1)
- if err := expectResponse(server.tPeer.app, HeaderProofsMsg, 42, testBufLimit, proofsV1); err != nil {
- t.Errorf("proofs mismatch: %v", err)
- }
- case 2:
- cost := server.tPeer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
- sendRequest(server.tPeer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
- if err := expectResponse(server.tPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
- t.Errorf("proofs mismatch: %v", err)
- }
+ cost := server.tPeer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
+ sendRequest(server.tPeer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
+ if err := expectResponse(server.tPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
+ t.Errorf("proofs mismatch: %v", err)
}
}
@@ -449,10 +390,8 @@ func TestGetBloombitsProofs(t *testing.T) {
waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
for {
- cs, _, _ := cIndexer.Sections()
- bs, _, _ := bIndexer.Sections()
bts, _, _ := btIndexer.Sections()
- if cs >= 8 && bs >= 8 && bts >= 1 {
+ if bts >= 1 {
break
}
time.Sleep(10 * time.Millisecond)
diff --git a/les/odr.go b/les/odr.go
index 5d98c66a9..daf2ea19e 100644
--- a/les/odr.go
+++ b/les/odr.go
@@ -84,9 +84,7 @@ const (
MsgBlockBodies = iota
MsgCode
MsgReceipts
- MsgProofsV1
MsgProofsV2
- MsgHeaderProofs
MsgHelperTrieProofs
)
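
One subtlety worth noting: the constants above are iota-numbered, so deleting MsgProofsV1 and MsgHeaderProofs shifts the values of the survivors (MsgProofsV2 from 4 to 3, MsgHelperTrieProofs from 6 to 4). That is harmless because these tags only travel between handleMsg and the ODR validators inside the client; the on-wire message codes are the separate constants in les/protocol.go, which keep their values. A sketch:

package main

import "fmt"

const ( // the iota block above, after the removal
	MsgBlockBodies = iota
	MsgCode
	MsgReceipts
	MsgProofsV2
	MsgHelperTrieProofs
)

func main() {
	fmt.Println(MsgProofsV2, MsgHelperTrieProofs) // 3 4 (previously 4 and 6)
}
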
diff --git a/les/odr_requests.go b/les/odr_requests.go
index 6bd4a2931..66d6175b8 100644
--- a/les/odr_requests.go
+++ b/les/odr_requests.go
@@ -188,14 +188,7 @@ type TrieRequest light.TrieRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *TrieRequest) GetCost(peer *peer) uint64 {
- switch peer.version {
- case lpv1:
- return peer.GetRequestCost(GetProofsV1Msg, 1)
- case lpv2:
- return peer.GetRequestCost(GetProofsV2Msg, 1)
- default:
- panic(nil)
- }
+ return peer.GetRequestCost(GetProofsV2Msg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
@@ -220,38 +213,22 @@ func (r *TrieRequest) Request(reqID uint64, peer *peer) error {
func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating trie proof", "root", r.Id.Root, "key", r.Key)
- switch msg.MsgType {
- case MsgProofsV1:
- proofs := msg.Obj.([]light.NodeList)
- if len(proofs) != 1 {
- return errInvalidEntryCount
- }
- nodeSet := proofs[0].NodeSet()
- // Verify the proof and store if checks out
- if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, nodeSet); err != nil {
- return fmt.Errorf("merkle proof verification failed: %v", err)
- }
- r.Proof = nodeSet
- return nil
-
- case MsgProofsV2:
- proofs := msg.Obj.(light.NodeList)
- // Verify the proof and store if checks out
- nodeSet := proofs.NodeSet()
- reads := &readTraceDB{db: nodeSet}
- if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
- return fmt.Errorf("merkle proof verification failed: %v", err)
- }
- // check if all nodes have been read by VerifyProof
- if len(reads.reads) != nodeSet.KeyCount() {
- return errUselessNodes
- }
- r.Proof = nodeSet
- return nil
-
- default:
+ if msg.MsgType != MsgProofsV2 {
return errInvalidMessageType
}
+ proofs := msg.Obj.(light.NodeList)
+ // Verify the proof and store if checks out
+ nodeSet := proofs.NodeSet()
+ reads := &readTraceDB{db: nodeSet}
+ if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
+ return fmt.Errorf("merkle proof verification failed: %v", err)
+ }
+ // check if all nodes have been read by VerifyProof
+ if len(reads.reads) != nodeSet.KeyCount() {
+ return errUselessNodes
+ }
+ r.Proof = nodeSet
+ return nil
}
type CodeReq struct {
@@ -330,32 +307,13 @@ type HelperTrieResps struct { // describes all responses, not just a single one
AuxData [][]byte
}
-// legacy LES/1
-type ChtReq struct {
- ChtNum, BlockNum uint64
- FromLevel uint
-}
-
-// legacy LES/1
-type ChtResp struct {
- Header *types.Header
- Proof []rlp.RawValue
-}
-
// ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface
type ChtRequest light.ChtRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *ChtRequest) GetCost(peer *peer) uint64 {
- switch peer.version {
- case lpv1:
- return peer.GetRequestCost(GetHeaderProofsMsg, 1)
- case lpv2:
- return peer.GetRequestCost(GetHelperTrieProofsMsg, 1)
- default:
- panic(nil)
- }
+ return peer.GetRequestCost(GetHelperTrieProofsMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
@@ -377,21 +335,7 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
Key: encNum[:],
AuxReq: auxHeader,
}
- switch peer.version {
- case lpv1:
- var reqsV1 ChtReq
- if req.Type != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
- return fmt.Errorf("Request invalid in LES/1 mode")
- }
- blockNum := binary.BigEndian.Uint64(req.Key)
- // convert HelperTrie request to old CHT request
- reqsV1 = ChtReq{ChtNum: (req.TrieIdx + 1) * (r.Config.ChtSize / r.Config.PairChtSize), BlockNum: blockNum, FromLevel: req.FromLevel}
- return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []ChtReq{reqsV1})
- case lpv2:
- return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
- default:
- panic(nil)
- }
+ return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
}
// Validate processes an ODR request reply message from the LES network
@@ -400,78 +344,50 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating CHT", "cht", r.ChtNum, "block", r.BlockNum)
- switch msg.MsgType {
- case MsgHeaderProofs: // LES/1 backwards compatibility
- proofs := msg.Obj.([]ChtResp)
- if len(proofs) != 1 {
- return errInvalidEntryCount
- }
- proof := proofs[0]
-
- // Verify the CHT
- var encNumber [8]byte
- binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
-
- value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], light.NodeList(proof.Proof).NodeSet())
- if err != nil {
- return err
- }
- var node light.ChtNode
- if err := rlp.DecodeBytes(value, &node); err != nil {
- return err
- }
- if node.Hash != proof.Header.Hash() {
- return errCHTHashMismatch
- }
- // Verifications passed, store and return
- r.Header = proof.Header
- r.Proof = light.NodeList(proof.Proof).NodeSet()
- r.Td = node.Td
- case MsgHelperTrieProofs:
- resp := msg.Obj.(HelperTrieResps)
- if len(resp.AuxData) != 1 {
- return errInvalidEntryCount
- }
- nodeSet := resp.Proofs.NodeSet()
- headerEnc := resp.AuxData[0]
- if len(headerEnc) == 0 {
- return errHeaderUnavailable
- }
- header := new(types.Header)
- if err := rlp.DecodeBytes(headerEnc, header); err != nil {
- return errHeaderUnavailable
- }
+ if msg.MsgType != MsgHelperTrieProofs {
+ return errInvalidMessageType
+ }
+ resp := msg.Obj.(HelperTrieResps)
+ if len(resp.AuxData) != 1 {
+ return errInvalidEntryCount
+ }
+ nodeSet := resp.Proofs.NodeSet()
+ headerEnc := resp.AuxData[0]
+ if len(headerEnc) == 0 {
+ return errHeaderUnavailable
+ }
+ header := new(types.Header)
+ if err := rlp.DecodeBytes(headerEnc, header); err != nil {
+ return errHeaderUnavailable
+ }
- // Verify the CHT
- var encNumber [8]byte
- binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
+ // Verify the CHT
+ var encNumber [8]byte
+ binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
- reads := &readTraceDB{db: nodeSet}
- value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
- if err != nil {
- return fmt.Errorf("merkle proof verification failed: %v", err)
- }
- if len(reads.reads) != nodeSet.KeyCount() {
- return errUselessNodes
- }
+ reads := &readTraceDB{db: nodeSet}
+ value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
+ if err != nil {
+ return fmt.Errorf("merkle proof verification failed: %v", err)
+ }
+ if len(reads.reads) != nodeSet.KeyCount() {
+ return errUselessNodes
+ }
- var node light.ChtNode
- if err := rlp.DecodeBytes(value, &node); err != nil {
- return err
- }
- if node.Hash != header.Hash() {
- return errCHTHashMismatch
- }
- if r.BlockNum != header.Number.Uint64() {
- return errCHTNumberMismatch
- }
- // Verifications passed, store and return
- r.Header = header
- r.Proof = nodeSet
- r.Td = node.Td
- default:
- return errInvalidMessageType
+ var node light.ChtNode
+ if err := rlp.DecodeBytes(value, &node); err != nil {
+ return err
+ }
+ if node.Hash != header.Hash() {
+ return errCHTHashMismatch
+ }
+ if r.BlockNum != header.Number.Uint64() {
+ return errCHTNumberMismatch
}
+ // Verifications passed, store and return
+ r.Header = header
+ r.Proof = nodeSet
+ r.Td = node.Td
return nil
}
diff --git a/les/odr_test.go b/les/odr_test.go
index ac81fbcf0..bc587a183 100644
--- a/les/odr_test.go
+++ b/les/odr_test.go
@@ -38,8 +38,6 @@ import (
type odrTestFn func(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte
-func TestOdrGetBlockLes1(t *testing.T) { testOdr(t, 1, 1, odrGetBlock) }
-
func TestOdrGetBlockLes2(t *testing.T) { testOdr(t, 2, 1, odrGetBlock) }
func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@@ -56,8 +54,6 @@ func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainCon
return rlp
}
-func TestOdrGetReceiptsLes1(t *testing.T) { testOdr(t, 1, 1, odrGetReceipts) }
-
func TestOdrGetReceiptsLes2(t *testing.T) { testOdr(t, 2, 1, odrGetReceipts) }
func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@@ -78,8 +74,6 @@ func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.Chain
return rlp
}
-func TestOdrAccountsLes1(t *testing.T) { testOdr(t, 1, 1, odrAccounts) }
-
func TestOdrAccountsLes2(t *testing.T) { testOdr(t, 2, 1, odrAccounts) }
func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@@ -108,8 +102,6 @@ func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainCon
return res
}
-func TestOdrContractCallLes1(t *testing.T) { testOdr(t, 1, 2, odrContractCall) }
-
func TestOdrContractCallLes2(t *testing.T) { testOdr(t, 2, 2, odrContractCall) }
type callmsg struct {
diff --git a/les/peer.go b/les/peer.go
index 0c15add9c..bf3f0f762 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -34,10 +34,9 @@ import (
)
var (
- errClosed = errors.New("peer set is closed")
- errAlreadyRegistered = errors.New("peer is already registered")
- errNotRegistered = errors.New("peer is not registered")
- errInvalidHelpTrieReq = errors.New("invalid help trie request")
+ errClosed = errors.New("peer set is closed")
+ errAlreadyRegistered = errors.New("peer is already registered")
+ errNotRegistered = errors.New("peer is not registered")
)
const maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
@@ -244,18 +243,8 @@ func (p *peer) GetTxRelayCost(amount, size int) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
- var msgcode uint64
- switch p.version {
- case lpv1:
- msgcode = SendTxMsg
- case lpv2:
- msgcode = SendTxV2Msg
- default:
- panic(nil)
- }
-
- cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
- sizeCost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(size)/txSizeCostLimit
+ cost := p.fcCosts[SendTxV2Msg].baseCost + p.fcCosts[SendTxV2Msg].reqCost*uint64(amount)
+ sizeCost := p.fcCosts[SendTxV2Msg].baseCost + p.fcCosts[SendTxV2Msg].reqCost*uint64(size)/txSizeCostLimit
if sizeCost > cost {
cost = sizeCost
}
@@ -307,24 +296,12 @@ func (p *peer) ReplyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply {
return &reply{p.rw, ReceiptsMsg, reqID, data}
}
-// ReplyProofs creates a reply with a batch of legacy LES/1 merkle proofs, corresponding to the ones requested.
-func (p *peer) ReplyProofs(reqID uint64, proofs proofsData) *reply {
- data, _ := rlp.EncodeToBytes(proofs)
- return &reply{p.rw, ProofsV1Msg, reqID, data}
-}
-
// ReplyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofsV2(reqID uint64, proofs light.NodeList) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV2Msg, reqID, data}
}
-// ReplyHeaderProofs creates a reply with a batch of legacy LES/1 header proofs, corresponding to the ones requested.
-func (p *peer) ReplyHeaderProofs(reqID uint64, proofs []ChtResp) *reply {
- data, _ := rlp.EncodeToBytes(proofs)
- return &reply{p.rw, HeaderProofsMsg, reqID, data}
-}
-
// ReplyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested.
func (p *peer) ReplyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply {
data, _ := rlp.EncodeToBytes(resp)
@@ -374,36 +351,13 @@ func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
// RequestProofs fetches a batch of merkle proofs from a remote node.
func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
- switch p.version {
- case lpv1:
- return sendRequest(p.rw, GetProofsV1Msg, reqID, cost, reqs)
- case lpv2:
- return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
- default:
- panic(nil)
- }
+ return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
}
// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
-func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, data interface{}) error {
- switch p.version {
- case lpv1:
- reqs, ok := data.([]ChtReq)
- if !ok {
- return errInvalidHelpTrieReq
- }
- p.Log().Debug("Fetching batch of header proofs", "count", len(reqs))
- return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
- case lpv2:
- reqs, ok := data.([]HelperTrieReq)
- if !ok {
- return errInvalidHelpTrieReq
- }
- p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
- return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
- default:
- panic(nil)
- }
+func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq) error {
+ p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
+ return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
}
// RequestTxStatus fetches a batch of transaction status records from a remote node.
@@ -415,14 +369,7 @@ func (p *peer) RequestTxStatus(reqID, cost uint64, txHashes []common.Hash) error
// SendTxs creates a reply with a batch of transactions to be added to the remote transaction pool.
func (p *peer) SendTxs(reqID, cost uint64, txs rlp.RawValue) error {
p.Log().Debug("Sending batch of transactions", "size", len(txs))
- switch p.version {
- case lpv1:
- return p2p.Send(p.rw, SendTxMsg, txs) // old message format does not include reqID
- case lpv2:
- return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
- default:
- panic(nil)
- }
+ return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
}
type keyValueEntry struct {
diff --git a/les/protocol.go b/les/protocol.go
index 65395ac05..86e450d01 100644
--- a/les/protocol.go
+++ b/les/protocol.go
@@ -33,19 +33,18 @@ import (
// Constants to match up protocol versions and messages
const (
- lpv1 = 1
lpv2 = 2
)
// Supported versions of the les protocol (first is primary)
var (
- ClientProtocolVersions = []uint{lpv2, lpv1}
- ServerProtocolVersions = []uint{lpv2, lpv1}
+ ClientProtocolVersions = []uint{lpv2}
+ ServerProtocolVersions = []uint{lpv2}
AdvertiseProtocolVersions = []uint{lpv2} // clients are searching for the first advertised protocol in the list
)
// Number of implemented messages corresponding to different protocol versions.
-var ProtocolLengths = map[uint]uint64{lpv1: 15, lpv2: 22}
+var ProtocolLengths = map[uint]uint64{lpv2: 22}
const (
NetworkId = 1
@@ -54,7 +53,7 @@ const (
// les protocol message codes
const (
- // Protocol messages belonging to LPV1
+ // Protocol messages inherited from LPV1
StatusMsg = 0x00
AnnounceMsg = 0x01
GetBlockHeadersMsg = 0x02
@@ -63,14 +62,9 @@ const (
BlockBodiesMsg = 0x05
GetReceiptsMsg = 0x06
ReceiptsMsg = 0x07
- GetProofsV1Msg = 0x08
- ProofsV1Msg = 0x09
GetCodeMsg = 0x0a
CodeMsg = 0x0b
- SendTxMsg = 0x0c
- GetHeaderProofsMsg = 0x0d
- HeaderProofsMsg = 0x0e
- // Protocol messages belonging to LPV2
+ // Protocol messages introduced in LPV2
GetProofsV2Msg = 0x0f
ProofsV2Msg = 0x10
GetHelperTrieProofsMsg = 0x11
@@ -89,10 +83,7 @@ var requests = map[uint64]requestInfo{
GetBlockHeadersMsg: {"GetBlockHeaders", MaxHeaderFetch},
GetBlockBodiesMsg: {"GetBlockBodies", MaxBodyFetch},
GetReceiptsMsg: {"GetReceipts", MaxReceiptFetch},
- GetProofsV1Msg: {"GetProofsV1", MaxProofsFetch},
GetCodeMsg: {"GetCode", MaxCodeFetch},
- SendTxMsg: {"SendTx", MaxTxSend},
- GetHeaderProofsMsg: {"GetHeaderProofs", MaxHelperTrieProofsFetch},
GetProofsV2Msg: {"GetProofsV2", MaxProofsFetch},
GetHelperTrieProofsMsg: {"GetHelperTrieProofs", MaxHelperTrieProofsFetch},
SendTxV2Msg: {"SendTxV2", MaxTxSend},
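
Note that the retired LES/1 codes (0x08, 0x09 and 0x0c through 0x0e) are left as gaps rather than reused, so every surviving code keeps its wire value and ProtocolLengths[lpv2] can stay at 22: one slot per code from 0x00 up to TxStatusMsg. A sketch (TxStatusMsg's value is taken from les/protocol.go, not from this hunk):

package main

import "fmt"

func main() {
	const TxStatusMsg = 0x15     // highest LES/2 message code
	fmt.Println(TxStatusMsg + 1) // 22, matching ProtocolLengths[lpv2]
}
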
diff --git a/les/request_test.go b/les/request_test.go
index c9c185198..e0d00d18c 100644
--- a/les/request_test.go
+++ b/les/request_test.go
@@ -36,24 +36,18 @@ func secAddr(addr common.Address) []byte {
type accessTestFn func(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest
-func TestBlockAccessLes1(t *testing.T) { testAccess(t, 1, tfBlockAccess) }
-
func TestBlockAccessLes2(t *testing.T) { testAccess(t, 2, tfBlockAccess) }
func tfBlockAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
return &light.BlockRequest{Hash: bhash, Number: number}
}
-func TestReceiptsAccessLes1(t *testing.T) { testAccess(t, 1, tfReceiptsAccess) }
-
func TestReceiptsAccessLes2(t *testing.T) { testAccess(t, 2, tfReceiptsAccess) }
func tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
return &light.ReceiptsRequest{Hash: bhash, Number: number}
}
-func TestTrieEntryAccessLes1(t *testing.T) { testAccess(t, 1, tfTrieEntryAccess) }
-
func TestTrieEntryAccessLes2(t *testing.T) { testAccess(t, 2, tfTrieEntryAccess) }
func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
@@ -63,8 +57,6 @@ func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) ligh
return nil
}
-func TestCodeAccessLes1(t *testing.T) { testAccess(t, 1, tfCodeAccess) }
-
func TestCodeAccessLes2(t *testing.T) { testAccess(t, 2, tfCodeAccess) }
func tfCodeAccess(db ethdb.Database, bhash common.Hash, num uint64) light.OdrRequest {
diff --git a/les/server.go b/les/server.go
index 3716685e1..6c2b227f4 100644
--- a/les/server.go
+++ b/les/server.go
@@ -89,7 +89,7 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
config: config,
chainDb: eth.ChainDb(),
iConfig: light.DefaultServerIndexerConfig,
- chtIndexer: light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequencyServer, params.HelperTrieProcessConfirmations),
+ chtIndexer: light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations),
bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
protocolManager: pm,
},
@@ -108,15 +108,11 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
srv.thcBlockProcessing = config.LightServ/100 + 1
srv.fcManager = flowcontrol.NewClientManager(nil, &mclock.System{})
- chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility
- chtV2SectionCount := chtV1SectionCount / (params.CHTFrequencyClient / params.CHTFrequencyServer)
- if chtV2SectionCount != 0 {
- // convert to LES/2 section
- chtLastSection := chtV2SectionCount - 1
- // convert last LES/2 section index back to LES/1 index for chtIndexer.SectionHead
- chtLastSectionV1 := (chtLastSection+1)*(params.CHTFrequencyClient/params.CHTFrequencyServer) - 1
- chtSectionHead := srv.chtIndexer.SectionHead(chtLastSectionV1)
- chtRoot := light.GetChtRoot(pm.chainDb, chtLastSectionV1, chtSectionHead)
+ chtSectionCount, _, _ := srv.chtIndexer.Sections()
+ if chtSectionCount != 0 {
+ chtLastSection := chtSectionCount - 1
+ chtSectionHead := srv.chtIndexer.SectionHead(chtLastSection)
+ chtRoot := light.GetChtRoot(pm.chainDb, chtLastSection, chtSectionHead)
logger.Info("Loaded CHT", "section", chtLastSection, "head", chtSectionHead, "root", chtRoot)
}
bloomTrieSectionCount, _, _ := srv.bloomTrieIndexer.Sections()
diff --git a/light/odr_test.go b/light/odr_test.go
index c1762c43e..55725d84e 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -99,7 +99,7 @@ func (odr *testOdr) IndexerConfig() *IndexerConfig {
type odrTestFn func(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error)
-func TestOdrGetBlockLes1(t *testing.T) { testChainOdr(t, 1, odrGetBlock) }
+func TestOdrGetBlockLes2(t *testing.T) { testChainOdr(t, 1, odrGetBlock) }
func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
var block *types.Block
@@ -115,7 +115,7 @@ func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc
return rlp, nil
}
-func TestOdrGetReceiptsLes1(t *testing.T) { testChainOdr(t, 1, odrGetReceipts) }
+func TestOdrGetReceiptsLes2(t *testing.T) { testChainOdr(t, 1, odrGetReceipts) }
func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
var receipts types.Receipts
@@ -137,7 +137,7 @@ func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain,
return rlp, nil
}
-func TestOdrAccountsLes1(t *testing.T) { testChainOdr(t, 1, odrAccounts) }
+func TestOdrAccountsLes2(t *testing.T) { testChainOdr(t, 1, odrAccounts) }
func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678")
@@ -161,7 +161,7 @@ func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc
return res, st.Error()
}
-func TestOdrContractCallLes1(t *testing.T) { testChainOdr(t, 1, odrContractCall) }
+func TestOdrContractCallLes2(t *testing.T) { testChainOdr(t, 1, odrContractCall) }
type callmsg struct {
types.Message
diff --git a/light/postprocess.go b/light/postprocess.go
index 306de4694..2a837e6c1 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -41,9 +41,6 @@ type IndexerConfig struct {
// The block frequency for creating CHTs.
ChtSize uint64
- // A special auxiliary field represents client's chtsize for server config, otherwise represents server's chtsize.
- PairChtSize uint64
-
// The number of confirmations needed to generate/accept a canonical hash help trie.
ChtConfirms uint64
@@ -64,8 +61,7 @@ type IndexerConfig struct {
var (
// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
DefaultServerIndexerConfig = &IndexerConfig{
- ChtSize: params.CHTFrequencyServer,
- PairChtSize: params.CHTFrequencyClient,
+ ChtSize: params.CHTFrequency,
ChtConfirms: params.HelperTrieProcessConfirmations,
BloomSize: params.BloomBitsBlocks,
BloomConfirms: params.BloomConfirms,
@@ -74,8 +70,7 @@ var (
}
// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
DefaultClientIndexerConfig = &IndexerConfig{
- ChtSize: params.CHTFrequencyClient,
- PairChtSize: params.CHTFrequencyServer,
+ ChtSize: params.CHTFrequency,
ChtConfirms: params.HelperTrieConfirmations,
BloomSize: params.BloomBitsBlocksClient,
BloomConfirms: params.HelperTrieConfirmations,
@@ -84,8 +79,7 @@ var (
}
// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
TestServerIndexerConfig = &IndexerConfig{
- ChtSize: 64,
- PairChtSize: 512,
+ ChtSize: 512,
ChtConfirms: 4,
BloomSize: 64,
BloomConfirms: 4,
@@ -95,7 +89,6 @@ var (
// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
TestClientIndexerConfig = &IndexerConfig{
ChtSize: 512,
- PairChtSize: 64,
ChtConfirms: 32,
BloomSize: 512,
BloomConfirms: 32,
@@ -116,7 +109,7 @@ var (
ErrNoTrustedCht = errors.New("no trusted canonical hash trie")
ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie")
ErrNoHeader = errors.New("header not found")
- chtPrefix = []byte("chtRoot-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
+ chtPrefix = []byte("chtRootV2-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
ChtTablePrefix = "cht-"
)
@@ -127,7 +120,6 @@ type ChtNode struct {
}
// GetChtRoot reads the CHT root associated to the given section from the database
-// Note that sectionIdx is specified according to LES/1 CHT section size.
func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
@@ -136,7 +128,6 @@ func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) c
}
// StoreChtRoot writes the CHT root associated to the given section into the database
-// Note that sectionIdx is specified according to LES/1 CHT section size.
func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
@@ -163,7 +154,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *co
triedb: trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
sectionSize: size,
}
- return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndex-"), backend, size, confirms, time.Millisecond*100, "cht")
+ return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndexV2-"), backend, size, confirms, time.Millisecond*100, "cht")
}
// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
@@ -235,9 +226,7 @@ func (c *ChtIndexerBackend) Commit() error {
}
c.triedb.Commit(root, false)
- if ((c.section+1)*c.sectionSize)%params.CHTFrequencyClient == 0 {
- log.Info("Storing CHT", "section", c.section*c.sectionSize/params.CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
- }
+ log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
return nil
}
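
The prefix bumps ("chtRoot-" to "chtRootV2-", "chtIndex-" to "chtIndexV2-") orphan the roots and indexing progress recorded under the old 4096-block section size; the apparent intent is that existing databases re-index from scratch instead of mixing section layouts. A sketch of the key layout implied by GetChtRoot/StoreChtRoot (only the prefix and the encoded section number appear in this diff; the concatenation order is an assumption from the surrounding code):

package main

import (
	"encoding/binary"
	"fmt"
)

// chtRootKey illustrates the assumed key shape:
// prefix ++ big-endian section index ++ section head hash.
func chtRootKey(sectionIdx uint64, sectionHead [32]byte) []byte {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	return append(append([]byte("chtRootV2-"), encNumber[:]...), sectionHead[:]...)
}

func main() {
	fmt.Printf("%x\n", chtRootKey(1, [32]byte{})[:18]) // prefix plus index portion
}
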
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index ef0c08afc..3e9353753 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -313,7 +313,7 @@ func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
// Avoid recording failures on shutdown.
reply <- nil
return
- } else if err != nil || len(r) == 0 {
+ } else if len(r) == 0 {
fails++
tab.db.UpdateFindFails(n.ID(), n.IP(), fails)
log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
diff --git a/params/network_params.go b/params/network_params.go
index f8731e897..024c4af45 100644
--- a/params/network_params.go
+++ b/params/network_params.go
@@ -32,13 +32,8 @@ const (
// considered probably final and its rotated bits are calculated.
BloomConfirms = 256
- // CHTFrequencyClient is the block frequency for creating CHTs on the client side.
- CHTFrequencyClient = 32768
-
- // CHTFrequencyServer is the block frequency for creating CHTs on the server side.
- // Eventually this can be merged back with the client version, but that requires a
- // full database upgrade, so that should be left for a suitable moment.
- CHTFrequencyServer = 4096
+ // CHTFrequency is the block frequency for creating CHTs
+ CHTFrequency = 32768
// BloomTrieFrequency is the block frequency for creating BloomTrie on both
// server/client sides.
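
With a single CHTFrequency, section n covers blocks n*32768 through (n+1)*32768-1, and its head block number, used for the canonical-hash and CHT-root lookups throughout this diff, is simply the last block of that range:

package main

import "fmt"

func main() {
	const CHTFrequency = 32768
	section := uint64(100)
	head := (section+1)*CHTFrequency - 1 // last block covered by the section
	fmt.Println(head)                    // 3309567
}
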
diff --git a/signer/core/signed_data.go b/signer/core/signed_data.go
index 475a8837e..d264cbaa0 100644
--- a/signer/core/signed_data.go
+++ b/signer/core/signed_data.go
@@ -894,8 +894,10 @@ func (domain *TypedDataDomain) validate() error {
// Map is a helper function to generate a map version of the domain
func (domain *TypedDataDomain) Map() map[string]interface{} {
- dataMap := map[string]interface{}{
- "chainId": domain.ChainId,
+ dataMap := map[string]interface{}{}
+
+ if domain.ChainId != nil {
+ dataMap["chainId"] = domain.ChainId
}
if len(domain.Name) > 0 {
diff --git a/signer/core/signed_data_test.go b/signer/core/signed_data_test.go
index 76e1b72ee..b1d893221 100644
--- a/signer/core/signed_data_test.go
+++ b/signer/core/signed_data_test.go
@@ -225,6 +225,40 @@ func TestSignData(t *testing.T) {
}
}
+func TestDomainChainId(t *testing.T) {
+ withoutChainID := TypedData{
+ Types: Types{
+ "EIP712Domain": []Type{
+ {Name: "name", Type: "string"},
+ },
+ },
+ Domain: TypedDataDomain{
+ Name: "test",
+ },
+ }
+
+ if _, ok := withoutChainID.Domain.Map()["chainId"]; ok {
+ t.Errorf("Expected the chainId key to not be present in the domain map")
+ }
+
+ withChainID := TypedData{
+ Types: Types{
+ "EIP712Domain": []Type{
+ {Name: "name", Type: "string"},
+ {Name: "chainId", Type: "uint256"},
+ },
+ },
+ Domain: TypedDataDomain{
+ Name: "test",
+ ChainId: big.NewInt(1),
+ },
+ }
+
+ if _, ok := withChainID.Domain.Map()["chainId"]; !ok {
+ t.Errorf("Expected the chainId key be present in the domain map")
+ }
+}
+
func TestHashStruct(t *testing.T) {
hash, err := typedData.HashStruct(typedData.PrimaryType, typedData.Message)
if err != nil {
diff --git a/swarm/version/version.go b/swarm/version/version.go
index ff54bdf11..fb2fc8545 100644
--- a/swarm/version/version.go
+++ b/swarm/version/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 0 // Major version component of the current release
VersionMinor = 3 // Minor version component of the current release
- VersionPatch = 12 // Patch version component of the current release
+ VersionPatch = 14 // Patch version component of the current release
VersionMeta = "unstable" // Version metadata to append to the version string
)