-rw-r--r--  .travis.yml | 29
-rw-r--r--  Dockerfile | 2
-rw-r--r--  Dockerfile.alltools | 2
-rw-r--r--  accounts/abi/bind/backends/simulated.go | 2
-rw-r--r--  accounts/abi/type.go | 7
-rw-r--r--  accounts/keystore/key.go | 18
-rw-r--r--  accounts/keystore/keystore.go | 2
-rw-r--r--  accounts/keystore/keystore_passphrase.go | 27
-rw-r--r--  accounts/keystore/keystore_plain_test.go | 4
-rw-r--r--  appveyor.yml | 4
-rw-r--r--  cmd/evm/runner.go | 25
-rw-r--r--  cmd/geth/chaincmd.go | 4
-rw-r--r--  cmd/geth/main.go | 2
-rw-r--r--  cmd/geth/usage.go | 2
-rw-r--r--  cmd/puppeth/wizard_dashboard.go | 2
-rw-r--r--  cmd/utils/flags.go | 21
-rw-r--r--  common/format.go | 42
-rw-r--r--  consensus/clique/clique.go | 8
-rw-r--r--  consensus/clique/snapshot_test.go | 2
-rw-r--r--  core/bench_test.go | 4
-rw-r--r--  core/block_validator_test.go | 8
-rw-r--r--  core/blockchain.go | 73
-rw-r--r--  core/blockchain_test.go | 32
-rw-r--r--  core/chain_makers.go | 2
-rw-r--r--  core/chain_makers_test.go | 2
-rw-r--r--  core/dao_test.go | 12
-rw-r--r--  core/genesis.go | 2
-rw-r--r--  core/genesis_test.go | 2
-rw-r--r--  core/headerchain.go | 14
-rw-r--r--  core/tx_pool.go | 4
-rw-r--r--  core/vm/evm.go | 22
-rw-r--r--  core/vm/interpreter.go | 5
-rw-r--r--  eth/api_tracer.go | 2
-rw-r--r--  eth/backend.go | 66
-rw-r--r--  eth/config.go | 5
-rw-r--r--  eth/downloader/api.go | 4
-rw-r--r--  eth/handler_test.go | 10
-rw-r--r--  eth/helper_test.go | 2
-rw-r--r--  internal/build/util.go | 4
-rw-r--r--  les/commons.go | 24
-rw-r--r--  les/helper_test.go | 2
-rw-r--r--  les/odr_requests.go | 16
-rw-r--r--  les/retrieve.go | 13
-rw-r--r--  light/lightchain.go | 18
-rw-r--r--  light/odr.go | 16
-rw-r--r--  light/odr_test.go | 2
-rw-r--r--  light/odr_util.go | 2
-rw-r--r--  light/postprocess.go | 37
-rw-r--r--  light/trie_test.go | 2
-rw-r--r--  light/txpool_test.go | 2
-rw-r--r--  miner/miner.go | 4
-rw-r--r--  miner/worker.go | 72
-rw-r--r--  miner/worker_test.go | 4
-rw-r--r--  params/config.go | 54
-rw-r--r--  rpc/websocket.go | 27
-rw-r--r--  rpc/websocket_test.go | 54
-rw-r--r--  signer/core/abihelper_test.go | 34
-rw-r--r--  swarm/network/fetcher.go | 2
-rw-r--r--  swarm/network/simulation/simulation.go | 2
-rw-r--r--  swarm/network/stream/peer.go | 2
-rw-r--r--  swarm/pot/doc.go | 4
-rw-r--r--  swarm/storage/netstore.go | 4
-rw-r--r--  tests/block_test_util.go | 2
-rw-r--r--  whisper/mailserver/mailserver.go | 2
64 files changed, 622 insertions, 259 deletions
diff --git a/.travis.yml b/.travis.yml
index 3ae88aab6..372f7a827 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -14,12 +14,23 @@ matrix:
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
- # These are the latest Go versions.
- os: linux
dist: trusty
sudo: required
go: 1.10.x
script:
+ - sudo modprobe fuse
+ - sudo chmod 666 /dev/fuse
+ - sudo chown root:$USER /etc/fuse.conf
+ - go run build/ci.go install
+ - go run build/ci.go test -coverage $TEST_PACKAGES
+
+ # These are the latest Go versions.
+ - os: linux
+ dist: trusty
+ sudo: required
+ go: 1.11.x
+ script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
@@ -27,7 +38,7 @@ matrix:
- go run build/ci.go test -coverage $TEST_PACKAGES
- os: osx
- go: 1.10.x
+ go: 1.11.x
script:
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- go run build/ci.go install
@@ -36,7 +47,7 @@ matrix:
# This builder only tests code linters on latest version of Go
- os: linux
dist: trusty
- go: 1.10.x
+ go: 1.11.x
env:
- lint
git:
@@ -47,7 +58,7 @@ matrix:
# This builder does the Ubuntu PPA upload
- os: linux
dist: trusty
- go: 1.10.x
+ go: 1.11.x
env:
- ubuntu-ppa
git:
@@ -66,7 +77,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
- go: 1.10.x
+ go: 1.11.x
env:
- azure-linux
git:
@@ -100,7 +111,7 @@ matrix:
dist: trusty
services:
- docker
- go: 1.10.x
+ go: 1.11.x
env:
- azure-linux-mips
git:
@@ -144,7 +155,7 @@ matrix:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- - curl https://storage.googleapis.com/golang/go1.10.3.linux-amd64.tar.gz | tar -xz
+ - curl https://storage.googleapis.com/golang/go1.11.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
@@ -161,7 +172,7 @@ matrix:
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
- go: 1.10.x
+ go: 1.11.x
env:
- azure-osx
- azure-ios
@@ -190,7 +201,7 @@ matrix:
# This builder does the Azure archive purges to avoid accumulating junk
- os: linux
dist: trusty
- go: 1.10.x
+ go: 1.11.x
env:
- azure-purge
git:
diff --git a/Dockerfile b/Dockerfile
index edf5a0602..e87dd35d3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
-FROM golang:1.10-alpine as builder
+FROM golang:1.11-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers
diff --git a/Dockerfile.alltools b/Dockerfile.alltools
index e54e107bf..e984a1b09 100644
--- a/Dockerfile.alltools
+++ b/Dockerfile.alltools
@@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
-FROM golang:1.10-alpine as builder
+FROM golang:1.11-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 1d14f8c6f..fc0ccbf52 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -69,7 +69,7 @@ func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBac
database := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
- blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{})
+ blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil)
backend := &SimulatedBackend{
database: database,
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index 9de36daff..dce89d2b4 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -103,7 +103,12 @@ func NewType(t string) (typ Type, err error) {
return typ, err
}
// parse the type and size of the abi-type.
- parsedType := typeRegex.FindAllStringSubmatch(t, -1)[0]
+ matches := typeRegex.FindAllStringSubmatch(t, -1)
+ if len(matches) == 0 {
+ return Type{}, fmt.Errorf("invalid type '%v'", t)
+ }
+ parsedType := matches[0]
+
// varSize is the size of the variable
var varSize int
if len(parsedType[3]) > 0 {
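A minimal sketch (not part of the patch) of what the new guard means for callers: a type string that the ABI type regexp cannot match now comes back as an "invalid type" error instead of an index-out-of-range panic on matches[0]. The malformed input below is illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Well-formed type strings parse exactly as before.
	typ, err := abi.NewType("uint256")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("parsed:", typ)

	// An input with nothing for the type regexp to match previously
	// panicked; it is now rejected with a descriptive error.
	if _, err := abi.NewType("***"); err != nil {
		fmt.Println("rejected:", err)
	}
}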
diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go
index 211fa863d..9e3e4856c 100644
--- a/accounts/keystore/key.go
+++ b/accounts/keystore/key.go
@@ -179,26 +179,34 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
return key, a, err
}
-func writeKeyFile(file string, content []byte) error {
+func writeTemporaryKeyFile(file string, content []byte) (string, error) {
// Create the keystore directory with appropriate permissions
// in case it is not present yet.
const dirPerm = 0700
if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil {
- return err
+ return "", err
}
// Atomic write: create a temporary hidden file first
// then move it into place. TempFile assigns mode 0600.
f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
if err != nil {
- return err
+ return "", err
}
if _, err := f.Write(content); err != nil {
f.Close()
os.Remove(f.Name())
- return err
+ return "", err
}
f.Close()
- return os.Rename(f.Name(), file)
+ return f.Name(), nil
+}
+
+func writeKeyFile(file string, content []byte) error {
+ name, err := writeTemporaryKeyFile(file, content)
+ if err != nil {
+ return err
+ }
+ return os.Rename(name, file)
}
// keyFileName implements the naming convention for keyfiles:
diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go
index 6b04acd05..2918047cc 100644
--- a/accounts/keystore/keystore.go
+++ b/accounts/keystore/keystore.go
@@ -78,7 +78,7 @@ type unlocked struct {
// NewKeyStore creates a keystore for the given directory.
func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore {
keydir, _ = filepath.Abs(keydir)
- ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP}}
+ ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP, false}}
ks.init(keydir)
return ks
}
diff --git a/accounts/keystore/keystore_passphrase.go b/accounts/keystore/keystore_passphrase.go
index 59738abe1..5aa3a6bbd 100644
--- a/accounts/keystore/keystore_passphrase.go
+++ b/accounts/keystore/keystore_passphrase.go
@@ -35,6 +35,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "os"
"path/filepath"
"github.com/ethereum/go-ethereum/common"
@@ -72,6 +73,10 @@ type keyStorePassphrase struct {
keysDirPath string
scryptN int
scryptP int
+ // skipKeyFileVerification disables the security feature which
+ // reads and decrypts any newly created keyfiles. This should be 'false' in all
+ // cases except tests -- setting this to 'true' is not recommended.
+ skipKeyFileVerification bool
}
func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {
@@ -93,7 +98,7 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string)
// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
- _, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, rand.Reader, auth)
+ _, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP, false}, rand.Reader, auth)
return a.Address, err
}
@@ -102,7 +107,25 @@ func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) er
if err != nil {
return err
}
- return writeKeyFile(filename, keyjson)
+ // Write into temporary file
+ tmpName, err := writeTemporaryKeyFile(filename, keyjson)
+ if err != nil {
+ return err
+ }
+ if !ks.skipKeyFileVerification {
+ // Verify that we can decrypt the file with the given password.
+ _, err = ks.GetKey(key.Address, tmpName, auth)
+ if err != nil {
+ msg := "An error was encountered when saving and verifying the keystore file. \n" +
+ "This indicates that the keystore is corrupted. \n" +
+ "The corrupted file is stored at \n%v\n" +
+ "Please file a ticket at:\n\n" +
+ "https://github.com/ethereum/go-ethereum/issues." +
+ "The error was : %s"
+ return fmt.Errorf(msg, tmpName, err)
+ }
+ }
+ return os.Rename(tmpName, filename)
}
func (ks keyStorePassphrase) JoinPath(filename string) string {
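A small usage sketch, assuming the exported keystore.StoreKey entry point shown above: new keyfiles are now written to a temporary file, decrypted again with the same passphrase as a sanity check, and only then renamed into place, so a corrupted write surfaces as an error at creation time. The directory and passphrase below are placeholders.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	dir, err := ioutil.TempDir("", "keystore-example")
	if err != nil {
		log.Fatal(err)
	}
	// StoreKey encrypts a fresh key with the passphrase and verifies that the
	// resulting JSON decrypts before moving it into the keystore directory.
	addr, err := keystore.StoreKey(dir, "correct horse battery staple", keystore.LightScryptN, keystore.LightScryptP)
	if err != nil {
		log.Fatal(err) // verification failures point at the leftover temporary file
	}
	fmt.Println("stored key for", addr.Hex(), "under", dir)
}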
diff --git a/accounts/keystore/keystore_plain_test.go b/accounts/keystore/keystore_plain_test.go
index a1c3bc4b6..32852a0ad 100644
--- a/accounts/keystore/keystore_plain_test.go
+++ b/accounts/keystore/keystore_plain_test.go
@@ -37,7 +37,7 @@ func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
t.Fatal(err)
}
if encrypted {
- ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP}
+ ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP, true}
} else {
ks = &keyStorePlain{d}
}
@@ -191,7 +191,7 @@ func TestV1_1(t *testing.T) {
func TestV1_2(t *testing.T) {
t.Parallel()
- ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP}
+ ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP, true}
addr := common.HexToAddress("cb61d5a9c4896fb9658090b597ef0e7be6f7b67e")
file := "testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e"
k, err := ks.GetKey(addr, file, "g")
diff --git a/appveyor.yml b/appveyor.yml
index 05ff92cf3..b056cb3fd 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.3.windows-%GETH_ARCH%.zip
- - 7z x go1.10.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+ - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.windows-%GETH_ARCH%.zip
+ - 7z x go1.11.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 7138a9ddd..962fc021d 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -80,13 +80,13 @@ func runCmd(ctx *cli.Context) error {
}
var (
- tracer vm.Tracer
- debugLogger *vm.StructLogger
- statedb *state.StateDB
- chainConfig *params.ChainConfig
- sender = common.BytesToAddress([]byte("sender"))
- receiver = common.BytesToAddress([]byte("receiver"))
- blockNumber uint64
+ tracer vm.Tracer
+ debugLogger *vm.StructLogger
+ statedb *state.StateDB
+ chainConfig *params.ChainConfig
+ sender = common.BytesToAddress([]byte("sender"))
+ receiver = common.BytesToAddress([]byte("receiver"))
+ genesisConfig *core.Genesis
)
if ctx.GlobalBool(MachineFlag.Name) {
tracer = NewJSONLogger(logconfig, os.Stdout)
@@ -98,13 +98,14 @@ func runCmd(ctx *cli.Context) error {
}
if ctx.GlobalString(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
+ genesisConfig = gen
db := ethdb.NewMemDatabase()
genesis := gen.ToBlock(db)
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db))
chainConfig = gen.Config
- blockNumber = gen.Number
} else {
statedb, _ = state.New(common.Hash{}, state.NewDatabase(ethdb.NewMemDatabase()))
+ genesisConfig = new(core.Genesis)
}
if ctx.GlobalString(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.GlobalString(SenderFlag.Name))
@@ -156,13 +157,19 @@ func runCmd(ctx *cli.Context) error {
}
initialGas := ctx.GlobalUint64(GasFlag.Name)
+ if genesisConfig.GasLimit != 0 {
+ initialGas = genesisConfig.GasLimit
+ }
runtimeConfig := runtime.Config{
Origin: sender,
State: statedb,
GasLimit: initialGas,
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
Value: utils.GlobalBig(ctx, ValueFlag.Name),
- BlockNumber: new(big.Int).SetUint64(blockNumber),
+ Difficulty: genesisConfig.Difficulty,
+ Time: new(big.Int).SetUint64(genesisConfig.Timestamp),
+ Coinbase: genesisConfig.Coinbase,
+ BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{
Tracer: tracer,
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 87548865b..562c7e0de 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -340,9 +340,9 @@ func importPreimages(ctx *cli.Context) error {
start := time.Now()
if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
- utils.Fatalf("Export error: %v\n", err)
+ utils.Fatalf("Import error: %v\n", err)
}
- fmt.Printf("Export done in %v\n", time.Since(start))
+ fmt.Printf("Import done in %v\n", time.Since(start))
return nil
}
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 134d5a4c0..fae4b5718 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -130,6 +130,8 @@ var (
utils.NoCompactionFlag,
utils.GpoBlocksFlag,
utils.GpoPercentileFlag,
+ utils.EWASMInterpreterFlag,
+ utils.EVMInterpreterFlag,
configFileFlag,
}
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index a674eca4f..8b0491ce3 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -207,6 +207,8 @@ var AppHelpFlagGroups = []flagGroup{
Name: "VIRTUAL MACHINE",
Flags: []cli.Flag{
utils.VMEnableDebugFlag,
+ utils.EVMInterpreterFlag,
+ utils.EWASMInterpreterFlag,
},
},
{
diff --git a/cmd/puppeth/wizard_dashboard.go b/cmd/puppeth/wizard_dashboard.go
index 5f781c415..1a01631ff 100644
--- a/cmd/puppeth/wizard_dashboard.go
+++ b/cmd/puppeth/wizard_dashboard.go
@@ -92,7 +92,7 @@ func (w *wizard) deployDashboard() {
pages = append(pages, page)
}
}
- // Promt the user to chose one, enter manually or simply not list this service
+ // Prompt the user to choose one, enter manually or simply not list this service
defLabel, defChoice := "don't list", len(pages)+2
if len(pages) > 0 {
defLabel, defChoice = pages[0], 1
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 0fecae9aa..a2becd08b 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -610,6 +610,17 @@ var (
Usage: "InfluxDB `host` tag attached to all measurements",
Value: "localhost",
}
+
+ EWASMInterpreterFlag = cli.StringFlag{
+ Name: "vm.ewasm",
+ Usage: "External ewasm configuration (default = built-in interpreter)",
+ Value: "",
+ }
+ EVMInterpreterFlag = cli.StringFlag{
+ Name: "vm.evm",
+ Usage: "External EVM configuration (default = built-in interpreter)",
+ Value: "",
+ }
)
// MakeDataDir retrieves the currently requested data directory, terminating
@@ -1184,6 +1195,14 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name)
}
+ if ctx.GlobalIsSet(EWASMInterpreterFlag.Name) {
+ cfg.EWASMInterpreter = ctx.GlobalString(EWASMInterpreterFlag.Name)
+ }
+
+ if ctx.GlobalIsSet(EVMInterpreterFlag.Name) {
+ cfg.EVMInterpreter = ctx.GlobalString(EVMInterpreterFlag.Name)
+ }
+
// Override any default configs for hard coded networks.
switch {
case ctx.GlobalBool(TestnetFlag.Name):
@@ -1379,7 +1398,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
cache.TrieNodeLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
- chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg)
+ chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)
}
diff --git a/common/format.go b/common/format.go
index fccc29962..6fc21af71 100644
--- a/common/format.go
+++ b/common/format.go
@@ -38,3 +38,45 @@ func (d PrettyDuration) String() string {
}
return label
}
+
+// PrettyAge is a pretty printed version of a time.Duration value that rounds
+// the values up to a single most significant unit, days/weeks/years included.
+type PrettyAge time.Time
+
+// ageUnits is a list of units the age pretty printing uses.
+var ageUnits = []struct {
+ Size time.Duration
+ Symbol string
+}{
+ {12 * 30 * 24 * time.Hour, "y"},
+ {30 * 24 * time.Hour, "mo"},
+ {7 * 24 * time.Hour, "w"},
+ {24 * time.Hour, "d"},
+ {time.Hour, "h"},
+ {time.Minute, "m"},
+ {time.Second, "s"},
+}
+
+// String implements the Stringer interface, allowing pretty printing of duration
+// values rounded to the most significant time unit.
+func (t PrettyAge) String() string {
+ // Calculate the time difference and handle the 0 cornercase
+ diff := time.Since(time.Time(t))
+ if diff < time.Second {
+ return "0"
+ }
+ // Accumulate a precision of 3 components before returning
+ result, prec := "", 0
+
+ for _, unit := range ageUnits {
+ if diff > unit.Size {
+ result = fmt.Sprintf("%s%d%s", result, diff/unit.Size, unit.Symbol)
+ diff %= unit.Size
+
+ if prec += 1; prec >= 3 {
+ break
+ }
+ }
+ }
+ return result
+}
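A short sketch of the new helper's behaviour (values are illustrative): PrettyAge keeps at most three significant units and collapses sub-second ages to "0".

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// An age of 26h15m40s prints with three units: "1d2h15m".
	born := time.Now().Add(-(26*time.Hour + 15*time.Minute + 40*time.Second))
	fmt.Println(common.PrettyAge(born))

	// Anything under a second is reported as "0".
	fmt.Println(common.PrettyAge(time.Now()))
}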
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 2b69141c1..eae09f91d 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -134,11 +134,6 @@ var (
// errRecentlySigned is returned if a header is signed by an authorized entity
// that already signed a header recently, thus is temporarily not allowed to.
errRecentlySigned = errors.New("recently signed")
-
- // errWaitTransactions is returned if an empty block is attempted to be sealed
- // on an instant chain (0 second period). It's important to refuse these as the
- // block reward is zero, so an empty block just bloats the chain... fast.
- errWaitTransactions = errors.New("waiting for transactions")
)
// SignerFn is a signer callback function to request a hash to be signed by a
@@ -615,7 +610,8 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c
}
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.Period == 0 && len(block.Transactions()) == 0 {
- return errWaitTransactions
+ log.Info("Sealing paused, waiting for transactions")
+ return nil
}
// Don't hold the signer fields for the entire sealing procedure
c.lock.RLock()
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index 71fe7ce8b..41dae1426 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -448,7 +448,7 @@ func TestClique(t *testing.T) {
batches[len(batches)-1] = append(batches[len(batches)-1], block)
}
// Pass all the headers through clique and ensure tallying succeeds
- chain, err := core.NewBlockChain(db, nil, &config, engine, vm.Config{})
+ chain, err := core.NewBlockChain(db, nil, &config, engine, vm.Config{}, nil)
if err != nil {
t.Errorf("test %d: failed to create test chain: %v", i, err)
continue
diff --git a/core/bench_test.go b/core/bench_test.go
index 8d95456e9..53cba0517 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -175,7 +175,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Time the insertion of the new chain.
// State and blocks are stored in the same DB.
- chainman, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ chainman, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer chainman.Stop()
b.ReportAllocs()
b.ResetTimer()
@@ -287,7 +287,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
- chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{})
+ chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
b.Fatalf("error creating chain: %v", err)
}
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index 2a171218e..9319a7835 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -42,7 +42,7 @@ func TestHeaderVerification(t *testing.T) {
headers[i] = block.Header()
}
// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{})
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
defer chain.Stop()
for i := 0; i < len(blocks); i++ {
@@ -106,11 +106,11 @@ func testHeaderConcurrentVerification(t *testing.T, threads int) {
var results <-chan error
if valid {
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{})
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
chain.Stop()
} else {
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{})
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil)
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
chain.Stop()
}
@@ -173,7 +173,7 @@ func testHeaderConcurrentAbortion(t *testing.T, threads int) {
defer runtime.GOMAXPROCS(old)
// Start the verifications and immediately abort
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), vm.Config{})
+ chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), vm.Config{}, nil)
defer chain.Stop()
abort, results := chain.engine.VerifyHeaders(chain, headers, seals)
diff --git a/core/blockchain.go b/core/blockchain.go
index 63f60ca28..fe961e0c4 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -128,13 +128,14 @@ type BlockChain struct {
validator Validator // block and state validator interface
vmConfig vm.Config
- badBlocks *lru.Cache // Bad block cache
+ badBlocks *lru.Cache // Bad block cache
+ shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
}
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
-func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config) (*BlockChain, error) {
+func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = &CacheConfig{
TrieNodeLimit: 256 * 1024 * 1024,
@@ -148,19 +149,20 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
badBlocks, _ := lru.New(badBlockLimit)
bc := &BlockChain{
- chainConfig: chainConfig,
- cacheConfig: cacheConfig,
- db: db,
- triegc: prque.New(nil),
- stateCache: state.NewDatabase(db),
- quit: make(chan struct{}),
- bodyCache: bodyCache,
- bodyRLPCache: bodyRLPCache,
- blockCache: blockCache,
- futureBlocks: futureBlocks,
- engine: engine,
- vmConfig: vmConfig,
- badBlocks: badBlocks,
+ chainConfig: chainConfig,
+ cacheConfig: cacheConfig,
+ db: db,
+ triegc: prque.New(nil),
+ stateCache: state.NewDatabase(db),
+ quit: make(chan struct{}),
+ shouldPreserve: shouldPreserve,
+ bodyCache: bodyCache,
+ bodyRLPCache: bodyRLPCache,
+ blockCache: blockCache,
+ futureBlocks: futureBlocks,
+ engine: engine,
+ vmConfig: vmConfig,
+ badBlocks: badBlocks,
}
bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
@@ -251,9 +253,9 @@ func (bc *BlockChain) loadLastState() error {
blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
- log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
- log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
- log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
+ log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)))
+ log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)))
+ log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)))
return nil
}
@@ -850,13 +852,16 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
bc.mu.Unlock()
- log.Info("Imported new block receipts",
- "count", stats.processed,
- "elapsed", common.PrettyDuration(time.Since(start)),
- "number", head.Number(),
- "hash", head.Hash(),
+ context := []interface{}{
+ "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
+ "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
"size", common.StorageSize(bytes),
- "ignored", stats.ignored)
+ }
+ if stats.ignored > 0 {
+ context = append(context, []interface{}{"ignored", stats.ignored}...)
+ }
+ log.Info("Imported new block receipts", context...)
+
return 0, nil
}
@@ -964,8 +969,17 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
reorg := externTd.Cmp(localTd) > 0
currentBlock = bc.CurrentBlock()
if !reorg && externTd.Cmp(localTd) == 0 {
- // Split same-difficulty blocks by number, then at random
- reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
+ // Split same-difficulty blocks by number, then preferentially select
+ // the block generated by the local miner as the canonical block.
+ if block.NumberU64() < currentBlock.NumberU64() {
+ reorg = true
+ } else if block.NumberU64() == currentBlock.NumberU64() {
+ var currentPreserve, blockPreserve bool
+ if bc.shouldPreserve != nil {
+ currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
+ }
+ reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
+ }
}
if reorg {
// Reorganise the chain if the parent is not the head block
@@ -1229,8 +1243,13 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor
context := []interface{}{
"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
- "number", end.Number(), "hash", end.Hash(), "cache", cache,
+ "number", end.Number(), "hash", end.Hash(),
+ }
+ if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
+ context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
+ context = append(context, []interface{}{"cache", cache}...)
+
if st.queued > 0 {
context = append(context, []interface{}{"queued", st.queued}...)
}
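A minimal sketch of the new NewBlockChain signature, mirroring the test setup used elsewhere in this change: the extra shouldPreserve callback only matters when two chains tie on total difficulty, where a block it marks as preserved (typically one mined locally) wins instead of the previous coin flip; passing nil keeps the old behaviour.

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	db := ethdb.NewMemDatabase()
	new(core.Genesis).MustCommit(db)

	// Placeholder policy; in eth/backend.go this is Ethereum.shouldPreserve.
	preserve := func(block *types.Block) bool { return false }

	chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, preserve)
	if err != nil {
		log.Fatal(err)
	}
	defer chain.Stop()
}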
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index e452d6936..aef810050 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -52,7 +52,7 @@ func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *B
)
// Initialize a fresh chain with only a genesis block
- blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{})
+ blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil)
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
@@ -523,7 +523,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
blockchain.Stop()
// Create a new BlockChain and check that it rolled back the state.
- ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, ethash.NewFaker(), vm.Config{})
+ ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
@@ -635,7 +635,7 @@ func TestFastVsFullChains(t *testing.T) {
// Import the chain as an archive node for the comparison baseline
archiveDb := ethdb.NewMemDatabase()
gspec.MustCommit(archiveDb)
- archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer archive.Stop()
if n, err := archive.InsertChain(blocks); err != nil {
@@ -644,7 +644,7 @@ func TestFastVsFullChains(t *testing.T) {
// Fast import the chain as a non-archive node to test
fastDb := ethdb.NewMemDatabase()
gspec.MustCommit(fastDb)
- fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -722,7 +722,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
archiveDb := ethdb.NewMemDatabase()
gspec.MustCommit(archiveDb)
- archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
@@ -735,7 +735,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a non-archive node and ensure all pointers are updated
fastDb := ethdb.NewMemDatabase()
gspec.MustCommit(fastDb)
- fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -756,7 +756,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
lightDb := ethdb.NewMemDatabase()
gspec.MustCommit(lightDb)
- light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
@@ -825,7 +825,7 @@ func TestChainTxReorgs(t *testing.T) {
}
})
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
if i, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
@@ -896,7 +896,7 @@ func TestLogReorgs(t *testing.T) {
signer = types.NewEIP155Signer(gspec.Config.ChainID)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
rmLogsCh := make(chan RemovedLogsEvent)
@@ -943,7 +943,7 @@ func TestReorgSideEvent(t *testing.T) {
signer = types.NewEIP155Signer(gspec.Config.ChainID)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {})
@@ -1072,7 +1072,7 @@ func TestEIP155Transition(t *testing.T) {
genesis = gspec.MustCommit(db)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 4, func(i int, block *BlockGen) {
@@ -1179,7 +1179,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
}
genesis = gspec.MustCommit(db)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, block *BlockGen) {
@@ -1254,7 +1254,7 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
diskdb := ethdb.NewMemDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1298,7 +1298,7 @@ func TestTrieForkGC(t *testing.T) {
diskdb := ethdb.NewMemDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1337,7 +1337,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
diskdb := ethdb.NewMemDatabase()
new(Genesis).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1419,7 +1419,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
diskdb := ethdb.NewMemDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{})
+ chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
if err != nil {
b.Fatalf("failed to create tester chain: %v", err)
}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 351673477..0bc453fdf 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -177,7 +177,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
// TODO(karalabe): This is needed for clique, which depends on multiple blocks.
// It's nonetheless ugly to spin up a blockchain here. Get rid of this somehow.
- blockchain, _ := NewBlockChain(db, nil, config, engine, vm.Config{})
+ blockchain, _ := NewBlockChain(db, nil, config, engine, vm.Config{}, nil)
defer blockchain.Stop()
b := &BlockGen{i: i, parent: parent, chain: blocks, chainReader: blockchain, statedb: statedb, config: config, engine: engine}
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 5015d1f48..64b64fd6a 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -79,7 +79,7 @@ func ExampleGenerateChain() {
})
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{})
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
defer blockchain.Stop()
if i, err := blockchain.InsertChain(chain); err != nil {
diff --git a/core/dao_test.go b/core/dao_test.go
index 284b1d98b..966139bce 100644
--- a/core/dao_test.go
+++ b/core/dao_test.go
@@ -45,7 +45,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
proConf.DAOForkBlock = forkBlock
proConf.DAOForkSupport = true
- proBc, _ := NewBlockChain(proDb, nil, &proConf, ethash.NewFaker(), vm.Config{})
+ proBc, _ := NewBlockChain(proDb, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil)
defer proBc.Stop()
conDb := ethdb.NewMemDatabase()
@@ -55,7 +55,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
conConf.DAOForkBlock = forkBlock
conConf.DAOForkSupport = false
- conBc, _ := NewBlockChain(conDb, nil, &conConf, ethash.NewFaker(), vm.Config{})
+ conBc, _ := NewBlockChain(conDb, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil)
defer conBc.Stop()
if _, err := proBc.InsertChain(prefix); err != nil {
@@ -69,7 +69,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a pro-fork block, and try to feed into the no-fork chain
db = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{})
+ bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
@@ -94,7 +94,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a no-fork block, and try to feed into the pro-fork chain
db = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{})
+ bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
@@ -120,7 +120,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
db = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{})
+ bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
@@ -140,7 +140,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
db = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{})
+ bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
diff --git a/core/genesis.go b/core/genesis.go
index 9190e2ba2..6e71afd61 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -355,7 +355,7 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
common.BytesToAddress([]byte{6}): {Balance: big.NewInt(1)}, // ECAdd
common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul
common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing
- faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))},
+ faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))},
},
}
}
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 2d7f94f8f..c7d54f205 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -120,7 +120,7 @@ func TestSetupGenesis(t *testing.T) {
// Advance to block #4, past the homestead transition block of customg.
genesis := oldcustomg.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, oldcustomg.Config, ethash.NewFullFaker(), vm.Config{})
+ bc, _ := NewBlockChain(db, nil, oldcustomg.Config, ethash.NewFullFaker(), vm.Config{}, nil)
defer bc.Stop()
blocks, _ := GenerateChain(oldcustomg.Config, genesis, ethash.NewFaker(), db, 4, nil)
diff --git a/core/headerchain.go b/core/headerchain.go
index 2bbec28bf..d2093113c 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -281,8 +281,18 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
}
// Report some public statistics so the user has a clue what's going on
last := chain[len(chain)-1]
- log.Info("Imported new block headers", "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
- "number", last.Number, "hash", last.Hash(), "ignored", stats.ignored)
+
+ context := []interface{}{
+ "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
+ "number", last.Number, "hash", last.Hash(),
+ }
+ if timestamp := time.Unix(last.Time.Int64(), 0); time.Since(timestamp) > time.Minute {
+ context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
+ }
+ if stats.ignored > 0 {
+ context = append(context, []interface{}{"ignored", stats.ignored}...)
+ }
+ log.Info("Imported new block headers", context...)
return 0, nil
}
diff --git a/core/tx_pool.go b/core/tx_pool.go
index a0a6ff851..f6da5da2a 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -525,7 +525,7 @@ func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common
return pending, queued
}
-// Pending retrieves all currently processable transactions, groupped by origin
+// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
@@ -547,7 +547,7 @@ func (pool *TxPool) Locals() []common.Address {
return pool.locals.flatten()
}
-// local retrieves all currently known local transactions, groupped by origin
+// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *TxPool) local() map[common.Address]types.Transactions {
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 58618f811..fc040c621 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -136,10 +136,28 @@ func NewEVM(ctx Context, statedb StateDB, chainConfig *params.ChainConfig, vmCon
vmConfig: vmConfig,
chainConfig: chainConfig,
chainRules: chainConfig.Rules(ctx.BlockNumber),
- interpreters: make([]Interpreter, 1),
+ interpreters: make([]Interpreter, 0, 1),
}
- evm.interpreters[0] = NewEVMInterpreter(evm, vmConfig)
+ if chainConfig.IsEWASM(ctx.BlockNumber) {
+ // to be implemented by EVM-C and Wagon PRs.
+ // if vmConfig.EWASMInterpreter != "" {
+ // extIntOpts := strings.Split(vmConfig.EWASMInterpreter, ":")
+ // path := extIntOpts[0]
+ // options := []string{}
+ // if len(extIntOpts) > 1 {
+ // options = extIntOpts[1:]
+ // }
+ // evm.interpreters = append(evm.interpreters, NewEVMVCInterpreter(evm, vmConfig, options))
+ // } else {
+ // evm.interpreters = append(evm.interpreters, NewEWASMInterpreter(evm, vmConfig))
+ // }
+ panic("No supported ewasm interpreter yet.")
+ }
+
+ // vmConfig.EVMInterpreter will be used by EVM-C, it won't be checked here
+ // as we always want to have the built-in EVM as the failover option.
+ evm.interpreters = append(evm.interpreters, NewEVMInterpreter(evm, vmConfig))
evm.interpreter = evm.interpreters[0]
return evm
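The two new vm.Config fields (declared in core/vm/interpreter.go just below) are plumbed from the --vm.ewasm and --vm.evm flags through eth.Config; a tiny sketch of the configuration, with the values left empty since the built-in interpreter remains the failover and the only one wired up so far.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
)

func main() {
	// Empty strings keep the built-in interpreter; non-empty values are meant
	// for the external EVM-C / Wagon interpreters that later changes hook up.
	cfg := vm.Config{
		EWASMInterpreter: "",
		EVMInterpreter:   "",
	}
	fmt.Printf("ewasm=%q evm=%q\n", cfg.EWASMInterpreter, cfg.EVMInterpreter)
}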
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 0f1b07342..8e934f60e 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -39,6 +39,11 @@ type Config struct {
// may be left uninitialised and will be set to the default
// table.
JumpTable [256]operation
+
+ // Type of the EWASM interpreter
+ EWASMInterpreter string
+ // Type of the EVM interpreter
+ EVMInterpreter string
}
// Interpreter is used to run Ethereum based contracts and will utilise the
diff --git a/eth/api_tracer.go b/eth/api_tracer.go
index 0a8b9a994..5b7f168ec 100644
--- a/eth/api_tracer.go
+++ b/eth/api_tracer.go
@@ -127,7 +127,7 @@ func (api *PrivateDebugAPI) TraceChain(ctx context.Context, start, end rpc.Block
// traceChain configures a new tracer according to the provided configuration, and
// executes all the transactions contained within. The return value will be one item
-// per transaction, dependent on the requestd tracer.
+// per transaction, dependent on the requested tracer.
func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) {
// Tracing a chain is a **long** operation, only do with subscriptions
notifier, supported := rpc.NotifierFromContext(ctx)
diff --git a/eth/backend.go b/eth/backend.go
index 9926225f2..b555b064a 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -149,10 +149,14 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
}
var (
- vmConfig = vm.Config{EnablePreimageRecording: config.EnablePreimageRecording}
+ vmConfig = vm.Config{
+ EnablePreimageRecording: config.EnablePreimageRecording,
+ EWASMInterpreter: config.EWASMInterpreter,
+ EVMInterpreter: config.EVMInterpreter,
+ }
cacheConfig = &core.CacheConfig{Disabled: config.NoPruning, TrieNodeLimit: config.TrieCache, TrieTimeLimit: config.TrieTimeout}
)
- eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, eth.chainConfig, eth.engine, vmConfig)
+ eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, eth.chainConfig, eth.engine, vmConfig, eth.shouldPreserve)
if err != nil {
return nil, err
}
@@ -173,7 +177,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
return nil, err
}
- eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit, config.MinerGasFloor, config.MinerGasCeil)
+ eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit, config.MinerGasFloor, config.MinerGasCeil, eth.isLocalBlock)
eth.miner.SetExtra(makeExtraData(config.MinerExtraData))
eth.APIBackend = &EthAPIBackend{eth, nil}
@@ -330,6 +334,60 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) {
return common.Address{}, fmt.Errorf("etherbase must be explicitly specified")
}
+// isLocalBlock checks whether the specified block is mined
+// by local miner accounts.
+//
+// We regard two types of accounts as local miner accounts: etherbase
+// and accounts specified via the `txpool.locals` flag.
+func (s *Ethereum) isLocalBlock(block *types.Block) bool {
+ author, err := s.engine.Author(block.Header())
+ if err != nil {
+ log.Warn("Failed to retrieve block author", "number", block.NumberU64(), "hash", block.Hash(), "err", err)
+ return false
+ }
+ // Check whether the given address is etherbase.
+ s.lock.RLock()
+ etherbase := s.etherbase
+ s.lock.RUnlock()
+ if author == etherbase {
+ return true
+ }
+ // Check whether the given address is specified by the `txpool.locals`
+ // CLI flag.
+ for _, account := range s.config.TxPool.Locals {
+ if account == author {
+ return true
+ }
+ }
+ return false
+}
+
+// shouldPreserve checks whether we should preserve the given block
+// during the chain reorg depending on whether the author of the block
+// is a local account.
+func (s *Ethereum) shouldPreserve(block *types.Block) bool {
+ // The reason we need to disable self-reorg preservation for clique
+ // is that it could introduce a deadlock.
+ //
+ // e.g. If there are 7 available signers
+ //
+ // r1 A
+ // r2 B
+ // r3 C
+ // r4 D
+ // r5 A [X] F G
+ // r6 [X]
+ //
+ // In round 5, the in-turn signer E is offline, so in the worst case
+ // A, F and G sign the block of round 5 and reject the blocks of their opponents,
+ // and in round 6 the last available signer B is offline, leaving the whole
+ // network stuck.
+ if _, ok := s.engine.(*clique.Clique); ok {
+ return false
+ }
+ return s.isLocalBlock(block)
+}
+
// SetEtherbase sets the mining reward address.
func (s *Ethereum) SetEtherbase(etherbase common.Address) {
s.lock.Lock()
@@ -362,7 +420,7 @@ func (s *Ethereum) StartMining(threads int) error {
s.lock.RUnlock()
s.txPool.SetGasPrice(price)
- // Configure the local mining addess
+ // Configure the local mining address
eb, err := s.Etherbase()
if err != nil {
log.Error("Cannot start mining without etherbase", "err", err)
diff --git a/eth/config.go b/eth/config.go
index f1a402e37..efbaafb6a 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -121,6 +121,11 @@ type Config struct {
// Miscellaneous options
DocRoot string `toml:"-"`
+
+ // Type of the EWASM interpreter ("" for default)
+ EWASMInterpreter string
+ // Type of the EVM interpreter ("" for default)
+ EVMInterpreter string
}
type configMarshaling struct {
diff --git a/eth/downloader/api.go b/eth/downloader/api.go
index 91c6322d4..57ff3d71a 100644
--- a/eth/downloader/api.go
+++ b/eth/downloader/api.go
@@ -40,8 +40,8 @@ type PublicDownloaderAPI struct {
// installSyncSubscription channel.
func NewPublicDownloaderAPI(d *Downloader, m *event.TypeMux) *PublicDownloaderAPI {
api := &PublicDownloaderAPI{
- d: d,
- mux: m,
+ d: d,
+ mux: m,
installSyncSubscription: make(chan chan interface{}),
uninstallSyncSubscription: make(chan *uninstallSyncSubscriptionRequest),
}
diff --git a/eth/handler_test.go b/eth/handler_test.go
index fee4114eb..0885a0448 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -242,10 +242,10 @@ func testGetBlockBodies(t *testing.T, protocol int) {
available []bool // Availability of explicitly requested blocks
expected int // Total number of existing blocks to expect
}{
- {1, nil, nil, 1}, // A single random block should be retrievable
- {10, nil, nil, 10}, // Multiple random blocks should be retrievable
- {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
- {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
+ {1, nil, nil, 1}, // A single random block should be retrievable
+ {10, nil, nil, 10}, // Multiple random blocks should be retrievable
+ {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
+ {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
{0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable
{0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable
{0, []common.Hash{{}}, []bool{false}, 0}, // A non existent block should not be returned
@@ -472,7 +472,7 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool
config = &params.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked}
gspec = &core.Genesis{Config: config}
genesis = gspec.MustCommit(db)
- blockchain, _ = core.NewBlockChain(db, nil, config, pow, vm.Config{})
+ blockchain, _ = core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil)
)
pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db)
if err != nil {
diff --git a/eth/helper_test.go b/eth/helper_test.go
index 3d2ab0aba..3c101f658 100644
--- a/eth/helper_test.go
+++ b/eth/helper_test.go
@@ -59,7 +59,7 @@ func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func
Alloc: core.GenesisAlloc{testBank: {Balance: big.NewInt(1000000)}},
}
genesis = gspec.MustCommit(db)
- blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
+ blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
)
chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, blocks, generator)
if _, err := blockchain.InsertChain(chain); err != nil {
diff --git a/internal/build/util.go b/internal/build/util.go
index f99ee8396..195bdb404 100644
--- a/internal/build/util.go
+++ b/internal/build/util.go
@@ -143,9 +143,9 @@ func CopyFile(dst, src string, mode os.FileMode) {
// so that go commands executed by build use the same version of Go as the 'host' that runs
// build code. e.g.
//
-// /usr/lib/go-1.8/bin/go run build/ci.go ...
+// /usr/lib/go-1.11/bin/go run build/ci.go ...
//
-// runs using go 1.8 and invokes go 1.8 tools from the same GOROOT. This is also important
+// runs using go 1.11 and invokes go 1.11 tools from the same GOROOT. This is also important
// because runtime.Version checks on the host should match the tools that are run.
func GoTool(tool string, args ...string) *exec.Cmd {
args = append([]string{tool}, args...)
diff --git a/les/commons.go b/les/commons.go
index a97687993..0b6cf3711 100644
--- a/les/commons.go
+++ b/les/commons.go
@@ -42,12 +42,12 @@ type lesCommons struct {
// NodeInfo represents a short summary of the Ethereum sub-protocol metadata
// known about the host peer.
type NodeInfo struct {
- Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)
- Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain
- Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block
- Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
- Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block
- CHT light.TrustedCheckpoint `json:"cht"` // Trused CHT checkpoint for fast catchup
+ Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)
+ Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain
+ Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block
+ Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
+ Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block
+ CHT params.TrustedCheckpoint `json:"cht"` // Trusted CHT checkpoint for fast catchup
}
// makeProtocols creates protocol descriptors for the given LES versions.
@@ -76,7 +76,7 @@ func (c *lesCommons) makeProtocols(versions []uint) []p2p.Protocol {
// nodeInfo retrieves some protocol metadata about the running host node.
func (c *lesCommons) nodeInfo() interface{} {
- var cht light.TrustedCheckpoint
+ var cht params.TrustedCheckpoint
sections, _, _ := c.chtIndexer.Sections()
sections2, _, _ := c.bloomTrieIndexer.Sections()
@@ -98,11 +98,11 @@ func (c *lesCommons) nodeInfo() interface{} {
idxV2 := (sectionIndex+1)*c.iConfig.PairChtSize/c.iConfig.ChtSize - 1
chtRoot = light.GetChtRoot(c.chainDb, idxV2, sectionHead)
}
- cht = light.TrustedCheckpoint{
- SectionIdx: sectionIndex,
- SectionHead: sectionHead,
- CHTRoot: chtRoot,
- BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead),
+ cht = params.TrustedCheckpoint{
+ SectionIndex: sectionIndex,
+ SectionHead: sectionHead,
+ CHTRoot: chtRoot,
+ BloomRoot: light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead),
}
}
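A brief sketch of the renamed checkpoint type: it now lives in the params package (rather than light) and uses SectionIndex instead of SectionIdx. Field values below are illustrative only.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	cht := params.TrustedCheckpoint{
		SectionIndex: 187, // illustrative values, not a real checkpoint
		SectionHead:  common.HexToHash("0x01"),
		CHTRoot:      common.HexToHash("0x02"),
		BloomRoot:    common.HexToHash("0x03"),
	}
	fmt.Printf("checkpoint: section %d, head %s\n", cht.SectionIndex, cht.SectionHead.Hex())
}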
diff --git a/les/helper_test.go b/les/helper_test.go
index 206ee2d92..29496d6af 100644
--- a/les/helper_test.go
+++ b/les/helper_test.go
@@ -164,7 +164,7 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
if lightSync {
chain, _ = light.NewLightChain(odr, gspec.Config, engine)
} else {
- blockchain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
+ blockchain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
gchain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, blocks, generator)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
diff --git a/les/odr_requests.go b/les/odr_requests.go
index 9e9b2673f..77b1b6d0c 100644
--- a/les/odr_requests.go
+++ b/les/odr_requests.go
@@ -478,7 +478,7 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
}
type BloomReq struct {
- BloomTrieNum, BitIdx, SectionIdx, FromLevel uint64
+ BloomTrieNum, BitIdx, SectionIndex, FromLevel uint64
}
// ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface
@@ -487,7 +487,7 @@ type BloomRequest light.BloomRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *BloomRequest) GetCost(peer *peer) uint64 {
- return peer.GetRequestCost(GetHelperTrieProofsMsg, len(r.SectionIdxList))
+ return peer.GetRequestCost(GetHelperTrieProofsMsg, len(r.SectionIndexList))
}
// CanSend tells if a certain peer is suitable for serving the given request
@@ -503,13 +503,13 @@ func (r *BloomRequest) CanSend(peer *peer) bool {
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *BloomRequest) Request(reqID uint64, peer *peer) error {
- peer.Log().Debug("Requesting BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIdxList)
- reqs := make([]HelperTrieReq, len(r.SectionIdxList))
+ peer.Log().Debug("Requesting BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIndexList)
+ reqs := make([]HelperTrieReq, len(r.SectionIndexList))
var encNumber [10]byte
binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))
- for i, sectionIdx := range r.SectionIdxList {
+ for i, sectionIdx := range r.SectionIndexList {
binary.BigEndian.PutUint64(encNumber[2:], sectionIdx)
reqs[i] = HelperTrieReq{
Type: htBloomBits,
@@ -524,7 +524,7 @@ func (r *BloomRequest) Request(reqID uint64, peer *peer) error {
// returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest)
func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {
- log.Debug("Validating BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIdxList)
+ log.Debug("Validating BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIndexList)
// Ensure we have a correct message with a single proof element
if msg.MsgType != MsgHelperTrieProofs {
@@ -535,13 +535,13 @@ func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {
nodeSet := proofs.NodeSet()
reads := &readTraceDB{db: nodeSet}
- r.BloomBits = make([][]byte, len(r.SectionIdxList))
+ r.BloomBits = make([][]byte, len(r.SectionIndexList))
// Verify the proofs
var encNumber [10]byte
binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))
- for i, idx := range r.SectionIdxList {
+ for i, idx := range r.SectionIndexList {
binary.BigEndian.PutUint64(encNumber[2:], idx)
value, _, err := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads)
if err != nil {
diff --git a/les/retrieve.go b/les/retrieve.go
index 8ae36d82c..d77cfea74 100644
--- a/les/retrieve.go
+++ b/les/retrieve.go
@@ -217,6 +217,13 @@ func (r *sentReq) stateRequesting() reqStateFn {
go r.tryRequest()
r.lastReqQueued = true
return r.stateRequesting
+ case rpDeliveredInvalid:
+ // if it was the last sent request (set to nil by update) then start a new one
+ if !r.lastReqQueued && r.lastReqSentTo == nil {
+ go r.tryRequest()
+ r.lastReqQueued = true
+ }
+ return r.stateRequesting
case rpDeliveredValid:
r.stop(nil)
return r.stateStopped
@@ -242,7 +249,11 @@ func (r *sentReq) stateNoMorePeers() reqStateFn {
r.stop(nil)
return r.stateStopped
}
- return r.stateNoMorePeers
+ if r.waiting() {
+ return r.stateNoMorePeers
+ }
+ r.stop(light.ErrNoPeers)
+ return nil
case <-r.stopCh:
return r.stateStopped
}
diff --git a/light/lightchain.go b/light/lightchain.go
index d40a4ee6c..8e2734c2d 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -118,19 +118,19 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
}
// addTrustedCheckpoint adds a trusted checkpoint to the blockchain
-func (self *LightChain) addTrustedCheckpoint(cp TrustedCheckpoint) {
+func (self *LightChain) addTrustedCheckpoint(cp *params.TrustedCheckpoint) {
if self.odr.ChtIndexer() != nil {
- StoreChtRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.CHTRoot)
- self.odr.ChtIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead)
+ StoreChtRoot(self.chainDb, cp.SectionIndex, cp.SectionHead, cp.CHTRoot)
+ self.odr.ChtIndexer().AddCheckpoint(cp.SectionIndex, cp.SectionHead)
}
if self.odr.BloomTrieIndexer() != nil {
- StoreBloomTrieRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.BloomRoot)
- self.odr.BloomTrieIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead)
+ StoreBloomTrieRoot(self.chainDb, cp.SectionIndex, cp.SectionHead, cp.BloomRoot)
+ self.odr.BloomTrieIndexer().AddCheckpoint(cp.SectionIndex, cp.SectionHead)
}
if self.odr.BloomIndexer() != nil {
- self.odr.BloomIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead)
+ self.odr.BloomIndexer().AddCheckpoint(cp.SectionIndex, cp.SectionHead)
}
- log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.SectionIdx+1)*self.indexerConfig.ChtSize-1, "hash", cp.SectionHead)
+ log.Info("Added trusted checkpoint", "chain", cp.Name, "block", (cp.SectionIndex+1)*self.indexerConfig.ChtSize-1, "hash", cp.SectionHead)
}
func (self *LightChain) getProcInterrupt() bool {
@@ -157,7 +157,7 @@ func (self *LightChain) loadLastState() error {
// Issue a status log and return
header := self.hc.CurrentHeader()
headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
- log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd)
+ log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0)))
return nil
}
@@ -488,7 +488,7 @@ func (self *LightChain) SyncCht(ctx context.Context) bool {
// Ensure the chain didn't move past the latest block while retrieving it
if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() {
- log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash())
+ log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash(), "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0)))
self.hc.SetCurrentHeader(header)
}
return true
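Both log lines above gain an "age" field through common.PrettyAge, a helper added in common/format.go by this same change. A small standalone sketch, assuming (as the calls above suggest) that PrettyAge wraps a time.Time and renders a human-readable age via its String method:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Pretend the most recent local header was timestamped 90 minutes ago.
	headerTime := time.Now().Add(-90 * time.Minute)
	fmt.Println("header age:", common.PrettyAge(headerTime))
}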
diff --git a/light/odr.go b/light/odr.go
index 3cd8b2c04..900be0544 100644
--- a/light/odr.go
+++ b/light/odr.go
@@ -157,18 +157,18 @@ func (req *ChtRequest) StoreResult(db ethdb.Database) {
// BloomRequest is the ODR request type for retrieving bloom filters from a CHT structure
type BloomRequest struct {
OdrRequest
- Config *IndexerConfig
- BloomTrieNum uint64
- BitIdx uint
- SectionIdxList []uint64
- BloomTrieRoot common.Hash
- BloomBits [][]byte
- Proofs *NodeSet
+ Config *IndexerConfig
+ BloomTrieNum uint64
+ BitIdx uint
+ SectionIndexList []uint64
+ BloomTrieRoot common.Hash
+ BloomBits [][]byte
+ Proofs *NodeSet
}
// StoreResult stores the retrieved data in local database
func (req *BloomRequest) StoreResult(db ethdb.Database) {
- for i, sectionIdx := range req.SectionIdxList {
+ for i, sectionIdx := range req.SectionIndexList {
sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*req.Config.BloomTrieSize-1)
// if we don't have the canonical hash stored for this section head number, we'll still store it under
// a key with a zero sectionHead. GetBloomBits will look there too if we still don't have the canonical
diff --git a/light/odr_test.go b/light/odr_test.go
index eea5b1eab..3da7b3055 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -257,7 +257,7 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
- blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{})
+ blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), sdb, 4, testChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
t.Fatal(err)
diff --git a/light/odr_util.go b/light/odr_util.go
index 9bc0f604b..073f0d642 100644
--- a/light/odr_util.go
+++ b/light/odr_util.go
@@ -222,7 +222,7 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi
}
r := &BloomRequest{BloomTrieRoot: GetBloomTrieRoot(db, bloomTrieCount-1, sectionHead), BloomTrieNum: bloomTrieCount - 1,
- BitIdx: bitIdx, SectionIdxList: reqList, Config: odr.IndexerConfig()}
+ BitIdx: bitIdx, SectionIndexList: reqList, Config: odr.IndexerConfig()}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
} else {
diff --git a/light/postprocess.go b/light/postprocess.go
index 7b23e48b5..2f8cb73ab 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -104,38 +104,11 @@ var (
}
)
-// trustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with
-// the appropriate section index and head hash. It is used to start light syncing from this checkpoint
-// and avoid downloading the entire header chain while still being able to securely access old headers/logs.
-type TrustedCheckpoint struct {
- name string
- SectionIdx uint64
- SectionHead, CHTRoot, BloomRoot common.Hash
-}
-
// trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to
-var trustedCheckpoints = map[common.Hash]TrustedCheckpoint{
- params.MainnetGenesisHash: {
- name: "mainnet",
- SectionIdx: 187,
- SectionHead: common.HexToHash("e6baa034efa31562d71ff23676512dec6562c1ad0301e08843b907e81958c696"),
- CHTRoot: common.HexToHash("28001955219719cf06de1b08648969139d123a9835fc760547a1e4dabdabc15a"),
- BloomRoot: common.HexToHash("395ca2373fc662720ac6b58b3bbe71f68aa0f38b63b2d3553dd32ff3c51eebc4"),
- },
- params.TestnetGenesisHash: {
- name: "ropsten",
- SectionIdx: 117,
- SectionHead: common.HexToHash("9529b38631ae30783f56cbe4c3b9f07575b770ecba4f6e20a274b1e2f40fede1"),
- CHTRoot: common.HexToHash("6f48e9f101f1fac98e7d74fbbcc4fda138358271ffd974d40d2506f0308bb363"),
- BloomRoot: common.HexToHash("8242342e66e942c0cd893484e6736b9862ceb88b43ca344bb06a8285ac1b6d64"),
- },
- params.RinkebyGenesisHash: {
- name: "rinkeby",
- SectionIdx: 85,
- SectionHead: common.HexToHash("92cfa67afc4ad8ab0dcbc6fa49efd14b5b19402442e7317e6bc879d85f89d64d"),
- CHTRoot: common.HexToHash("2802ec92cd7a54a75bca96afdc666ae7b99e5d96cf8192dcfb09588812f51564"),
- BloomRoot: common.HexToHash("ebefeb31a9a42866d8cf2d2477704b4c3d7c20d0e4e9b5aaa77f396e016a1263"),
- },
+var trustedCheckpoints = map[common.Hash]*params.TrustedCheckpoint{
+ params.MainnetGenesisHash: params.MainnetTrustedCheckpoint,
+ params.TestnetGenesisHash: params.TestnetTrustedCheckpoint,
+ params.RinkebyGenesisHash: params.RinkebyTrustedCheckpoint,
}
var (
@@ -329,7 +302,7 @@ func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section
for i := 0; i < 20; i++ {
go func() {
for bitIndex := range indexCh {
- r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIdxList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
+ r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIndexList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
for {
if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
// if there are no peers to serve, retry later
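The hard-coded checkpoint table above now simply points at the values declared in params (added further down in this diff). As the addTrustedCheckpoint log line in light/lightchain.go shows, the highest header a checkpoint covers is (SectionIndex+1)*ChtSize-1; a quick worked example, assuming the client-side default CHT section size of 32768 headers:

package main

import "fmt"

func main() {
	const chtSize = 32768            // assumed default client CHT section size
	const sectionIndex = uint64(193) // mainnet checkpoint section from params/config.go below

	// Last header height covered by the checkpoint, mirroring the lightchain.go log line.
	fmt.Println("checkpointed head:", (sectionIndex+1)*chtSize-1) // 6356991
}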
diff --git a/light/trie_test.go b/light/trie_test.go
index 6bddfefe2..51ce9017a 100644
--- a/light/trie_test.go
+++ b/light/trie_test.go
@@ -40,7 +40,7 @@ func TestNodeIterator(t *testing.T) {
genesis = gspec.MustCommit(fulldb)
)
gspec.MustCommit(lightdb)
- blockchain, _ := core.NewBlockChain(fulldb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{})
+ blockchain, _ := core.NewBlockChain(fulldb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), fulldb, 4, testChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
diff --git a/light/txpool_test.go b/light/txpool_test.go
index 204347a6e..ce77573ef 100644
--- a/light/txpool_test.go
+++ b/light/txpool_test.go
@@ -88,7 +88,7 @@ func TestTxPool(t *testing.T) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
- blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{})
+ blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), sdb, poolTestBlocks, txPoolTestChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
diff --git a/miner/miner.go b/miner/miner.go
index 7f194db26..5218c1210 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -52,13 +52,13 @@ type Miner struct {
shouldStart int32 // should start indicates whether we should start after sync
}
-func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, recommit time.Duration, gasFloor, gasCeil uint64) *Miner {
+func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, recommit time.Duration, gasFloor, gasCeil uint64, isLocalBlock func(block *types.Block) bool) *Miner {
miner := &Miner{
eth: eth,
mux: mux,
engine: engine,
exitCh: make(chan struct{}),
- worker: newWorker(config, engine, eth, mux, recommit, gasFloor, gasCeil),
+ worker: newWorker(config, engine, eth, mux, recommit, gasFloor, gasCeil, isLocalBlock),
canStart: 1,
}
go miner.update()
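miner.New and newWorker (below) now take an isLocalBlock callback so the worker can tell locally mined side blocks from remote ones. A hedged sketch of the kind of predicate a caller might pass, here simply comparing a block's coinbase against a known local etherbase; how eth.Ethereum actually wires this callback up is outside this hunk:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// makeIsLocalBlock treats any block whose coinbase matches the given etherbase as
// locally mined. This is an illustrative policy only, not necessarily the one geth uses.
func makeIsLocalBlock(etherbase common.Address) func(*types.Block) bool {
	return func(block *types.Block) bool {
		return block.Coinbase() == etherbase
	}
}

func main() {
	etherbase := common.HexToAddress("0x0000000000000000000000000000000000000001")
	isLocal := makeIsLocalBlock(etherbase)

	block := types.NewBlockWithHeader(&types.Header{Coinbase: etherbase})
	fmt.Println("local block:", isLocal(block)) // true
}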
diff --git a/miner/worker.go b/miner/worker.go
index 3500ca4c2..8579c5c84 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -149,9 +149,10 @@ type worker struct {
resubmitIntervalCh chan time.Duration
resubmitAdjustCh chan *intervalAdjust
- current *environment // An environment for current running cycle.
- possibleUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
- unconfirmed *unconfirmedBlocks // A set of locally mined blocks pending canonicalness confirmations.
+ current *environment // An environment for current running cycle.
+ localUncles map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks.
+ remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks.
+ unconfirmed *unconfirmedBlocks // A set of locally mined blocks pending canonicalness confirmations.
mu sync.RWMutex // The lock used to protect the coinbase and extra fields
coinbase common.Address
@@ -168,6 +169,9 @@ type worker struct {
running int32 // The indicator whether the consensus engine is running or not.
newTxs int32 // New arrival transaction count since last sealing work submitting.
+ // External functions
+ isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by the local miner.
+
// Test hooks
newTaskHook func(*task) // Method to call upon receiving a new sealing task.
skipSealHook func(*task) bool // Method to decide whether skipping the sealing.
@@ -175,7 +179,7 @@ type worker struct {
resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}
-func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration, gasFloor, gasCeil uint64) *worker {
+func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration, gasFloor, gasCeil uint64, isLocalBlock func(*types.Block) bool) *worker {
worker := &worker{
config: config,
engine: engine,
@@ -184,7 +188,9 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend,
chain: eth.BlockChain(),
gasFloor: gasFloor,
gasCeil: gasCeil,
- possibleUncles: make(map[common.Hash]*types.Block),
+ isLocalBlock: isLocalBlock,
+ localUncles: make(map[common.Hash]*types.Block),
+ remoteUncles: make(map[common.Hash]*types.Block),
unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
pendingTasks: make(map[common.Hash]*task),
txsCh: make(chan core.NewTxsEvent, txChanSize),
@@ -405,11 +411,19 @@ func (w *worker) mainLoop() {
w.commitNewWork(req.interrupt, req.noempty, req.timestamp)
case ev := <-w.chainSideCh:
- if _, exist := w.possibleUncles[ev.Block.Hash()]; exist {
+ // Short circuit for duplicate side blocks
+ if _, exist := w.localUncles[ev.Block.Hash()]; exist {
+ continue
+ }
+ if _, exist := w.remoteUncles[ev.Block.Hash()]; exist {
continue
}
- // Add side block to possible uncle block set.
- w.possibleUncles[ev.Block.Hash()] = ev.Block
+ // Add side block to possible uncle block set depending on the author.
+ if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) {
+ w.localUncles[ev.Block.Hash()] = ev.Block
+ } else {
+ w.remoteUncles[ev.Block.Hash()] = ev.Block
+ }
// If our mining block contains less than 2 uncle blocks,
// add the new uncle block if valid and regenerate a mining block.
if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
@@ -421,7 +435,10 @@ func (w *worker) mainLoop() {
if !ok {
return false
}
- uncle, exist := w.possibleUncles[hash]
+ uncle, exist := w.localUncles[hash]
+ if !exist {
+ uncle, exist = w.remoteUncles[hash]
+ }
if !exist {
return false
}
@@ -651,7 +668,10 @@ func (w *worker) updateSnapshot() {
if !ok {
return false
}
- uncle, exist := w.possibleUncles[hash]
+ uncle, exist := w.localUncles[hash]
+ if !exist {
+ uncle, exist = w.remoteUncles[hash]
+ }
if !exist {
return false
}
@@ -859,23 +879,29 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
misc.ApplyDAOHardFork(env.state)
}
// Accumulate the uncles for the current block
- for hash, uncle := range w.possibleUncles {
- if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
- delete(w.possibleUncles, hash)
- }
- }
uncles := make([]*types.Header, 0, 2)
- for hash, uncle := range w.possibleUncles {
- if len(uncles) == 2 {
- break
+ commitUncles := func(blocks map[common.Hash]*types.Block) {
+ // Clean up stale uncle blocks first
+ for hash, uncle := range blocks {
+ if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
+ delete(blocks, hash)
+ }
}
- if err := w.commitUncle(env, uncle.Header()); err != nil {
- log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
- } else {
- log.Debug("Committing new uncle to block", "hash", hash)
- uncles = append(uncles, uncle.Header())
+ for hash, uncle := range blocks {
+ if len(uncles) == 2 {
+ break
+ }
+ if err := w.commitUncle(env, uncle.Header()); err != nil {
+ log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
+ } else {
+ log.Debug("Committing new uncle to block", "hash", hash)
+ uncles = append(uncles, uncle.Header())
+ }
}
}
+ // Prefer locally generated uncles
+ commitUncles(w.localUncles)
+ commitUncles(w.remoteUncles)
if !noempty {
// Create an empty block based on temporary copied state for sealing in advance without waiting block
diff --git a/miner/worker_test.go b/miner/worker_test.go
index ad10d48ef..db0ff4340 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -96,7 +96,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
}
genesis := gspec.MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
+ chain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil)
txpool := core.NewTxPool(testTxPoolConfig, chainConfig, chain)
// Generate a small n-block chain and an uncle block for it
@@ -133,7 +133,7 @@ func (b *testWorkerBackend) PostChainEvents(events []interface{}) {
func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, blocks int) (*worker, *testWorkerBackend) {
backend := newTestWorkerBackend(t, chainConfig, engine, blocks)
backend.txPool.AddLocals(pendingTxs)
- w := newWorker(chainConfig, engine, backend, new(event.TypeMux), time.Second, params.GenesisGasLimit, params.GenesisGasLimit)
+ w := newWorker(chainConfig, engine, backend, new(event.TypeMux), time.Second, params.GenesisGasLimit, params.GenesisGasLimit, nil)
w.setEtherbase(testBankAddress)
return w, backend
}
diff --git a/params/config.go b/params/config.go
index 1c3e25eb7..a9e631cde 100644
--- a/params/config.go
+++ b/params/config.go
@@ -46,6 +46,15 @@ var (
Ethash: new(EthashConfig),
}
+ // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
+ MainnetTrustedCheckpoint = &TrustedCheckpoint{
+ Name: "mainnet",
+ SectionIndex: 193,
+ SectionHead: common.HexToHash("0xc2d574295ecedc4d58530ae24c31a5a98be7d2b3327fba0dd0f4ed3913828a55"),
+ CHTRoot: common.HexToHash("0x5d1027dfae688c77376e842679ceada87fd94738feb9b32ef165473bfbbb317b"),
+ BloomRoot: common.HexToHash("0xd38be1a06aabd568e10957fee4fcc523bc64996bcf31bae3f55f86e0a583919f"),
+ }
+
// TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network.
TestnetChainConfig = &ChainConfig{
ChainID: big.NewInt(3),
@@ -61,6 +70,15 @@ var (
Ethash: new(EthashConfig),
}
+ // TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
+ TestnetTrustedCheckpoint = &TrustedCheckpoint{
+ Name: "testnet",
+ SectionIndex: 123,
+ SectionHead: common.HexToHash("0xa372a53decb68ce453da12bea1c8ee7b568b276aa2aab94d9060aa7c81fc3dee"),
+ CHTRoot: common.HexToHash("0x6b02e7fada79cd2a80d4b3623df9c44384d6647fc127462e1c188ccd09ece87b"),
+ BloomRoot: common.HexToHash("0xf2d27490914968279d6377d42868928632573e823b5d1d4a944cba6009e16259"),
+ }
+
// RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network.
RinkebyChainConfig = &ChainConfig{
ChainID: big.NewInt(4),
@@ -79,24 +97,45 @@ var (
},
}
+ // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
+ RinkebyTrustedCheckpoint = &TrustedCheckpoint{
+ Name: "rinkeby",
+ SectionIndex: 91,
+ SectionHead: common.HexToHash("0x435b7b2d8a7922f3b9a522f2fb02730e95e0e1782f0f5443894d5415bba37154"),
+ CHTRoot: common.HexToHash("0x0664bf7ecccfb6775c4eca6f0f264fb5282a22754a2135a1ac4bff2ef02898dd"),
+ BloomRoot: common.HexToHash("0x2a64df2400c3a2cb6400639bb6ed29389abdb4d93e2e525aa7c21f38767cd96f"),
+ }
+
// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Ethash consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
+ AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
+ AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
- TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
+ TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
TestRules = TestChainConfig.Rules(new(big.Int))
)
+// TrustedCheckpoint represents a set of post-processed trie roots (CHT and
+// BloomTrie) associated with the appropriate section index and head hash. It is
+// used to start light syncing from this checkpoint and avoid downloading the
+// entire header chain while still being able to securely access old headers/logs.
+type TrustedCheckpoint struct {
+ Name string `json:"-"`
+ SectionIndex uint64 `json:"sectionIndex"`
+ SectionHead common.Hash `json:"sectionHead"`
+ CHTRoot common.Hash `json:"chtRoot"`
+ BloomRoot common.Hash `json:"bloomRoot"`
+}
+
// ChainConfig is the core config which determines the blockchain settings.
//
// ChainConfig is stored in the database on a per block basis. This means
@@ -119,6 +158,7 @@ type ChainConfig struct {
ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium)
ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated)
+ EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated)
// Various consensus engines
Ethash *EthashConfig `json:"ethash,omitempty"`
@@ -204,6 +244,11 @@ func (c *ChainConfig) IsConstantinople(num *big.Int) bool {
return isForked(c.ConstantinopleBlock, num)
}
+// IsEWASM returns whether num represents a block number after the EWASM fork
+func (c *ChainConfig) IsEWASM(num *big.Int) bool {
+ return isForked(c.EWASMBlock, num)
+}
+
// GasTable returns the gas table corresponding to the current phase (homestead or homestead reprice).
//
// The returned GasTable's fields shouldn't, under any circumstances, be changed.
@@ -269,6 +314,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi
if isForkIncompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, head) {
return newCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock)
}
+ if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) {
+ return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock)
+ }
return nil
}
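The new EWASMBlock field and IsEWASM helper follow the same pattern as the existing fork switches, including the compatibility check above. A minimal sketch of scheduling and querying the fork on a hypothetical private-network config; the numbers are made up for illustration:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Activate ewasm at block 100 on an otherwise bare config.
	config := &params.ChainConfig{
		ChainID:    big.NewInt(1337),
		EWASMBlock: big.NewInt(100),
	}
	for _, n := range []int64{99, 100, 101} {
		fmt.Printf("block %d ewasm active: %v\n", n, config.IsEWASM(big.NewInt(n)))
	}
}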
diff --git a/rpc/websocket.go b/rpc/websocket.go
index e7a86ddae..eae8320e5 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -20,6 +20,7 @@ import (
"bytes"
"context"
"crypto/tls"
+ "encoding/base64"
"encoding/json"
"fmt"
"net"
@@ -118,12 +119,7 @@ func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http
return f
}
-// DialWebsocket creates a new RPC client that communicates with a JSON-RPC server
-// that is listening on the given endpoint.
-//
-// The context is used for the initial connection establishment. It does not
-// affect subsequent interactions with the client.
-func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error) {
+func wsGetConfig(endpoint, origin string) (*websocket.Config, error) {
if origin == "" {
var err error
if origin, err = os.Hostname(); err != nil {
@@ -140,6 +136,25 @@ func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error
return nil, err
}
+ if config.Location.User != nil {
+ b64auth := base64.StdEncoding.EncodeToString([]byte(config.Location.User.String()))
+ config.Header.Add("Authorization", "Basic "+b64auth)
+ config.Location.User = nil
+ }
+ return config, nil
+}
+
+// DialWebsocket creates a new RPC client that communicates with a JSON-RPC server
+// that is listening on the given endpoint.
+//
+// The context is used for the initial connection establishment. It does not
+// affect subsequent interactions with the client.
+func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error) {
+ config, err := wsGetConfig(endpoint, origin)
+ if err != nil {
+ return nil, err
+ }
+
return newClient(ctx, func(ctx context.Context) (net.Conn, error) {
return wsDialContext(ctx, config)
})
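With wsGetConfig factored out of DialWebsocket, credentials embedded in the endpoint URL are converted into an HTTP Basic Authorization header and stripped from the URL before dialing; the new tests below exercise exactly this. A usage sketch with placeholder credentials and endpoint (it assumes a websocket-enabled node is actually listening there):

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// The user:secret pair is sent as "Authorization: Basic ..." instead of being
	// left inside the websocket URL.
	client, err := rpc.DialWebsocket(context.Background(), "ws://user:secret@127.0.0.1:8546", "")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer client.Close()

	var version string
	if err := client.Call(&version, "web3_clientVersion"); err != nil {
		fmt.Println("call failed:", err)
		return
	}
	fmt.Println("client version:", version)
}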
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
new file mode 100644
index 000000000..5bf3780d6
--- /dev/null
+++ b/rpc/websocket_test.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rpc
+
+import "testing"
+
+func TestWSGetConfigNoAuth(t *testing.T) {
+ config, err := wsGetConfig("ws://example.com:1234", "")
+ if err != nil {
+ t.Logf("wsGetConfig failed: %s", err)
+ t.Fail()
+ return
+ }
+ if config.Location.User != nil {
+ t.Log("User should have been stripped from the URL")
+ t.Fail()
+ }
+ if config.Location.Hostname() != "example.com" ||
+ config.Location.Port() != "1234" || config.Location.Scheme != "ws" {
+ t.Logf("Unexpected URL: %s", config.Location)
+ t.Fail()
+ }
+}
+
+func TestWSGetConfigWithBasicAuth(t *testing.T) {
+ config, err := wsGetConfig("wss://testuser:test-PASS_01@example.com:1234", "")
+ if err != nil {
+ t.Logf("wsGetConfig failed: %s", err)
+ t.Fail()
+ return
+ }
+ if config.Location.User != nil {
+ t.Log("User should have been stripped from the URL")
+ t.Fail()
+ }
+ if config.Header.Get("Authorization") != "Basic dGVzdHVzZXI6dGVzdC1QQVNTXzAx" {
+ t.Log("Basic auth header is incorrect")
+ t.Fail()
+ }
+}
diff --git a/signer/core/abihelper_test.go b/signer/core/abihelper_test.go
index 8bb577669..2afeec73e 100644
--- a/signer/core/abihelper_test.go
+++ b/signer/core/abihelper_test.go
@@ -100,16 +100,6 @@ func TestNewUnpacker(t *testing.T) {
}
-/*
-func TestReflect(t *testing.T) {
- a := big.NewInt(0)
- b := new(big.Int).SetBytes([]byte{0x00})
- if !reflect.DeepEqual(a, b) {
- t.Fatalf("Nope, %v != %v", a, b)
- }
-}
-*/
-
func TestCalldataDecoding(t *testing.T) {
// send(uint256) : a52c101e
@@ -123,7 +113,7 @@ func TestCalldataDecoding(t *testing.T) {
{"type":"function","name":"sam","inputs":[{"name":"a","type":"bytes"},{"name":"a","type":"bool"},{"name":"a","type":"uint256[]"}]}
]`
//Expected failures
- for _, hexdata := range []string{
+ for i, hexdata := range []string{
"a52c101e00000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000042",
"a52c101e000000000000000000000000000000000000000000000000000000000000001200",
"a52c101e00000000000000000000000000000000000000000000000000000000000000",
@@ -145,12 +135,11 @@ func TestCalldataDecoding(t *testing.T) {
} {
_, err := parseCallData(common.Hex2Bytes(hexdata), jsondata)
if err == nil {
- t.Errorf("Expected decoding to fail: %s", hexdata)
+ t.Errorf("test %d: expected decoding to fail: %s", i, hexdata)
}
}
-
//Expected success
- for _, hexdata := range []string{
+ for i, hexdata := range []string{
// From https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI
"a5643bf20000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000464617665000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003",
"a52c101e0000000000000000000000000000000000000000000000000000000000000012",
@@ -169,7 +158,7 @@ func TestCalldataDecoding(t *testing.T) {
} {
_, err := parseCallData(common.Hex2Bytes(hexdata), jsondata)
if err != nil {
- t.Errorf("Unexpected failure on input %s:\n %v (%d bytes) ", hexdata, err, len(common.Hex2Bytes(hexdata)))
+ t.Errorf("test %d: unexpected failure on input %s:\n %v (%d bytes) ", i, hexdata, err, len(common.Hex2Bytes(hexdata)))
}
}
}
@@ -245,3 +234,18 @@ func TestCustomABI(t *testing.T) {
t.Fatalf("Save failed: should find a match for abi signature after loading from disk")
}
}
+
+func TestMaliciousAbiStrings(t *testing.T) {
+ tests := []string{
+ "func(uint256,uint256,[]uint256)",
+ "func(uint256,uint256,uint256,)",
+ "func(,uint256,uint256,uint256)",
+ }
+ data := common.Hex2Bytes("4401a6e40000000000000000000000000000000000000000000000000000000000000012")
+ for i, tt := range tests {
+ _, err := testSelector(tt, data)
+ if err == nil {
+ t.Errorf("test %d: expected error for selector '%v'", i, tt)
+ }
+ }
+}
diff --git a/swarm/network/fetcher.go b/swarm/network/fetcher.go
index 35e2f0132..413b40cb5 100644
--- a/swarm/network/fetcher.go
+++ b/swarm/network/fetcher.go
@@ -215,7 +215,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
var err error
sources, err = f.doRequest(ctx, gone, peers, sources)
if err != nil {
- log.Warn("unable to request", "request addr", f.addr, "err", err)
+ log.Info("unable to request", "request addr", f.addr, "err", err)
}
}
diff --git a/swarm/network/simulation/simulation.go b/swarm/network/simulation/simulation.go
index 096f7322c..2c7a18b09 100644
--- a/swarm/network/simulation/simulation.go
+++ b/swarm/network/simulation/simulation.go
@@ -112,7 +112,7 @@ type Result struct {
}
// Run calls the RunFunc function while taking care of
-// cancelation provided through the Context.
+// cancellation provided through the Context.
func (s *Simulation) Run(ctx context.Context, f RunFunc) (r Result) {
//if the option is set to run a HTTP server with the simulation,
//init the server and start it
diff --git a/swarm/network/stream/peer.go b/swarm/network/stream/peer.go
index 1466a7a9c..5fdaa7b87 100644
--- a/swarm/network/stream/peer.go
+++ b/swarm/network/stream/peer.go
@@ -165,7 +165,7 @@ func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error {
if err != nil {
return err
}
- // true only when quiting
+ // true only when quitting
if len(hashes) == 0 {
return nil
}
diff --git a/swarm/pot/doc.go b/swarm/pot/doc.go
index 4c0a03065..cb6faea57 100644
--- a/swarm/pot/doc.go
+++ b/swarm/pot/doc.go
@@ -27,11 +27,11 @@ OR distance over finite set of integers).
Methods take a comparison operator (pof, proximity order function) to compare two
value types. The default pof assumes Val to be or project to a byte slice using
-the reverse rank on the MSB first XOR logarithmic disctance.
+the reverse rank on the MSB first XOR logarithmic distance.
If the address space if limited, equality is defined as the maximum proximity order.
-The container offers applicative (funcional) style methods on PO trees:
+The container offers applicative (functional) style methods on PO trees:
* adding/removing en element
* swap (value based add/remove)
* merging two PO trees (union)
diff --git a/swarm/storage/netstore.go b/swarm/storage/netstore.go
index de2d82d2b..80ac6f198 100644
--- a/swarm/storage/netstore.go
+++ b/swarm/storage/netstore.go
@@ -52,6 +52,8 @@ type NetStore struct {
closeC chan struct{}
}
+var fetcherTimeout = 2 * time.Minute // timeout to cancel the fetcher even if requests are coming in
+
// NewNetStore creates a new NetStore object using the given local store. newFetchFunc is a
// constructor function that can create a fetch function for a specific chunk address.
func NewNetStore(store SyncChunkStore, nnf NewNetFetcherFunc) (*NetStore, error) {
@@ -168,7 +170,7 @@ func (n *NetStore) getOrCreateFetcher(ref Address) *fetcher {
// no fetcher for the given address, we have to create a new one
key := hex.EncodeToString(ref)
// create the context during which fetching is kept alive
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithTimeout(context.Background(), fetcherTimeout)
// destroy is called when all requests finish
destroy := func() {
// remove fetcher from fetchers
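The fetcher's context is now bounded by fetcherTimeout instead of living until it is explicitly cancelled, so a fetcher whose requests never complete is torn down after two minutes. A stripped-down sketch of the same pattern, independent of the NetStore types (the 50ms timeout is only to keep the example fast):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel() // also releases the context early if the work finishes first

	select {
	case <-time.After(time.Second): // simulated fetch that takes too long
		fmt.Println("fetched")
	case <-ctx.Done():
		fmt.Println("fetcher cancelled:", ctx.Err()) // context.DeadlineExceeded
	}
}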
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index 2db47da57..427a94958 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -111,7 +111,7 @@ func (t *BlockTest) Run() error {
return fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6])
}
- chain, err := core.NewBlockChain(db, nil, config, ethash.NewShared(), vm.Config{})
+ chain, err := core.NewBlockChain(db, nil, config, ethash.NewShared(), vm.Config{}, nil)
if err != nil {
return err
}
diff --git a/whisper/mailserver/mailserver.go b/whisper/mailserver/mailserver.go
index d32eaddec..af9418d9f 100644
--- a/whisper/mailserver/mailserver.go
+++ b/whisper/mailserver/mailserver.go
@@ -118,7 +118,7 @@ func (s *WMailServer) processRequest(peer *whisper.Peer, lower, upper uint32, bl
var err error
var zero common.Hash
kl := NewDbKey(lower, zero)
- ku := NewDbKey(upper, zero)
+ ku := NewDbKey(upper+1, zero) // LevelDB is exclusive, while the Whisper API is inclusive
i := s.db.NewIterator(&util.Range{Start: kl.raw, Limit: ku.raw}, nil)
defer i.Release()
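The mail server fix above relies on goleveldb treating util.Range.Limit as exclusive, which is why an inclusive Whisper upper bound has to be bumped by one. A self-contained demonstration of that semantics against an in-memory LevelDB; the keys and values are placeholders:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	for _, key := range []string{"k1", "k2", "k3"} {
		db.Put([]byte(key), []byte("v"), nil)
	}

	// Limit is exclusive: iterating [k1, k3) yields k1 and k2 but never k3.
	it := db.NewIterator(&util.Range{Start: []byte("k1"), Limit: []byte("k3")}, nil)
	for it.Next() {
		fmt.Println(string(it.Key()))
	}
	it.Release()
}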