-rw-r--r--  .travis.yml                    24
-rw-r--r--  Dockerfile                      5
-rw-r--r--  appveyor.yml                    4
-rw-r--r--  cmd/geth/main.go                7
-rw-r--r--  cmd/geth/usage.go              12
-rw-r--r--  cmd/puppeth/module_node.go      2
-rw-r--r--  cmd/puppeth/wizard_node.go      3
-rw-r--r--  cmd/utils/flags.go            120
-rw-r--r--  consensus/clique/clique.go     18
-rw-r--r--  consensus/clique/snapshot.go   39
-rw-r--r--  core/blockchain.go            475
-rw-r--r--  core/blocks.go                  2
-rw-r--r--  core/chain_makers.go            4
-rw-r--r--  core/database_util.go           6
-rw-r--r--  core/events.go                  2
-rw-r--r--  core/fees.go                    2
-rw-r--r--  core/genesis.go                 2
-rw-r--r--  core/headerchain.go            17
-rw-r--r--  core/helper_test.go            14
-rw-r--r--  core/state_transition.go      103
-rw-r--r--  core/tx_list.go                44
-rw-r--r--  core/tx_list_test.go            2
-rw-r--r--  core/tx_pool.go               135
-rw-r--r--  core/tx_pool_test.go          139
-rw-r--r--  core/vm/interpreter.go          2
-rw-r--r--  eth/api.go                     10
-rw-r--r--  eth/backend.go                 26
-rw-r--r--  eth/config.go                   4
-rw-r--r--  eth/gen_config.go               6
-rw-r--r--  eth/handler.go                  5
-rw-r--r--  eth/sync.go                     1
-rw-r--r--  ethstats/ethstats.go           22
-rw-r--r--  internal/ethapi/api.go         16

33 files changed, 740 insertions, 533 deletions
diff --git a/.travis.yml b/.travis.yml
index e3cbf4630..96e429959 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -6,7 +6,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
- go: 1.7.5
+ go: 1.7.6
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
@@ -19,7 +19,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
- go: 1.8.1
+ go: 1.8.3
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
@@ -29,7 +29,7 @@ matrix:
- go run build/ci.go test -coverage -misspell
- os: osx
- go: 1.8.1
+ go: 1.8.3
sudo: required
script:
- brew update
@@ -42,7 +42,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
- go: 1.8.1
+ go: 1.8.3
env:
- ubuntu-ppa
- azure-linux
@@ -80,7 +80,7 @@ matrix:
sudo: required
services:
- docker
- go: 1.8.1
+ go: 1.8.3
env:
- azure-linux-mips
script:
@@ -120,16 +120,16 @@ matrix:
- azure-android
- maven-android
before_install:
- - curl https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | tar -xz
+ - curl https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
script:
# Build the Android archive and upload it to Maven Central and Azure
- - curl https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip -o android-ndk-r13b.zip
- - unzip -q android-ndk-r13b.zip && rm android-ndk-r13b.zip
- - mv android-ndk-r13b $HOME
- - export ANDROID_NDK=$HOME/android-ndk-r13b
+ - curl https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip -o android-ndk-r14b.zip
+ - unzip -q android-ndk-r14b.zip && rm android-ndk-r14b.zip
+ - mv android-ndk-r14b $HOME
+ - export ANDROID_NDK=$HOME/android-ndk-r14b
- mkdir -p $GOPATH/src/github.com/ethereum
- ln -s `pwd` $GOPATH/src/github.com/ethereum
@@ -137,7 +137,7 @@ matrix:
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
- go: 1.8.1
+ go: 1.8.3
env:
- azure-osx
- azure-ios
@@ -163,7 +163,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
- go: 1.8.1
+ go: 1.8.3
env:
- azure-purge
script:
diff --git a/Dockerfile b/Dockerfile
index ae6870e31..6bf13dc31 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,11 +4,12 @@ ADD . /go-ethereum
RUN \
apk add --update git go make gcc musl-dev linux-headers && \
(cd go-ethereum && make geth) && \
- cp go-ethereum/build/bin/geth /geth && \
+ cp go-ethereum/build/bin/geth /usr/local/bin/ && \
apk del git go make gcc musl-dev linux-headers && \
rm -rf /go-ethereum && rm -rf /var/cache/apk/*
EXPOSE 8545
EXPOSE 30303
+EXPOSE 30303/udp
-ENTRYPOINT ["/geth"]
+ENTRYPOINT ["geth"]
diff --git a/appveyor.yml b/appveyor.yml
index aecf48d47..5bdcbc486 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -22,8 +22,8 @@ environment:
install:
- rmdir C:\go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.1.windows-%GETH_ARCH%.zip
- - 7z x go1.8.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+ - appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.3.windows-%GETH_ARCH%.zip
+ - 7z x go1.8.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 56652f3bd..cc481796f 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -66,6 +66,13 @@ var (
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
+ utils.TxPoolPriceLimitFlag,
+ utils.TxPoolPriceBumpFlag,
+ utils.TxPoolAccountSlotsFlag,
+ utils.TxPoolGlobalSlotsFlag,
+ utils.TxPoolAccountQueueFlag,
+ utils.TxPoolGlobalQueueFlag,
+ utils.TxPoolLifetimeFlag,
utils.FastSyncFlag,
utils.LightModeFlag,
utils.SyncModeFlag,
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 403e93a9d..2ba504fdd 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -93,6 +93,18 @@ var AppHelpFlagGroups = []flagGroup{
},
},
{
+ Name: "TRANSACTION POOL",
+ Flags: []cli.Flag{
+ utils.TxPoolPriceLimitFlag,
+ utils.TxPoolPriceBumpFlag,
+ utils.TxPoolAccountSlotsFlag,
+ utils.TxPoolGlobalSlotsFlag,
+ utils.TxPoolAccountQueueFlag,
+ utils.TxPoolGlobalQueueFlag,
+ utils.TxPoolLifetimeFlag,
+ },
+ },
+ {
Name: "PERFORMANCE TUNING",
Flags: []cli.Flag{
utils.CacheFlag,
diff --git a/cmd/puppeth/module_node.go b/cmd/puppeth/module_node.go
index 6372f60d2..ce1d34135 100644
--- a/cmd/puppeth/module_node.go
+++ b/cmd/puppeth/module_node.go
@@ -135,7 +135,7 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
}
defer client.Run("rm -rf " + workdir)
- // Build and deploy the bootnode service
+ // Build and deploy the boot or seal node service
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
}
diff --git a/cmd/puppeth/wizard_node.go b/cmd/puppeth/wizard_node.go
index 483d9fe05..05232486b 100644
--- a/cmd/puppeth/wizard_node.go
+++ b/cmd/puppeth/wizard_node.go
@@ -109,8 +109,7 @@ func (w *wizard) deployNode(boot bool) {
} else if w.conf.genesis.Config.Clique != nil {
// If a previous signer was already set, offer to reuse it
if infos.keyJSON != "" {
- var key keystore.Key
- if err := json.Unmarshal([]byte(infos.keyJSON), &key); err != nil {
+ if key, err := keystore.DecryptKey([]byte(infos.keyJSON), infos.keyPass); err != nil {
infos.keyJSON, infos.keyPass = "", ""
} else {
fmt.Println()
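
Note on the wizard change above: json.Unmarshal only parses the encrypted keystore envelope and never checks the passphrase, while keystore.DecryptKey actually decrypts the private key and therefore fails early on a wrong or missing password. A minimal, self-contained sketch of that API outside the wizard (file path and passphrase are hypothetical, not taken from the patch):

    package main

    import (
    	"fmt"
    	"io/ioutil"

    	"github.com/ethereum/go-ethereum/accounts/keystore"
    )

    func main() {
    	// Hypothetical key file and passphrase, for illustration only.
    	blob, err := ioutil.ReadFile("signer.json")
    	if err != nil {
    		panic(err)
    	}
    	// DecryptKey fails if the passphrase is wrong, which is exactly the
    	// signal the wizard uses to drop stale credentials.
    	key, err := keystore.DecryptKey(blob, "passphrase")
    	if err != nil {
    		fmt.Println("cannot decrypt signer key:", err)
    		return
    	}
    	fmt.Printf("recovered signer address: %x\n", key.Address)
    }
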
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 863761e8c..3c97cd3bb 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -123,35 +123,6 @@ var (
Name: "nousb",
Usage: "Disables monitoring for and managine USB hardware wallets",
}
- EthashCacheDirFlag = DirectoryFlag{
- Name: "ethash.cachedir",
- Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
- }
- EthashCachesInMemoryFlag = cli.IntFlag{
- Name: "ethash.cachesinmem",
- Usage: "Number of recent ethash caches to keep in memory (16MB each)",
- Value: eth.DefaultConfig.EthashCachesInMem,
- }
- EthashCachesOnDiskFlag = cli.IntFlag{
- Name: "ethash.cachesondisk",
- Usage: "Number of recent ethash caches to keep on disk (16MB each)",
- Value: eth.DefaultConfig.EthashCachesOnDisk,
- }
- EthashDatasetDirFlag = DirectoryFlag{
- Name: "ethash.dagdir",
- Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
- Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
- }
- EthashDatasetsInMemoryFlag = cli.IntFlag{
- Name: "ethash.dagsinmem",
- Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
- Value: eth.DefaultConfig.EthashDatasetsInMem,
- }
- EthashDatasetsOnDiskFlag = cli.IntFlag{
- Name: "ethash.dagsondisk",
- Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
- Value: eth.DefaultConfig.EthashDatasetsOnDisk,
- }
NetworkIdFlag = cli.Uint64Flag{
Name: "networkid",
Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten, 4=Rinkeby)",
@@ -207,6 +178,72 @@ var (
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
}
+ // Ethash settings
+ EthashCacheDirFlag = DirectoryFlag{
+ Name: "ethash.cachedir",
+ Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
+ }
+ EthashCachesInMemoryFlag = cli.IntFlag{
+ Name: "ethash.cachesinmem",
+ Usage: "Number of recent ethash caches to keep in memory (16MB each)",
+ Value: eth.DefaultConfig.EthashCachesInMem,
+ }
+ EthashCachesOnDiskFlag = cli.IntFlag{
+ Name: "ethash.cachesondisk",
+ Usage: "Number of recent ethash caches to keep on disk (16MB each)",
+ Value: eth.DefaultConfig.EthashCachesOnDisk,
+ }
+ EthashDatasetDirFlag = DirectoryFlag{
+ Name: "ethash.dagdir",
+ Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
+ Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
+ }
+ EthashDatasetsInMemoryFlag = cli.IntFlag{
+ Name: "ethash.dagsinmem",
+ Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
+ Value: eth.DefaultConfig.EthashDatasetsInMem,
+ }
+ EthashDatasetsOnDiskFlag = cli.IntFlag{
+ Name: "ethash.dagsondisk",
+ Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
+ Value: eth.DefaultConfig.EthashDatasetsOnDisk,
+ }
+ // Transaction pool settings
+ TxPoolPriceLimitFlag = cli.Uint64Flag{
+ Name: "txpool.pricelimit",
+ Usage: "Minimum gas price limit to enforce for acceptance into the pool",
+ Value: eth.DefaultConfig.TxPool.PriceLimit,
+ }
+ TxPoolPriceBumpFlag = cli.Uint64Flag{
+ Name: "txpool.pricebump",
+ Usage: "Price bump percentage to replace an already existing transaction",
+ Value: eth.DefaultConfig.TxPool.PriceBump,
+ }
+ TxPoolAccountSlotsFlag = cli.Uint64Flag{
+ Name: "txpool.accountslots",
+ Usage: "Minimum number of executable transaction slots guaranteed per account",
+ Value: eth.DefaultConfig.TxPool.AccountSlots,
+ }
+ TxPoolGlobalSlotsFlag = cli.Uint64Flag{
+ Name: "txpool.globalslots",
+ Usage: "Maximum number of executable transaction slots for all accounts",
+ Value: eth.DefaultConfig.TxPool.GlobalSlots,
+ }
+ TxPoolAccountQueueFlag = cli.Uint64Flag{
+ Name: "txpool.accountqueue",
+ Usage: "Maximum number of non-executable transaction slots permitted per account",
+ Value: eth.DefaultConfig.TxPool.AccountQueue,
+ }
+ TxPoolGlobalQueueFlag = cli.Uint64Flag{
+ Name: "txpool.globalqueue",
+ Usage: "Maximum number of non-executable transaction slots for all accounts",
+ Value: eth.DefaultConfig.TxPool.GlobalQueue,
+ }
+ TxPoolLifetimeFlag = cli.DurationFlag{
+ Name: "txpool.lifetime",
+ Usage: "Maximum amount of time non-executable transaction are queued",
+ Value: eth.DefaultConfig.TxPool.Lifetime,
+ }
// Performance tuning settings
CacheFlag = cli.IntFlag{
Name: "cache",
@@ -784,6 +821,30 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
}
}
+func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
+ if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) {
+ cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name)
+ }
+ if ctx.GlobalIsSet(TxPoolPriceBumpFlag.Name) {
+ cfg.PriceBump = ctx.GlobalUint64(TxPoolPriceBumpFlag.Name)
+ }
+ if ctx.GlobalIsSet(TxPoolAccountSlotsFlag.Name) {
+ cfg.AccountSlots = ctx.GlobalUint64(TxPoolAccountSlotsFlag.Name)
+ }
+ if ctx.GlobalIsSet(TxPoolGlobalSlotsFlag.Name) {
+ cfg.GlobalSlots = ctx.GlobalUint64(TxPoolGlobalSlotsFlag.Name)
+ }
+ if ctx.GlobalIsSet(TxPoolAccountQueueFlag.Name) {
+ cfg.AccountQueue = ctx.GlobalUint64(TxPoolAccountQueueFlag.Name)
+ }
+ if ctx.GlobalIsSet(TxPoolGlobalQueueFlag.Name) {
+ cfg.GlobalQueue = ctx.GlobalUint64(TxPoolGlobalQueueFlag.Name)
+ }
+ if ctx.GlobalIsSet(TxPoolLifetimeFlag.Name) {
+ cfg.Lifetime = ctx.GlobalDuration(TxPoolLifetimeFlag.Name)
+ }
+}
+
func setEthash(ctx *cli.Context, cfg *eth.Config) {
if ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
@@ -826,6 +887,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
setEtherbase(ctx, ks, cfg)
setGPO(ctx, &cfg.GPO)
+ setTxPool(ctx, &cfg.TxPool)
setEthash(ctx, cfg)
switch {
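
The new --txpool.* flags simply override fields of the pool configuration when explicitly set; everything else keeps the defaults baked into eth.DefaultConfig.TxPool. A hedged sketch of that override pattern, assuming the field names shown in the hunk above (PriceLimit, GlobalSlots, Lifetime, ...); the values are arbitrary examples, not the shipped defaults:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/ethereum/go-ethereum/eth"
    )

    func main() {
    	// Start from the built-in defaults and override only what the user set,
    	// mirroring what setTxPool does with ctx.GlobalIsSet / ctx.GlobalUint64.
    	cfg := eth.DefaultConfig.TxPool
    	cfg.PriceLimit = 2           // corresponds to --txpool.pricelimit
    	cfg.GlobalSlots = 8192       // corresponds to --txpool.globalslots
    	cfg.Lifetime = 3 * time.Hour // corresponds to --txpool.lifetime

    	fmt.Printf("pool config: %+v\n", cfg)
    }
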
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 675333bcc..87a983377 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -44,7 +44,7 @@ import (
const (
checkpointInterval = 1024 // Number of blocks after which to save the vote snapshot to the database
inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory
- inmemorySignatures = 1024 // Number of recent blocks to keep in memory
+ inmemorySignatures = 4096 // Number of recent block signatures to keep in memory
wiggleTime = 500 * time.Millisecond // Random delay (per signer) to allow concurrent signers
)
@@ -162,7 +162,12 @@ func sigHash(header *types.Header) (hash common.Hash) {
}
// ecrecover extracts the Ethereum account address from a signed header.
-func ecrecover(header *types.Header) (common.Address, error) {
+func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, error) {
+ // If the signature's already cached, return that
+ hash := header.Hash()
+ if address, known := sigcache.Get(hash); known {
+ return address.(common.Address), nil
+ }
// Retrieve the signature from the header extra-data
if len(header.Extra) < extraSeal {
return common.Address{}, errMissingSignature
@@ -177,6 +182,7 @@ func ecrecover(header *types.Header) (common.Address, error) {
var signer common.Address
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
+ sigcache.Add(hash, signer)
return signer, nil
}
@@ -223,7 +229,7 @@ func New(config *params.CliqueConfig, db ethdb.Database) *Clique {
// Author implements consensus.Engine, returning the Ethereum address recovered
// from the signature in the header's extra-data section.
func (c *Clique) Author(header *types.Header) (common.Address, error) {
- return ecrecover(header)
+ return ecrecover(header, c.signatures)
}
// VerifyHeader checks whether a header conforms to the consensus rules.
@@ -369,7 +375,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
}
// If an on-disk checkpoint snapshot can be found, use that
if number%checkpointInterval == 0 {
- if s, err := loadSnapshot(c.config, c.db, hash); err == nil {
+ if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil {
log.Trace("Loaded voting snapshot form disk", "number", number, "hash", hash)
snap = s
break
@@ -385,7 +391,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
for i := 0; i < len(signers); i++ {
copy(signers[i][:], genesis.Extra[extraVanity+i*common.AddressLength:])
}
- snap = newSnapshot(c.config, 0, genesis.Hash(), signers)
+ snap = newSnapshot(c.config, c.signatures, 0, genesis.Hash(), signers)
if err := snap.store(c.db); err != nil {
return nil, err
}
@@ -464,7 +470,7 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
c.recents.Add(snap.Hash, snap)
// Resolve the authorization key and check against signers
- signer, err := ecrecover(header)
+ signer, err := ecrecover(header, c.signatures)
if err != nil {
return err
}
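
The clique change above threads an ARC cache of recovered signers through ecrecover, so each header's signature only has to be recovered once per hash; the inmemorySignatures bump to 4096 sizes that cache. A standalone sketch of the same memoization pattern using hashicorp/golang-lru (recoverExpensive is a hypothetical stand-in for the real secp256k1 recovery):

    package main

    import (
    	"fmt"

    	lru "github.com/hashicorp/golang-lru"
    )

    // recoverExpensive stands in for the costly elliptic-curve recovery.
    func recoverExpensive(hash string) string {
    	return "signer-of-" + hash
    }

    // cachedRecover returns the cached signer for a hash, falling back to the
    // expensive recovery (and caching the result) only on a miss.
    func cachedRecover(cache *lru.ARCCache, hash string) string {
    	if signer, known := cache.Get(hash); known {
    		return signer.(string)
    	}
    	signer := recoverExpensive(hash)
    	cache.Add(hash, signer)
    	return signer
    }

    func main() {
    	cache, _ := lru.NewARC(4096)               // mirrors the bumped inmemorySignatures limit
    	fmt.Println(cachedRecover(cache, "0xabc")) // computed
    	fmt.Println(cachedRecover(cache, "0xabc")) // served from the cache
    }
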
diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go
index 46b32ca5f..fb86bc5e6 100644
--- a/consensus/clique/snapshot.go
+++ b/consensus/clique/snapshot.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
+ lru "github.com/hashicorp/golang-lru"
)
// Vote represents a single vote that an authorized signer made to modify the
@@ -44,7 +45,8 @@ type Tally struct {
// Snapshot is the state of the authorization voting at a given point in time.
type Snapshot struct {
- config *params.CliqueConfig // Consensus engine parameters to fine tune behavior
+ config *params.CliqueConfig // Consensus engine parameters to fine tune behavior
+ sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover
Number uint64 `json:"number"` // Block number where the snapshot was created
Hash common.Hash `json:"hash"` // Block hash where the snapshot was created
@@ -57,14 +59,15 @@ type Snapshot struct {
// newSnapshot create a new snapshot with the specified startup parameters. This
// method does not initialize the set of recent signers, so only ever use if for
// the genesis block.
-func newSnapshot(config *params.CliqueConfig, number uint64, hash common.Hash, signers []common.Address) *Snapshot {
+func newSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, number uint64, hash common.Hash, signers []common.Address) *Snapshot {
snap := &Snapshot{
- config: config,
- Number: number,
- Hash: hash,
- Signers: make(map[common.Address]struct{}),
- Recents: make(map[uint64]common.Address),
- Tally: make(map[common.Address]Tally),
+ config: config,
+ sigcache: sigcache,
+ Number: number,
+ Hash: hash,
+ Signers: make(map[common.Address]struct{}),
+ Recents: make(map[uint64]common.Address),
+ Tally: make(map[common.Address]Tally),
}
for _, signer := range signers {
snap.Signers[signer] = struct{}{}
@@ -73,7 +76,7 @@ func newSnapshot(config *params.CliqueConfig, number uint64, hash common.Hash, s
}
// loadSnapshot loads an existing snapshot from the database.
-func loadSnapshot(config *params.CliqueConfig, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
+func loadSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
blob, err := db.Get(append([]byte("clique-"), hash[:]...))
if err != nil {
return nil, err
@@ -83,6 +86,7 @@ func loadSnapshot(config *params.CliqueConfig, db ethdb.Database, hash common.Ha
return nil, err
}
snap.config = config
+ snap.sigcache = sigcache
return snap, nil
}
@@ -99,13 +103,14 @@ func (s *Snapshot) store(db ethdb.Database) error {
// copy creates a deep copy of the snapshot, though not the individual votes.
func (s *Snapshot) copy() *Snapshot {
cpy := &Snapshot{
- config: s.config,
- Number: s.Number,
- Hash: s.Hash,
- Signers: make(map[common.Address]struct{}),
- Recents: make(map[uint64]common.Address),
- Votes: make([]*Vote, len(s.Votes)),
- Tally: make(map[common.Address]Tally),
+ config: s.config,
+ sigcache: s.sigcache,
+ Number: s.Number,
+ Hash: s.Hash,
+ Signers: make(map[common.Address]struct{}),
+ Recents: make(map[uint64]common.Address),
+ Votes: make([]*Vote, len(s.Votes)),
+ Tally: make(map[common.Address]Tally),
}
for signer := range s.Signers {
cpy.Signers[signer] = struct{}{}
@@ -190,7 +195,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
delete(snap.Recents, number-limit)
}
// Resolve the authorization key and check against signers
- signer, err := ecrecover(header)
+ signer, err := ecrecover(header, s.sigcache)
if err != nil {
return nil, err
}
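
The snapshot.go changes thread the same signature cache through newSnapshot, loadSnapshot and copy. Because sigcache (like config) is an unexported field, it is never part of the JSON blob stored in the database, which is why loadSnapshot re-attaches it by hand after unmarshalling. A small illustration of that Go behaviour (the types here are invented for the example):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type snapshot struct {
    	cache  map[string]string // unexported: skipped by encoding/json
    	Number uint64            `json:"number"`
    }

    func main() {
    	blob, _ := json.Marshal(&snapshot{cache: map[string]string{"k": "v"}, Number: 7})

    	var snap snapshot
    	_ = json.Unmarshal(blob, &snap)
    	fmt.Println(snap.Number, snap.cache == nil) // prints: 7 true

    	// The cache has to be re-attached after loading, exactly as loadSnapshot
    	// does with config and sigcache.
    	snap.cache = map[string]string{"k": "v"}
    	fmt.Println(len(snap.cache)) // prints: 1
    }
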
diff --git a/core/blockchain.go b/core/blockchain.go
index 794e1915f..073b91bab 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -56,10 +56,10 @@ const (
blockCacheLimit = 256
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
- // must be bumped when consensus algorithm is changed, this forces the upgradedb
- // command to be run (forces the blocks to be imported again using the new algorithm)
+ badBlockLimit = 10
+
+ // BlockChainVersion ensures that an incompatible database forces a resync from scratch.
BlockChainVersion = 3
- badBlockLimit = 10
)
// BlockChain represents the canonical chain given a database with a genesis
@@ -168,67 +168,67 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, engine co
return bc, nil
}
-func (self *BlockChain) getProcInterrupt() bool {
- return atomic.LoadInt32(&self.procInterrupt) == 1
+func (bc *BlockChain) getProcInterrupt() bool {
+ return atomic.LoadInt32(&bc.procInterrupt) == 1
}
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
-func (self *BlockChain) loadLastState() error {
+func (bc *BlockChain) loadLastState() error {
// Restore the last known head block
- head := GetHeadBlockHash(self.chainDb)
+ head := GetHeadBlockHash(bc.chainDb)
if head == (common.Hash{}) {
// Corrupt or empty database, init from scratch
log.Warn("Empty database, resetting chain")
- return self.Reset()
+ return bc.Reset()
}
// Make sure the entire head block is available
- currentBlock := self.GetBlockByHash(head)
+ currentBlock := bc.GetBlockByHash(head)
if currentBlock == nil {
// Corrupt or empty database, init from scratch
log.Warn("Head block missing, resetting chain", "hash", head)
- return self.Reset()
+ return bc.Reset()
}
// Make sure the state associated with the block is available
- if _, err := state.New(currentBlock.Root(), self.chainDb); err != nil {
+ if _, err := state.New(currentBlock.Root(), bc.chainDb); err != nil {
// Dangling block without a state associated, init from scratch
log.Warn("Head state missing, resetting chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
- return self.Reset()
+ return bc.Reset()
}
// Everything seems to be fine, set as the head block
- self.currentBlock = currentBlock
+ bc.currentBlock = currentBlock
// Restore the last known head header
- currentHeader := self.currentBlock.Header()
- if head := GetHeadHeaderHash(self.chainDb); head != (common.Hash{}) {
- if header := self.GetHeaderByHash(head); header != nil {
+ currentHeader := bc.currentBlock.Header()
+ if head := GetHeadHeaderHash(bc.chainDb); head != (common.Hash{}) {
+ if header := bc.GetHeaderByHash(head); header != nil {
currentHeader = header
}
}
- self.hc.SetCurrentHeader(currentHeader)
+ bc.hc.SetCurrentHeader(currentHeader)
// Restore the last known head fast block
- self.currentFastBlock = self.currentBlock
- if head := GetHeadFastBlockHash(self.chainDb); head != (common.Hash{}) {
- if block := self.GetBlockByHash(head); block != nil {
- self.currentFastBlock = block
+ bc.currentFastBlock = bc.currentBlock
+ if head := GetHeadFastBlockHash(bc.chainDb); head != (common.Hash{}) {
+ if block := bc.GetBlockByHash(head); block != nil {
+ bc.currentFastBlock = block
}
}
// Initialize a statedb cache to ensure singleton account bloom filter generation
- statedb, err := state.New(self.currentBlock.Root(), self.chainDb)
+ statedb, err := state.New(bc.currentBlock.Root(), bc.chainDb)
if err != nil {
return err
}
- self.stateCache = statedb
+ bc.stateCache = statedb
// Issue a status log for the user
- headerTd := self.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
- blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
- fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64())
+ headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
+ blockTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
+ fastTd := bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64())
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
- log.Info("Loaded most recent local full block", "number", self.currentBlock.Number(), "hash", self.currentBlock.Hash(), "td", blockTd)
- log.Info("Loaded most recent local fast block", "number", self.currentFastBlock.Number(), "hash", self.currentFastBlock.Hash(), "td", fastTd)
+ log.Info("Loaded most recent local full block", "number", bc.currentBlock.Number(), "hash", bc.currentBlock.Hash(), "td", blockTd)
+ log.Info("Loaded most recent local fast block", "number", bc.currentFastBlock.Number(), "hash", bc.currentFastBlock.Hash(), "td", fastTd)
return nil
}
@@ -288,103 +288,103 @@ func (bc *BlockChain) SetHead(head uint64) error {
// FastSyncCommitHead sets the current head block to the one defined by the hash
// irrelevant what the chain contents were prior.
-func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
+func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
// Make sure that both the block as well at its state trie exists
- block := self.GetBlockByHash(hash)
+ block := bc.GetBlockByHash(hash)
if block == nil {
return fmt.Errorf("non existent block [%x…]", hash[:4])
}
- if _, err := trie.NewSecure(block.Root(), self.chainDb, 0); err != nil {
+ if _, err := trie.NewSecure(block.Root(), bc.chainDb, 0); err != nil {
return err
}
// If all checks out, manually set the head block
- self.mu.Lock()
- self.currentBlock = block
- self.mu.Unlock()
+ bc.mu.Lock()
+ bc.currentBlock = block
+ bc.mu.Unlock()
log.Info("Committed new head block", "number", block.Number(), "hash", hash)
return nil
}
// GasLimit returns the gas limit of the current HEAD block.
-func (self *BlockChain) GasLimit() *big.Int {
- self.mu.RLock()
- defer self.mu.RUnlock()
+func (bc *BlockChain) GasLimit() *big.Int {
+ bc.mu.RLock()
+ defer bc.mu.RUnlock()
- return self.currentBlock.GasLimit()
+ return bc.currentBlock.GasLimit()
}
// LastBlockHash return the hash of the HEAD block.
-func (self *BlockChain) LastBlockHash() common.Hash {
- self.mu.RLock()
- defer self.mu.RUnlock()
+func (bc *BlockChain) LastBlockHash() common.Hash {
+ bc.mu.RLock()
+ defer bc.mu.RUnlock()
- return self.currentBlock.Hash()
+ return bc.currentBlock.Hash()
}
// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
-func (self *BlockChain) CurrentBlock() *types.Block {
- self.mu.RLock()
- defer self.mu.RUnlock()
+func (bc *BlockChain) CurrentBlock() *types.Block {
+ bc.mu.RLock()
+ defer bc.mu.RUnlock()
- return self.currentBlock
+ return bc.currentBlock
}
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
-func (self *BlockChain) CurrentFastBlock() *types.Block {
- self.mu.RLock()
- defer self.mu.RUnlock()
+func (bc *BlockChain) CurrentFastBlock() *types.Block {
+ bc.mu.RLock()
+ defer bc.mu.RUnlock()
- return self.currentFastBlock
+ return bc.currentFastBlock
}
// Status returns status information about the current chain such as the HEAD Td,
// the HEAD hash and the hash of the genesis block.
-func (self *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) {
- self.mu.RLock()
- defer self.mu.RUnlock()
+func (bc *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) {
+ bc.mu.RLock()
+ defer bc.mu.RUnlock()
- return self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64()), self.currentBlock.Hash(), self.genesisBlock.Hash()
+ return bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64()), bc.currentBlock.Hash(), bc.genesisBlock.Hash()
}
// SetProcessor sets the processor required for making state modifications.
-func (self *BlockChain) SetProcessor(processor Processor) {
- self.procmu.Lock()
- defer self.procmu.Unlock()
- self.processor = processor
+func (bc *BlockChain) SetProcessor(processor Processor) {
+ bc.procmu.Lock()
+ defer bc.procmu.Unlock()
+ bc.processor = processor
}
// SetValidator sets the validator which is used to validate incoming blocks.
-func (self *BlockChain) SetValidator(validator Validator) {
- self.procmu.Lock()
- defer self.procmu.Unlock()
- self.validator = validator
+func (bc *BlockChain) SetValidator(validator Validator) {
+ bc.procmu.Lock()
+ defer bc.procmu.Unlock()
+ bc.validator = validator
}
// Validator returns the current validator.
-func (self *BlockChain) Validator() Validator {
- self.procmu.RLock()
- defer self.procmu.RUnlock()
- return self.validator
+func (bc *BlockChain) Validator() Validator {
+ bc.procmu.RLock()
+ defer bc.procmu.RUnlock()
+ return bc.validator
}
// Processor returns the current processor.
-func (self *BlockChain) Processor() Processor {
- self.procmu.RLock()
- defer self.procmu.RUnlock()
- return self.processor
+func (bc *BlockChain) Processor() Processor {
+ bc.procmu.RLock()
+ defer bc.procmu.RUnlock()
+ return bc.processor
}
// State returns a new mutable state based on the current HEAD block.
-func (self *BlockChain) State() (*state.StateDB, error) {
- return self.StateAt(self.CurrentBlock().Root())
+func (bc *BlockChain) State() (*state.StateDB, error) {
+ return bc.StateAt(bc.CurrentBlock().Root())
}
// StateAt returns a new mutable state based on a particular point in time.
-func (self *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
- return self.stateCache.New(root)
+func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
+ return bc.stateCache.New(root)
}
// Reset purges the entire blockchain, restoring it to its genesis state.
@@ -420,14 +420,14 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
}
// Export writes the active chain to the given writer.
-func (self *BlockChain) Export(w io.Writer) error {
- return self.ExportN(w, uint64(0), self.currentBlock.NumberU64())
+func (bc *BlockChain) Export(w io.Writer) error {
+ return bc.ExportN(w, uint64(0), bc.currentBlock.NumberU64())
}
// ExportN writes a subset of the active chain to the given writer.
-func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
- self.mu.RLock()
- defer self.mu.RUnlock()
+func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
+ bc.mu.RLock()
+ defer bc.mu.RUnlock()
if first > last {
return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
@@ -435,7 +435,7 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
log.Info("Exporting batch of blocks", "count", last-first+1)
for nr := first; nr <= last; nr++ {
- block := self.GetBlockByNumber(nr)
+ block := bc.GetBlockByNumber(nr)
if block == nil {
return fmt.Errorf("export failed on #%d: not found", nr)
}
@@ -478,41 +478,41 @@ func (bc *BlockChain) insert(block *types.Block) {
}
}
-// Accessors
+// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
return bc.genesisBlock
}
// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
-func (self *BlockChain) GetBody(hash common.Hash) *types.Body {
+func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
// Short circuit if the body's already in the cache, retrieve otherwise
- if cached, ok := self.bodyCache.Get(hash); ok {
+ if cached, ok := bc.bodyCache.Get(hash); ok {
body := cached.(*types.Body)
return body
}
- body := GetBody(self.chainDb, hash, self.hc.GetBlockNumber(hash))
+ body := GetBody(bc.chainDb, hash, bc.hc.GetBlockNumber(hash))
if body == nil {
return nil
}
// Cache the found body for next time and return
- self.bodyCache.Add(hash, body)
+ bc.bodyCache.Add(hash, body)
return body
}
// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
-func (self *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
+func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
// Short circuit if the body's already in the cache, retrieve otherwise
- if cached, ok := self.bodyRLPCache.Get(hash); ok {
+ if cached, ok := bc.bodyRLPCache.Get(hash); ok {
return cached.(rlp.RawValue)
}
- body := GetBodyRLP(self.chainDb, hash, self.hc.GetBlockNumber(hash))
+ body := GetBodyRLP(bc.chainDb, hash, bc.hc.GetBlockNumber(hash))
if len(body) == 0 {
return nil
}
// Cache the found body for next time and return
- self.bodyRLPCache.Add(hash, body)
+ bc.bodyRLPCache.Add(hash, body)
return body
}
@@ -537,41 +537,41 @@ func (bc *BlockChain) HasBlockAndState(hash common.Hash) bool {
// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
-func (self *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
+func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
// Short circuit if the block's already in the cache, retrieve otherwise
- if block, ok := self.blockCache.Get(hash); ok {
+ if block, ok := bc.blockCache.Get(hash); ok {
return block.(*types.Block)
}
- block := GetBlock(self.chainDb, hash, number)
+ block := GetBlock(bc.chainDb, hash, number)
if block == nil {
return nil
}
// Cache the found block for next time and return
- self.blockCache.Add(block.Hash(), block)
+ bc.blockCache.Add(block.Hash(), block)
return block
}
// GetBlockByHash retrieves a block from the database by hash, caching it if found.
-func (self *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
- return self.GetBlock(hash, self.hc.GetBlockNumber(hash))
+func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
+ return bc.GetBlock(hash, bc.hc.GetBlockNumber(hash))
}
// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
-func (self *BlockChain) GetBlockByNumber(number uint64) *types.Block {
- hash := GetCanonicalHash(self.chainDb, number)
+func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
+ hash := GetCanonicalHash(bc.chainDb, number)
if hash == (common.Hash{}) {
return nil
}
- return self.GetBlock(hash, number)
+ return bc.GetBlock(hash, number)
}
-// [deprecated by eth/62]
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
-func (self *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
- number := self.hc.GetBlockNumber(hash)
+// [deprecated by eth/62]
+func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
+ number := bc.hc.GetBlockNumber(hash)
for i := 0; i < n; i++ {
- block := self.GetBlock(hash, number)
+ block := bc.GetBlock(hash, number)
if block == nil {
break
}
@@ -584,11 +584,11 @@ func (self *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*ty
// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
-func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
+func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
uncles := []*types.Header{}
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
- block = self.GetBlock(block.ParentHash(), block.NumberU64()-1)
+ block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
}
return uncles
}
@@ -606,10 +606,10 @@ func (bc *BlockChain) Stop() {
log.Info("Blockchain manager stopped")
}
-func (self *BlockChain) procFutureBlocks() {
- blocks := make([]*types.Block, 0, self.futureBlocks.Len())
- for _, hash := range self.futureBlocks.Keys() {
- if block, exist := self.futureBlocks.Peek(hash); exist {
+func (bc *BlockChain) procFutureBlocks() {
+ blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
+ for _, hash := range bc.futureBlocks.Keys() {
+ if block, exist := bc.futureBlocks.Peek(hash); exist {
blocks = append(blocks, block.(*types.Block))
}
}
@@ -618,11 +618,12 @@ func (self *BlockChain) procFutureBlocks() {
// Insert one by one as chain insertion needs contiguous ancestry between blocks
for i := range blocks {
- self.InsertChain(blocks[i : i+1])
+ bc.InsertChain(blocks[i : i+1])
}
}
}
+// WriteStatus status of write
type WriteStatus byte
const (
@@ -633,24 +634,24 @@ const (
// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
-func (self *BlockChain) Rollback(chain []common.Hash) {
- self.mu.Lock()
- defer self.mu.Unlock()
+func (bc *BlockChain) Rollback(chain []common.Hash) {
+ bc.mu.Lock()
+ defer bc.mu.Unlock()
for i := len(chain) - 1; i >= 0; i-- {
hash := chain[i]
- currentHeader := self.hc.CurrentHeader()
+ currentHeader := bc.hc.CurrentHeader()
if currentHeader.Hash() == hash {
- self.hc.SetCurrentHeader(self.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
+ bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
}
- if self.currentFastBlock.Hash() == hash {
- self.currentFastBlock = self.GetBlock(self.currentFastBlock.ParentHash(), self.currentFastBlock.NumberU64()-1)
- WriteHeadFastBlockHash(self.chainDb, self.currentFastBlock.Hash())
+ if bc.currentFastBlock.Hash() == hash {
+ bc.currentFastBlock = bc.GetBlock(bc.currentFastBlock.ParentHash(), bc.currentFastBlock.NumberU64()-1)
+ WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash())
}
- if self.currentBlock.Hash() == hash {
- self.currentBlock = self.GetBlock(self.currentBlock.ParentHash(), self.currentBlock.NumberU64()-1)
- WriteHeadBlockHash(self.chainDb, self.currentBlock.Hash())
+ if bc.currentBlock.Hash() == hash {
+ bc.currentBlock = bc.GetBlock(bc.currentBlock.ParentHash(), bc.currentBlock.NumberU64()-1)
+ WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash())
}
}
}
@@ -692,7 +693,7 @@ func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts ty
// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
// XXX should this be moved to the test?
-func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
+func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(blockChain); i++ {
if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
@@ -705,8 +706,8 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
}
}
// Pre-checks passed, start the block body and receipt imports
- self.wg.Add(1)
- defer self.wg.Done()
+ bc.wg.Add(1)
+ defer bc.wg.Done()
// Collect some import statistics to report on
stats := struct{ processed, ignored int32 }{}
@@ -725,51 +726,51 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
block, receipts := blockChain[index], receiptChain[index]
// Short circuit insertion if shutting down or processing failed
- if atomic.LoadInt32(&self.procInterrupt) == 1 {
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
return
}
if atomic.LoadInt32(&failed) > 0 {
return
}
// Short circuit if the owner header is unknown
- if !self.HasHeader(block.Hash()) {
+ if !bc.HasHeader(block.Hash()) {
errs[index] = fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
atomic.AddInt32(&failed, 1)
return
}
// Skip if the entire data is already known
- if self.HasBlock(block.Hash()) {
+ if bc.HasBlock(block.Hash()) {
atomic.AddInt32(&stats.ignored, 1)
continue
}
// Compute all the non-consensus fields of the receipts
- SetReceiptsData(self.config, block, receipts)
+ SetReceiptsData(bc.config, block, receipts)
// Write all the data out into the database
- if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
+ if err := WriteBody(bc.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
errs[index] = fmt.Errorf("failed to write block body: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit("Failed to write block body", "err", err)
return
}
- if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
+ if err := WriteBlockReceipts(bc.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit("Failed to write block receipts", "err", err)
return
}
- if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
+ if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit("Failed to write log blooms", "err", err)
return
}
- if err := WriteTransactions(self.chainDb, block); err != nil {
+ if err := WriteTransactions(bc.chainDb, block); err != nil {
errs[index] = fmt.Errorf("failed to write individual transactions: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit("Failed to write individual transactions", "err", err)
return
}
- if err := WriteReceipts(self.chainDb, receipts); err != nil {
+ if err := WriteReceipts(bc.chainDb, receipts); err != nil {
errs[index] = fmt.Errorf("failed to write individual receipts: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit("Failed to write individual receipts", "err", err)
@@ -797,23 +798,23 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
}
}
}
- if atomic.LoadInt32(&self.procInterrupt) == 1 {
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during receipts processing")
return 0, nil
}
// Update the head fast sync block if better
- self.mu.Lock()
+ bc.mu.Lock()
head := blockChain[len(errs)-1]
- if td := self.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
- if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(td) < 0 {
- if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
+ if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
+ if bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64()).Cmp(td) < 0 {
+ if err := WriteHeadFastBlockHash(bc.chainDb, head.Hash()); err != nil {
log.Crit("Failed to update head fast block hash", "err", err)
}
- self.currentFastBlock = head
+ bc.currentFastBlock = head
}
}
- self.mu.Unlock()
+ bc.mu.Unlock()
// Report some public statistics so the user has a clue what's going on
last := blockChain[len(blockChain)-1]
@@ -824,27 +825,27 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
}
// WriteBlock writes the block to the chain.
-func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err error) {
- self.wg.Add(1)
- defer self.wg.Done()
+func (bc *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err error) {
+ bc.wg.Add(1)
+ defer bc.wg.Done()
// Calculate the total difficulty of the block
- ptd := self.GetTd(block.ParentHash(), block.NumberU64()-1)
+ ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
if ptd == nil {
return NonStatTy, consensus.ErrUnknownAncestor
}
// Make sure no inconsistent state is leaked during insertion
- self.mu.Lock()
- defer self.mu.Unlock()
+ bc.mu.Lock()
+ defer bc.mu.Unlock()
- localTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
+ localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
externTd := new(big.Int).Add(block.Difficulty(), ptd)
// Irrelevant of the canonical status, write the block itself to the database
- if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
+ if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
log.Crit("Failed to write block total difficulty", "err", err)
}
- if err := WriteBlock(self.chainDb, block); err != nil {
+ if err := WriteBlock(bc.chainDb, block); err != nil {
log.Crit("Failed to write block contents", "err", err)
}
@@ -853,25 +854,25 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
// Reorganise the chain if the parent is not the head block
- if block.ParentHash() != self.currentBlock.Hash() {
- if err := self.reorg(self.currentBlock, block); err != nil {
+ if block.ParentHash() != bc.currentBlock.Hash() {
+ if err := bc.reorg(bc.currentBlock, block); err != nil {
return NonStatTy, err
}
}
- self.insert(block) // Insert the block as the new head of the chain
+ bc.insert(block) // Insert the block as the new head of the chain
status = CanonStatTy
} else {
status = SideStatTy
}
- self.futureBlocks.Remove(block.Hash())
+ bc.futureBlocks.Remove(block.Hash())
return
}
// InsertChain will attempt to insert the given chain in to the canonical chain or, otherwise, create a fork. If an error is returned
// it will return the index number of the failing block as well an error describing what went wrong (for possible errors see core/errors.go).
-func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
+func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(chain); i++ {
if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
@@ -884,11 +885,11 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
}
}
// Pre-checks passed, start the full block imports
- self.wg.Add(1)
- defer self.wg.Done()
+ bc.wg.Add(1)
+ defer bc.wg.Done()
- self.chainmu.Lock()
- defer self.chainmu.Unlock()
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
@@ -906,19 +907,19 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
headers[i] = block.Header()
seals[i] = true
}
- abort, results := self.engine.VerifyHeaders(self, headers, seals)
+ abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
defer close(abort)
// Iterate over the blocks and insert when the verifier permits
for i, block := range chain {
// If the chain is terminating, stop processing blocks
- if atomic.LoadInt32(&self.procInterrupt) == 1 {
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
break
}
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
- self.reportBlock(block, nil, ErrBlacklistedHash)
+ bc.reportBlock(block, nil, ErrBlacklistedHash)
return i, ErrBlacklistedHash
}
// Wait for the block's verification to complete
@@ -926,7 +927,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
err := <-results
if err == nil {
- err = self.Validator().ValidateBody(block)
+ err = bc.Validator().ValidateBody(block)
}
if err != nil {
if err == ErrKnownBlock {
@@ -942,46 +943,46 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
if block.Time().Cmp(max) > 0 {
return i, fmt.Errorf("future block: %v > %v", block.Time(), max)
}
- self.futureBlocks.Add(block.Hash(), block)
+ bc.futureBlocks.Add(block.Hash(), block)
stats.queued++
continue
}
- if err == consensus.ErrUnknownAncestor && self.futureBlocks.Contains(block.ParentHash()) {
- self.futureBlocks.Add(block.Hash(), block)
+ if err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()) {
+ bc.futureBlocks.Add(block.Hash(), block)
stats.queued++
continue
}
- self.reportBlock(block, nil, err)
+ bc.reportBlock(block, nil, err)
return i, err
}
// Create a new statedb using the parent block and report an
// error if it fails.
switch {
case i == 0:
- err = self.stateCache.Reset(self.GetBlock(block.ParentHash(), block.NumberU64()-1).Root())
+ err = bc.stateCache.Reset(bc.GetBlock(block.ParentHash(), block.NumberU64()-1).Root())
default:
- err = self.stateCache.Reset(chain[i-1].Root())
+ err = bc.stateCache.Reset(chain[i-1].Root())
}
if err != nil {
- self.reportBlock(block, nil, err)
+ bc.reportBlock(block, nil, err)
return i, err
}
// Process block using the parent state as reference point.
- receipts, logs, usedGas, err := self.processor.Process(block, self.stateCache, self.vmConfig)
+ receipts, logs, usedGas, err := bc.processor.Process(block, bc.stateCache, bc.vmConfig)
if err != nil {
- self.reportBlock(block, receipts, err)
+ bc.reportBlock(block, receipts, err)
return i, err
}
// Validate the state using the default validator
- err = self.Validator().ValidateState(block, self.GetBlock(block.ParentHash(), block.NumberU64()-1), self.stateCache, receipts, usedGas)
+ err = bc.Validator().ValidateState(block, bc.GetBlock(block.ParentHash(), block.NumberU64()-1), bc.stateCache, receipts, usedGas)
if err != nil {
- self.reportBlock(block, receipts, err)
+ bc.reportBlock(block, receipts, err)
return i, err
}
// Write state changes to database
- _, err = self.stateCache.Commit(self.config.IsEIP158(block.Number()))
+ _, err = bc.stateCache.Commit(bc.config.IsEIP158(block.Number()))
if err != nil {
return i, err
}
@@ -989,12 +990,12 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// coalesce logs for later processing
coalescedLogs = append(coalescedLogs, logs...)
- if err = WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
+ if err = WriteBlockReceipts(bc.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
return i, err
}
// write the block to the chain and get the status
- status, err := self.WriteBlock(block)
+ status, err := bc.WriteBlock(block)
if err != nil {
return i, err
}
@@ -1008,19 +1009,19 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
events = append(events, ChainEvent{block, block.Hash(), logs})
// This puts transactions in a extra db for rpc
- if err := WriteTransactions(self.chainDb, block); err != nil {
+ if err := WriteTransactions(bc.chainDb, block); err != nil {
return i, err
}
// store the receipts
- if err := WriteReceipts(self.chainDb, receipts); err != nil {
+ if err := WriteReceipts(bc.chainDb, receipts); err != nil {
return i, err
}
// Write map map bloom filters
- if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
+ if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
return i, err
}
// Write hash preimages
- if err := WritePreimages(self.chainDb, block.NumberU64(), self.stateCache.Preimages()); err != nil {
+ if err := WritePreimages(bc.chainDb, block.NumberU64(), bc.stateCache.Preimages()); err != nil {
return i, err
}
case SideStatTy:
@@ -1034,7 +1035,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
stats.usedGas += usedGas.Uint64()
stats.report(chain, i)
}
- go self.postChainEvents(events, coalescedLogs)
+ go bc.postChainEvents(events, coalescedLogs)
return 0, nil
}
@@ -1092,7 +1093,7 @@ func countTransactions(chain []*types.Block) (c int) {
// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
// to be part of the new canonical chain and accumulates potential missing transactions and post an
// event about them
-func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
+func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
var (
newChain types.Blocks
oldChain types.Blocks
@@ -1104,7 +1105,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// These logs are later announced as deleted.
collectLogs = func(h common.Hash) {
// Coalesce logs and set 'Removed'.
- receipts := GetBlockReceipts(self.chainDb, h, self.hc.GetBlockNumber(h))
+ receipts := GetBlockReceipts(bc.chainDb, h, bc.hc.GetBlockNumber(h))
for _, receipt := range receipts {
for _, log := range receipt.Logs {
del := *log
@@ -1118,7 +1119,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// first reduce whoever is higher bound
if oldBlock.NumberU64() > newBlock.NumberU64() {
// reduce old chain
- for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
+ for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
oldChain = append(oldChain, oldBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
@@ -1126,7 +1127,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
}
} else {
// reduce new chain and append new chain blocks for inserting later on
- for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
+ for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
newChain = append(newChain, newBlock)
}
}
@@ -1148,7 +1149,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
collectLogs(oldBlock.Hash())
- oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), self.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
+ oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
if oldBlock == nil {
return fmt.Errorf("Invalid old chain")
}
@@ -1171,18 +1172,18 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
for _, block := range newChain {
// insert the block in the canonical way, re-writing history
- self.insert(block)
+ bc.insert(block)
// write canonical receipts and transactions
- if err := WriteTransactions(self.chainDb, block); err != nil {
+ if err := WriteTransactions(bc.chainDb, block); err != nil {
return err
}
- receipts := GetBlockReceipts(self.chainDb, block.Hash(), block.NumberU64())
+ receipts := GetBlockReceipts(bc.chainDb, block.Hash(), block.NumberU64())
// write receipts
- if err := WriteReceipts(self.chainDb, receipts); err != nil {
+ if err := WriteReceipts(bc.chainDb, receipts); err != nil {
return err
}
// Write map map bloom filters
- if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
+ if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
return err
}
addedTxs = append(addedTxs, block.Transactions()...)
@@ -1193,22 +1194,22 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// When transactions get deleted from the database that means the
// receipts that were created in the fork must also be deleted
for _, tx := range diff {
- DeleteReceipt(self.chainDb, tx.Hash())
- DeleteTransaction(self.chainDb, tx.Hash())
+ DeleteReceipt(bc.chainDb, tx.Hash())
+ DeleteTransaction(bc.chainDb, tx.Hash())
}
// Must be posted in a goroutine because of the transaction pool trying
// to acquire the chain manager lock
if len(diff) > 0 {
- go self.eventMux.Post(RemovedTransactionEvent{diff})
+ go bc.eventMux.Post(RemovedTransactionEvent{diff})
}
if len(deletedLogs) > 0 {
- go self.eventMux.Post(RemovedLogsEvent{deletedLogs})
+ go bc.eventMux.Post(RemovedLogsEvent{deletedLogs})
}
if len(oldChain) > 0 {
go func() {
for _, block := range oldChain {
- self.eventMux.Post(ChainSideEvent{Block: block})
+ bc.eventMux.Post(ChainSideEvent{Block: block})
}
}()
}
@@ -1218,30 +1219,30 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// postChainEvents iterates over the events generated by a chain insertion and
// posts them into the event mux.
-func (self *BlockChain) postChainEvents(events []interface{}, logs []*types.Log) {
+func (bc *BlockChain) postChainEvents(events []interface{}, logs []*types.Log) {
// post event logs for further processing
- self.eventMux.Post(logs)
+ bc.eventMux.Post(logs)
for _, event := range events {
if event, ok := event.(ChainEvent); ok {
// We need some control over the mining operation. Acquiring locks and waiting
// for the miner to create new block takes too long and in most cases isn't
// even necessary.
- if self.LastBlockHash() == event.Hash {
- self.eventMux.Post(ChainHeadEvent{event.Block})
+ if bc.LastBlockHash() == event.Hash {
+ bc.eventMux.Post(ChainHeadEvent{event.Block})
}
}
// Fire the insertion events individually too
- self.eventMux.Post(event)
+ bc.eventMux.Post(event)
}
}
-func (self *BlockChain) update() {
+func (bc *BlockChain) update() {
futureTimer := time.Tick(5 * time.Second)
for {
select {
case <-futureTimer:
- self.procFutureBlocks()
- case <-self.quit:
+ bc.procFutureBlocks()
+ case <-bc.quit:
return
}
}
@@ -1299,28 +1300,28 @@ Error: %v
// should be done or not. The reason behind the optional check is because some
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
-func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
start := time.Now()
- if i, err := self.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
+ if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
return i, err
}
// Make sure only one thread manipulates the chain at once
- self.chainmu.Lock()
- defer self.chainmu.Unlock()
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
- self.wg.Add(1)
- defer self.wg.Done()
+ bc.wg.Add(1)
+ defer bc.wg.Done()
whFunc := func(header *types.Header) error {
- self.mu.Lock()
- defer self.mu.Unlock()
+ bc.mu.Lock()
+ defer bc.mu.Unlock()
- _, err := self.hc.WriteHeader(header)
+ _, err := bc.hc.WriteHeader(header)
return err
}
- return self.hc.InsertHeaderChain(chain, whFunc, start)
+ return bc.hc.InsertHeaderChain(chain, whFunc, start)
}
// writeHeader writes a header into the local chain, given that its parent is
@@ -1332,48 +1333,48 @@ func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int)
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
-func (self *BlockChain) writeHeader(header *types.Header) error {
- self.wg.Add(1)
- defer self.wg.Done()
+func (bc *BlockChain) writeHeader(header *types.Header) error {
+ bc.wg.Add(1)
+ defer bc.wg.Done()
- self.mu.Lock()
- defer self.mu.Unlock()
+ bc.mu.Lock()
+ defer bc.mu.Unlock()
- _, err := self.hc.WriteHeader(header)
+ _, err := bc.hc.WriteHeader(header)
return err
}
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
-func (self *BlockChain) CurrentHeader() *types.Header {
- self.mu.RLock()
- defer self.mu.RUnlock()
+func (bc *BlockChain) CurrentHeader() *types.Header {
+ bc.mu.RLock()
+ defer bc.mu.RUnlock()
- return self.hc.CurrentHeader()
+ return bc.hc.CurrentHeader()
}
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
-func (self *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
- return self.hc.GetTd(hash, number)
+func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
+ return bc.hc.GetTd(hash, number)
}
// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
-func (self *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
- return self.hc.GetTdByHash(hash)
+func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
+ return bc.hc.GetTdByHash(hash)
}
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
-func (self *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
- return self.hc.GetHeader(hash, number)
+func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
+ return bc.hc.GetHeader(hash, number)
}
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
-func (self *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
- return self.hc.GetHeaderByHash(hash)
+func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
+ return bc.hc.GetHeaderByHash(hash)
}
// HasHeader checks if a block header is present in the database or not, caching
@@ -1384,18 +1385,18 @@ func (bc *BlockChain) HasHeader(hash common.Hash) bool {
// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
-func (self *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
- return self.hc.GetBlockHashesFromHash(hash, max)
+func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
+ return bc.hc.GetBlockHashesFromHash(hash, max)
}
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
-func (self *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
- return self.hc.GetHeaderByNumber(number)
+func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
+ return bc.hc.GetHeaderByNumber(number)
}
// Config retrieves the blockchain's chain configuration.
-func (self *BlockChain) Config() *params.ChainConfig { return self.config }
+func (bc *BlockChain) Config() *params.ChainConfig { return bc.config }
// Engine retrieves the blockchain's consensus engine.
-func (self *BlockChain) Engine() consensus.Engine { return self.engine }
+func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
diff --git a/core/blocks.go b/core/blocks.go
index cf8c86507..f20ba4aaf 100644
--- a/core/blocks.go
+++ b/core/blocks.go
@@ -18,7 +18,7 @@ package core
import "github.com/ethereum/go-ethereum/common"
-// Set of manually tracked bad hashes (usually hard forks)
+// BadHashes represent a set of manually tracked bad hashes (usually hard forks)
var BadHashes = map[common.Hash]bool{
common.HexToHash("05bef30ef572270f654746da22639a7a0c97dd97a7050b9e252391996aaeb689"): true,
common.HexToHash("7d05d08cbc596a2e5e4f13b80a743e53e09221b5323c3a61946b20873e58583f"): true,
diff --git a/core/chain_makers.go b/core/chain_makers.go
index c81239607..cc14f8fb8 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -98,10 +98,10 @@ func (b *BlockGen) Number() *big.Int {
return new(big.Int).Set(b.header.Number)
}
-// AddUncheckedReceipts forcefully adds a receipts to the block without a
+// AddUncheckedReceipt forcefully adds a receipt to the block without a
// backing transaction.
//
-// AddUncheckedReceipts will cause consensus failures when used during real
+// AddUncheckedReceipt will cause consensus failures when used during real
// chain processing. This is best used in conjunction with raw block insertion.
func (b *BlockGen) AddUncheckedReceipt(receipt *types.Receipt) {
b.receipts = append(b.receipts, receipt)
diff --git a/core/database_util.go b/core/database_util.go
index bcd99be5f..b4a230c9c 100644
--- a/core/database_util.go
+++ b/core/database_util.go
@@ -64,7 +64,7 @@ var (
oldBlockReceiptsPrefix = []byte("receipts-block-")
oldBlockHashPrefix = []byte("block-hash-") // [deprecated by the header/block split, remove eventually]
- ChainConfigNotFoundErr = errors.New("ChainConfig not found") // general config not found error
+ ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
mipmapBloomMu sync.Mutex // protect against race condition when updating mipmap blooms
@@ -546,7 +546,7 @@ func mipmapKey(num, level uint64) []byte {
return append(mipmapPre, append(lkey, key.Bytes()...)...)
}
-// WriteMapmapBloom writes each address included in the receipts' logs to the
+// WriteMipmapBloom writes each address included in the receipts' logs to the
// MIP bloom bin.
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
mipmapBloomMu.Lock()
@@ -638,7 +638,7 @@ func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *params.ChainConf
func GetChainConfig(db ethdb.Database, hash common.Hash) (*params.ChainConfig, error) {
jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
if len(jsonChainConfig) == 0 {
- return nil, ChainConfigNotFoundErr
+ return nil, ErrChainConfigNotFound
}
var config params.ChainConfig
diff --git a/core/events.go b/core/events.go
index ce1f5aebc..bc7c9ddcb 100644
--- a/core/events.go
+++ b/core/events.go
@@ -41,7 +41,7 @@ type NewMinedBlockEvent struct{ Block *types.Block }
// RemovedTransactionEvent is posted when a reorg happens
type RemovedTransactionEvent struct{ Txs types.Transactions }
-// RemovedLogEvent is posted when a reorg happens
+// RemovedLogsEvent is posted when a reorg happens
type RemovedLogsEvent struct{ Logs []*types.Log }
type ChainEvent struct {
diff --git a/core/fees.go b/core/fees.go
index 0bb26f055..83275ea36 100644
--- a/core/fees.go
+++ b/core/fees.go
@@ -20,4 +20,4 @@ import (
"math/big"
)
-var BlockReward *big.Int = big.NewInt(5e+18)
+var BlockReward = big.NewInt(5e+18)
diff --git a/core/genesis.go b/core/genesis.go
index 8f55d3a37..947a53c70 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -133,7 +133,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
newcfg := genesis.configOrDefault(stored)
storedcfg, err := GetChainConfig(db, stored)
if err != nil {
- if err == ChainConfigNotFoundErr {
+ if err == ErrChainConfigNotFound {
// This case happens if a genesis write was interrupted.
log.Warn("Found genesis block without chain config")
err = WriteChainConfig(db, stored, newcfg)
diff --git a/core/headerchain.go b/core/headerchain.go
index 9bb7f1793..6ec44b61d 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -201,15 +201,6 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
// header writes should be protected by the parent chain mutex individually.
type WhCallback func(*types.Header) error
-// InsertHeaderChain attempts to insert the given header chain in to the local
-// chain, possibly creating a reorg. If an error is returned, it will return the
-// index number of the failing header as well an error describing what went wrong.
-//
-// The verify parameter can be used to fine tune whether nonce verification
-// should be done or not. The reason behind the optional check is because some
-// of the header retrieval mechanisms already need to verfy nonces, as well as
-// because nonces can be verified sparsely, not needing to check each.
-
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(chain); i++ {
@@ -257,6 +248,14 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
return 0, nil
}
+// InsertHeaderChain attempts to insert the given header chain into the local
+// chain, possibly creating a reorg. If an error is returned, it will return the
+// index number of the failing header as well as an error describing what went wrong.
+//
+// The verify parameter can be used to fine tune whether nonce verification
+// should be done or not. The reason behind the optional check is because some
+// of the header retrieval mechanisms already need to verify nonces, as well as
+// because nonces can be verified sparsely, not needing to check each.
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCallback, start time.Time) (int, error) {
// Collect some import statistics to report on
stats := struct{ processed, ignored int }{}
diff --git a/core/helper_test.go b/core/helper_test.go
index fd6a5491c..698a2924c 100644
--- a/core/helper_test.go
+++ b/core/helper_test.go
@@ -21,8 +21,6 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/core/types"
- // "github.com/ethereum/go-ethereum/crypto"
-
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
)
@@ -38,24 +36,24 @@ type TestManager struct {
Blocks []*types.Block
}
-func (s *TestManager) IsListening() bool {
+func (tm *TestManager) IsListening() bool {
return false
}
-func (s *TestManager) IsMining() bool {
+func (tm *TestManager) IsMining() bool {
return false
}
-func (s *TestManager) PeerCount() int {
+func (tm *TestManager) PeerCount() int {
return 0
}
-func (s *TestManager) Peers() *list.List {
+func (tm *TestManager) Peers() *list.List {
return list.New()
}
-func (s *TestManager) BlockChain() *BlockChain {
- return s.blockChain
+func (tm *TestManager) BlockChain() *BlockChain {
+ return tm.blockChain
}
func (tm *TestManager) TxPool() *TxPool {
diff --git a/core/state_transition.go b/core/state_transition.go
index ea773b801..21830e806 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -134,112 +134,113 @@ func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) ([]byte, *big.Int, erro
return ret, gasUsed, err
}
-func (self *StateTransition) from() vm.AccountRef {
- f := self.msg.From()
- if !self.state.Exist(f) {
- self.state.CreateAccount(f)
+func (st *StateTransition) from() vm.AccountRef {
+ f := st.msg.From()
+ if !st.state.Exist(f) {
+ st.state.CreateAccount(f)
}
return vm.AccountRef(f)
}
-func (self *StateTransition) to() vm.AccountRef {
- if self.msg == nil {
+func (st *StateTransition) to() vm.AccountRef {
+ if st.msg == nil {
return vm.AccountRef{}
}
- to := self.msg.To()
+ to := st.msg.To()
if to == nil {
return vm.AccountRef{} // contract creation
}
reference := vm.AccountRef(*to)
- if !self.state.Exist(*to) {
- self.state.CreateAccount(*to)
+ if !st.state.Exist(*to) {
+ st.state.CreateAccount(*to)
}
return reference
}
-func (self *StateTransition) useGas(amount uint64) error {
- if self.gas < amount {
+func (st *StateTransition) useGas(amount uint64) error {
+ if st.gas < amount {
return vm.ErrOutOfGas
}
- self.gas -= amount
+ st.gas -= amount
return nil
}
-func (self *StateTransition) buyGas() error {
- mgas := self.msg.Gas()
+func (st *StateTransition) buyGas() error {
+ mgas := st.msg.Gas()
if mgas.BitLen() > 64 {
return vm.ErrOutOfGas
}
- mgval := new(big.Int).Mul(mgas, self.gasPrice)
+ mgval := new(big.Int).Mul(mgas, st.gasPrice)
var (
- state = self.state
- sender = self.from()
+ state = st.state
+ sender = st.from()
)
if state.GetBalance(sender.Address()).Cmp(mgval) < 0 {
return errInsufficientBalanceForGas
}
- if err := self.gp.SubGas(mgas); err != nil {
+ if err := st.gp.SubGas(mgas); err != nil {
return err
}
- self.gas += mgas.Uint64()
+ st.gas += mgas.Uint64()
- self.initialGas.Set(mgas)
+ st.initialGas.Set(mgas)
state.SubBalance(sender.Address(), mgval)
return nil
}
-func (self *StateTransition) preCheck() error {
- msg := self.msg
- sender := self.from()
+func (st *StateTransition) preCheck() error {
+ msg := st.msg
+ sender := st.from()
// Make sure this transaction's nonce is correct
if msg.CheckNonce() {
- if n := self.state.GetNonce(sender.Address()); n != msg.Nonce() {
+ if n := st.state.GetNonce(sender.Address()); n != msg.Nonce() {
return fmt.Errorf("invalid nonce: have %d, expected %d", msg.Nonce(), n)
}
}
- return self.buyGas()
+ return st.buyGas()
}
// TransitionDb will transition the state by applying the current message and returning the result
// including the required gas for the operation as well as the used gas. It returns an error if it
// failed. An error indicates a consensus issue.
-func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big.Int, err error) {
- if err = self.preCheck(); err != nil {
+func (st *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big.Int, err error) {
+ if err = st.preCheck(); err != nil {
return
}
- msg := self.msg
- sender := self.from() // err checked in preCheck
+ msg := st.msg
+ sender := st.from() // err checked in preCheck
- homestead := self.evm.ChainConfig().IsHomestead(self.evm.BlockNumber)
+ homestead := st.evm.ChainConfig().IsHomestead(st.evm.BlockNumber)
contractCreation := msg.To() == nil
+
// Pay intrinsic gas
// TODO convert to uint64
- intrinsicGas := IntrinsicGas(self.data, contractCreation, homestead)
+ intrinsicGas := IntrinsicGas(st.data, contractCreation, homestead)
if intrinsicGas.BitLen() > 64 {
return nil, nil, nil, vm.ErrOutOfGas
}
- if err = self.useGas(intrinsicGas.Uint64()); err != nil {
+ if err = st.useGas(intrinsicGas.Uint64()); err != nil {
return nil, nil, nil, err
}
var (
- evm = self.evm
+ evm = st.evm
// vm errors do not affect consensus and are therefore
// not assigned to err, except for insufficient balance
// error.
vmerr error
)
if contractCreation {
- ret, _, self.gas, vmerr = evm.Create(sender, self.data, self.gas, self.value)
+ ret, _, st.gas, vmerr = evm.Create(sender, st.data, st.gas, st.value)
} else {
// Increment the nonce for the next transaction
- self.state.SetNonce(sender.Address(), self.state.GetNonce(sender.Address())+1)
- ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value)
+ st.state.SetNonce(sender.Address(), st.state.GetNonce(sender.Address())+1)
+ ret, st.gas, vmerr = evm.Call(sender, st.to().Address(), st.data, st.gas, st.value)
}
if vmerr != nil {
log.Debug("VM returned with error", "err", err)
@@ -250,33 +251,33 @@ func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *b
return nil, nil, nil, vmerr
}
}
- requiredGas = new(big.Int).Set(self.gasUsed())
+ requiredGas = new(big.Int).Set(st.gasUsed())
- self.refundGas()
- self.state.AddBalance(self.evm.Coinbase, new(big.Int).Mul(self.gasUsed(), self.gasPrice))
+ st.refundGas()
+ st.state.AddBalance(st.evm.Coinbase, new(big.Int).Mul(st.gasUsed(), st.gasPrice))
- return ret, requiredGas, self.gasUsed(), err
+ return ret, requiredGas, st.gasUsed(), err
}
-func (self *StateTransition) refundGas() {
+func (st *StateTransition) refundGas() {
// Return eth for remaining gas to the sender account,
// exchanged at the original rate.
- sender := self.from() // err already checked
- remaining := new(big.Int).Mul(new(big.Int).SetUint64(self.gas), self.gasPrice)
- self.state.AddBalance(sender.Address(), remaining)
+ sender := st.from() // err already checked
+ remaining := new(big.Int).Mul(new(big.Int).SetUint64(st.gas), st.gasPrice)
+ st.state.AddBalance(sender.Address(), remaining)
// Apply refund counter, capped to half of the used gas.
- uhalf := remaining.Div(self.gasUsed(), common.Big2)
- refund := math.BigMin(uhalf, self.state.GetRefund())
- self.gas += refund.Uint64()
+ uhalf := remaining.Div(st.gasUsed(), common.Big2)
+ refund := math.BigMin(uhalf, st.state.GetRefund())
+ st.gas += refund.Uint64()
- self.state.AddBalance(sender.Address(), refund.Mul(refund, self.gasPrice))
+ st.state.AddBalance(sender.Address(), refund.Mul(refund, st.gasPrice))
// Also return remaining gas to the block gas counter so it is
// available for the next transaction.
- self.gp.AddGas(new(big.Int).SetUint64(self.gas))
+ st.gp.AddGas(new(big.Int).SetUint64(st.gas))
}
-func (self *StateTransition) gasUsed() *big.Int {
- return new(big.Int).Sub(self.initialGas, new(big.Int).SetUint64(self.gas))
+func (st *StateTransition) gasUsed() *big.Int {
+ return new(big.Int).Sub(st.initialGas, new(big.Int).SetUint64(st.gas))
}
diff --git a/core/tx_list.go b/core/tx_list.go
index eb380da0b..626d3a3b7 100644
--- a/core/tx_list.go
+++ b/core/tx_list.go
@@ -220,9 +220,11 @@ func (m *txSortedMap) Flatten() types.Transactions {
// the executable/pending queue; and for storing gapped transactions for the non-
// executable/future queue, with minor behavioral changes.
type txList struct {
- strict bool // Whether nonces are strictly continuous or not
- txs *txSortedMap // Heap indexed sorted hash map of the transactions
- costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance)
+ strict bool // Whether nonces are strictly continuous or not
+ txs *txSortedMap // Heap indexed sorted hash map of the transactions
+
+ costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance)
+ gascap *big.Int // Gas limit of the highest spending transaction (reset only if exceeds block limit)
}
// newTxList create a new transaction list for maintaining nonce-indexable fast,
@@ -232,6 +234,7 @@ func newTxList(strict bool) *txList {
strict: strict,
txs: newTxSortedMap(),
costcap: new(big.Int),
+ gascap: new(big.Int),
}
}
@@ -244,13 +247,13 @@ func (l *txList) Overlaps(tx *types.Transaction) bool {
// Add tries to insert a new transaction into the list, returning whether the
// transaction was accepted, and if yes, any previous transaction it replaced.
//
-// If the new transaction is accepted into the list, the lists' cost threshold
-// is also potentially updated.
-func (l *txList) Add(tx *types.Transaction) (bool, *types.Transaction) {
+// If the new transaction is accepted into the list, the list's cost and gas
+// thresholds are also potentially updated.
+func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transaction) {
// If there's an older better transaction, abort
old := l.txs.Get(tx.Nonce())
if old != nil {
- threshold := new(big.Int).Div(new(big.Int).Mul(old.GasPrice(), big.NewInt(100+minPriceBumpPercent)), big.NewInt(100))
+ threshold := new(big.Int).Div(new(big.Int).Mul(old.GasPrice(), big.NewInt(100+int64(priceBump))), big.NewInt(100))
if threshold.Cmp(tx.GasPrice()) >= 0 {
return false, nil
}
@@ -260,6 +263,9 @@ func (l *txList) Add(tx *types.Transaction) (bool, *types.Transaction) {
if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 {
l.costcap = cost
}
+ if gas := tx.Gas(); l.gascap.Cmp(gas) < 0 {
+ l.gascap = gas
+ }
return true, old
}
@@ -270,23 +276,25 @@ func (l *txList) Forward(threshold uint64) types.Transactions {
return l.txs.Forward(threshold)
}
-// Filter removes all transactions from the list with a cost higher than the
-// provided threshold. Every removed transaction is returned for any post-removal
-// maintenance. Strict-mode invalidated transactions are also returned.
+// Filter removes all transactions from the list with a cost or gas limit higher
+// than the provided thresholds. Every removed transaction is returned for any
+// post-removal maintenance. Strict-mode invalidated transactions are also
+// returned.
//
-// This method uses the cached costcap to quickly decide if there's even a point
-// in calculating all the costs or if the balance covers all. If the threshold is
-// lower than the costcap, the costcap will be reset to a new high after removing
-// expensive the too transactions.
-func (l *txList) Filter(threshold *big.Int) (types.Transactions, types.Transactions) {
+// This method uses the cached costcap and gascap to quickly decide if there's even
+// a point in calculating all the costs or if the balance covers all. If the threshold
+// is lower than the cost or gas cap, the caps will be reset to a new high after removing
+// the newly invalidated transactions.
+func (l *txList) Filter(costLimit, gasLimit *big.Int) (types.Transactions, types.Transactions) {
// If all transactions are below the threshold, short circuit
- if l.costcap.Cmp(threshold) <= 0 {
+ if l.costcap.Cmp(costLimit) <= 0 && l.gascap.Cmp(gasLimit) <= 0 {
return nil, nil
}
- l.costcap = new(big.Int).Set(threshold) // Lower the cap to the threshold
+ l.costcap = new(big.Int).Set(costLimit) // Lower the caps to the thresholds
+ l.gascap = new(big.Int).Set(gasLimit)
// Filter out all the transactions above the account's funds
- removed := l.txs.Filter(func(tx *types.Transaction) bool { return tx.Cost().Cmp(threshold) > 0 })
+ removed := l.txs.Filter(func(tx *types.Transaction) bool { return tx.Cost().Cmp(costLimit) > 0 || tx.Gas().Cmp(gasLimit) > 0 })
// If the list was strict, filter anything above the lowest nonce
var invalids types.Transactions
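
The replacement rule used by Add above is a plain percentage test: a transaction at an already-occupied nonce only displaces the old one when its gas price strictly exceeds oldPrice*(100+priceBump)/100. A small self-contained illustration of that threshold computation; the function and variable names below are ours, only the formula and the strict inequality come from txList.Add:

package main

import (
	"fmt"
	"math/big"
)

// replaces reports whether a new gas price is high enough to displace an
// existing transaction at the same nonce, given a priceBump percentage.
func replaces(oldPrice, newPrice *big.Int, priceBump uint64) bool {
	threshold := new(big.Int).Div(
		new(big.Int).Mul(oldPrice, big.NewInt(100+int64(priceBump))),
		big.NewInt(100),
	)
	// txList.Add rejects the newcomer when threshold >= newPrice, so the
	// replacement must be strictly above old + priceBump percent.
	return threshold.Cmp(newPrice) < 0
}

func main() {
	old := big.NewInt(100)
	fmt.Println(replaces(old, big.NewInt(110), 10)) // false: exactly +10% is not enough
	fmt.Println(replaces(old, big.NewInt(111), 10)) // true: strictly above the threshold
}
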
diff --git a/core/tx_list_test.go b/core/tx_list_test.go
index 92b211937..b4f0b5228 100644
--- a/core/tx_list_test.go
+++ b/core/tx_list_test.go
@@ -38,7 +38,7 @@ func TestStrictTxListAdd(t *testing.T) {
// Insert the transactions in a random order
list := newTxList(true)
for _, v := range rand.Perm(len(txs)) {
- list.Add(txs[v])
+ list.Add(txs[v], DefaultTxPoolConfig.PriceBump)
}
// Verify internal state
if len(list.txs.items) != len(txs) {
diff --git a/core/tx_pool.go b/core/tx_pool.go
index a0373ca7d..1f5b46d4b 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -48,14 +48,8 @@ var (
)
var (
- minPendingPerAccount = uint64(16) // Min number of guaranteed transaction slots per address
- maxPendingTotal = uint64(4096) // Max limit of pending transactions from all accounts (soft)
- maxQueuedPerAccount = uint64(64) // Max limit of queued transactions per address
- maxQueuedTotal = uint64(1024) // Max limit of queued transactions from all accounts
- maxQueuedLifetime = 3 * time.Hour // Max amount of time transactions from idle accounts are queued
- minPriceBumpPercent = int64(10) // Minimum price bump needed to replace an old transaction
- evictionInterval = time.Minute // Time interval to check for evictable transactions
- statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
+ evictionInterval = time.Minute // Time interval to check for evictable transactions
+ statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)
var (
@@ -78,6 +72,48 @@ var (
type stateFn func() (*state.StateDB, error)
+// TxPoolConfig are the configuration parameters of the transaction pool.
+type TxPoolConfig struct {
+ PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
+ PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
+
+ AccountSlots uint64 // Minimum number of executable transaction slots guaranteed per account
+ GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts
+ AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
+ GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts
+
+ Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
+}
+
+// DefaultTxPoolConfig contains the default configurations for the transaction
+// pool.
+var DefaultTxPoolConfig = TxPoolConfig{
+ PriceLimit: 1,
+ PriceBump: 10,
+
+ AccountSlots: 16,
+ GlobalSlots: 4096,
+ AccountQueue: 64,
+ GlobalQueue: 1024,
+
+ Lifetime: 3 * time.Hour,
+}
+
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
+func (config *TxPoolConfig) sanitize() TxPoolConfig {
+ conf := *config
+ if conf.PriceLimit < 1 {
+ log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
+ conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
+ }
+ if conf.PriceBump < 1 {
+ log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
+ conf.PriceBump = DefaultTxPoolConfig.PriceBump
+ }
+ return conf
+}
+
// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
@@ -86,7 +122,8 @@ type stateFn func() (*state.StateDB, error)
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
- config *params.ChainConfig
+ config TxPoolConfig
+ chainconfig *params.ChainConfig
currentState stateFn // The state function which will allow us to do some pre checks
pendingState *state.ManagedState
gasLimit func() *big.Int // The current gas limit function callback
@@ -109,10 +146,17 @@ type TxPool struct {
homestead bool
}
-func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
+// NewTxPool creates a new transaction pool to gather, sort and filter inbound
+// transactions from the network.
+func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
+ // Sanitize the input to ensure no vulnerable gas prices are set
+ config = (&config).sanitize()
+
+ // Create the transaction pool with its initial settings
pool := &TxPool{
config: config,
- signer: types.NewEIP155Signer(config.ChainId),
+ chainconfig: chainconfig,
+ signer: types.NewEIP155Signer(chainconfig.ChainId),
pending: make(map[common.Address]*txList),
queue: make(map[common.Address]*txList),
beats: make(map[common.Address]time.Time),
@@ -120,7 +164,7 @@ func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentState
eventMux: eventMux,
currentState: currentStateFn,
gasLimit: gasLimitFn,
- gasPrice: big.NewInt(1),
+ gasPrice: new(big.Int).SetUint64(config.PriceLimit),
pendingState: nil,
locals: newTxSet(),
events: eventMux.Subscribe(ChainHeadEvent{}, RemovedTransactionEvent{}),
@@ -129,6 +173,7 @@ func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentState
pool.priced = newTxPricedList(&pool.all)
pool.resetState()
+ // Start the various event loops and return
pool.wg.Add(2)
go pool.eventLoop()
go pool.expirationLoop()
@@ -159,7 +204,7 @@ func (pool *TxPool) eventLoop() {
case ChainHeadEvent:
pool.mu.Lock()
if ev.Block != nil {
- if pool.config.IsHomestead(ev.Block.Number()) {
+ if pool.chainconfig.IsHomestead(ev.Block.Number()) {
pool.homestead = true
}
}
@@ -209,6 +254,7 @@ func (pool *TxPool) resetState() {
pool.promoteExecutables(currentState)
}
+// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
pool.events.Unsubscribe()
close(pool.quit)
@@ -238,6 +284,7 @@ func (pool *TxPool) SetGasPrice(price *big.Int) {
log.Info("Transaction pool price threshold updated", "price", price)
}
+// State returns the virtual managed state of the transaction pool.
func (pool *TxPool) State() *state.ManagedState {
pool.mu.RLock()
defer pool.mu.RUnlock()
@@ -386,7 +433,7 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
return false, err
}
// If the transaction pool is full, discard underpriced transactions
- if uint64(len(pool.all)) >= maxPendingTotal+maxQueuedTotal {
+ if uint64(len(pool.all)) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
// If the new transaction is underpriced, don't accept it
if pool.priced.Underpriced(tx, pool.locals) {
log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
@@ -394,7 +441,7 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
return false, ErrUnderpriced
}
// New transaction is better than our worse ones, make room for it
- drop := pool.priced.Discard(len(pool.all)-int(maxPendingTotal+maxQueuedTotal-1), pool.locals)
+ drop := pool.priced.Discard(len(pool.all)-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
for _, tx := range drop {
log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
underpricedTxCounter.Inc(1)
@@ -405,7 +452,7 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
from, _ := types.Sender(pool.signer, tx) // already validated
if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
// Nonce already pending, check if required price bump is met
- inserted, old := list.Add(tx)
+ inserted, old := list.Add(tx, pool.config.PriceBump)
if !inserted {
pendingDiscardCounter.Inc(1)
return false, ErrReplaceUnderpriced
@@ -440,7 +487,7 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er
if pool.queue[from] == nil {
pool.queue[from] = newTxList(false)
}
- inserted, old := pool.queue[from].Add(tx)
+ inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
if !inserted {
// An older transaction was better, discard this
queuedDiscardCounter.Inc(1)
@@ -467,7 +514,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
}
list := pool.pending[addr]
- inserted, old := list.Add(tx)
+ inserted, old := list.Add(tx, pool.config.PriceBump)
if !inserted {
// An older transaction was better, discard this
delete(pool.all, hash)
@@ -616,6 +663,8 @@ func (pool *TxPool) removeTx(hash common.Hash) {
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(state *state.StateDB) {
+ gaslimit := pool.gasLimit()
+
// Iterate over all accounts and promote any executable transactions
queued := uint64(0)
for addr, list := range pool.queue {
@@ -626,8 +675,8 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
delete(pool.all, hash)
pool.priced.Removed()
}
- // Drop all transactions that are too costly (low balance)
- drops, _ := list.Filter(state.GetBalance(addr))
+ // Drop all transactions that are too costly (low balance or out of gas)
+ drops, _ := list.Filter(state.GetBalance(addr), gaslimit)
for _, tx := range drops {
hash := tx.Hash()
log.Trace("Removed unpayable queued transaction", "hash", hash)
@@ -642,7 +691,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
pool.promoteTx(addr, hash, tx)
}
// Drop all transactions over the allowed limit
- for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
+ for _, tx := range list.Cap(int(pool.config.AccountQueue)) {
hash := tx.Hash()
log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
delete(pool.all, hash)
@@ -661,13 +710,13 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
for _, list := range pool.pending {
pending += uint64(list.Len())
}
- if pending > maxPendingTotal {
+ if pending > pool.config.GlobalSlots {
pendingBeforeCap := pending
// Assemble a spam order to penalize large transactors first
spammers := prque.New()
for addr, list := range pool.pending {
// Only evict transactions from high rollers
- if uint64(list.Len()) > minPendingPerAccount {
+ if uint64(list.Len()) > pool.config.AccountSlots {
// Skip local accounts as pools should maintain backlogs for themselves
for _, tx := range list.txs.items {
if !pool.locals.contains(tx.Hash()) {
@@ -679,7 +728,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
}
// Gradually drop transactions from offenders
offenders := []common.Address{}
- for pending > maxPendingTotal && !spammers.Empty() {
+ for pending > pool.config.GlobalSlots && !spammers.Empty() {
// Retrieve the next offender if not local address
offender, _ := spammers.Pop()
offenders = append(offenders, offender.(common.Address))
@@ -690,7 +739,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
threshold := pool.pending[offender.(common.Address)].Len()
// Iteratively reduce all offenders until below limit or threshold reached
- for pending > maxPendingTotal && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
+ for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
for i := 0; i < len(offenders)-1; i++ {
list := pool.pending[offenders[i]]
list.Cap(list.Len() - 1)
@@ -700,8 +749,8 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
}
}
// If still above threshold, reduce to limit or min allowance
- if pending > maxPendingTotal && len(offenders) > 0 {
- for pending > maxPendingTotal && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > minPendingPerAccount {
+ if pending > pool.config.GlobalSlots && len(offenders) > 0 {
+ for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
for _, addr := range offenders {
list := pool.pending[addr]
list.Cap(list.Len() - 1)
@@ -712,7 +761,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
pendingRLCounter.Inc(int64(pendingBeforeCap - pending))
}
// If we've queued more transactions than the hard limit, drop oldest ones
- if queued > maxQueuedTotal {
+ if queued > pool.config.GlobalQueue {
// Sort all accounts with queued transactions by heartbeat
addresses := make(addresssByHeartbeat, 0, len(pool.queue))
for addr := range pool.queue {
@@ -721,7 +770,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
sort.Sort(addresses)
// Drop transactions until the total is below the limit
- for drop := queued - maxQueuedTotal; drop > 0; {
+ for drop := queued - pool.config.GlobalQueue; drop > 0; {
addr := addresses[len(addresses)-1]
list := pool.queue[addr.address]
@@ -751,6 +800,8 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
// executable/pending queue and any subsequent transactions that become unexecutable
// are moved back into the future queue.
func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
+ gaslimit := pool.gasLimit()
+
// Iterate over all accounts and demote any non-executable transactions
for addr, list := range pool.pending {
nonce := state.GetNonce(addr)
@@ -762,8 +813,8 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
delete(pool.all, hash)
pool.priced.Removed()
}
- // Drop all transactions that are too costly (low balance), and queue any invalids back for later
- drops, invalids := list.Filter(state.GetBalance(addr))
+ // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
+ drops, invalids := list.Filter(state.GetBalance(addr), gaslimit)
for _, tx := range drops {
hash := tx.Hash()
log.Trace("Removed unpayable pending transaction", "hash", hash)
@@ -798,7 +849,7 @@ func (pool *TxPool) expirationLoop() {
case <-evict.C:
pool.mu.Lock()
for addr := range pool.queue {
- if time.Since(pool.beats[addr]) > maxQueuedLifetime {
+ if time.Since(pool.beats[addr]) > pool.config.Lifetime {
for _, tx := range pool.queue[addr].Flatten() {
pool.removeTx(tx.Hash())
}
@@ -850,22 +901,22 @@ func newTxSet() *txSet {
// contains returns true if the set contains the given transaction hash
// (not thread safe, should be called from a locked environment)
-func (self *txSet) contains(hash common.Hash) bool {
- _, ok := self.txMap[hash]
+func (ts *txSet) contains(hash common.Hash) bool {
+ _, ok := ts.txMap[hash]
return ok
}
// add adds a transaction hash to the set, then removes entries older than txSetDuration
// (not thread safe, should be called from a locked environment)
-func (self *txSet) add(hash common.Hash) {
- self.txMap[hash] = struct{}{}
+func (ts *txSet) add(hash common.Hash) {
+ ts.txMap[hash] = struct{}{}
now := time.Now()
- self.txOrd[self.addPtr] = txOrdType{hash: hash, time: now}
- self.addPtr++
+ ts.txOrd[ts.addPtr] = txOrdType{hash: hash, time: now}
+ ts.addPtr++
delBefore := now.Add(-txSetDuration)
- for self.delPtr < self.addPtr && self.txOrd[self.delPtr].time.Before(delBefore) {
- delete(self.txMap, self.txOrd[self.delPtr].hash)
- delete(self.txOrd, self.delPtr)
- self.delPtr++
+ for ts.delPtr < ts.addPtr && ts.txOrd[ts.delPtr].time.Before(delBefore) {
+ delete(ts.txMap, ts.txOrd[ts.delPtr].hash)
+ delete(ts.txOrd, ts.delPtr)
+ ts.delPtr++
}
}
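
Pulled together, the pool is now parameterized by a TxPoolConfig value instead of package-level globals: callers copy DefaultTxPoolConfig, adjust the limits they care about, and hand the result to the new NewTxPool signature along with the chain config, event mux, state and gas-limit callbacks. A hedged sketch of that wiring, reusing the in-memory setup pattern from the tests below; it is an illustration, not a complete node:

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Copy the defaults and adjust only the limits this node cares about;
	// sanitize() pushes PriceLimit/PriceBump back to the defaults if set below 1.
	cfg := core.DefaultTxPoolConfig
	cfg.AccountQueue = 128 // more non-executable slots per sender
	cfg.GlobalQueue = 2048 // and a larger shared queue overall

	db, _ := ethdb.NewMemDatabase()
	statedb, _ := state.New(common.Hash{}, db)

	pool := core.NewTxPool(cfg, params.TestChainConfig, new(event.TypeMux),
		func() (*state.StateDB, error) { return statedb, nil },
		func() *big.Int { return big.NewInt(1000000) })
	defer pool.Stop()
}
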
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index e773daa2c..c12bd20a1 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -46,7 +46,7 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
statedb, _ := state.New(common.Hash{}, db)
key, _ := crypto.GenerateKey()
- newPool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+ newPool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
newPool.resetState()
return newPool, key
@@ -95,7 +95,7 @@ func TestStateChangeDuringPoolReset(t *testing.T) {
gasLimitFunc := func() *big.Int { return big.NewInt(1000000000) }
- txpool := NewTxPool(params.TestChainConfig, mux, stateFunc, gasLimitFunc)
+ txpool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, mux, stateFunc, gasLimitFunc)
txpool.resetState()
nonce := txpool.State().GetNonce(address)
@@ -397,50 +397,79 @@ func TestTransactionDropping(t *testing.T) {
var (
tx0 = transaction(0, big.NewInt(100), key)
tx1 = transaction(1, big.NewInt(200), key)
+ tx2 = transaction(2, big.NewInt(300), key)
tx10 = transaction(10, big.NewInt(100), key)
tx11 = transaction(11, big.NewInt(200), key)
+ tx12 = transaction(12, big.NewInt(300), key)
)
pool.promoteTx(account, tx0.Hash(), tx0)
pool.promoteTx(account, tx1.Hash(), tx1)
+ pool.promoteTx(account, tx1.Hash(), tx2)
pool.enqueueTx(tx10.Hash(), tx10)
pool.enqueueTx(tx11.Hash(), tx11)
+ pool.enqueueTx(tx11.Hash(), tx12)
// Check that pre and post validations leave the pool as is
- if pool.pending[account].Len() != 2 {
- t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 2)
+ if pool.pending[account].Len() != 3 {
+ t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
}
- if pool.queue[account].Len() != 2 {
- t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 2)
+ if pool.queue[account].Len() != 3 {
+ t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
}
if len(pool.all) != 4 {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
}
pool.resetState()
- if pool.pending[account].Len() != 2 {
- t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 2)
+ if pool.pending[account].Len() != 3 {
+ t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
}
- if pool.queue[account].Len() != 2 {
- t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 2)
+ if pool.queue[account].Len() != 3 {
+ t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
}
if len(pool.all) != 4 {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
}
// Reduce the balance of the account, and check that invalidated transactions are dropped
- state.AddBalance(account, big.NewInt(-750))
+ state.AddBalance(account, big.NewInt(-650))
pool.resetState()
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
t.Errorf("funded pending transaction missing: %v", tx0)
}
- if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok {
+ if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; !ok {
+ t.Errorf("funded pending transaction missing: %v", tx0)
+ }
+ if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok {
t.Errorf("out-of-fund pending transaction present: %v", tx1)
}
if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
t.Errorf("funded queued transaction missing: %v", tx10)
}
- if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok {
+ if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; !ok {
+ t.Errorf("funded queued transaction missing: %v", tx10)
+ }
+ if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok {
t.Errorf("out-of-fund queued transaction present: %v", tx11)
}
+ if len(pool.all) != 4 {
+ t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
+ }
+ // Reduce the block gas limit, check that invalidated transactions are dropped
+ pool.gasLimit = func() *big.Int { return big.NewInt(100) }
+ pool.resetState()
+
+ if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
+ t.Errorf("funded pending transaction missing: %v", tx0)
+ }
+ if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok {
+ t.Errorf("over-gased pending transaction present: %v", tx1)
+ }
+ if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
+ t.Errorf("funded queued transaction missing: %v", tx10)
+ }
+ if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok {
+ t.Errorf("over-gased queued transaction present: %v", tx11)
+ }
if len(pool.all) != 2 {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 2)
}
@@ -533,25 +562,25 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
pool.resetState()
// Keep queuing up transactions and make sure all above a limit are dropped
- for i := uint64(1); i <= maxQueuedPerAccount+5; i++ {
+ for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue+5; i++ {
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
if len(pool.pending) != 0 {
t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0)
}
- if i <= maxQueuedPerAccount {
+ if i <= DefaultTxPoolConfig.AccountQueue {
if pool.queue[account].Len() != int(i) {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i)
}
} else {
- if pool.queue[account].Len() != int(maxQueuedPerAccount) {
- t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), maxQueuedPerAccount)
+ if pool.queue[account].Len() != int(DefaultTxPoolConfig.AccountQueue) {
+ t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), DefaultTxPoolConfig.AccountQueue)
}
}
}
- if len(pool.all) != int(maxQueuedPerAccount) {
- t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), maxQueuedPerAccount)
+ if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue) {
+ t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue)
}
}
@@ -559,14 +588,14 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
// some threshold, the higher transactions are dropped to prevent DOS attacks.
func TestTransactionQueueGlobalLimiting(t *testing.T) {
// Reduce the queue limits to shorten test time
- defer func(old uint64) { maxQueuedTotal = old }(maxQueuedTotal)
- maxQueuedTotal = maxQueuedPerAccount * 3
+ defer func(old uint64) { DefaultTxPoolConfig.GlobalQueue = old }(DefaultTxPoolConfig.GlobalQueue)
+ DefaultTxPoolConfig.GlobalQueue = DefaultTxPoolConfig.AccountQueue * 3
// Create the pool to test the limit enforcement with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
- pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+ pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a number of test accounts and fund them
@@ -580,7 +609,7 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
// Generate and queue a batch of transactions
nonces := make(map[common.Address]uint64)
- txs := make(types.Transactions, 0, 3*maxQueuedTotal)
+ txs := make(types.Transactions, 0, 3*DefaultTxPoolConfig.GlobalQueue)
for len(txs) < cap(txs) {
key := keys[rand.Intn(len(keys))]
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -593,13 +622,13 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
queued := 0
for addr, list := range pool.queue {
- if list.Len() > int(maxQueuedPerAccount) {
- t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), maxQueuedPerAccount)
+ if list.Len() > int(DefaultTxPoolConfig.AccountQueue) {
+ t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), DefaultTxPoolConfig.AccountQueue)
}
queued += list.Len()
}
- if queued > int(maxQueuedTotal) {
- t.Fatalf("total transactions overflow allowance: %d > %d", queued, maxQueuedTotal)
+ if queued > int(DefaultTxPoolConfig.GlobalQueue) {
+ t.Fatalf("total transactions overflow allowance: %d > %d", queued, DefaultTxPoolConfig.GlobalQueue)
}
}
@@ -608,9 +637,9 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
// on shuffling them around.
func TestTransactionQueueTimeLimiting(t *testing.T) {
// Reduce the queue limits to shorten test time
- defer func(old time.Duration) { maxQueuedLifetime = old }(maxQueuedLifetime)
+ defer func(old time.Duration) { DefaultTxPoolConfig.Lifetime = old }(DefaultTxPoolConfig.Lifetime)
defer func(old time.Duration) { evictionInterval = old }(evictionInterval)
- maxQueuedLifetime = time.Second
+ DefaultTxPoolConfig.Lifetime = time.Second
evictionInterval = time.Second
// Create a test account and fund it
@@ -621,7 +650,7 @@ func TestTransactionQueueTimeLimiting(t *testing.T) {
state.AddBalance(account, big.NewInt(1000000))
// Queue up a batch of transactions
- for i := uint64(1); i <= maxQueuedPerAccount; i++ {
+ for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue; i++ {
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
@@ -646,7 +675,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
pool.resetState()
// Keep queuing up transactions and make sure all above a limit are dropped
- for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
+ for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
@@ -657,8 +686,8 @@ func TestTransactionPendingLimiting(t *testing.T) {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0)
}
}
- if len(pool.all) != int(maxQueuedPerAccount+5) {
- t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), maxQueuedPerAccount+5)
+ if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue+5) {
+ t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue+5)
}
}
@@ -674,7 +703,7 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
state1, _ := pool1.currentState()
state1.AddBalance(account1, big.NewInt(1000000))
- for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
+ for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
if err := pool1.Add(transaction(origin+i, big.NewInt(100000), key1)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
@@ -686,7 +715,7 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
state2.AddBalance(account2, big.NewInt(1000000))
txns := []*types.Transaction{}
- for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
+ for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
txns = append(txns, transaction(origin+i, big.NewInt(100000), key2))
}
pool2.AddBatch(txns)
@@ -708,14 +737,14 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
// attacks.
func TestTransactionPendingGlobalLimiting(t *testing.T) {
// Reduce the queue limits to shorten test time
- defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal)
- maxPendingTotal = minPendingPerAccount * 10
+ defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
+ DefaultTxPoolConfig.GlobalSlots = DefaultTxPoolConfig.AccountSlots * 10
// Create the pool to test the limit enforcement with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
- pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+ pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a number of test accounts and fund them
@@ -732,7 +761,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
txs := types.Transactions{}
for _, key := range keys {
addr := crypto.PubkeyToAddress(key.PublicKey)
- for j := 0; j < int(maxPendingTotal)/len(keys)*2; j++ {
+ for j := 0; j < int(DefaultTxPoolConfig.GlobalSlots)/len(keys)*2; j++ {
txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key))
nonces[addr]++
}
@@ -744,8 +773,8 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
for _, list := range pool.pending {
pending += list.Len()
}
- if pending > int(maxPendingTotal) {
- t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, maxPendingTotal)
+ if pending > int(DefaultTxPoolConfig.GlobalSlots) {
+ t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, DefaultTxPoolConfig.GlobalSlots)
}
}
@@ -754,14 +783,14 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
// the transactions are still kept.
func TestTransactionPendingMinimumAllowance(t *testing.T) {
// Reduce the queue limits to shorten test time
- defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal)
- maxPendingTotal = 0
+ defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
+ DefaultTxPoolConfig.GlobalSlots = 0
// Create the pool to test the limit enforcement with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
- pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+ pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a number of test accounts and fund them
@@ -778,7 +807,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
txs := types.Transactions{}
for _, key := range keys {
addr := crypto.PubkeyToAddress(key.PublicKey)
- for j := 0; j < int(minPendingPerAccount)*2; j++ {
+ for j := 0; j < int(DefaultTxPoolConfig.AccountSlots)*2; j++ {
txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key))
nonces[addr]++
}
@@ -787,8 +816,8 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
pool.AddBatch(txs)
for addr, list := range pool.pending {
- if list.Len() != int(minPendingPerAccount) {
- t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), minPendingPerAccount)
+ if list.Len() != int(DefaultTxPoolConfig.AccountSlots) {
+ t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), DefaultTxPoolConfig.AccountSlots)
}
}
}
@@ -803,7 +832,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
- pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+ pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a number of test accounts and fund them
@@ -874,17 +903,17 @@ func TestTransactionPoolRepricing(t *testing.T) {
// Note, local transactions are never allowed to be dropped.
func TestTransactionPoolUnderpricing(t *testing.T) {
// Reduce the queue limits to shorten test time
- defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal)
- maxPendingTotal = 2
+ defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
+ DefaultTxPoolConfig.GlobalSlots = 2
- defer func(old uint64) { maxQueuedTotal = old }(maxQueuedTotal)
- maxQueuedTotal = 2
+ defer func(old uint64) { DefaultTxPoolConfig.GlobalQueue = old }(DefaultTxPoolConfig.GlobalQueue)
+ DefaultTxPoolConfig.GlobalQueue = 2
// Create the pool to test the pricing enforcement with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
- pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+ pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a number of test accounts and fund them
@@ -960,7 +989,7 @@ func TestTransactionReplacement(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
- pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+ pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a test account to add transactions with
@@ -971,7 +1000,7 @@ func TestTransactionReplacement(t *testing.T) {
// Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
price := int64(100)
- threshold := (price * (100 + minPriceBumpPercent)) / 100
+ threshold := (price * (100 + int64(DefaultTxPoolConfig.PriceBump))) / 100
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), key)); err != nil {
t.Fatalf("failed to add original cheap pending transaction: %v", err)
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 17edc9e33..545f7d650 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -91,7 +91,7 @@ func (in *Interpreter) enforceRestrictions(op OpCode, operation operation, stack
}
// Run loops and evaluates the contract's code with the given input data and returns
-// the return byte-slice and an error if one occured.
+// the return byte-slice and an error if one occurred.
//
// It's important to note that any errors returned by the interpreter should be
// considered a revert-and-consume-all-gas operation. No error specific checks
diff --git a/eth/api.go b/eth/api.go
index 88b3dbbf9..81570988c 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -154,7 +154,11 @@ func (api *PrivateMinerAPI) Start(threads *int) error {
// Start the miner and return
if !api.e.IsMining() {
// Propagate the initial price point to the transaction pool
- api.e.txPool.SetGasPrice(api.e.gasPrice)
+ api.e.lock.RLock()
+ price := api.e.gasPrice
+ api.e.lock.RUnlock()
+
+ api.e.txPool.SetGasPrice(price)
return api.e.StartMining(true)
}
return nil
@@ -182,6 +186,10 @@ func (api *PrivateMinerAPI) SetExtra(extra string) (bool, error) {
// SetGasPrice sets the minimum accepted gas price for the miner.
func (api *PrivateMinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
+ api.e.lock.Lock()
+ api.e.gasPrice = (*big.Int)(&gasPrice)
+ api.e.lock.Unlock()
+
api.e.txPool.SetGasPrice((*big.Int)(&gasPrice))
return true
}
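The api.go hunks read the miner gas price under an RLock before handing it to the tx pool and update it under a full Lock. A minimal sketch of that access pattern, with illustrative names rather than the real Ethereum struct:

package main

import (
	"fmt"
	"math/big"
	"sync"
)

type node struct {
	lock     sync.RWMutex
	gasPrice *big.Int
}

// GasPrice snapshots the value under a read lock and returns a copy so
// callers cannot mutate the shared big.Int.
func (n *node) GasPrice() *big.Int {
	n.lock.RLock()
	defer n.lock.RUnlock()
	return new(big.Int).Set(n.gasPrice)
}

// SetGasPrice replaces the value under the write lock.
func (n *node) SetGasPrice(price *big.Int) {
	n.lock.Lock()
	defer n.lock.Unlock()
	n.gasPrice = new(big.Int).Set(price)
}

func main() {
	n := &node{gasPrice: big.NewInt(18)}
	n.SetGasPrice(big.NewInt(20))
	fmt.Println(n.GasPrice()) // 20
}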
diff --git a/eth/backend.go b/eth/backend.go
index 7c63fa51d..639792333 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -76,14 +76,14 @@ type Ethereum struct {
ApiBackend *EthApiBackend
- miner *miner.Miner
- gasPrice *big.Int
- Mining bool
- MinerThreads int
- etherbase common.Address
+ miner *miner.Miner
+ gasPrice *big.Int
+ etherbase common.Address
networkId uint64
netRPCService *ethapi.PublicNetAPI
+
+ lock sync.RWMutex // Protects the variadic fields (e.g. gas price and etherbase)
}
func (s *Ethereum) AddLesServer(ls LesServer) {
@@ -121,8 +121,8 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
shutdownChan: make(chan bool),
stopDbUpgrade: stopDbUpgrade,
networkId: config.NetworkId,
+ gasPrice: config.GasPrice,
etherbase: config.Etherbase,
- MinerThreads: config.MinerThreads,
}
if err := addMipmapBloomBins(chainDb); err != nil {
@@ -150,7 +150,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
core.WriteChainConfig(chainDb, genesisHash, chainConfig)
}
- newPool := core.NewTxPool(eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
+ newPool := core.NewTxPool(config.TxPool, eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
eth.txPool = newPool
maxPeers := config.MaxPeers
@@ -169,7 +169,6 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
}
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine)
- eth.gasPrice = config.GasPrice
eth.miner.SetExtra(makeExtraData(config.ExtraData))
eth.ApiBackend = &EthApiBackend{eth, nil}
@@ -295,8 +294,12 @@ func (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) {
}
func (s *Ethereum) Etherbase() (eb common.Address, err error) {
- if s.etherbase != (common.Address{}) {
- return s.etherbase, nil
+ s.lock.RLock()
+ etherbase := s.etherbase
+ s.lock.RUnlock()
+
+ if etherbase != (common.Address{}) {
+ return etherbase, nil
}
if wallets := s.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
@@ -308,7 +311,10 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) {
// set in js console via admin interface or wrapper from cli flags
func (self *Ethereum) SetEtherbase(etherbase common.Address) {
+ self.lock.Lock()
self.etherbase = etherbase
+ self.lock.Unlock()
+
self.miner.SetEtherbase(etherbase)
}
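The Etherbase() change above snapshots the configured address under the read lock, then falls back to the first wallet account. A simplified standalone sketch of that flow (types reduced to a plain address array; the wallet lookup is represented by a slice):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type address [20]byte

type backend struct {
	lock      sync.RWMutex
	etherbase address
	accounts  []address // stands in for AccountManager().Wallets()[0].Accounts()
}

func (b *backend) Etherbase() (address, error) {
	// Snapshot the configured etherbase under the read lock, then decide.
	b.lock.RLock()
	etherbase := b.etherbase
	b.lock.RUnlock()

	if etherbase != (address{}) {
		return etherbase, nil
	}
	if len(b.accounts) > 0 {
		return b.accounts[0], nil // fall back to the first account
	}
	return address{}, errors.New("etherbase must be explicitly specified")
}

func main() {
	b := &backend{accounts: []address{{0x01}}}
	eb, _ := b.Etherbase()
	fmt.Printf("%x\n", eb)
}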
diff --git a/eth/config.go b/eth/config.go
index 22c09b170..4109cff8b 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -44,6 +44,7 @@ var DefaultConfig = Config{
DatabaseCache: 128,
GasPrice: big.NewInt(18 * params.Shannon),
+ TxPool: core.DefaultTxPoolConfig,
GPO: gasprice.Config{
Blocks: 10,
Percentile: 50,
@@ -99,6 +100,9 @@ type Config struct {
EthashDatasetsInMem int
EthashDatasetsOnDisk int
+ // Transaction pool options
+ TxPool core.TxPoolConfig
+
// Gas Price Oracle options
GPO gasprice.Config
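With TxPool now embedded in eth.Config and defaulted to core.DefaultTxPoolConfig, an embedder can start from the defaults and override individual pool limits. Illustrative sketch only; the field names GlobalSlots, GlobalQueue and PriceBump are the ones exercised in the tx_pool tests above, and the values below are arbitrary examples, not defaults:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth"
)

func main() {
	cfg := eth.DefaultConfig // includes TxPool: core.DefaultTxPoolConfig
	cfg.TxPool.GlobalSlots = 8192
	cfg.TxPool.GlobalQueue = 1024
	cfg.TxPool.PriceBump = 10

	fmt.Printf("%+v\n", cfg.TxPool)
}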
diff --git a/eth/gen_config.go b/eth/gen_config.go
index 955facf8f..477479419 100644
--- a/eth/gen_config.go
+++ b/eth/gen_config.go
@@ -33,6 +33,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
EthashDatasetDir string
EthashDatasetsInMem int
EthashDatasetsOnDisk int
+ TxPool core.TxPoolConfig
GPO gasprice.Config
EnablePreimageRecording bool
DocRoot string `toml:"-"`
@@ -60,6 +61,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.EthashDatasetDir = c.EthashDatasetDir
enc.EthashDatasetsInMem = c.EthashDatasetsInMem
enc.EthashDatasetsOnDisk = c.EthashDatasetsOnDisk
+ enc.TxPool = c.TxPool
enc.GPO = c.GPO
enc.EnablePreimageRecording = c.EnablePreimageRecording
enc.DocRoot = c.DocRoot
@@ -90,6 +92,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
EthashDatasetDir *string
EthashDatasetsInMem *int
EthashDatasetsOnDisk *int
+ TxPool *core.TxPoolConfig
GPO *gasprice.Config
EnablePreimageRecording *bool
DocRoot *string `toml:"-"`
@@ -158,6 +161,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.EthashDatasetsOnDisk != nil {
c.EthashDatasetsOnDisk = *dec.EthashDatasetsOnDisk
}
+ if dec.TxPool != nil {
+ c.TxPool = *dec.TxPool
+ }
if dec.GPO != nil {
c.GPO = *dec.GPO
}
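The generated UnmarshalTOML above follows the usual "optional field" pattern: decode into a struct of pointer fields, then copy across only the fields that were actually present. A minimal standalone version of that idea (not the generated code itself; field names are just examples taken from the surrounding config):

package main

import "fmt"

type Config struct {
	DatabaseCache int
	NetworkId     uint64
}

// optionalConfig mirrors Config with pointer fields so absent TOML keys
// decode to nil instead of a zero value.
type optionalConfig struct {
	DatabaseCache *int
	NetworkId     *uint64
}

func (c *Config) apply(dec optionalConfig) {
	if dec.DatabaseCache != nil {
		c.DatabaseCache = *dec.DatabaseCache
	}
	if dec.NetworkId != nil {
		c.NetworkId = *dec.NetworkId
	}
}

func main() {
	c := Config{DatabaseCache: 128, NetworkId: 1}
	cache := 256
	c.apply(optionalConfig{DatabaseCache: &cache}) // NetworkId left untouched
	fmt.Printf("%+v\n", c)
}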
diff --git a/eth/handler.go b/eth/handler.go
index 16e371227..8c2f5660d 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -171,6 +171,11 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
return blockchain.CurrentBlock().NumberU64()
}
inserter := func(blocks types.Blocks) (int, error) {
+ // If fast sync is running, deny importing weird blocks
+ if atomic.LoadUint32(&manager.fastSync) == 1 {
+ log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
+ return 0, nil
+ }
atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
return manager.blockchain.InsertChain(blocks)
}
diff --git a/eth/sync.go b/eth/sync.go
index b0653acf9..8784b225d 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -183,6 +183,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
// The only scenario where this can happen is if the user manually (or via a
// bad block) rolled back a fast sync node below the sync point. In this case
// however it's safe to reenable fast sync.
+ atomic.StoreUint32(&pm.fastSync, 1)
mode = downloader.FastSync
}
if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
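Taken together, the handler.go and sync.go hunks form a small handshake around the fastSync flag: sync.go re-arms it before falling back to fast sync, and the fetcher's inserter refuses propagated blocks while it is set. A simplified standalone sketch of that mechanism:

package main

import (
	"fmt"
	"sync/atomic"
)

type manager struct {
	fastSync  uint32 // 1 while fast sync is running
	acceptTxs uint32
}

func (m *manager) insert(blockNumber uint64) error {
	if atomic.LoadUint32(&m.fastSync) == 1 {
		// Discard propagated blocks during fast sync instead of importing them.
		fmt.Println("discarded propagated block", blockNumber)
		return nil
	}
	atomic.StoreUint32(&m.acceptTxs, 1) // mark initial sync done on any import
	// ... the real inserter would call blockchain.InsertChain here
	return nil
}

func main() {
	m := &manager{}
	atomic.StoreUint32(&m.fastSync, 1) // what eth/sync.go now does before fast sync
	m.insert(42)                       // discarded
	atomic.StoreUint32(&m.fastSync, 0)
	m.insert(43) // imported
}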
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index 007347590..ad77cd1e8 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -427,21 +427,15 @@ type blockStats struct {
GasLimit *big.Int `json:"gasLimit"`
Diff string `json:"difficulty"`
TotalDiff string `json:"totalDifficulty"`
- Txs txStats `json:"transactions"`
+ Txs []txStats `json:"transactions"`
TxHash common.Hash `json:"transactionsRoot"`
Root common.Hash `json:"stateRoot"`
Uncles uncleStats `json:"uncles"`
}
-// txStats is a custom wrapper around a transaction array to force serializing
-// empty arrays instead of returning null for them.
-type txStats []*types.Transaction
-
-func (s txStats) MarshalJSON() ([]byte, error) {
- if txs := ([]*types.Transaction)(s); len(txs) > 0 {
- return json.Marshal(txs)
- }
- return []byte("[]"), nil
+// txStats is the information to report about individual transactions.
+type txStats struct {
+ Hash common.Hash `json:"hash"`
}
// uncleStats is a custom wrapper around an uncle array to force serializing
@@ -480,7 +474,7 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
var (
header *types.Header
td *big.Int
- txs []*types.Transaction
+ txs []txStats
uncles []*types.Header
)
if s.eth != nil {
@@ -491,7 +485,10 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
header = block.Header()
td = s.eth.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
- txs = block.Transactions()
+ txs = make([]txStats, len(block.Transactions()))
+ for i, tx := range block.Transactions() {
+ txs[i].Hash = tx.Hash()
+ }
uncles = block.Uncles()
} else {
// Light nodes would need on-demand lookups for transactions/uncles, skip
@@ -501,6 +498,7 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
header = s.les.BlockChain().CurrentHeader()
}
td = s.les.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
+ txs = []txStats{}
}
// Assemble and return the block stats
author, _ := s.engine.Author(header)
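After this change the ethstats payload carries an array of hash-only objects instead of full transaction bodies, and light nodes report an explicit empty array. A minimal sketch of the resulting JSON shape (Hash simplified to a string; the real field is common.Hash):

package main

import (
	"encoding/json"
	"fmt"
)

type txStats struct {
	Hash string `json:"hash"`
}

func main() {
	txs := []txStats{{Hash: "0xabc..."}, {Hash: "0xdef..."}}
	out, _ := json.Marshal(txs)
	fmt.Println(string(out)) // [{"hash":"0xabc..."},{"hash":"0xdef..."}]
}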
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 371786e4a..c4a871e48 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -117,16 +117,16 @@ func (s *PublicTxPoolAPI) Content() map[string]map[string]map[string]*RPCTransac
// Flatten the pending transactions
for account, txs := range pending {
dump := make(map[string]*RPCTransaction)
- for nonce, tx := range txs {
- dump[fmt.Sprintf("%d", nonce)] = newRPCPendingTransaction(tx)
+ for _, tx := range txs {
+ dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx)
}
content["pending"][account.Hex()] = dump
}
// Flatten the queued transactions
for account, txs := range queue {
dump := make(map[string]*RPCTransaction)
- for nonce, tx := range txs {
- dump[fmt.Sprintf("%d", nonce)] = newRPCPendingTransaction(tx)
+ for _, tx := range txs {
+ dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx)
}
content["queued"][account.Hex()] = dump
}
@@ -161,16 +161,16 @@ func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string]string {
// Flatten the pending transactions
for account, txs := range pending {
dump := make(map[string]string)
- for nonce, tx := range txs {
- dump[fmt.Sprintf("%d", nonce)] = format(tx)
+ for _, tx := range txs {
+ dump[fmt.Sprintf("%d", tx.Nonce())] = format(tx)
}
content["pending"][account.Hex()] = dump
}
// Flatten the queued transactions
for account, txs := range queue {
dump := make(map[string]string)
- for nonce, tx := range txs {
- dump[fmt.Sprintf("%d", nonce)] = format(tx)
+ for _, tx := range txs {
+ dump[fmt.Sprintf("%d", tx.Nonce())] = format(tx)
}
content["queued"][account.Hex()] = dump
}
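The Content/Inspect hunks above key each account's dump by the transaction's own nonce rather than its slice index, so accounts with gapped nonces keep their real numbering. A minimal sketch of that flattening step (transaction type reduced to just a nonce):

package main

import "fmt"

type tx struct{ nonce uint64 }

func (t tx) Nonce() uint64 { return t.nonce }

func main() {
	pending := []tx{{nonce: 5}, {nonce: 7}} // note the gap at 6
	dump := make(map[string]string)
	for _, t := range pending {
		dump[fmt.Sprintf("%d", t.Nonce())] = "…" // the real code renders the tx here
	}
	fmt.Println(dump) // map keyed "5" and "7", not "0" and "1"
}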