author    Péter Szilágyi <peterke@gmail.com>  2017-04-05 06:16:29 +0800
committer Felix Lange <fjl@users.noreply.github.com>  2017-04-05 06:16:29 +0800
commit    09777952ee476ff80d4b6e63b5041ff5ca0e441b (patch)
tree      e85320f88f548201e3476b3e7095e96fd071617b /core
parent    e50a5b77712d891ff409aa942a5cbc24e721b332 (diff)
core, consensus: pluggable consensus engines (#3817)
This commit adds pluggable consensus engines to go-ethereum. In short, it introduces a generic consensus interface and refactors the entire codebase to use it.
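
For orientation before the diff: the refactored call sites below go through three verification entry points on the new engine (VerifyHeader, VerifyHeaders and VerifyUncles). What follows is a minimal Go sketch of that interface, reconstructed only from those call sites; the exact ChainReader contract and any sealing or reward-finalization methods are assumptions for illustration, not part of this patch excerpt.

package consensus

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// ChainReader gives verifiers read access to the local chain. Its method set
// is an assumption; the diff only shows that a *core.BlockChain satisfies it.
type ChainReader interface {
	GetHeader(hash common.Hash, number uint64) *types.Header
}

// Engine is the pluggable consensus interface; the three methods below are
// the ones exercised by the call sites in this diff.
type Engine interface {
	// VerifyHeader checks a single header; seal toggles the PoW/nonce check.
	VerifyHeader(chain ChainReader, header *types.Header, seal bool) error

	// VerifyHeaders checks a batch of headers concurrently. It returns an
	// abort channel and a results channel yielding one error per header.
	VerifyHeaders(chain ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)

	// VerifyUncles checks that a block's uncles conform to the consensus rules.
	VerifyUncles(chain ChainReader, block *types.Block) error
}
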
Diffstat (limited to 'core')
-rw-r--r--  core/bench_test.go           |   6
-rw-r--r--  core/block_validator.go      | 275
-rw-r--r--  core/block_validator_test.go | 203
-rw-r--r--  core/blockchain.go           |  62
-rw-r--r--  core/blockchain_test.go      |  97
-rw-r--r--  core/chain_makers.go         |  13
-rw-r--r--  core/chain_makers_test.go    |   4
-rw-r--r--  core/chain_pow.go            |  87
-rw-r--r--  core/chain_pow_test.go       | 238
-rw-r--r--  core/dao_test.go             |  14
-rw-r--r--  core/database_util_test.go   |  60
-rw-r--r--  core/genesis_test.go         |   4
-rw-r--r--  core/headerchain.go          | 149
-rw-r--r--  core/state_processor.go      |  44
-rw-r--r--  core/types.go                |  30
-rw-r--r--  core/types/block.go          |  11
16 files changed, 335 insertions(+), 962 deletions(-)
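
The resulting construction pattern is sketched below, assuming the test helpers behave as shown in the hunks that follow (ethash.NewFaker returns an engine that accepts every header): callers now hand NewBlockChain a consensus.Engine where they previously passed a pow.PoW.

package core_test

import (
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
)

// newFakedChain mirrors the test setup in the hunks below: an in-memory
// database, a test genesis, and a faked ethash engine in place of pow.FakePow.
func newFakedChain() (*core.BlockChain, error) {
	db, _ := ethdb.NewMemDatabase()
	gspec := &core.Genesis{Config: params.TestChainConfig}
	gspec.MustCommit(db)

	// Before: NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
	// After:  any consensus.Engine; NewFaker accepts every header it is given.
	return core.NewBlockChain(db, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
}
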
diff --git a/core/bench_test.go b/core/bench_test.go
index a154ccbb1..20676fc97 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -25,13 +25,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
func BenchmarkInsertChain_empty_memdb(b *testing.B) {
@@ -176,7 +176,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Time the insertion of the new chain.
// State and blocks are stored in the same DB.
evmux := new(event.TypeMux)
- chainman, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
+ chainman, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
defer chainman.Stop()
b.ReportAllocs()
b.ResetTimer()
@@ -286,7 +286,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
- chain, err := NewBlockChain(db, params.TestChainConfig, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ chain, err := NewBlockChain(db, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if err != nil {
b.Fatalf("error creating chain: %v", err)
}
diff --git a/core/block_validator.go b/core/block_validator.go
index f93a9f40b..00457dd7a 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -19,22 +19,12 @@ package core
import (
"fmt"
"math/big"
- "time"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
- "gopkg.in/fatih/set.v0"
-)
-
-var (
- ExpDiffPeriod = big.NewInt(100000)
- big10 = big.NewInt(10)
- bigMinus99 = big.NewInt(-99)
)
// BlockValidator is responsible for validating block headers, uncles and
@@ -44,30 +34,24 @@ var (
type BlockValidator struct {
config *params.ChainConfig // Chain configuration options
bc *BlockChain // Canonical block chain
- Pow pow.PoW // Proof of work used for validating
+ engine consensus.Engine // Consensus engine used for validating
}
// NewBlockValidator returns a new block validator which is safe for re-use
-func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, pow pow.PoW) *BlockValidator {
+func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus.Engine) *BlockValidator {
validator := &BlockValidator{
config: config,
- Pow: pow,
+ engine: engine,
bc: blockchain,
}
return validator
}
-// ValidateBlock validates the given block's header and uncles and verifies the
-// the block header's transaction and uncle roots.
-//
-// ValidateBlock does not validate the header's pow. The pow work validated
-// separately so we can process them in parallel.
-//
-// ValidateBlock also validates and makes sure that any previous state (or present)
-// state that might or might not be present is checked to make sure that fast
-// sync has done it's job proper. This prevents the block validator from accepting
-// false positives where a header is present but the state is not.
-func (v *BlockValidator) ValidateBlock(block *types.Block) error {
+// ValidateBody validates the given block's uncles and verifies the block
+// header's transaction and uncle roots. The headers are assumed to be already
+// validated at this point.
+func (v *BlockValidator) ValidateBody(block *types.Block) error {
+ // Check whether the block's known, and if not, that it's linkable
if v.bc.HasBlock(block.Hash()) {
if _, err := state.New(block.Root(), v.bc.chainDb); err == nil {
return &KnownBlockError{block.Number(), block.Hash()}
@@ -80,30 +64,17 @@ func (v *BlockValidator) ValidateBlock(block *types.Block) error {
if _, err := state.New(parent.Root(), v.bc.chainDb); err != nil {
return ParentError(block.ParentHash())
}
-
+ // Header validity is known at this point, check the uncles and transactions
header := block.Header()
- // validate the block header
- if err := ValidateHeader(v.config, v.Pow, header, parent.Header(), false, false); err != nil {
+ if err := v.engine.VerifyUncles(v.bc, block); err != nil {
return err
}
- // verify the uncles are correctly rewarded
- if err := v.VerifyUncles(block, parent); err != nil {
- return err
+ if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
+ return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash)
}
-
- // Verify UncleHash before running other uncle validations
- unclesSha := types.CalcUncleHash(block.Uncles())
- if unclesSha != header.UncleHash {
- return fmt.Errorf("invalid uncles root hash (remote: %x local: %x)", header.UncleHash, unclesSha)
- }
-
- // The transactions Trie's root (R = (Tr [[i, RLP(T1)], [i, RLP(T2)], ... [n, RLP(Tn)]]))
- // can be used by light clients to make sure they've received the correct Txs
- txSha := types.DeriveSha(block.Transactions())
- if txSha != header.TxHash {
- return fmt.Errorf("invalid transaction root hash (remote: %x local: %x)", header.TxHash, txSha)
+ if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {
+ return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
-
return nil
}
@@ -135,222 +106,6 @@ func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *stat
return nil
}
-// VerifyUncles verifies the given block's uncles and applies the Ethereum
-// consensus rules to the various block headers included; it will return an
-// error if any of the included uncle headers were invalid. It returns an error
-// if the validation failed.
-func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
- // validate that there are at most 2 uncles included in this block
- if len(block.Uncles()) > 2 {
- return ValidationError("Block can only contain maximum 2 uncles (contained %v)", len(block.Uncles()))
- }
-
- uncles := set.New()
- ancestors := make(map[common.Hash]*types.Block)
- for _, ancestor := range v.bc.GetBlocksFromHash(block.ParentHash(), 7) {
- ancestors[ancestor.Hash()] = ancestor
- // Include ancestors uncles in the uncle set. Uncles must be unique.
- for _, uncle := range ancestor.Uncles() {
- uncles.Add(uncle.Hash())
- }
- }
- ancestors[block.Hash()] = block
- uncles.Add(block.Hash())
-
- for i, uncle := range block.Uncles() {
- hash := uncle.Hash()
- if uncles.Has(hash) {
- // Error not unique
- return UncleError("uncle[%d](%x) not unique", i, hash[:4])
- }
- uncles.Add(hash)
-
- if ancestors[hash] != nil {
- branch := fmt.Sprintf(" O - %x\n |\n", block.Hash())
- for h := range ancestors {
- branch += fmt.Sprintf(" O - %x\n |\n", h)
- }
- log.Warn(branch)
- return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
- }
-
- if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
- return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
- }
-
- if err := ValidateHeader(v.config, v.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
- return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
- }
- }
-
- return nil
-}
-
-// ValidateHeader validates the given header and, depending on the pow arg,
-// checks the proof of work of the given header. Returns an error if the
-// validation failed.
-func (v *BlockValidator) ValidateHeader(header, parent *types.Header, checkPow bool) error {
- // Short circuit if the parent is missing.
- if parent == nil {
- return ParentError(header.ParentHash)
- }
- // Short circuit if the header's already known or its parent is missing
- if v.bc.HasHeader(header.Hash()) {
- return nil
- }
- return ValidateHeader(v.config, v.Pow, header, parent, checkPow, false)
-}
-
-// Validates a header. Returns an error if the header is invalid.
-//
-// See YP section 4.3.4. "Block Header Validity"
-func ValidateHeader(config *params.ChainConfig, pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
- if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
- return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
- }
-
- if uncle {
- if header.Time.Cmp(math.MaxBig256) == 1 {
- return BlockTSTooBigErr
- }
- } else {
- if header.Time.Cmp(big.NewInt(time.Now().Unix())) == 1 {
- return BlockFutureErr
- }
- }
- if header.Time.Cmp(parent.Time) != 1 {
- return BlockEqualTSErr
- }
-
- expd := CalcDifficulty(config, header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
- if expd.Cmp(header.Difficulty) != 0 {
- return fmt.Errorf("Difficulty check failed for header (remote: %v local: %v)", header.Difficulty, expd)
- }
-
- a := new(big.Int).Set(parent.GasLimit)
- a = a.Sub(a, header.GasLimit)
- a.Abs(a)
- b := new(big.Int).Set(parent.GasLimit)
- b = b.Div(b, params.GasLimitBoundDivisor)
- if !(a.Cmp(b) < 0) || (header.GasLimit.Cmp(params.MinGasLimit) == -1) {
- return fmt.Errorf("GasLimit check failed for header (remote: %v local_max: %v)", header.GasLimit, b)
- }
-
- num := new(big.Int).Set(parent.Number)
- num.Sub(header.Number, num)
- if num.Cmp(big.NewInt(1)) != 0 {
- return BlockNumberErr
- }
-
- if checkPow {
- // Verify the nonce of the header. Return an error if it's not valid
- if err := pow.Verify(types.NewBlockWithHeader(header)); err != nil {
- return &BlockNonceErr{header.Number, header.Hash(), header.Nonce.Uint64()}
- }
- }
- // If all checks passed, validate the extra-data field for hard forks
- if err := ValidateDAOHeaderExtraData(config, header); err != nil {
- return err
- }
- if !uncle && config.EIP150Block != nil && config.EIP150Block.Cmp(header.Number) == 0 {
- if config.EIP150Hash != (common.Hash{}) && config.EIP150Hash != header.Hash() {
- return ValidationError("Homestead gas reprice fork hash mismatch: have 0x%x, want 0x%x", header.Hash(), config.EIP150Hash)
- }
- }
- return nil
-}
-
-// CalcDifficulty is the difficulty adjustment algorithm. It returns
-// the difficulty that a new block should have when created at time
-// given the parent block's time and difficulty.
-func CalcDifficulty(config *params.ChainConfig, time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
- if config.IsHomestead(new(big.Int).Add(parentNumber, common.Big1)) {
- return calcDifficultyHomestead(time, parentTime, parentNumber, parentDiff)
- } else {
- return calcDifficultyFrontier(time, parentTime, parentNumber, parentDiff)
- }
-}
-
-func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
- // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
- // algorithm:
- // diff = (parent_diff +
- // (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
- // ) + 2^(periodCount - 2)
-
- bigTime := new(big.Int).SetUint64(time)
- bigParentTime := new(big.Int).SetUint64(parentTime)
-
- // holds intermediate values to make the algo easier to read & audit
- x := new(big.Int)
- y := new(big.Int)
-
- // 1 - (block_timestamp -parent_timestamp) // 10
- x.Sub(bigTime, bigParentTime)
- x.Div(x, big10)
- x.Sub(common.Big1, x)
-
- // max(1 - (block_timestamp - parent_timestamp) // 10, -99)))
- if x.Cmp(bigMinus99) < 0 {
- x.Set(bigMinus99)
- }
-
- // (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
- y.Div(parentDiff, params.DifficultyBoundDivisor)
- x.Mul(y, x)
- x.Add(parentDiff, x)
-
- // minimum difficulty can ever be (before exponential factor)
- if x.Cmp(params.MinimumDifficulty) < 0 {
- x.Set(params.MinimumDifficulty)
- }
-
- // for the exponential factor
- periodCount := new(big.Int).Add(parentNumber, common.Big1)
- periodCount.Div(periodCount, ExpDiffPeriod)
-
- // the exponential factor, commonly referred to as "the bomb"
- // diff = diff + 2^(periodCount - 2)
- if periodCount.Cmp(common.Big1) > 0 {
- y.Sub(periodCount, common.Big2)
- y.Exp(common.Big2, y, nil)
- x.Add(x, y)
- }
-
- return x
-}
-
-func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
- diff := new(big.Int)
- adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
- bigTime := new(big.Int)
- bigParentTime := new(big.Int)
-
- bigTime.SetUint64(time)
- bigParentTime.SetUint64(parentTime)
-
- if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
- diff.Add(parentDiff, adjust)
- } else {
- diff.Sub(parentDiff, adjust)
- }
- if diff.Cmp(params.MinimumDifficulty) < 0 {
- diff.Set(params.MinimumDifficulty)
- }
-
- periodCount := new(big.Int).Add(parentNumber, common.Big1)
- periodCount.Div(periodCount, ExpDiffPeriod)
- if periodCount.Cmp(common.Big1) > 0 {
- // diff = diff + 2^(periodCount - 2)
- expDiff := periodCount.Sub(periodCount, common.Big2)
- expDiff.Exp(common.Big2, expDiff, nil)
- diff.Add(diff, expDiff)
- diff = math.BigMax(diff, params.MinimumDifficulty)
- }
-
- return diff
-}
-
// CalcGasLimit computes the gas limit of the next block after parent.
// The result may be modified by the caller.
// This is miner strategy, not consensus protocol.
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index a07dd9e51..abe1766b4 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -17,64 +17,179 @@
package core
import (
- "math/big"
+ "runtime"
"testing"
+ "time"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
-func testGenesis(account common.Address, balance *big.Int) *Genesis {
- return &Genesis{
- Config: params.TestChainConfig,
- Alloc: GenesisAlloc{account: {Balance: balance}},
+// Tests that simple header verification works, for both good and bad blocks.
+func TestHeaderVerification(t *testing.T) {
+ // Create a simple chain to verify
+ var (
+ testdb, _ = ethdb.NewMemDatabase()
+ gspec = &Genesis{Config: params.TestChainConfig}
+ genesis = gspec.MustCommit(testdb)
+ blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
+ )
+ headers := make([]*types.Header, len(blocks))
+ for i, block := range blocks {
+ headers[i] = block.Header()
+ }
+ // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
+ chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+
+ for i := 0; i < len(blocks); i++ {
+ for j, valid := range []bool{true, false} {
+ var results <-chan error
+
+ if valid {
+ engine := ethash.NewFaker()
+ _, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]}, []bool{true})
+ } else {
+ engine := ethash.NewFakeFailer(headers[i].Number.Uint64())
+ _, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]}, []bool{true})
+ }
+ // Wait for the verification result
+ select {
+ case result := <-results:
+ if (result == nil) != valid {
+ t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, result, valid)
+ }
+ case <-time.After(time.Second):
+ t.Fatalf("test %d.%d: verification timeout", i, j)
+ }
+ // Make sure no more data is returned
+ select {
+ case result := <-results:
+ t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result)
+ case <-time.After(25 * time.Millisecond):
+ }
+ }
+ chain.InsertChain(blocks[i : i+1])
}
}
-func TestNumber(t *testing.T) {
- chain := newTestBlockChain()
- statedb, _ := state.New(chain.Genesis().Root(), chain.chainDb)
- header := makeHeader(chain.config, chain.Genesis(), statedb)
- header.Number = big.NewInt(3)
- err := ValidateHeader(chain.config, pow.FakePow{}, header, chain.Genesis().Header(), false, false)
- if err != BlockNumberErr {
- t.Errorf("expected block number error, got %q", err)
+// Tests that concurrent header verification works, for both good and bad blocks.
+func TestHeaderConcurrentVerification2(t *testing.T) { testHeaderConcurrentVerification(t, 2) }
+func TestHeaderConcurrentVerification8(t *testing.T) { testHeaderConcurrentVerification(t, 8) }
+func TestHeaderConcurrentVerification32(t *testing.T) { testHeaderConcurrentVerification(t, 32) }
+
+func testHeaderConcurrentVerification(t *testing.T, threads int) {
+ // Create a simple chain to verify
+ var (
+ testdb, _ = ethdb.NewMemDatabase()
+ gspec = &Genesis{Config: params.TestChainConfig}
+ genesis = gspec.MustCommit(testdb)
+ blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
+ )
+ headers := make([]*types.Header, len(blocks))
+ seals := make([]bool, len(blocks))
+
+ for i, block := range blocks {
+ headers[i] = block.Header()
+ seals[i] = true
}
+ // Set the number of threads to verify on
+ old := runtime.GOMAXPROCS(threads)
+ defer runtime.GOMAXPROCS(old)
+
+ // Run the header checker for the entire block chain at once both for a valid and
+ // also an invalid chain (enough if one arbitrary block is invalid).
+ for i, valid := range []bool{true, false} {
+ var results <-chan error
+
+ if valid {
+ chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ _, results = chain.engine.VerifyHeaders(chain, headers, seals)
+ } else {
+ chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), new(event.TypeMux), vm.Config{})
+ _, results = chain.engine.VerifyHeaders(chain, headers, seals)
+ }
+ // Wait for all the verification results
+ checks := make(map[int]error)
+ for j := 0; j < len(blocks); j++ {
+ select {
+ case result := <-results:
+ checks[j] = result
- header = makeHeader(chain.config, chain.Genesis(), statedb)
- err = ValidateHeader(chain.config, pow.FakePow{}, header, chain.Genesis().Header(), false, false)
- if err == BlockNumberErr {
- t.Errorf("didn't expect block number error")
+ case <-time.After(time.Second):
+ t.Fatalf("test %d.%d: verification timeout", i, j)
+ }
+ }
+ // Check nonce check validity
+ for j := 0; j < len(blocks); j++ {
+ want := valid || (j < len(blocks)-2) // We chose the last-but-one nonce in the chain to fail
+ if (checks[j] == nil) != want {
+ t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, checks[j], want)
+ }
+ if !want {
+ // A few blocks after the first error may pass verification due to concurrent
+ // workers. We don't care about those in this test, just that the correct block
+ // errors out.
+ break
+ }
+ }
+ // Make sure no more data is returned
+ select {
+ case result := <-results:
+ t.Fatalf("test %d: unexpected result returned: %v", i, result)
+ case <-time.After(25 * time.Millisecond):
+ }
}
}
-func TestPutReceipt(t *testing.T) {
- db, _ := ethdb.NewMemDatabase()
-
- var addr common.Address
- addr[0] = 1
- var hash common.Hash
- hash[0] = 2
-
- receipt := new(types.Receipt)
- receipt.Logs = []*types.Log{{
- Address: addr,
- Topics: []common.Hash{hash},
- Data: []byte("hi"),
- BlockNumber: 42,
- TxHash: hash,
- TxIndex: 0,
- BlockHash: hash,
- Index: 0,
- }}
-
- WriteReceipts(db, types.Receipts{receipt})
- receipt = GetReceipt(db, common.Hash{})
- if receipt == nil {
- t.Error("expected to get 1 receipt, got none.")
+// Tests that aborting a header validation indeed prevents further checks from being
+// run, as well as checks that no left-over goroutines are leaked.
+func TestHeaderConcurrentAbortion2(t *testing.T) { testHeaderConcurrentAbortion(t, 2) }
+func TestHeaderConcurrentAbortion8(t *testing.T) { testHeaderConcurrentAbortion(t, 8) }
+func TestHeaderConcurrentAbortion32(t *testing.T) { testHeaderConcurrentAbortion(t, 32) }
+
+func testHeaderConcurrentAbortion(t *testing.T, threads int) {
+ // Create a simple chain to verify
+ var (
+ testdb, _ = ethdb.NewMemDatabase()
+ gspec = &Genesis{Config: params.TestChainConfig}
+ genesis = gspec.MustCommit(testdb)
+ blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 1024, nil)
+ )
+ headers := make([]*types.Header, len(blocks))
+ seals := make([]bool, len(blocks))
+
+ for i, block := range blocks {
+ headers[i] = block.Header()
+ seals[i] = true
+ }
+ // Set the number of threads to verify on
+ old := runtime.GOMAXPROCS(threads)
+ defer runtime.GOMAXPROCS(old)
+
+ // Start the verifications and immediately abort
+ chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), new(event.TypeMux), vm.Config{})
+ abort, results := chain.engine.VerifyHeaders(chain, headers, seals)
+ close(abort)
+
+ // Deplete the results channel
+ verified := 0
+ for depleted := false; !depleted; {
+ select {
+ case result := <-results:
+ if result != nil {
+ t.Errorf("header %d: validation failed: %v", verified, result)
+ }
+ verified++
+ case <-time.After(50 * time.Millisecond):
+ depleted = true
+ }
+ }
+ // Check that abortion was honored by not processing too many POWs
+ if verified > 2*threads {
+ t.Errorf("verification count too large: have %d, want below %d", verified, 2*threads)
}
}
diff --git a/core/blockchain.go b/core/blockchain.go
index a57832df0..4793431d8 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -39,7 +40,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/hashicorp/golang-lru"
@@ -104,7 +104,7 @@ type BlockChain struct {
procInterrupt int32 // interrupt signaler for block processing
wg sync.WaitGroup // chain processing wait group for shutting down
- pow pow.PoW
+ engine consensus.Engine
processor Processor // block processor interface
validator Validator // block and state validator interface
vmConfig vm.Config
@@ -115,7 +115,7 @@ type BlockChain struct {
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialiser the default Ethereum Validator and
// Processor.
-func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.PoW, mux *event.TypeMux, vmConfig vm.Config) (*BlockChain, error) {
+func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, mux *event.TypeMux, vmConfig vm.Config) (*BlockChain, error) {
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
blockCache, _ := lru.New(blockCacheLimit)
@@ -131,25 +131,22 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P
bodyRLPCache: bodyRLPCache,
blockCache: blockCache,
futureBlocks: futureBlocks,
- pow: pow,
+ engine: engine,
vmConfig: vmConfig,
badBlocks: badBlocks,
}
- bc.SetValidator(NewBlockValidator(config, bc, pow))
- bc.SetProcessor(NewStateProcessor(config, bc))
+ bc.SetValidator(NewBlockValidator(config, bc, engine))
+ bc.SetProcessor(NewStateProcessor(config, bc, engine))
- gv := func() HeaderValidator { return bc.Validator() }
var err error
- bc.hc, err = NewHeaderChain(chainDb, config, gv, bc.getProcInterrupt)
+ bc.hc, err = NewHeaderChain(chainDb, config, engine, bc.getProcInterrupt)
if err != nil {
return nil, err
}
-
bc.genesisBlock = bc.GetBlockByNumber(0)
if bc.genesisBlock == nil {
return nil, ErrNoGenesis
}
-
if err := bc.loadLastState(); err != nil {
return nil, err
}
@@ -233,9 +230,6 @@ func (self *BlockChain) loadLastState() error {
log.Info("Loaded most recent local full block", "number", self.currentBlock.Number(), "hash", self.currentBlock.Hash(), "td", blockTd)
log.Info("Loaded most recent local fast block", "number", self.currentFastBlock.Number(), "hash", self.currentFastBlock.Hash(), "td", fastTd)
- // Try to be smart and issue a pow verification for the head to pre-generate caches
- go self.pow.Verify(types.NewBlockWithHeader(currentHeader))
-
return nil
}
@@ -383,9 +377,6 @@ func (self *BlockChain) Processor() Processor {
return self.processor
}
-// AuxValidator returns the auxiliary validator (Proof of work atm)
-func (self *BlockChain) AuxValidator() pow.PoW { return self.pow }
-
// State returns a new mutable state based on the current HEAD block.
func (self *BlockChain) State() (*state.StateDB, error) {
return self.StateAt(self.CurrentBlock().Root())
@@ -906,38 +897,38 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
stats = insertStats{startTime: mclock.Now()}
events = make([]interface{}, 0, len(chain))
coalescedLogs []*types.Log
- nonceChecked = make([]bool, len(chain))
)
+ // Start the parallel header verifier
+ headers := make([]*types.Header, len(chain))
+ seals := make([]bool, len(chain))
- // Start the parallel nonce verifier.
- nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain)
- defer close(nonceAbort)
+ for i, block := range chain {
+ headers[i] = block.Header()
+ seals[i] = true
+ }
+ abort, results := self.engine.VerifyHeaders(self, headers, seals)
+ defer close(abort)
+ // Iterate over the blocks and insert when the verifier permits
for i, block := range chain {
+ // If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&self.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
break
}
- bstart := time.Now()
- // Wait for block i's nonce to be verified before processing
- // its state transition.
- for !nonceChecked[i] {
- r := <-nonceResults
- nonceChecked[r.index] = true
- if !r.valid {
- invalid := chain[r.index]
- return r.index, &BlockNonceErr{Hash: invalid.Hash(), Number: invalid.Number(), Nonce: invalid.Nonce()}
- }
- }
-
+ // If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
err := BadHashError(block.Hash())
self.reportBlock(block, nil, err)
return i, err
}
- // Stage 1 validation of the block using the chain's validator
- // interface.
- err := self.Validator().ValidateBlock(block)
+ // Wait for the block's verification to complete
+ bstart := time.Now()
+
+ err := <-results
+ if err == nil {
+ err = self.Validator().ValidateBody(block)
+ }
if err != nil {
if IsKnownBlockErr(err) {
stats.ignored++
@@ -952,7 +943,6 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
if block.Time().Cmp(max) == 1 {
return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max)
}
-
self.futureBlocks.Add(block.Hash(), block)
stats.queued++
continue
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 3da133a9e..b2fb226db 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -31,18 +32,21 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
// newTestBlockChain creates a blockchain without validation.
-func newTestBlockChain() *BlockChain {
+func newTestBlockChain(fake bool) *BlockChain {
db, _ := ethdb.NewMemDatabase()
gspec := &Genesis{
Config: params.TestChainConfig,
Difficulty: big.NewInt(1),
}
gspec.MustCommit(db)
- blockchain, err := NewBlockChain(db, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ engine := ethash.NewFullFaker()
+ if !fake {
+ engine = ethash.NewTester()
+ }
+ blockchain, err := NewBlockChain(db, gspec.Config, engine, new(event.TypeMux), vm.Config{})
if err != nil {
panic(err)
}
@@ -117,7 +121,10 @@ func printChain(bc *BlockChain) {
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
for _, block := range chain {
// Try and process the block
- err := blockchain.Validator().ValidateBlock(block)
+ err := blockchain.engine.VerifyHeader(blockchain, block.Header(), true)
+ if err == nil {
+ err = blockchain.validator.ValidateBody(block)
+ }
if err != nil {
if IsKnownBlockErr(err) {
continue
@@ -133,7 +140,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
blockchain.reportBlock(block, receipts, err)
return err
}
- err = blockchain.Validator().ValidateState(block, blockchain.GetBlockByHash(block.ParentHash()), statedb, receipts, usedGas)
+ err = blockchain.validator.ValidateState(block, blockchain.GetBlockByHash(block.ParentHash()), statedb, receipts, usedGas)
if err != nil {
blockchain.reportBlock(block, receipts, err)
return err
@@ -152,7 +159,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
for _, header := range chain {
// Try and validate the header
- if err := blockchain.Validator().ValidateHeader(header, blockchain.GetHeaderByHash(header.ParentHash), false); err != nil {
+ if err := blockchain.engine.VerifyHeader(blockchain, header, false); err != nil {
return err
}
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
@@ -174,7 +181,7 @@ func insertChain(done chan bool, blockchain *BlockChain, chain types.Blocks, t *
}
func TestLastBlock(t *testing.T) {
- bchain := newTestBlockChain()
+ bchain := newTestBlockChain(false)
block := makeBlockChain(bchain.CurrentBlock(), 1, bchain.chainDb, 0)[0]
bchain.insert(block)
if block.Hash() != GetHeadBlockHash(bchain.chainDb) {
@@ -318,8 +325,7 @@ func testBrokenChain(t *testing.T, full bool) {
type bproc struct{}
-func (bproc) ValidateBlock(*types.Block) error { return nil }
-func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return nil }
+func (bproc) ValidateBody(*types.Block) error { return nil }
func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error {
return nil
}
@@ -378,7 +384,7 @@ func testReorgShort(t *testing.T, full bool) {
}
func testReorg(t *testing.T, first, second []int, td int64, full bool) {
- bc := newTestBlockChain()
+ bc := newTestBlockChain(true)
// Insert an easy and a difficult chain afterwards
if full {
@@ -422,7 +428,7 @@ func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }
func testBadHashes(t *testing.T, full bool) {
- bc := newTestBlockChain()
+ bc := newTestBlockChain(true)
// Create a chain, ban a hash and try to import
var err error
@@ -446,7 +452,7 @@ func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
func testReorgBadHashes(t *testing.T, full bool) {
- bc := newTestBlockChain()
+ bc := newTestBlockChain(true)
// Create a chain, import and ban afterwards
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)
@@ -473,7 +479,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
}
// Create a new BlockChain and check that it rolled back the state.
- ncm, err := NewBlockChain(bc.chainDb, bc.config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ ncm, err := NewBlockChain(bc.chainDb, bc.config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
@@ -504,46 +510,34 @@ func testInsertNonceError(t *testing.T, full bool) {
}
// Create and insert a chain with a failing nonce
var (
- failAt int
- failRes int
- failNum uint64
- failHash common.Hash
+ failAt int
+ failRes int
+ failNum uint64
)
if full {
blocks := makeBlockChain(blockchain.CurrentBlock(), i, db, 0)
failAt = rand.Int() % len(blocks)
failNum = blocks[failAt].NumberU64()
- failHash = blocks[failAt].Hash()
-
- blockchain.pow = failPow{failNum}
+ blockchain.engine = ethash.NewFakeFailer(failNum)
failRes, err = blockchain.InsertChain(blocks)
} else {
headers := makeHeaderChain(blockchain.CurrentHeader(), i, db, 0)
failAt = rand.Int() % len(headers)
failNum = headers[failAt].Number.Uint64()
- failHash = headers[failAt].Hash()
-
- blockchain.pow = failPow{failNum}
- blockchain.validator = NewBlockValidator(params.TestChainConfig, blockchain, failPow{failNum})
+ blockchain.engine = ethash.NewFakeFailer(failNum)
+ blockchain.hc.engine = blockchain.engine
failRes, err = blockchain.InsertHeaderChain(headers, 1)
}
// Check that the returned error indicates the nonce failure.
if failRes != failAt {
t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
}
- if !IsBlockNonceErr(err) {
- t.Fatalf("test %d: error mismatch: have %v, want nonce error %T", i, err, err)
- }
- nerr := err.(*BlockNonceErr)
- if nerr.Number.Uint64() != failNum {
- t.Errorf("test %d: number mismatch: have %v, want %v", i, nerr.Number, failNum)
- }
- if nerr.Hash != failHash {
- t.Errorf("test %d: hash mismatch: have %x, want %x", i, nerr.Hash[:4], failHash[:4])
+ if err != ethash.ErrInvalidPoW {
+ t.Fatalf("test %d: error mismatch: have %v, want %v", i, err, ethash.ErrInvalidPoW)
}
// Check that all no blocks after the failing block have been inserted.
for j := 0; j < i-failAt; j++ {
@@ -569,9 +563,12 @@ func TestFastVsFullChains(t *testing.T) {
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
funds = big.NewInt(1000000000)
- gspec = testGenesis(address, funds)
- genesis = gspec.MustCommit(gendb)
- signer = types.NewEIP155Signer(gspec.Config.ChainId)
+ gspec = &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: GenesisAlloc{address: {Balance: funds}},
+ }
+ genesis = gspec.MustCommit(gendb)
+ signer = types.NewEIP155Signer(gspec.Config.ChainId)
)
blocks, receipts := GenerateChain(gspec.Config, genesis, gendb, 1024, func(i int, block *BlockGen) {
block.SetCoinbase(common.Address{0x00})
@@ -594,7 +591,7 @@ func TestFastVsFullChains(t *testing.T) {
// Import the chain as an archive node for the comparison baseline
archiveDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(archiveDb)
- archive, _ := NewBlockChain(archiveDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ archive, _ := NewBlockChain(archiveDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
@@ -603,7 +600,7 @@ func TestFastVsFullChains(t *testing.T) {
// Fast import the chain as a non-archive node to test
fastDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(fastDb)
- fast, _ := NewBlockChain(fastDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ fast, _ := NewBlockChain(fastDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -680,8 +677,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
archiveDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(archiveDb)
- archive, _ := NewBlockChain(archiveDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
-
+ archive, _ := NewBlockChain(archiveDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
@@ -692,7 +688,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a non-archive node and ensure all pointers are updated
fastDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(fastDb)
- fast, _ := NewBlockChain(fastDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ fast, _ := NewBlockChain(fastDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -711,8 +707,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a light node and ensure all pointers are updated
lightDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(lightDb)
- light, _ := NewBlockChain(lightDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ light, _ := NewBlockChain(lightDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
@@ -780,7 +776,7 @@ func TestChainTxReorgs(t *testing.T) {
})
// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
- blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
if i, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
@@ -851,7 +847,7 @@ func TestLogReorgs(t *testing.T) {
)
var evmux event.TypeMux
- blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, &evmux, vm.Config{})
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), &evmux, vm.Config{})
subs := evmux.Subscribe(RemovedLogsEvent{})
chain, _ := GenerateChain(params.TestChainConfig, genesis, db, 2, func(i int, gen *BlockGen) {
@@ -883,13 +879,16 @@ func TestReorgSideEvent(t *testing.T) {
db, _ = ethdb.NewMemDatabase()
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- gspec = testGenesis(addr1, big.NewInt(10000000000000))
+ gspec = &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}},
+ }
genesis = gspec.MustCommit(db)
signer = types.NewEIP155Signer(gspec.Config.ChainId)
)
evmux := &event.TypeMux{}
- blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
chain, _ := GenerateChain(gspec.Config, genesis, db, 3, func(i int, gen *BlockGen) {})
if _, err := blockchain.InsertChain(chain); err != nil {
@@ -959,7 +958,7 @@ done:
// Tests if the canonical block can be fetched from the database during chain insertion.
func TestCanonicalBlockRetrieval(t *testing.T) {
- bc := newTestBlockChain()
+ bc := newTestBlockChain(false)
chain, _ := GenerateChain(bc.config, bc.genesisBlock, bc.chainDb, 10, func(i int, gen *BlockGen) {})
for i := range chain {
@@ -1004,7 +1003,7 @@ func TestEIP155Transition(t *testing.T) {
mux event.TypeMux
)
- blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, &mux, vm.Config{})
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), &mux, vm.Config{})
blocks, _ := GenerateChain(gspec.Config, genesis, db, 4, func(i int, block *BlockGen) {
var (
tx *types.Transaction
@@ -1110,7 +1109,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
}
genesis = gspec.MustCommit(db)
mux event.TypeMux
- blockchain, _ = NewBlockChain(db, gspec.Config, pow.FakePow{}, &mux, vm.Config{})
+ blockchain, _ = NewBlockChain(db, gspec.Config, ethash.NewFaker(), &mux, vm.Config{})
)
blocks, _ := GenerateChain(gspec.Config, genesis, db, 3, func(i int, block *BlockGen) {
var (
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 5bf1ece25..c47c719f6 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -21,13 +21,14 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
// So we can deterministically seed different blockchains
@@ -141,7 +142,7 @@ func (b *BlockGen) OffsetTime(seconds int64) {
if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
panic("block time out of range")
}
- b.header.Difficulty = CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
+ b.header.Difficulty = ethash.CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
}
// GenerateChain creates a chain of n blocks. The first block's
@@ -173,13 +174,13 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, db ethdb.Dat
}
}
if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(h.Number) == 0 {
- ApplyDAOHardFork(statedb)
+ misc.ApplyDAOHardFork(statedb)
}
// Execute any user modifications to the block and finalize it
if gen != nil {
gen(i, b)
}
- AccumulateRewards(statedb, h, b.uncles)
+ ethash.AccumulateRewards(statedb, h, b.uncles)
root, err := statedb.Commit(config.IsEIP158(h.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
@@ -212,7 +213,7 @@ func makeHeader(config *params.ChainConfig, parent *types.Block, state *state.St
Root: state.IntermediateRoot(config.IsEIP158(parent.Number())),
ParentHash: parent.Hash(),
Coinbase: parent.Coinbase(),
- Difficulty: CalcDifficulty(config, time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
+ Difficulty: ethash.CalcDifficulty(config, time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
GasLimit: CalcGasLimit(parent),
GasUsed: new(big.Int),
Number: new(big.Int).Add(parent.Number(), common.Big1),
@@ -229,7 +230,7 @@ func newCanonical(n int, full bool) (ethdb.Database, *BlockChain, error) {
db, _ := ethdb.NewMemDatabase()
genesis := gspec.MustCommit(db)
- blockchain, _ := NewBlockChain(db, params.AllProtocolChanges, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ blockchain, _ := NewBlockChain(db, params.AllProtocolChanges, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index a8f77cdf3..3a7c62396 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -20,13 +20,13 @@ import (
"fmt"
"math/big"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
func ExampleGenerateChain() {
@@ -81,7 +81,7 @@ func ExampleGenerateChain() {
// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
- blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
if i, err := blockchain.InsertChain(chain); err != nil {
fmt.Printf("insert error (block %d): %v\n", chain[i].NumberU64(), err)
return
diff --git a/core/chain_pow.go b/core/chain_pow.go
deleted file mode 100644
index e5ccd87e2..000000000
--- a/core/chain_pow.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "runtime"
-
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/pow"
-)
-
-// nonceCheckResult contains the result of a nonce verification.
-type nonceCheckResult struct {
- index int // Index of the item verified from an input array
- valid bool // Result of the nonce verification
-}
-
-// verifyNoncesFromHeaders starts a concurrent header nonce verification,
-// returning a quit channel to abort the operations and a results channel
-// to retrieve the async verifications.
-func verifyNoncesFromHeaders(checker pow.PoW, headers []*types.Header) (chan<- struct{}, <-chan nonceCheckResult) {
- items := make([]pow.Block, len(headers))
- for i, header := range headers {
- items[i] = types.NewBlockWithHeader(header)
- }
- return verifyNonces(checker, items)
-}
-
-// verifyNoncesFromBlocks starts a concurrent block nonce verification,
-// returning a quit channel to abort the operations and a results channel
-// to retrieve the async verifications.
-func verifyNoncesFromBlocks(checker pow.PoW, blocks []*types.Block) (chan<- struct{}, <-chan nonceCheckResult) {
- items := make([]pow.Block, len(blocks))
- for i, block := range blocks {
- items[i] = block
- }
- return verifyNonces(checker, items)
-}
-
-// verifyNonces starts a concurrent nonce verification, returning a quit channel
-// to abort the operations and a results channel to retrieve the async checks.
-func verifyNonces(checker pow.PoW, items []pow.Block) (chan<- struct{}, <-chan nonceCheckResult) {
- // Spawn as many workers as allowed threads
- workers := runtime.GOMAXPROCS(0)
- if len(items) < workers {
- workers = len(items)
- }
- // Create a task channel and spawn the verifiers
- tasks := make(chan int, workers)
- results := make(chan nonceCheckResult, len(items)) // Buffered to make sure all workers stop
- for i := 0; i < workers; i++ {
- go func() {
- for index := range tasks {
- results <- nonceCheckResult{index: index, valid: checker.Verify(items[index]) == nil}
- }
- }()
- }
- // Feed item indices to the workers until done or aborted
- abort := make(chan struct{})
- go func() {
- defer close(tasks)
-
- for i := range items {
- select {
- case tasks <- i:
- continue
- case <-abort:
- return
- }
- }
- }()
- return abort, results
-}
diff --git a/core/chain_pow_test.go b/core/chain_pow_test.go
deleted file mode 100644
index 311ca128e..000000000
--- a/core/chain_pow_test.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "errors"
- "math/big"
- "runtime"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
-)
-
-// failPow is a non-validating proof of work implementation, that returns true
-// from Verify for all but one block.
-type failPow struct {
- failing uint64
-}
-
-func (pow failPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
- return 0, nil
-}
-func (pow failPow) Verify(block pow.Block) error {
- if block.NumberU64() == pow.failing {
- return errors.New("failed")
- }
- return nil
-}
-func (pow failPow) Hashrate() float64 { return 0 }
-
-// delayedPow is a non-validating proof of work implementation, that returns true
-// from Verify for all blocks, but delays them the configured amount of time.
-type delayedPow struct {
- delay time.Duration
-}
-
-func (pow delayedPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
- return 0, nil
-}
-func (pow delayedPow) Verify(block pow.Block) error { time.Sleep(pow.delay); return nil }
-func (pow delayedPow) Hashrate() float64 { return 0 }
-
-// Tests that simple POW verification works, for both good and bad blocks.
-func TestPowVerification(t *testing.T) {
- // Create a simple chain to verify
- var (
- testdb, _ = ethdb.NewMemDatabase()
- genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
- blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
- )
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- // Run the POW checker for blocks one-by-one, checking for both valid and invalid nonces
- for i := 0; i < len(blocks); i++ {
- for j, full := range []bool{true, false} {
- for k, valid := range []bool{true, false} {
- var results <-chan nonceCheckResult
-
- switch {
- case full && valid:
- _, results = verifyNoncesFromBlocks(pow.FakePow{}, []*types.Block{blocks[i]})
- case full && !valid:
- _, results = verifyNoncesFromBlocks(failPow{blocks[i].NumberU64()}, []*types.Block{blocks[i]})
- case !full && valid:
- _, results = verifyNoncesFromHeaders(pow.FakePow{}, []*types.Header{headers[i]})
- case !full && !valid:
- _, results = verifyNoncesFromHeaders(failPow{headers[i].Number.Uint64()}, []*types.Header{headers[i]})
- }
- // Wait for the verification result
- select {
- case result := <-results:
- if result.index != 0 {
- t.Errorf("test %d.%d.%d: invalid index: have %d, want 0", i, j, k, result.index)
- }
- if result.valid != valid {
- t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, result.valid, valid)
- }
- case <-time.After(time.Second):
- t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
- }
- // Make sure no more data is returned
- select {
- case result := <-results:
- t.Fatalf("test %d.%d.%d: unexpected result returned: %v", i, j, k, result)
- case <-time.After(25 * time.Millisecond):
- }
- }
- }
- }
-}
-
-// Tests that concurrent POW verification works, for both good and bad blocks.
-func TestPowConcurrentVerification2(t *testing.T) { testPowConcurrentVerification(t, 2) }
-func TestPowConcurrentVerification8(t *testing.T) { testPowConcurrentVerification(t, 8) }
-func TestPowConcurrentVerification32(t *testing.T) { testPowConcurrentVerification(t, 32) }
-
-func testPowConcurrentVerification(t *testing.T, threads int) {
- // Create a simple chain to verify
- var (
- testdb, _ = ethdb.NewMemDatabase()
- genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
- blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
- )
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- // Set the number of threads to verify on
- old := runtime.GOMAXPROCS(threads)
- defer runtime.GOMAXPROCS(old)
-
- // Run the POW checker for the entire block chain at once both for a valid and
- // also an invalid chain (enough if one is invalid, last but one (arbitrary)).
- for i, full := range []bool{true, false} {
- for j, valid := range []bool{true, false} {
- var results <-chan nonceCheckResult
-
- switch {
- case full && valid:
- _, results = verifyNoncesFromBlocks(pow.FakePow{}, blocks)
- case full && !valid:
- _, results = verifyNoncesFromBlocks(failPow{uint64(len(blocks) - 1)}, blocks)
- case !full && valid:
- _, results = verifyNoncesFromHeaders(pow.FakePow{}, headers)
- case !full && !valid:
- _, results = verifyNoncesFromHeaders(failPow{uint64(len(headers) - 1)}, headers)
- }
- // Wait for all the verification results
- checks := make(map[int]bool)
- for k := 0; k < len(blocks); k++ {
- select {
- case result := <-results:
- if _, ok := checks[result.index]; ok {
- t.Fatalf("test %d.%d.%d: duplicate results for %d", i, j, k, result.index)
- }
- if result.index < 0 || result.index >= len(blocks) {
- t.Fatalf("test %d.%d.%d: result %d out of bounds [%d, %d]", i, j, k, result.index, 0, len(blocks)-1)
- }
- checks[result.index] = result.valid
-
- case <-time.After(time.Second):
- t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
- }
- }
- // Check nonce check validity
- for k := 0; k < len(blocks); k++ {
- want := valid || (k != len(blocks)-2) // We chose the last but one nonce in the chain to fail
- if checks[k] != want {
- t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, checks[k], want)
- }
- }
- // Make sure no more data is returned
- select {
- case result := <-results:
- t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result)
- case <-time.After(25 * time.Millisecond):
- }
- }
- }
-}
-
-// Tests that aborting a POW validation indeed prevents further checks from being
-// run, as well as checks that no left-over goroutines are leaked.
-func TestPowConcurrentAbortion2(t *testing.T) { testPowConcurrentAbortion(t, 2) }
-func TestPowConcurrentAbortion8(t *testing.T) { testPowConcurrentAbortion(t, 8) }
-func TestPowConcurrentAbortion32(t *testing.T) { testPowConcurrentAbortion(t, 32) }
-
-func testPowConcurrentAbortion(t *testing.T, threads int) {
- // Create a simple chain to verify
- var (
- testdb, _ = ethdb.NewMemDatabase()
- genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
- blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 1024, nil)
- )
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- // Set the number of threads to verify on
- old := runtime.GOMAXPROCS(threads)
- defer runtime.GOMAXPROCS(old)
-
- // Run the POW checker for the entire block chain at once
- for i, full := range []bool{true, false} {
- var abort chan<- struct{}
- var results <-chan nonceCheckResult
-
- // Start the verifications and immediately abort
- if full {
- abort, results = verifyNoncesFromBlocks(delayedPow{time.Millisecond}, blocks)
- } else {
- abort, results = verifyNoncesFromHeaders(delayedPow{time.Millisecond}, headers)
- }
- close(abort)
-
- // Deplete the results channel
- verified := make(map[int]struct{})
- for depleted := false; !depleted; {
- select {
- case result := <-results:
- verified[result.index] = struct{}{}
- case <-time.After(50 * time.Millisecond):
- depleted = true
- }
- }
- // Check that abortion was honored by not processing too many POWs
- if len(verified) > 2*threads {
- t.Errorf("test %d: verification count too large: have %d, want below %d", i, len(verified), 2*threads)
- }
- // Check that there are no gaps in the results
- for j := 0; j < len(verified); j++ {
- if _, ok := verified[j]; !ok {
- t.Errorf("test %d.%d: gap found in verification results", i, j)
- }
- }
- }
-}
diff --git a/core/dao_test.go b/core/dao_test.go
index c0d482630..cb6e54f8f 100644
--- a/core/dao_test.go
+++ b/core/dao_test.go
@@ -20,11 +20,11 @@ import (
"math/big"
"testing"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
// Tests that DAO-fork enabled clients can properly filter out fork-commencing
@@ -42,12 +42,12 @@ func TestDAOForkRangeExtradata(t *testing.T) {
proDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(proDb)
proConf := &params.ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: true}
- proBc, _ := NewBlockChain(proDb, proConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ proBc, _ := NewBlockChain(proDb, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
conDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(conDb)
conConf := &params.ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: false}
- conBc, _ := NewBlockChain(conDb, conConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ conBc, _ := NewBlockChain(conDb, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if _, err := proBc.InsertChain(prefix); err != nil {
t.Fatalf("pro-fork: failed to import chain prefix: %v", err)
@@ -60,7 +60,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a pro-fork block, and try to feed into the no-fork chain
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, conConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ bc, _ := NewBlockChain(db, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()+1))
for j := 0; j < len(blocks)/2; j++ {
@@ -81,7 +81,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a no-fork block, and try to feed into the pro-fork chain
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, proConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ bc, _ = NewBlockChain(db, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()+1))
for j := 0; j < len(blocks)/2; j++ {
@@ -103,7 +103,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, conConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ bc, _ := NewBlockChain(db, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()+1))
for j := 0; j < len(blocks)/2; j++ {
@@ -119,7 +119,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, proConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ bc, _ = NewBlockChain(db, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()+1))
for j := 0; j < len(blocks)/2; j++ {
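The recurring change in these test hunks is the chain constructor: NewBlockChain no longer takes a PoW implementation such as pow.FakePow{}, it takes a consensus engine, and the tests pass ethash.NewFaker() as the drop-in. Below is a minimal sketch of that setup; the helper name newTestChain and the funded address are illustrative, while every call it makes mirrors calls visible in this diff.

package core

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
)

// newTestChain wires up an in-memory chain the way the updated tests do:
// commit a genesis spec, then hand a fake ethash engine to NewBlockChain
// where the old pow.FakePow{} argument used to go.
func newTestChain() (*BlockChain, error) {
	db, _ := ethdb.NewMemDatabase()
	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc:  GenesisAlloc{common.Address{1}: {Balance: big.NewInt(1000000)}},
	}
	gspec.MustCommit(db)
	return NewBlockChain(db, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
}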
diff --git a/core/database_util_test.go b/core/database_util_test.go
index e6be06093..9f16b660a 100644
--- a/core/database_util_test.go
+++ b/core/database_util_test.go
@@ -18,14 +18,12 @@ package core
import (
"bytes"
- "encoding/json"
"io/ioutil"
"math/big"
"os"
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
@@ -34,58 +32,6 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
-type diffTest struct {
- ParentTimestamp uint64
- ParentDifficulty *big.Int
- CurrentTimestamp uint64
- CurrentBlocknumber *big.Int
- CurrentDifficulty *big.Int
-}
-
-func (d *diffTest) UnmarshalJSON(b []byte) (err error) {
- var ext struct {
- ParentTimestamp string
- ParentDifficulty string
- CurrentTimestamp string
- CurrentBlocknumber string
- CurrentDifficulty string
- }
- if err := json.Unmarshal(b, &ext); err != nil {
- return err
- }
-
- d.ParentTimestamp = math.MustParseUint64(ext.ParentTimestamp)
- d.ParentDifficulty = math.MustParseBig256(ext.ParentDifficulty)
- d.CurrentTimestamp = math.MustParseUint64(ext.CurrentTimestamp)
- d.CurrentBlocknumber = math.MustParseBig256(ext.CurrentBlocknumber)
- d.CurrentDifficulty = math.MustParseBig256(ext.CurrentDifficulty)
-
- return nil
-}
-
-func TestCalcDifficulty(t *testing.T) {
- file, err := os.Open("../tests/files/BasicTests/difficulty.json")
- if err != nil {
- t.Fatal(err)
- }
- defer file.Close()
-
- tests := make(map[string]diffTest)
- err = json.NewDecoder(file).Decode(&tests)
- if err != nil {
- t.Fatal(err)
- }
-
- config := &params.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
- for name, test := range tests {
- number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
- diff := CalcDifficulty(config, test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty)
- if diff.Cmp(test.CurrentDifficulty) != 0 {
- t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff)
- }
- }
-}
-
// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
@@ -562,7 +508,11 @@ func TestMipmapChain(t *testing.T) {
)
defer db.Close()
- genesis := testGenesis(addr, big.NewInt(1000000)).MustCommit(db)
+ gspec := &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: GenesisAlloc{addr: {Balance: big.NewInt(1000000)}},
+ }
+ genesis := gspec.MustCommit(db)
chain, receipts := GenerateChain(params.TestChainConfig, genesis, db, 1010, func(i int, gen *BlockGen) {
var receipts types.Receipts
switch i {
diff --git a/core/genesis_test.go b/core/genesis_test.go
index b73dd776f..4312a80b8 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -23,11 +23,11 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
func TestDefaultGenesisBlock(t *testing.T) {
@@ -119,7 +119,7 @@ func TestSetupGenesis(t *testing.T) {
// Commit the 'old' genesis block with Homestead transition at #2.
// Advance to block #4, past the homestead transition block of customg.
genesis := oldcustomg.MustCommit(db)
- bc, _ := NewBlockChain(db, oldcustomg.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ bc, _ := NewBlockChain(db, oldcustomg.Config, ethash.NewFullFaker(), new(event.TypeMux), vm.Config{})
bc.SetValidator(bproc{})
bc.InsertChain(makeBlockChainWithDiff(genesis, []int{2, 3, 4, 5}, 0))
bc.CurrentBlock()
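Two different fakes show up in these test hunks: the DAO tests use ethash.NewFaker(), while TestSetupGenesis above switches to ethash.NewFullFaker() and feeds a chain built with hand-picked difficulties. As their use here suggests (this reading is an assumption, not a quote from the ethash package), the former only fakes the seal check while the latter waves headers through entirely; both are assumed to satisfy consensus.Engine, as their use with NewBlockChain implies. A small illustrative chooser:

package core

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/ethash"
)

// testEngine picks one of the fake engines used by the tests in these hunks.
// NewFullFaker suits tests that feed deliberately non-conforming headers and
// only care about chain plumbing; NewFaker keeps the ordinary header rules.
func testEngine(skipHeaderChecks bool) consensus.Engine {
	if skipHeaderChecks {
		return ethash.NewFullFaker()
	}
	return ethash.NewFaker()
}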
diff --git a/core/headerchain.go b/core/headerchain.go
index e7660cc60..e2d0ff5b1 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -18,21 +18,19 @@ package core
import (
crand "crypto/rand"
+ "errors"
"fmt"
"math"
"math/big"
mrand "math/rand"
- "runtime"
- "sync"
- "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/hashicorp/golang-lru"
)
@@ -62,18 +60,15 @@ type HeaderChain struct {
procInterrupt func() bool
- rand *mrand.Rand
- getValidator getHeaderValidatorFn
+ rand *mrand.Rand
+ engine consensus.Engine
}
-// getHeaderValidatorFn returns a HeaderValidator interface
-type getHeaderValidatorFn func() HeaderValidator
-
// NewHeaderChain creates a new HeaderChain structure.
// engine is the consensus engine used for validating incoming headers
// procInterrupt points to the parent's interrupt semaphore
-func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValidator getHeaderValidatorFn, procInterrupt func() bool) (*HeaderChain, error) {
+func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
headerCache, _ := lru.New(headerCacheLimit)
tdCache, _ := lru.New(tdCacheLimit)
numberCache, _ := lru.New(numberCacheLimit)
@@ -92,7 +87,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValid
numberCache: numberCache,
procInterrupt: procInterrupt,
rand: mrand.New(mrand.NewSource(seed.Int64())),
- getValidator: getValidator,
+ engine: engine,
}
hc.genesisHeader = hc.GetHeaderByNumber(0)
@@ -228,78 +223,34 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
}
}
- // Generate the list of headers that should be POW verified
- verify := make([]bool, len(chain))
- for i := 0; i < len(verify)/checkFreq; i++ {
+ // Generate the list of seal verification requests, and start the parallel verifier
+ seals := make([]bool, len(chain))
+ for i := 0; i < len(seals)/checkFreq; i++ {
index := i*checkFreq + hc.rand.Intn(checkFreq)
- if index >= len(verify) {
- index = len(verify) - 1
+ if index >= len(seals) {
+ index = len(seals) - 1
}
- verify[index] = true
- }
- verify[len(verify)-1] = true // Last should always be verified to avoid junk
-
- // Create the header verification task queue and worker functions
- tasks := make(chan int, len(chain))
- for i := 0; i < len(chain); i++ {
- tasks <- i
+ seals[index] = true
}
- close(tasks)
+ seals[len(seals)-1] = true // Last should always be verified to avoid junk
- errs, failed := make([]error, len(tasks)), int32(0)
- process := func(worker int) {
- for index := range tasks {
- header, hash := chain[index], chain[index].Hash()
+ abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
+ defer close(abort)
- // Short circuit insertion if shutting down or processing failed
- if hc.procInterrupt() {
- return
- }
- if atomic.LoadInt32(&failed) > 0 {
- return
- }
- // Short circuit if the header is bad or already known
- if BadHashes[hash] {
- errs[index] = BadHashError(hash)
- atomic.AddInt32(&failed, 1)
- return
- }
- if hc.HasHeader(hash) {
- continue
- }
- // Verify that the header honors the chain parameters
- checkPow := verify[index]
-
- var err error
- if index == 0 {
- err = hc.getValidator().ValidateHeader(header, hc.GetHeader(header.ParentHash, header.Number.Uint64()-1), checkPow)
- } else {
- err = hc.getValidator().ValidateHeader(header, chain[index-1], checkPow)
- }
- if err != nil {
- errs[index] = err
- atomic.AddInt32(&failed, 1)
- return
- }
+ // Iterate over the headers and ensure they all check out
+ for i, header := range chain {
+ // If the chain is terminating, stop processing blocks
+ if hc.procInterrupt() {
+ log.Debug("Premature abort during headers verification")
+ return 0, errors.New("aborted")
}
- }
- // Start as many worker threads as goroutines allowed
- pending := new(sync.WaitGroup)
- for i := 0; i < runtime.GOMAXPROCS(0); i++ {
- pending.Add(1)
- go func(id int) {
- defer pending.Done()
- process(id)
- }(i)
- }
- pending.Wait()
-
- // If anything failed, report
- if failed > 0 {
- for i, err := range errs {
- if err != nil {
- return i, err
- }
+ // If the header is a banned one, straight out abort
+ if BadHashes[header.Hash()] {
+ return i, BadHashError(header.Hash())
+ }
+ // Otherwise wait for headers checks and ensure they pass
+ if err := <-results; err != nil {
+ return i, err
}
}
@@ -313,13 +264,11 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
for i, header := range chain {
// Short circuit insertion if shutting down
if hc.procInterrupt() {
- log.Debug("Premature abort during headers processing")
- break
+ log.Debug("Premature abort during headers import")
+ return i, errors.New("aborted")
}
- hash := header.Hash()
-
// If the header's already known, skip it, otherwise store
- if hc.HasHeader(hash) {
+ if hc.GetHeader(header.Hash(), header.Number.Uint64()) != nil {
stats.ignored++
continue
}
@@ -490,35 +439,11 @@ func (hc *HeaderChain) SetGenesis(head *types.Header) {
hc.genesisHeader = head
}
-// headerValidator is responsible for validating block headers
-//
-// headerValidator implements HeaderValidator.
-type headerValidator struct {
- config *params.ChainConfig
- hc *HeaderChain // Canonical header chain
- Pow pow.PoW // Proof of work used for validating
-}
-
-// NewHeaderValidator returns a new header validator which is safe for re-use
-func NewHeaderValidator(config *params.ChainConfig, chain *HeaderChain, pow pow.PoW) HeaderValidator {
- return &headerValidator{
- config: config,
- Pow: pow,
- hc: chain,
- }
-}
+// Config retrieves the header chain's chain configuration.
+func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }
-// ValidateHeader validates the given header and, depending on the pow arg,
-// checks the proof of work of the given header. Returns an error if the
-// validation failed.
-func (v *headerValidator) ValidateHeader(header, parent *types.Header, checkPow bool) error {
- // Short circuit if the parent is missing.
- if parent == nil {
- return ParentError(header.ParentHash)
- }
- // Short circuit if the header's already known or its parent missing
- if v.hc.HasHeader(header.Hash()) {
- return nil
- }
- return ValidateHeader(v.config, v.Pow, header, parent, checkPow, false)
+// GetBlock implements consensus.ChainReader, and returns nil for every input as
+// a header chain does not have blocks available for retrieval.
+func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
+ return nil
}
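The rewritten ValidateHeaderChain above is the consumer side of the engine API: build a []bool marking which seals to check, hand the whole batch to engine.VerifyHeaders together with a ChainReader, then drain one error per header and close the abort channel when done. A condensed sketch of that pattern follows; the helper name is illustrative and, unlike the hunk above, it verifies every seal instead of sampling.

package core

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
)

// verifyAllSeals asks the engine to check every header, including its seal,
// and reports the index of the first failure. Closing abort lets the engine
// stop any verification still in flight once we return early.
func verifyAllSeals(chain consensus.ChainReader, engine consensus.Engine, headers []*types.Header) (int, error) {
	seals := make([]bool, len(headers))
	for i := range seals {
		seals[i] = true
	}
	abort, results := engine.VerifyHeaders(chain, headers, seals)
	defer close(abort)

	for i := range headers {
		if err := <-results; err != nil {
			return i, err
		}
	}
	return 0, nil
}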
diff --git a/core/state_processor.go b/core/state_processor.go
index 3edc042a3..aca2929eb 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -19,6 +19,8 @@ package core
import (
"math/big"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -26,25 +28,22 @@ import (
"github.com/ethereum/go-ethereum/params"
)
-var (
- big8 = big.NewInt(8)
- big32 = big.NewInt(32)
-)
-
// StateProcessor is a basic Processor, which takes care of transitioning
// state from one point to another.
//
// StateProcessor implements Processor.
type StateProcessor struct {
- config *params.ChainConfig
- bc *BlockChain
+ config *params.ChainConfig // Chain configuration options
+ bc *BlockChain // Canonical block chain
+ engine consensus.Engine // Consensus engine used for block rewards
}
// NewStateProcessor initialises a new StateProcessor.
-func NewStateProcessor(config *params.ChainConfig, bc *BlockChain) *StateProcessor {
+func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *StateProcessor {
return &StateProcessor{
config: config,
bc: bc,
+ engine: engine,
}
}
@@ -59,18 +58,16 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
var (
receipts types.Receipts
totalUsedGas = big.NewInt(0)
- err error
header = block.Header()
allLogs []*types.Log
gp = new(GasPool).AddGas(block.GasLimit())
)
// Mutate the block and state according to any hard-fork specs
if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
- ApplyDAOHardFork(statedb)
+ misc.ApplyDAOHardFork(statedb)
}
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
- //fmt.Println("tx:", i)
statedb.StartRecord(tx.Hash(), block.Hash(), i)
receipt, _, err := ApplyTransaction(p.config, p.bc, gp, statedb, header, tx, totalUsedGas, cfg)
if err != nil {
@@ -79,9 +76,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
}
- AccumulateRewards(statedb, header, block.Uncles())
+ // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
+ p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), receipts)
- return receipts, allLogs, totalUsedGas, err
+ return receipts, allLogs, totalUsedGas, nil
}
// ApplyTransaction attempts to apply a transaction to the given state database
@@ -122,23 +120,3 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, gp *GasPool, s
return receipt, gas, err
}
-
-// AccumulateRewards credits the coinbase of the given block with the
-// mining reward. The total reward consists of the static block reward
-// and rewards for included uncles. The coinbase of each uncle block is
-// also rewarded.
-func AccumulateRewards(statedb *state.StateDB, header *types.Header, uncles []*types.Header) {
- reward := new(big.Int).Set(BlockReward)
- r := new(big.Int)
- for _, uncle := range uncles {
- r.Add(uncle.Number, big8)
- r.Sub(r, header.Number)
- r.Mul(r, BlockReward)
- r.Div(r, big8)
- statedb.AddBalance(uncle.Coinbase, r)
-
- r.Div(BlockReward, big32)
- reward.Add(reward, r)
- }
- statedb.AddBalance(header.Coinbase, reward)
-}
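With AccumulateRewards removed from core, paying out the block and uncle rewards is now the engine's job via Finalize, which Process calls right after the last transaction. Below is a hedged sketch of that hand-off, mirroring the call in the hunk above; the helper itself is illustrative and assumes BlockChain satisfies consensus.ChainReader, as its use in Process implies.

package core

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

// finalizeBlock lets the consensus engine apply its block-closing rules; for
// ethash that means crediting the static block reward plus uncle rewards,
// i.e. what core.AccumulateRewards used to do in-line.
func finalizeBlock(engine consensus.Engine, chain consensus.ChainReader, block *types.Block, statedb *state.StateDB, receipts types.Receipts) {
	engine.Finalize(chain, block.Header(), statedb, block.Transactions(), block.Uncles(), receipts)
}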
diff --git a/core/types.go b/core/types.go
index 7fd658979..1cfbbab29 100644
--- a/core/types.go
+++ b/core/types.go
@@ -24,31 +24,17 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
)
-// Validator is an interface which defines the standard for block validation.
+// Validator is an interface which defines the standard for block validation. It
+// is only responsible for validating block contents, as the header validation is
+// done by the specific consensus engines.
//
-// The validator is responsible for validating incoming block or, if desired,
-// validates headers for fast validation.
-//
-// ValidateBlock validates the given block and should return an error if it
-// failed to do so and should be used for "full" validation.
-//
-// ValidateHeader validates the given header and parent and returns an error
-// if it failed to do so.
-//
-// ValidateState validates the given statedb and optionally the receipts and
-// gas used. The implementer should decide what to do with the given input.
type Validator interface {
- HeaderValidator
- ValidateBlock(block *types.Block) error
- ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error
-}
+ // ValidateBody validates the given block's content.
+ ValidateBody(block *types.Block) error
-// HeaderValidator is an interface for validating headers only
-//
-// ValidateHeader validates the given header and parent and returns an error
-// if it failed to do so.
-type HeaderValidator interface {
- ValidateHeader(header, parent *types.Header, checkPow bool) error
+ // ValidateState validates the given statedb and optionally the receipts and
+ // gas used.
+ ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error
}
// Processor is an interface for processing blocks using a given initial state.
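The trimmed Validator interface no longer knows about headers at all; anything implementing ValidateBody and ValidateState satisfies it, which is how the genesis test earlier in this diff can plug in a stub through bc.SetValidator(bproc{}). A minimal illustrative implementation follows; the noopValidator name is made up for this sketch.

package core

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

// noopValidator accepts every block body and post-state; useful only for
// tests that exercise chain plumbing rather than validation itself.
type noopValidator struct{}

func (noopValidator) ValidateBody(block *types.Block) error { return nil }

func (noopValidator) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error {
	return nil
}

// Compile-time assertion that the sketch matches the interface above.
var _ Validator = noopValidator{}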
diff --git a/core/types/block.go b/core/types/block.go
index b699ba686..278594d50 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -348,12 +348,11 @@ func CalcUncleHash(uncles []*Header) common.Hash {
return rlpHash(uncles)
}
-// WithMiningResult returns a new block with the data from b
-// where nonce and mix digest are set to the provided values.
-func (b *Block) WithMiningResult(nonce BlockNonce, mixDigest common.Hash) *Block {
- cpy := *b.header
- cpy.Nonce = nonce
- cpy.MixDigest = mixDigest
+// WithSeal returns a new block with the data from b but the header replaced with
+// the sealed one.
+func (b *Block) WithSeal(header *Header) *Block {
+ cpy := *header
+
return &Block{
header: &cpy,
transactions: b.transactions,