author     Bojie Wu <bojie@dexon.org>            2018-10-09 13:28:45 +0800
committer  Wei-Ning Huang <w@byzantine-lab.io>   2019-06-12 17:23:39 +0800
commit     46100ae76f9fb81aaf267101c3801af1d7adcb88 (patch)
tree       4c7899aec7ba26d0fac3a3b52147d8ced0588c24 /core
parent     f3b4175b04d7848a76ff1828fe1b586c4b31a46b (diff)
app: implement new insert blocks logic
Diffstat (limited to 'core')
-rw-r--r--   core/block_validator.go    34
-rw-r--r--   core/blockchain.go        195
-rw-r--r--   core/types/block.go        35
3 files changed, 246 insertions(+), 18 deletions(-)
diff --git a/core/block_validator.go b/core/block_validator.go
index 697944c41..65f311f9f 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -101,6 +101,40 @@ func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *stat
return nil
}
+// DexonBlockValidator implements Validator.
+type DexonBlockValidator struct {
+ config *params.ChainConfig // Chain configuration options
+ bc *BlockChain // Canonical block chain
+ engine consensus.Engine // Consensus engine used for validating
+}
+
+// NewDexonBlockValidator returns a new block validator which is safe for re-use
+func NewDexonBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus.Engine) *DexonBlockValidator {
+ validator := &DexonBlockValidator{
+ config: config,
+ engine: engine,
+ bc: blockchain,
+ }
+ return validator
+}
+
+// ValidateBody validates the given block's uncles and verifies the block
+// header's transaction and uncle roots. The headers are assumed to be already
+// validated at this point.
+func (v *DexonBlockValidator) ValidateBody(block *types.Block) error {
+ // TODO(Bojie): implement it
+ return nil
+}
+
+// ValidateState validates the various changes that happen after a state
+// transition, such as amount of used gas, the receipt roots and the state root
+// itself. ValidateState returns nil if validation succeeds, otherwise an
+// error describing the failed check is returned.
+func (v *DexonBlockValidator) ValidateState(block, parent *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error {
+ // TODO(Bojie): implement it
+ return nil
+}
+
// CalcGasLimit computes the gas limit of the next block after parent. It aims
// to keep the baseline gas above the provided floor, and increase it towards the
// ceil if the blocks are full. If the ceil is exceeded, it will always decrease
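
Both DexonBlockValidator methods are left as TODOs in this commit. A minimal sketch of what they could eventually check, assuming they roughly follow the upstream BlockValidator; the helper names validateBodySketch and validateStateSketch are illustrative only and not part of this change:

// Hypothetical sketch only: possible checks for the TODO validator methods,
// loosely mirroring the upstream BlockValidator.
package core

import (
	"fmt"

	"github.com/dexon-foundation/dexon/core/state"
	"github.com/dexon-foundation/dexon/core/types"
)

// validateBodySketch checks that the header's transaction root matches the
// block body, which is what the upstream ValidateBody does for transactions.
func (v *DexonBlockValidator) validateBodySketch(block *types.Block) error {
	if hash := types.DeriveSha(block.Transactions()); hash != block.Header().TxHash {
		return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, block.Header().TxHash)
	}
	return nil
}

// validateStateSketch compares gas usage, receipt root and state root against
// the header after the block's transactions have been applied.
func (v *DexonBlockValidator) validateStateSketch(block *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error {
	header := block.Header()
	if header.GasUsed != usedGas {
		return fmt.Errorf("invalid gas used (remote: %d local: %d)", header.GasUsed, usedGas)
	}
	if receiptSha := types.DeriveSha(receipts); receiptSha != header.ReceiptHash {
		return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
	}
	if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
		return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root)
	}
	return nil
}
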
diff --git a/core/blockchain.go b/core/blockchain.go
index 9775f9e16..87e586d20 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -28,6 +28,8 @@ import (
"sync/atomic"
"time"
+ "github.com/hashicorp/golang-lru"
+
coreCommon "github.com/dexon-foundation/dexon-consensus-core/common"
coreTypes "github.com/dexon-foundation/dexon-consensus-core/core/types"
@@ -47,7 +49,6 @@ import (
"github.com/dexon-foundation/dexon/params"
"github.com/dexon-foundation/dexon/rlp"
"github.com/dexon-foundation/dexon/trie"
- "github.com/hashicorp/golang-lru"
)
var (
@@ -144,6 +145,11 @@ type BlockChain struct {
confirmedBlockMu sync.Mutex
confirmedBlocks map[coreCommon.Hash]*coreTypes.Block
chainConfirmedBlocks map[uint32][]*coreTypes.Block
+
+ pendingBlocks map[uint64]struct {
+ block *types.Block
+ receipts types.Receipts
+ }
}
// NewBlockChain returns a fully initialised block chain using information
@@ -183,8 +189,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
badBlocks: badBlocks,
confirmedBlocks: make(map[coreCommon.Hash]*coreTypes.Block),
chainConfirmedBlocks: make(map[uint32][]*coreTypes.Block),
+ pendingBlocks: make(map[uint64]struct {
+ block *types.Block
+ receipts types.Receipts
+ }),
}
- bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
+ bc.SetValidator(NewDexonBlockValidator(chainConfig, bc, engine))
bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))
var err error
@@ -1449,6 +1459,187 @@ func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (i
return 0, nil, nil, nil
}
+func (bc *BlockChain) InsertPendingBlock(chain types.Blocks) (int, error) {
+ n, events, logs, err := bc.insertPendingBlocks(chain)
+ bc.PostChainEvents(events, logs)
+ return n, err
+}
+
+func (bc *BlockChain) insertPendingBlocks(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
+ // Sanity check that we have something meaningful to import
+ if len(chain) == 0 {
+ return 0, nil, nil, nil
+ }
+ // Do a sanity check that the provided chain is actually ordered and linked
+ for i := 1; i < len(chain); i++ {
+ if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
+ // Chain broke ancestry, log a message (programming error) and skip insertion
+ log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
+ "parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
+
+ return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
+ chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
+ }
+ }
+ // Pre-checks passed, start the full block imports
+ bc.wg.Add(1)
+ defer bc.wg.Done()
+
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
+
+ // A queued approach to delivering events. This is generally
+ // faster than direct delivery and requires much less mutex
+ // acquiring.
+ var (
+ stats = insertStats{startTime: mclock.Now()}
+ events = make([]interface{}, 0, len(chain))
+ lastCanon *types.Block
+ coalescedLogs []*types.Log
+ )
+
+ // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
+ senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
+
+ // Iterate over the blocks and insert when the verifier permits
+ for i, block := range chain {
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ log.Debug("Premature abort during blocks processing")
+ break
+ }
+ bstart := time.Now()
+
+ currentBlock := bc.CurrentBlock()
+ if block.Header().WitnessHeight > currentBlock.NumberU64() && block.Header().WitnessHeight != 0 {
+ if bc.pendingBlocks[block.Header().WitnessHeight].block.Root() != block.Header().WitnessRoot {
+ return i, nil, nil, fmt.Errorf("invalid witness root %s vs %s", bc.pendingBlocks[block.Header().WitnessHeight].block.Root().String(), block.Header().WitnessRoot.String())
+ }
+
+ if bc.pendingBlocks[block.Header().WitnessHeight].block.ReceiptHash() != block.Header().WitnessReceiptHash {
+ return i, nil, nil, fmt.Errorf("invalid witness receipt hash %s vs %s", bc.pendingBlocks[block.Header().WitnessHeight].block.ReceiptHash().String(), block.Header().WitnessReceiptHash.String())
+ }
+ }
+
+ var parentBlock *types.Block
+ var pendingState *state.StateDB
+ var err error
+ parent, exist := bc.pendingBlocks[block.NumberU64()-1]
+ if !exist {
+ parentBlock = currentBlock
+ if parentBlock.NumberU64() != block.NumberU64()-1 {
+ return i, nil, nil, fmt.Errorf("parent block %d not exist", block.NumberU64()-1)
+ }
+ } else {
+ parentBlock = parent.block
+ }
+ block.RawHeader().ParentHash = parentBlock.Hash()
+ pendingState, err = state.New(parentBlock.Root(), bc.stateCache)
+ if err != nil {
+ return i, events, coalescedLogs, err
+ }
+
+ var (
+ receipts types.Receipts
+ usedGas = new(uint64)
+ header = block.Header()
+ allLogs []*types.Log
+ gp = new(GasPool).AddGas(block.GasLimit())
+ )
+ // Iterate over and process the individual transactions
+ for txIndex, tx := range block.Transactions() {
+ pendingState.Prepare(tx.Hash(), block.Hash(), txIndex)
+ receipt, _, err := ApplyTransaction(bc.chainConfig, bc, nil, gp, pendingState, header, tx, usedGas, bc.vmConfig)
+ if err != nil {
+ return i, nil, nil, fmt.Errorf("apply transaction error: %v %d", err, tx.Nonce())
+ }
+ receipts = append(receipts, receipt)
+ allLogs = append(allLogs, receipt.Logs...)
+ log.Debug("apply transaction", "tx.hash", tx.Hash(), "nonce", tx.Nonce(), "amount", tx.Value())
+ }
+ // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
+ header.GasUsed = *usedGas
+ newPendingBlock, err := bc.engine.Finalize(bc, header, pendingState, block.Transactions(), block.Uncles(), receipts)
+ if err != nil {
+ return i, events, coalescedLogs, fmt.Errorf("finalize error: %v", err)
+ }
+
+ // Validate the state using the default validator
+ err = bc.Validator().ValidateState(block, nil, pendingState, receipts, *usedGas)
+ if err != nil {
+ bc.reportBlock(block, receipts, err)
+ return i, events, coalescedLogs, fmt.Errorf("valiadte state error: %v", err)
+ }
+ proctime := time.Since(bstart)
+
+ // commit state to refresh stateCache
+ _, err = pendingState.Commit(true)
+ if err != nil {
+ return i, nil, nil, fmt.Errorf("pendingState commit error: %v", err)
+ }
+
+ // add into pending blocks
+ bc.pendingBlocks[block.NumberU64()] = struct {
+ block *types.Block
+ receipts types.Receipts
+ }{block: newPendingBlock, receipts: receipts}
+
+ // start inserting available pending blocks into the db
+ for pendingHeight := bc.CurrentBlock().NumberU64() + 1; pendingHeight <= block.Header().WitnessHeight; pendingHeight++ {
+ confirmedBlock, exist := bc.pendingBlocks[pendingHeight]
+ if !exist {
+ log.Debug("block has already inserted", "height", pendingHeight)
+ continue
+ }
+
+ s, err := state.New(confirmedBlock.block.Root(), bc.stateCache)
+ if err != nil {
+ return i, events, coalescedLogs, err
+ }
+
+ // Write the block to the chain and get the status.
+ log.Debug("insert pending block", "height", pendingHeight)
+ status, err := bc.WriteBlockWithState(confirmedBlock.block, confirmedBlock.receipts, s)
+ if err != nil {
+ return i, events, coalescedLogs, fmt.Errorf("WriteBlockWithState error: %v", err)
+ }
+
+ switch status {
+ case CanonStatTy:
+ log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
+
+ coalescedLogs = append(coalescedLogs, allLogs...)
+ blockInsertTimer.UpdateSince(bstart)
+ events = append(events, ChainEvent{confirmedBlock.block, confirmedBlock.block.Hash(), allLogs})
+ lastCanon = confirmedBlock.block
+
+ // Only count canonical blocks for GC processing time
+ bc.gcproc += proctime
+
+ case SideStatTy:
+ return i, nil, nil, fmt.Errorf("insert pending block and fork found")
+ }
+
+ delete(bc.pendingBlocks, pendingHeight)
+
+ stats.processed++
+ stats.usedGas += *usedGas
+
+ cache, _ := bc.stateCache.TrieDB().Size()
+ stats.report(chain, i, cache)
+ }
+ }
+ // Append a single chain head event if we've progressed the chain
+ if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
+ events = append(events, ChainHeadEvent{lastCanon})
+ }
+ return 0, events, coalescedLogs, nil
+}
+
+func (bc *BlockChain) GetPendingBlockByHeight(height uint64) *types.Block {
+ return bc.pendingBlocks[height].block
+}
+
// reorg takes two blocks, an old chain and a new chain and will reconstruct the
// blocks and inserts them to be part of the new canonical chain and accumulates
// potential missing transactions and post an event about them.
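
Taken together, the new path executes confirmed blocks against a pending state, parks the results in bc.pendingBlocks, and only calls WriteBlockWithState once a later block's WitnessHeight covers them. A hedged usage sketch from a hypothetical caller; deliverConfirmedBlock and the app package wiring are illustrative only, not part of this commit:

// Illustrative only: how a caller might drive the new pending-block API.
package app // hypothetical caller, e.g. the DEXON application layer

import (
	"github.com/dexon-foundation/dexon/core"
	"github.com/dexon-foundation/dexon/core/types"
)

// deliverConfirmedBlock queues a confirmed block for execution and shows where
// a proposer could read the executed pending block back to fill in the witness
// fields of a later header.
func deliverConfirmedBlock(bc *core.BlockChain, block *types.Block) error {
	// Execute against the pending state; the block is only written to the
	// database once a later block witnesses it.
	if _, err := bc.InsertPendingBlock(types.Blocks{block}); err != nil {
		return err
	}
	// The executed pending block carries the state root and receipt hash that
	// a later header would commit to as WitnessRoot / WitnessReceiptHash.
	if pending := bc.GetPendingBlockByHeight(block.NumberU64()); pending != nil {
		_ = pending.Root()        // candidate WitnessRoot
		_ = pending.ReceiptHash() // candidate WitnessReceiptHash
	}
	return nil
}
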
diff --git a/core/types/block.go b/core/types/block.go
index ef2cb2c56..a28ce8ee0 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -68,22 +68,25 @@ func (n *BlockNonce) UnmarshalText(input []byte) error {
// Header represents a block header in the Ethereum blockchain.
type Header struct {
- ParentHash common.Hash `json:"parentHash" gencodec:"required"`
- UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
- Coinbase common.Address `json:"miner" gencodec:"required"`
- Root common.Hash `json:"stateRoot" gencodec:"required"`
- TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
- ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
- Bloom Bloom `json:"logsBloom" gencodec:"required"`
- Difficulty *big.Int `json:"difficulty" gencodec:"required"`
- Number *big.Int `json:"number" gencodec:"required"`
- GasLimit uint64 `json:"gasLimit" gencodec:"required"`
- GasUsed uint64 `json:"gasUsed" gencodec:"required"`
- Time uint64 `json:"timestamp" gencodec:"required"`
- Extra []byte `json:"extraData" gencodec:"required"`
- MixDigest common.Hash `json:"mixHash"`
- Nonce BlockNonce `json:"nonce"`
- Randomness []byte `json:"randomness" gencodec:"required"`
+ ParentHash common.Hash `json:"parentHash" gencodec:"required"`
+ UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"`
+ Coinbase common.Address `json:"miner" gencodec:"required"`
+ Root common.Hash `json:"stateRoot" gencodec:"required"`
+ TxHash common.Hash `json:"transactionsRoot" gencodec:"required"`
+ ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"`
+ Bloom Bloom `json:"logsBloom" gencodec:"required"`
+ Difficulty *big.Int `json:"difficulty" gencodec:"required"`
+ Number *big.Int `json:"number" gencodec:"required"`
+ GasLimit uint64 `json:"gasLimit" gencodec:"required"`
+ GasUsed uint64 `json:"gasUsed" gencodec:"required"`
+ Time uint64 `json:"timestamp" gencodec:"required"`
+ Extra []byte `json:"extraData" gencodec:"required"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce BlockNonce `json:"nonce"`
+ Randomness []byte `json:"randomness" gencodec:"required"`
+ WitnessHeight uint64 `json:"witnessHeight" gencodec:"required"`
+ WitnessRoot common.Hash `json:"witnessRoot" gencodec:"required"`
+ WitnessReceiptHash common.Hash `json:"witnessReceiptHash" gencodec:"required"`
}
// field type overrides for gencodec
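
The three new witness fields are what insertPendingBlocks compares before accepting a block: WitnessHeight points at an earlier, still-pending block whose state root and receipt hash must equal WitnessRoot and WitnessReceiptHash. A stand-alone restatement of that comparison; verifyWitnessSketch is an illustrative helper, not part of this commit:

package core

import (
	"fmt"

	"github.com/dexon-foundation/dexon/core/types"
)

// verifyWitnessSketch restates the witness comparison performed in
// insertPendingBlocks. witnessed is the pending block found at
// header.WitnessHeight; the helper itself is illustrative only.
func verifyWitnessSketch(header *types.Header, witnessed *types.Block) error {
	if header.WitnessHeight == 0 || witnessed == nil {
		return nil // nothing witnessed yet (e.g. early blocks)
	}
	if witnessed.Root() != header.WitnessRoot {
		return fmt.Errorf("invalid witness root %s vs %s", witnessed.Root().String(), header.WitnessRoot.String())
	}
	if witnessed.ReceiptHash() != header.WitnessReceiptHash {
		return fmt.Errorf("invalid witness receipt hash %s vs %s", witnessed.ReceiptHash().String(), header.WitnessReceiptHash.String())
	}
	return nil
}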