aboutsummaryrefslogtreecommitdiffstats
path: root/core
diff options
context:
space:
mode:
authorMission Liao <mission.liao@dexon.org>2018-10-04 17:08:54 +0800
committerGitHub <noreply@github.com>2018-10-04 17:08:54 +0800
commitb4764a67bc19b2b9ea6f07d45a1275f530060c68 (patch)
tree3ea0ea1702d60791e0a71b666366b580650bb810 /core
parent149e918f4fc78632483bf549dd8f5ffe55366e18 (diff)
downloaddexon-consensus-b4764a67bc19b2b9ea6f07d45a1275f530060c68.tar
dexon-consensus-b4764a67bc19b2b9ea6f07d45a1275f530060c68.tar.gz
dexon-consensus-b4764a67bc19b2b9ea6f07d45a1275f530060c68.tar.bz2
dexon-consensus-b4764a67bc19b2b9ea6f07d45a1275f530060c68.tar.lz
dexon-consensus-b4764a67bc19b2b9ea6f07d45a1275f530060c68.tar.xz
dexon-consensus-b4764a67bc19b2b9ea6f07d45a1275f530060c68.tar.zst
dexon-consensus-b4764a67bc19b2b9ea6f07d45a1275f530060c68.zip
core: split lattice-data to another file (#172)
- Split latticeData to another file - Remove areAllAcksInLattice
Diffstat (limited to 'core')
-rw-r--r--core/lattice-data.go390
-rw-r--r--core/lattice-data_test.go679
-rw-r--r--core/lattice.go388
-rw-r--r--core/lattice_test.go670
4 files changed, 1069 insertions, 1058 deletions
diff --git a/core/lattice-data.go b/core/lattice-data.go
new file mode 100644
index 0000000..e5d173d
--- /dev/null
+++ b/core/lattice-data.go
@@ -0,0 +1,390 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus-core/common"
+ "github.com/dexon-foundation/dexon-consensus-core/core/types"
+)
+
+// Errors for sanity check error.
+var (
+ ErrAckingBlockNotExists = fmt.Errorf("acking block not exists")
+ ErrInvalidParentChain = fmt.Errorf("invalid parent chain")
+ ErrDuplicatedAckOnOneChain = fmt.Errorf("duplicated ack on one chain")
+ ErrChainStatusCorrupt = fmt.Errorf("chain status corrupt")
+ ErrInvalidChainID = fmt.Errorf("invalid chain id")
+ ErrInvalidProposerID = fmt.Errorf("invalid proposer id")
+ ErrInvalidTimestamp = fmt.Errorf("invalid timestamp")
+ ErrInvalidWitness = fmt.Errorf("invalid witness data")
+ ErrInvalidBlock = fmt.Errorf("invalid block")
+ ErrForkBlock = fmt.Errorf("fork block")
+ ErrNotAckParent = fmt.Errorf("not ack parent")
+ ErrDoubleAck = fmt.Errorf("double ack")
+ ErrInvalidBlockHeight = fmt.Errorf("invalid block height")
+ ErrAlreadyInLattice = fmt.Errorf("block already in lattice")
+ ErrIncorrectBlockTime = fmt.Errorf("block timestampe is incorrect")
+)
+
+// Errors for method usage
+var (
+ ErrRoundNotIncreasing = errors.New("round not increasing")
+)
+
+// latticeData is a module for storing lattice.
+type latticeData struct {
+ // chains stores chains' blocks and other info.
+ chains []*chainStatus
+
+ // blockByHash stores blocks, indexed by block hash.
+ blockByHash map[common.Hash]*types.Block
+
+ // Block interval specifies reasonable time difference between
+ // parent/child blocks.
+ minBlockTimeInterval time.Duration
+ maxBlockTimeInterval time.Duration
+
+ // This stores configuration for each round.
+ numChainsForRounds []uint32
+ minRound uint64
+}
+
+// newLatticeData creates a new latticeData struct.
+func newLatticeData(
+ round uint64,
+ chainNum uint32,
+ minBlockTimeInterval time.Duration,
+ maxBlockTimeInterval time.Duration) (data *latticeData) {
+ data = &latticeData{
+ chains: make([]*chainStatus, chainNum),
+ blockByHash: make(map[common.Hash]*types.Block),
+ minBlockTimeInterval: minBlockTimeInterval,
+ maxBlockTimeInterval: maxBlockTimeInterval,
+ numChainsForRounds: []uint32{chainNum},
+ minRound: round,
+ }
+ for i := range data.chains {
+ data.chains[i] = &chainStatus{
+ ID: uint32(i),
+ blocks: []*types.Block{},
+ nextAck: make([]uint64, chainNum),
+ }
+ }
+ return
+}
+
+func (data *latticeData) sanityCheck(b *types.Block) error {
+ // Check if the chain id is valid.
+ if b.Position.ChainID >= uint32(len(data.chains)) {
+ return ErrInvalidChainID
+ }
+
+ // TODO(mission): Check if its proposer is in validator set somewhere,
+ // lattice doesn't have to know about node set.
+
+ // Check if it forks
+ if bInLattice := data.chains[b.Position.ChainID].getBlockByHeight(
+ b.Position.Height); bInLattice != nil {
+
+ if b.Hash != bInLattice.Hash {
+ return ErrForkBlock
+ }
+ return ErrAlreadyInLattice
+ }
+ // TODO(mission): check if fork by loading blocks from DB if the block
+// doesn't exist because forking is serious.
+
+ // Check if it acks older blocks.
+ acksByChainID := make(map[uint32]struct{}, len(data.chains))
+ for _, hash := range b.Acks {
+ if bAck, exist := data.blockByHash[hash]; exist {
+ if bAck.Position.Height <
+ data.chains[bAck.Position.ChainID].nextAck[b.Position.ChainID] {
+ return ErrDoubleAck
+ }
+ // Check if it acks two blocks on the same chain. This check is
+ // needed after we replaced the map with a slice for acks.
+ if _, acked := acksByChainID[bAck.Position.ChainID]; acked {
+ return ErrDuplicatedAckOnOneChain
+ }
+ acksByChainID[bAck.Position.ChainID] = struct{}{}
+ } else {
+ return ErrAckingBlockNotExists
+ }
+ }
+
+ // Check non-genesis blocks if it acks its parent.
+ if b.Position.Height > 0 {
+ if !b.IsAcking(b.ParentHash) {
+ return ErrNotAckParent
+ }
+ bParent := data.blockByHash[b.ParentHash]
+ if bParent.Position.ChainID != b.Position.ChainID {
+ return ErrInvalidParentChain
+ }
+ if bParent.Position.Height != b.Position.Height-1 {
+ return ErrInvalidBlockHeight
+ }
+ if bParent.Witness.Height > b.Witness.Height {
+ return ErrInvalidWitness
+ }
+ // Check if its timestamp is valid.
+ if !b.Timestamp.After(bParent.Timestamp) {
+ return ErrInvalidTimestamp
+ }
+ // Check if its timestamp is in expected range.
+ if b.Timestamp.Before(bParent.Timestamp.Add(data.minBlockTimeInterval)) ||
+ b.Timestamp.After(bParent.Timestamp.Add(data.maxBlockTimeInterval)) {
+
+ return ErrIncorrectBlockTime
+ }
+ }
+ return nil
+}
+
+// addBlock processes block, it does sanity check, inserts block into
+// lattice and deletes blocks which will not be used.
+func (data *latticeData) addBlock(
+ block *types.Block) (deliverable []*types.Block, err error) {
+
+ var (
+ bAck *types.Block
+ updated bool
+ )
+ // TODO(mission): sanity check twice, might hurt performance.
+ // If a block does not pass sanity check, report error.
+ if err = data.sanityCheck(block); err != nil {
+ return
+ }
+ if err = data.chains[block.Position.ChainID].addBlock(block); err != nil {
+ return
+ }
+ data.blockByHash[block.Hash] = block
+ // Update nextAcks.
+ for _, ack := range block.Acks {
+ bAck = data.blockByHash[ack]
+ data.chains[bAck.Position.ChainID].nextAck[block.Position.ChainID] =
+ bAck.Position.Height + 1
+ }
+ // Extract blocks that deliverable to total ordering.
+ // A block is deliverable to total ordering iff:
+ // - All its acking blocks are delivered to total ordering.
+ for {
+ updated = false
+ for _, status := range data.chains {
+ tip := status.getBlockByHeight(status.nextOutput)
+ if tip == nil {
+ continue
+ }
+ allAckingBlockDelivered := true
+ for _, ack := range tip.Acks {
+ bAck, exists := data.blockByHash[ack]
+ if !exists {
+ continue
+ }
+ if data.chains[bAck.Position.ChainID].nextOutput >
+ bAck.Position.Height {
+
+ continue
+ }
+ // This acked block exists and not delivered yet.
+ allAckingBlockDelivered = false
+ }
+ if allAckingBlockDelivered {
+ deliverable = append(deliverable, tip)
+ status.nextOutput++
+ updated = true
+ }
+ }
+ if !updated {
+ break
+ }
+ }
+
+ // Delete old blocks in "chains" and "blocks" to release memory space.
+ //
+ // A block is safe to be deleted iff:
+ // - It's delivered to total ordering
+ // - All chains (including its proposing chain) acks some block with
+ // higher height in its proposing chain.
+ //
+ // This works because blocks of height below this minimum are not going to be
+ // acked anymore; acking these blocks would be illegal.
+ for _, status := range data.chains {
+ for _, h := range status.purge() {
+ delete(data.blockByHash, h)
+ }
+ }
+ return
+}
+
+// prepareBlock helps to setup fields of block based on its ProposerID,
+// including:
+// - Set 'Acks' and 'Timestamps' for the highest block of each validator not
+// acked by this proposer before.
+// - Set 'ParentHash' and 'Height' from parent block, if we can't find a
+// parent, these fields would be setup like a genesis block.
+func (data *latticeData) prepareBlock(block *types.Block) {
+ // Reset fields to make sure we got these information from parent block.
+ block.Position.Height = 0
+ block.ParentHash = common.Hash{}
+ acks := common.Hashes{}
+ for chainID := range data.chains {
+ // find height of the latest block for that validator.
+ var (
+ curBlock *types.Block
+ nextHeight = data.chains[chainID].nextAck[block.Position.ChainID]
+ )
+ for {
+ tmpBlock := data.chains[chainID].getBlockByHeight(nextHeight)
+ if tmpBlock == nil {
+ break
+ }
+ curBlock = tmpBlock
+ nextHeight++
+ }
+ if curBlock == nil {
+ continue
+ }
+ acks = append(acks, curBlock.Hash)
+ if uint32(chainID) == block.Position.ChainID {
+ block.ParentHash = curBlock.Hash
+ block.Position.Height = curBlock.Position.Height + 1
+ block.Witness.Height = curBlock.Witness.Height
+ }
+ }
+ block.Acks = common.NewSortedHashes(acks)
+ return
+}
+
+// TODO(mission): make more abstraction for this method.
+// nextHeight returns the next height for the chain.
+func (data *latticeData) nextPosition(chainID uint32) types.Position {
+ return data.chains[chainID].nextPosition()
+}
+
+// appendConfig appends a configuration for upcoming round. When you append
+// a config for round R, next time you can only append the config for round R+1.
+func (data *latticeData) appendConfig(
+ round uint64, config *types.Config) (err error) {
+
+ if round != data.minRound+uint64(len(data.numChainsForRounds)) {
+ return ErrRoundNotIncreasing
+ }
+ data.numChainsForRounds = append(data.numChainsForRounds, config.NumChains)
+ return nil
+}
+
+type chainStatus struct {
+ // ID keeps the chainID of this chain status.
+ ID uint32
+
+ // blocks stores blocks proposed for this chain, sorted by height.
+ blocks []*types.Block
+
+ // minHeight keeps minimum height in blocks.
+ minHeight uint64
+
+ // nextAck stores the next height that should be acked, i.e. the last
+ // acked height + 1. Initialized to 0.
+ // For example, rb.chains[vid1].nextAck[vid2] - 1 is the last height of
+ // vid1 that has been acked by vid2.
+ nextAck []uint64
+
+ // nextOutput is the next output height of block, default to 0.
+ nextOutput uint64
+}
+
+func (s *chainStatus) getBlockByHeight(height uint64) (b *types.Block) {
+ if height < s.minHeight {
+ return
+ }
+ idx := int(height - s.minHeight)
+ if idx >= len(s.blocks) {
+ return
+ }
+ b = s.blocks[idx]
+ return
+}
+
+func (s *chainStatus) addBlock(b *types.Block) error {
+ if len(s.blocks) > 0 {
+ // Make sure the height of the incoming block is exactly
+ // one more than the current latest block, if one exists.
+ if s.blocks[len(s.blocks)-1].Position.Height != b.Position.Height-1 {
+ return ErrChainStatusCorrupt
+ }
+ } else {
+ if b.Position.Height != 0 {
+ return ErrChainStatusCorrupt
+ }
+ }
+ s.blocks = append(s.blocks, b)
+ return nil
+}
+
+func (s *chainStatus) calcPurgeHeight() (safe uint64, ok bool) {
+ // blocks with height less than min(nextOutput, nextAck...)
+ // are safe to be purged.
+ safe = s.nextOutput
+ for _, ackedHeight := range s.nextAck {
+ if safe > ackedHeight {
+ safe = ackedHeight
+ }
+ }
+ // Both 'nextOutput' and 'nextAck' represent some block yet to be
+ // output/acked. To find a block already output/acked, the height
+ // needs to be decreased by 1.
+ if safe == 0 {
+ // Avoid underflow.
+ return
+ }
+ safe--
+ if safe < s.minHeight {
+ return
+ }
+ ok = true
+ return
+}
+
+// purge blocks if they are safe to be deleted from working set.
+func (s *chainStatus) purge() (purged common.Hashes) {
+ safe, ok := s.calcPurgeHeight()
+ if !ok {
+ return
+ }
+ newMinIndex := safe - s.minHeight + 1
+ for _, b := range s.blocks[:newMinIndex] {
+ purged = append(purged, b.Hash)
+ }
+ s.blocks = s.blocks[newMinIndex:]
+ s.minHeight = safe + 1
+ return
+}
+
+// nextPosition returns a valid position for new block in this chain.
+func (s *chainStatus) nextPosition() types.Position {
+ return types.Position{
+ ChainID: s.ID,
+ Height: s.minHeight + uint64(len(s.blocks)),
+ }
+}
diff --git a/core/lattice-data_test.go b/core/lattice-data_test.go
new file mode 100644
index 0000000..92e66a4
--- /dev/null
+++ b/core/lattice-data_test.go
@@ -0,0 +1,679 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "math/rand"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus-core/common"
+ "github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
+ "github.com/dexon-foundation/dexon-consensus-core/core/test"
+ "github.com/dexon-foundation/dexon-consensus-core/core/types"
+ "github.com/stretchr/testify/suite"
+)
+
+type LatticeDataTestSuite struct {
+ suite.Suite
+}
+
+// genTestCase1 generates test case 1,
+// 3
+// |
+// 2
+// | \
+// 1 | 1
+// | | |
+// 0 0 0 0 (block height)
+// 0 1 2 3 (validator)
+func (s *LatticeDataTestSuite) genTestCase1() (data *latticeData) {
+ // Create a new latticeData instance with 4 chains.
+ var (
+ round uint64
+ b *types.Block
+ delivered []*types.Block
+ h common.Hash
+ chainNum uint32 = 4
+ req = s.Require()
+ err error
+ )
+
+ data = newLatticeData(round, chainNum, 2*time.Nanosecond, 1000*time.Second)
+ // Add genesis blocks.
+ for i := uint32(0); i < chainNum; i++ {
+ b = s.prepareGenesisBlock(i)
+ delivered, err = data.addBlock(b)
+ // Genesis blocks are safe to be added to DAG; they ack no one.
+ req.Len(delivered, 1)
+ req.Nil(err)
+ }
+
+ // Add block 0-1 which acks 0-0.
+ h = data.chains[0].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Hash: common.NewRandomHash(),
+ Timestamp: time.Now().UTC(),
+ Position: types.Position{
+ ChainID: 0,
+ Height: 1,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{h}),
+ Witness: types.Witness{
+ Height: 1,
+ },
+ }
+ s.hashBlock(b)
+ delivered, err = data.addBlock(b)
+ req.Len(delivered, 1)
+ req.Equal(delivered[0].Hash, b.Hash)
+ req.Nil(err)
+ req.NotNil(data.chains[0].getBlockByHeight(1))
+
+ // Add block 0-2 which acks 0-1 and 1-0.
+ h = data.chains[0].getBlockByHeight(1).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Position: types.Position{
+ ChainID: 0,
+ Height: 2,
+ },
+ Timestamp: time.Now().UTC(),
+ Acks: common.NewSortedHashes(common.Hashes{
+ h,
+ data.chains[1].getBlockByHeight(0).Hash,
+ }),
+ Witness: types.Witness{
+ Height: 2,
+ },
+ }
+ s.hashBlock(b)
+ delivered, err = data.addBlock(b)
+ req.Len(delivered, 1)
+ req.Equal(delivered[0].Hash, b.Hash)
+ req.Nil(err)
+ req.NotNil(data.chains[0].getBlockByHeight(2))
+
+ // Add block 0-3 which acks 0-2.
+ h = data.chains[0].getBlockByHeight(2).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Hash: common.NewRandomHash(),
+ Timestamp: time.Now().UTC(),
+ Position: types.Position{
+ ChainID: 0,
+ Height: 3,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{h}),
+ Witness: types.Witness{
+ Height: 3,
+ },
+ }
+ s.hashBlock(b)
+ delivered, err = data.addBlock(b)
+ req.Len(delivered, 1)
+ req.Equal(delivered[0].Hash, b.Hash)
+ req.Nil(err)
+ req.NotNil(data.chains[0].getBlockByHeight(3))
+
+ // Add block 3-1 which acks 3-0.
+ h = data.chains[3].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Hash: common.NewRandomHash(),
+ Timestamp: time.Now().UTC(),
+ Position: types.Position{
+ ChainID: 3,
+ Height: 1,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{h}),
+ Witness: types.Witness{
+ Height: 1,
+ },
+ }
+ s.hashBlock(b)
+ delivered, err = data.addBlock(b)
+ req.Len(delivered, 1)
+ req.Equal(delivered[0].Hash, b.Hash)
+ req.Nil(err)
+ req.NotNil(data.chains[3].getBlockByHeight(0))
+ return
+}
+
+// hashBlock is a helper to hash a block and check if any error.
+func (s *LatticeDataTestSuite) hashBlock(b *types.Block) {
+ var err error
+ b.Hash, err = hashBlock(b)
+ s.Require().Nil(err)
+}
+
+func (s *LatticeDataTestSuite) prepareGenesisBlock(
+ chainID uint32) (b *types.Block) {
+
+ b = &types.Block{
+ ParentHash: common.Hash{},
+ Position: types.Position{
+ ChainID: chainID,
+ Height: 0,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{}),
+ Timestamp: time.Now().UTC(),
+ }
+ s.hashBlock(b)
+ return
+}
+
+func (s *LatticeDataTestSuite) TestSanityCheckInDataLayer() {
+ var (
+ b *types.Block
+ h common.Hash
+ data = s.genTestCase1()
+ req = s.Require()
+ err error
+ )
+
+ // Non-genesis block with no ack, should get error.
+ b = &types.Block{
+ ParentHash: common.NewRandomHash(),
+ Position: types.Position{
+ ChainID: 0,
+ Height: 10,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{}),
+ }
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(ErrNotAckParent.Error(), err.Error())
+
+ // Non-genesis block which acks its parent but the height is invalid.
+ h = data.chains[1].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Position: types.Position{
+ ChainID: 1,
+ Height: 2,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{h}),
+ }
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(ErrInvalidBlockHeight.Error(), err.Error())
+
+ // Invalid chain ID.
+ h = data.chains[1].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Position: types.Position{
+ ChainID: 100,
+ Height: 1,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{h}),
+ }
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(ErrInvalidChainID.Error(), err.Error())
+
+ // Fork block.
+ h = data.chains[0].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Position: types.Position{
+ ChainID: 0,
+ Height: 1,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{h}),
+ Timestamp: time.Now().UTC(),
+ }
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(ErrForkBlock.Error(), err.Error())
+
+ // Replicated ack.
+ h = data.chains[0].getBlockByHeight(3).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Position: types.Position{
+ ChainID: 0,
+ Height: 4,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{
+ h,
+ data.chains[1].getBlockByHeight(0).Hash,
+ }),
+ Timestamp: time.Now().UTC(),
+ }
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(ErrDoubleAck.Error(), err.Error())
+
+ // Acking block doesn't exists.
+ h = data.chains[1].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Position: types.Position{
+ ChainID: 1,
+ Height: 1,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{
+ h,
+ common.NewRandomHash(),
+ }),
+ Timestamp: time.Now().UTC(),
+ }
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(err.Error(), ErrAckingBlockNotExists.Error())
+
+ // Parent block on different chain.
+ h = data.chains[1].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Position: types.Position{
+ ChainID: 2,
+ Height: 1,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{
+ h,
+ data.chains[2].getBlockByHeight(0).Hash,
+ }),
+ Timestamp: time.Now().UTC(),
+ }
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(err.Error(), ErrInvalidParentChain.Error())
+
+ // Ack two blocks on the same chain.
+ h = data.chains[2].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Position: types.Position{
+ ChainID: 2,
+ Height: 1,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{
+ h,
+ data.chains[0].getBlockByHeight(0).Hash,
+ data.chains[0].getBlockByHeight(1).Hash,
+ }),
+ Timestamp: time.Now().UTC(),
+ }
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(err.Error(), ErrDuplicatedAckOnOneChain.Error())
+
+ // Witness height decreases.
+ h = data.chains[0].getBlockByHeight(3).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Position: types.Position{
+ ChainID: 0,
+ Height: 4,
+ },
+ Timestamp: time.Now().UTC(),
+ Acks: common.NewSortedHashes(common.Hashes{
+ h,
+ }),
+ Witness: types.Witness{
+ Height: 2,
+ },
+ }
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(err.Error(), ErrInvalidWitness.Error())
+
+ // Add block 2-1 which acks 2-0, and violates the maximum block time interval.
+ h = data.chains[2].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Hash: common.NewRandomHash(),
+ Timestamp: time.Now().UTC().Add(data.maxBlockTimeInterval),
+ Position: types.Position{
+ ChainID: 2,
+ Height: 1,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{h}),
+ }
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(err, ErrIncorrectBlockTime)
+ // Violate the minimum block time interval.
+ b.Timestamp =
+ data.chains[2].getBlockByHeight(0).Timestamp.Add(1 * time.Nanosecond)
+ s.hashBlock(b)
+ err = data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(err, ErrIncorrectBlockTime)
+
+ // Normal block.
+ h = data.chains[1].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Position: types.Position{
+ ChainID: 1,
+ Height: 1,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{h}),
+ Timestamp: time.Now().UTC(),
+ }
+ s.hashBlock(b)
+ req.Nil(data.sanityCheck(b))
+}
+
+func (s *LatticeDataTestSuite) TestRandomIntensiveAcking() {
+ var (
+ round uint64
+ chainNum uint32 = 19
+ data = newLatticeData(round, chainNum, 0, 1000*time.Second)
+ req = s.Require()
+ delivered []*types.Block
+ extracted []*types.Block
+ b *types.Block
+ err error
+ )
+
+ // Generate genesis blocks.
+ for i := uint32(0); i < chainNum; i++ {
+ b = s.prepareGenesisBlock(i)
+ delivered, err = data.addBlock(b)
+ req.Len(delivered, 1)
+ req.Nil(err)
+ }
+
+ for i := 0; i < 5000; i++ {
+ b := &types.Block{
+ Position: types.Position{
+ ChainID: uint32(rand.Intn(int(chainNum))),
+ },
+ Timestamp: time.Now().UTC(),
+ }
+ data.prepareBlock(b)
+ s.hashBlock(b)
+ delivered, err = data.addBlock(b)
+ req.Nil(err)
+ extracted = append(extracted, delivered...)
+ }
+
+ // The len of array extractedBlocks should be about 5000.
+ req.True(len(extracted) > 4500)
+ // The len of data.blockByHash should be small if the deleting mechanism works.
+ req.True(len(data.blockByHash) < 500)
+}
+
+func (s *LatticeDataTestSuite) TestRandomlyGeneratedBlocks() {
+ var (
+ round uint64
+ chainNum uint32 = 19
+ blockNum = 50
+ repeat = 20
+ delivered []*types.Block
+ err error
+ req = s.Require()
+ datum []*latticeData
+ )
+
+ if testing.Short() {
+ chainNum = 7
+ repeat = 3
+ }
+
+ // Prepare a randomly generated blocks.
+ db, err := blockdb.NewMemBackedBlockDB()
+ req.Nil(err)
+ gen := test.NewBlocksGenerator(nil, hashBlock)
+ _, err = gen.Generate(chainNum, blockNum, nil, db)
+ req.Nil(err)
+ iter, err := db.GetAll()
+ req.Nil(err)
+ // Setup a revealer that would reveal blocks randomly but still form
+ // valid DAG without holes.
+ revealer, err := test.NewRandomDAGRevealer(iter)
+ req.Nil(err)
+
+ revealedHashesAsString := map[string]struct{}{}
+ deliveredHashesAsString := map[string]struct{}{}
+ for i := 0; i < repeat; i++ {
+ data := newLatticeData(round, chainNum, 0, 1000*time.Second)
+ deliveredHashes := common.Hashes{}
+ revealedHashes := common.Hashes{}
+ revealer.Reset()
+ for {
+ // Reveal next block.
+ b, err := revealer.Next()
+ if err != nil {
+ if err == blockdb.ErrIterationFinished {
+ err = nil
+ break
+ }
+ }
+ s.Require().Nil(err)
+ revealedHashes = append(revealedHashes, b.Hash)
+
+ // Pass blocks to lattice.
+ delivered, err = data.addBlock(&b)
+ req.Nil(err)
+ for _, b := range delivered {
+ deliveredHashes = append(deliveredHashes, b.Hash)
+ }
+ }
+ // To make it easier to check, sort hashes of
+ // strongly acked blocks, and concatenate them into
+ // a string.
+ sort.Sort(deliveredHashes)
+ asString := ""
+ for _, h := range deliveredHashes {
+ asString += h.String() + ","
+ }
+ deliveredHashesAsString[asString] = struct{}{}
+ // Compose the revealing hash sequence into a string.
+ asString = ""
+ for _, h := range revealedHashes {
+ asString += h.String() + ","
+ }
+ revealedHashesAsString[asString] = struct{}{}
+ datum = append(datum, data)
+ }
+ // Make sure concatenated hashes of strongly acked blocks are identical.
+ req.Len(deliveredHashesAsString, 1)
+ for h := range deliveredHashesAsString {
+ // Make sure at least some blocks are strongly acked.
+ req.True(len(h) > 0)
+ }
+ // Make sure we test for more than 1 revealing sequence.
+ req.True(len(revealedHashesAsString) > 1)
+ // Make sure each latticeData instance have identical working set.
+ req.True(len(datum) >= repeat)
+ for i, bI := range datum {
+ for j, bJ := range datum {
+ if i == j {
+ continue
+ }
+ for chainID, statusI := range bI.chains {
+ req.Equal(statusI.minHeight, bJ.chains[chainID].minHeight)
+ req.Equal(statusI.nextOutput, bJ.chains[chainID].nextOutput)
+ req.Equal(len(statusI.blocks), len(bJ.chains[chainID].blocks))
+ // Check nextAck.
+ for x, ackI := range statusI.nextAck {
+ req.Equal(ackI, bJ.chains[chainID].nextAck[x])
+ }
+ // Check blocks.
+ if len(statusI.blocks) > 0 {
+ req.Equal(statusI.blocks[0], bJ.chains[chainID].blocks[0])
+ }
+ }
+ // Check blockByHash.
+ req.Equal(bI.blockByHash, bJ.blockByHash)
+ }
+ }
+}
+
+func (s *LatticeDataTestSuite) TestPrepareBlock() {
+ var (
+ round uint64
+ chainNum uint32 = 4
+ req = s.Require()
+ minInterval = 50 * time.Millisecond
+ delivered []*types.Block
+ err error
+ data = newLatticeData(
+ round, chainNum, 0, 3000*time.Second)
+ )
+ // Setup genesis blocks.
+ b00 := s.prepareGenesisBlock(0)
+ time.Sleep(minInterval)
+ b10 := s.prepareGenesisBlock(1)
+ time.Sleep(minInterval)
+ b20 := s.prepareGenesisBlock(2)
+ time.Sleep(minInterval)
+ b30 := s.prepareGenesisBlock(3)
+ // Submit these blocks to lattice.
+ delivered, err = data.addBlock(b00)
+ req.Len(delivered, 1)
+ req.Nil(err)
+ delivered, err = data.addBlock(b10)
+ req.Len(delivered, 1)
+ req.Nil(err)
+ delivered, err = data.addBlock(b20)
+ req.Len(delivered, 1)
+ req.Nil(err)
+ delivered, err = data.addBlock(b30)
+ req.Len(delivered, 1)
+ req.Nil(err)
+ // We should be able to collect all 4 genesis blocks by calling
+ // prepareBlock.
+ b11 := &types.Block{
+ Position: types.Position{
+ ChainID: 1,
+ },
+ Timestamp: time.Now().UTC(),
+ }
+ data.prepareBlock(b11)
+ s.hashBlock(b11)
+ req.Contains(b11.Acks, b00.Hash)
+ req.Contains(b11.Acks, b10.Hash)
+ req.Contains(b11.Acks, b20.Hash)
+ req.Contains(b11.Acks, b30.Hash)
+ req.Equal(b11.ParentHash, b10.Hash)
+ req.Equal(b11.Position.Height, uint64(1))
+ delivered, err = data.addBlock(b11)
+ req.Len(delivered, 1)
+ req.Nil(err)
+ // Propose/Process a block based on collected info.
+ b12 := &types.Block{
+ Position: types.Position{
+ ChainID: 1,
+ },
+ Timestamp: time.Now().UTC(),
+ }
+ data.prepareBlock(b12)
+ s.hashBlock(b12)
+ // This time we only need to ack b11.
+ req.Len(b12.Acks, 1)
+ req.Contains(b12.Acks, b11.Hash)
+ req.Equal(b12.ParentHash, b11.Hash)
+ req.Equal(b12.Position.Height, uint64(2))
+ // When calling with other validator ID, we should be able to
+ // get 4 blocks to ack.
+ b01 := &types.Block{
+ Position: types.Position{
+ ChainID: 0,
+ },
+ }
+ data.prepareBlock(b01)
+ s.hashBlock(b01)
+ req.Len(b01.Acks, 4)
+ req.Contains(b01.Acks, b00.Hash)
+ req.Contains(b01.Acks, b11.Hash)
+ req.Contains(b01.Acks, b20.Hash)
+ req.Contains(b01.Acks, b30.Hash)
+ req.Equal(b01.ParentHash, b00.Hash)
+ req.Equal(b01.Position.Height, uint64(1))
+}
+
+func (s *LatticeDataTestSuite) TestCalcPurgeHeight() {
+ // Test chainStatus.calcPurgeHeight, we don't have
+ // to prepare blocks to test it.
+ var req = s.Require()
+ chain := &chainStatus{
+ minHeight: 0,
+ nextOutput: 0,
+ nextAck: []uint64{1, 1, 1, 1},
+ }
+ // When the calculated safe height would underflow, return nok.
+ safe, ok := chain.calcPurgeHeight()
+ req.False(ok)
+ // The block at height=0 is output, and acked by everyone else.
+ chain.nextOutput = 1
+ safe, ok = chain.calcPurgeHeight()
+ req.True(ok)
+ req.Equal(safe, uint64(0))
+ // Should take nextAck's height into consideration.
+ chain.nextOutput = 2
+ safe, ok = chain.calcPurgeHeight()
+ req.True(ok)
+ req.Equal(safe, uint64(0))
+ // When minHeight is larger than the safe height, return nok.
+ chain.minHeight = 1
+ chain.nextOutput = 1
+ safe, ok = chain.calcPurgeHeight()
+ req.False(ok)
+}
+
+func (s *LatticeDataTestSuite) TestPurge() {
+ // Make a simplest test case to test chainStatus.purge.
+ // Make sure status after purge 1 block expected.
+ b00 := &types.Block{Hash: common.NewRandomHash()}
+ b01 := &types.Block{Hash: common.NewRandomHash()}
+ b02 := &types.Block{Hash: common.NewRandomHash()}
+ chain := &chainStatus{
+ blocks: []*types.Block{b00, b01, b02},
+ nextAck: []uint64{1, 1, 1, 1},
+ nextOutput: 1,
+ }
+ hashes := chain.purge()
+ s.Equal(hashes, common.Hashes{b00.Hash})
+ s.Equal(chain.minHeight, uint64(1))
+ s.Require().Len(chain.blocks, 2)
+ s.Equal(chain.blocks[0].Hash, b01.Hash)
+ s.Equal(chain.blocks[1].Hash, b02.Hash)
+}
+
+func (s *LatticeDataTestSuite) TestNextPosition() {
+ // Test 'NextPosition' method when lattice is ready.
+ data := s.genTestCase1()
+ s.Equal(data.nextPosition(0), types.Position{ChainID: 0, Height: 4})
+
+ // Test 'NextPosition' method when lattice is empty.
+ data = newLatticeData(0, 4, 0, 1000*time.Second)
+ s.Equal(data.nextPosition(0), types.Position{ChainID: 0, Height: 0})
+}
+
+func TestLatticeData(t *testing.T) {
+ suite.Run(t, new(LatticeDataTestSuite))
+}
diff --git a/core/lattice.go b/core/lattice.go
index bb3bccd..530fbab 100644
--- a/core/lattice.go
+++ b/core/lattice.go
@@ -18,8 +18,6 @@
package core
import (
- "errors"
- "fmt"
"sync"
"time"
@@ -29,392 +27,6 @@ import (
"github.com/dexon-foundation/dexon-consensus-core/core/types"
)
-// Errors for sanity check error.
-var (
- ErrAckingBlockNotExists = fmt.Errorf("acking block not exists")
- ErrInvalidParentChain = fmt.Errorf("invalid parent chain")
- ErrDuplicatedAckOnOneChain = fmt.Errorf("duplicated ack on one chain")
- ErrChainStatusCorrupt = fmt.Errorf("chain status corrupt")
- ErrInvalidChainID = fmt.Errorf("invalid chain id")
- ErrInvalidProposerID = fmt.Errorf("invalid proposer id")
- ErrInvalidTimestamp = fmt.Errorf("invalid timestamp")
- ErrInvalidWitness = fmt.Errorf("invalid witness data")
- ErrInvalidBlock = fmt.Errorf("invalid block")
- ErrForkBlock = fmt.Errorf("fork block")
- ErrNotAckParent = fmt.Errorf("not ack parent")
- ErrDoubleAck = fmt.Errorf("double ack")
- ErrInvalidBlockHeight = fmt.Errorf("invalid block height")
- ErrAlreadyInLattice = fmt.Errorf("block already in lattice")
- ErrIncorrectBlockTime = fmt.Errorf("block timestampe is incorrect")
-)
-
-// Errors for method usage
-var (
- ErrRoundNotIncreasing = errors.New("round not increasing")
-)
-
-// latticeData is a module for storing lattice.
-type latticeData struct {
- // chains stores chains' blocks and other info.
- chains []*chainStatus
-
- // blockByHash stores blocks, indexed by block hash.
- blockByHash map[common.Hash]*types.Block
-
- // Block interval specifies reasonable time difference between
- // parent/child blocks.
- minBlockTimeInterval time.Duration
- maxBlockTimeInterval time.Duration
-
- // This stores configuration for each round.
- numChainsForRounds []uint32
- minRound uint64
-}
-
-// newLatticeData creates a new latticeData struct.
-func newLatticeData(
- round uint64,
- chainNum uint32,
- minBlockTimeInterval time.Duration,
- maxBlockTimeInterval time.Duration) (data *latticeData) {
- data = &latticeData{
- chains: make([]*chainStatus, chainNum),
- blockByHash: make(map[common.Hash]*types.Block),
- minBlockTimeInterval: minBlockTimeInterval,
- maxBlockTimeInterval: maxBlockTimeInterval,
- numChainsForRounds: []uint32{chainNum},
- minRound: round,
- }
- for i := range data.chains {
- data.chains[i] = &chainStatus{
- ID: uint32(i),
- blocks: []*types.Block{},
- nextAck: make([]uint64, chainNum),
- }
- }
- return
-}
-
-func (data *latticeData) sanityCheck(b *types.Block) error {
- // Check if the chain id is valid.
- if b.Position.ChainID >= uint32(len(data.chains)) {
- return ErrInvalidChainID
- }
-
- // TODO(mission): Check if its proposer is in validator set somewhere,
- // lattice doesn't have to know about node set.
-
- // Check if it forks
- if bInLattice := data.chains[b.Position.ChainID].getBlockByHeight(
- b.Position.Height); bInLattice != nil {
-
- if b.Hash != bInLattice.Hash {
- return ErrForkBlock
- }
- return ErrAlreadyInLattice
- }
- // TODO(mission): check if fork by loading blocks from DB if the block
- // doesn't exists because forking is serious.
-
- // Check if it acks older blocks.
- acksByChainID := make(map[uint32]struct{}, len(data.chains))
- for _, hash := range b.Acks {
- if bAck, exist := data.blockByHash[hash]; exist {
- if bAck.Position.Height <
- data.chains[bAck.Position.ChainID].nextAck[b.Position.ChainID] {
- return ErrDoubleAck
- }
- // Check if ack two blocks on the same chain. This would need
- // to check after we replace map with slice for acks.
- if _, acked := acksByChainID[bAck.Position.ChainID]; acked {
- return ErrDuplicatedAckOnOneChain
- }
- acksByChainID[bAck.Position.ChainID] = struct{}{}
- } else {
- // This error has the same checking effect as areAllAcksInLattice.
- return ErrAckingBlockNotExists
- }
- }
-
- // Check non-genesis blocks if it acks its parent.
- if b.Position.Height > 0 {
- if !b.IsAcking(b.ParentHash) {
- return ErrNotAckParent
- }
- bParent := data.blockByHash[b.ParentHash]
- if bParent.Position.ChainID != b.Position.ChainID {
- return ErrInvalidParentChain
- }
- if bParent.Position.Height != b.Position.Height-1 {
- return ErrInvalidBlockHeight
- }
- if bParent.Witness.Height > b.Witness.Height {
- return ErrInvalidWitness
- }
- // Check if its timestamp is valid.
- if !b.Timestamp.After(bParent.Timestamp) {
- return ErrInvalidTimestamp
- }
- // Check if its timestamp is in expected range.
- if b.Timestamp.Before(bParent.Timestamp.Add(data.minBlockTimeInterval)) ||
- b.Timestamp.After(bParent.Timestamp.Add(data.maxBlockTimeInterval)) {
-
- return ErrIncorrectBlockTime
- }
- }
- return nil
-}
-
-// areAllAcksReceived checks if all ack blocks of a block are all in lattice,
-// we would make sure all blocks not acked by some chain would be kept
-// in working set.
-func (data *latticeData) areAllAcksInLattice(b *types.Block) bool {
- for _, h := range b.Acks {
- bAck, exist := data.blockByHash[h]
- if !exist {
- return false
- }
- if bAckInLattice := data.chains[bAck.Position.ChainID].getBlockByHeight(
- bAck.Position.Height); bAckInLattice != nil {
-
- if bAckInLattice.Hash != bAck.Hash {
- panic("areAllAcksInLattice: latticeData.chains has corrupted")
- }
- } else {
- return false
- }
- }
- return true
-}
-
-// addBlock processes block, it does sanity check, inserts block into
-// lattice and deletes blocks which will not be used.
-func (data *latticeData) addBlock(
- block *types.Block) (deliverable []*types.Block, err error) {
-
- var (
- bAck *types.Block
- updated bool
- )
- // TODO(mission): sanity check twice, might hurt performance.
- // If a block does not pass sanity check, report error.
- if err = data.sanityCheck(block); err != nil {
- return
- }
- if err = data.chains[block.Position.ChainID].addBlock(block); err != nil {
- return
- }
- data.blockByHash[block.Hash] = block
- // Update nextAcks.
- for _, ack := range block.Acks {
- bAck = data.blockByHash[ack]
- data.chains[bAck.Position.ChainID].nextAck[block.Position.ChainID] =
- bAck.Position.Height + 1
- }
- // Extract blocks that deliverable to total ordering.
- // A block is deliverable to total ordering iff:
- // - All its acking blocks are delivered to total ordering.
- for {
- updated = false
- for _, status := range data.chains {
- tip := status.getBlockByHeight(status.nextOutput)
- if tip == nil {
- continue
- }
- allAckingBlockDelivered := true
- for _, ack := range tip.Acks {
- bAck, exists := data.blockByHash[ack]
- if !exists {
- continue
- }
- if data.chains[bAck.Position.ChainID].nextOutput >
- bAck.Position.Height {
-
- continue
- }
- // This acked block exists and not delivered yet.
- allAckingBlockDelivered = false
- }
- if allAckingBlockDelivered {
- deliverable = append(deliverable, tip)
- status.nextOutput++
- updated = true
- }
- }
- if !updated {
- break
- }
- }
-
- // Delete old blocks in "chains" and "blocks" to release memory space.
- //
- // A block is safe to be deleted iff:
- // - It's delivered to total ordering
- // - All chains (including its proposing chain) acks some block with
- // higher height in its proposing chain.
- //
- // This works because blocks of height below this minimum are not going to be
- // acked anymore, the ackings of these blocks are illegal.
- for _, status := range data.chains {
- for _, h := range status.purge() {
- delete(data.blockByHash, h)
- }
- }
- return
-}
-
-// prepareBlock helps to setup fields of block based on its ProposerID,
-// including:
-// - Set 'Acks' and 'Timestamps' for the highest block of each validator not
-// acked by this proposer before.
-// - Set 'ParentHash' and 'Height' from parent block, if we can't find a
-// parent, these fields would be setup like a genesis block.
-func (data *latticeData) prepareBlock(block *types.Block) {
- // Reset fields to make sure we got these information from parent block.
- block.Position.Height = 0
- block.ParentHash = common.Hash{}
- acks := common.Hashes{}
- for chainID := range data.chains {
- // find height of the latest block for that validator.
- var (
- curBlock *types.Block
- nextHeight = data.chains[chainID].nextAck[block.Position.ChainID]
- )
- for {
- tmpBlock := data.chains[chainID].getBlockByHeight(nextHeight)
- if tmpBlock == nil {
- break
- }
- curBlock = tmpBlock
- nextHeight++
- }
- if curBlock == nil {
- continue
- }
- acks = append(acks, curBlock.Hash)
- if uint32(chainID) == block.Position.ChainID {
- block.ParentHash = curBlock.Hash
- block.Position.Height = curBlock.Position.Height + 1
- block.Witness.Height = curBlock.Witness.Height
- }
- }
- block.Acks = common.NewSortedHashes(acks)
- return
-}
-
-// TODO(mission): make more abstraction for this method.
-// nextHeight returns the next height for the chain.
-func (data *latticeData) nextPosition(chainID uint32) types.Position {
- return data.chains[chainID].nextPosition()
-}
-
-// appendConfig appends a configuration for upcoming round. When you append
-// a config for round R, next time you can only append the config for round R+1.
-func (data *latticeData) appendConfig(
- round uint64, config *types.Config) (err error) {
-
- if round != data.minRound+uint64(len(data.numChainsForRounds)) {
- return ErrRoundNotIncreasing
- }
- data.numChainsForRounds = append(data.numChainsForRounds, config.NumChains)
- return nil
-}
-
-type chainStatus struct {
- // ID keeps the chainID of this chain status.
- ID uint32
-
- // blocks stores blocks proposed for this chain, sorted by height.
- blocks []*types.Block
-
- // minHeight keeps minimum height in blocks.
- minHeight uint64
-
- // nextAck stores the height of next height that should be acked, i.e. last
- // acked height + 1. Initialized to 0.
- // being acked. For example, rb.chains[vid1].nextAck[vid2] - 1 is the last
- // acked height by vid2 acking vid1.
- nextAck []uint64
-
- // nextOutput is the next output height of block, default to 0.
- nextOutput uint64
-}
-
-func (s *chainStatus) getBlockByHeight(height uint64) (b *types.Block) {
- if height < s.minHeight {
- return
- }
- idx := int(height - s.minHeight)
- if idx >= len(s.blocks) {
- return
- }
- b = s.blocks[idx]
- return
-}
-
-func (s *chainStatus) addBlock(b *types.Block) error {
- if len(s.blocks) > 0 {
- // Make sure the height of incoming block should be
- // plus one to current latest blocks if exists.
- if s.blocks[len(s.blocks)-1].Position.Height != b.Position.Height-1 {
- return ErrChainStatusCorrupt
- }
- } else {
- if b.Position.Height != 0 {
- return ErrChainStatusCorrupt
- }
- }
- s.blocks = append(s.blocks, b)
- return nil
-}
-
-func (s *chainStatus) calcPurgeHeight() (safe uint64, ok bool) {
- // blocks with height less than min(nextOutput, nextAck...)
- // are safe to be purged.
- safe = s.nextOutput
- for _, ackedHeight := range s.nextAck {
- if safe > ackedHeight {
- safe = ackedHeight
- }
- }
- // Both 'nextOutput' and 'nextAck' represents some block to be
- // outputed/acked. To find a block already outputed/acked, the height
- // needs to be minus 1.
- if safe == 0 {
- // Avoid underflow.
- return
- }
- safe--
- if safe < s.minHeight {
- return
- }
- ok = true
- return
-}
-
-// purge blocks if they are safe to be deleted from working set.
-func (s *chainStatus) purge() (purged common.Hashes) {
- safe, ok := s.calcPurgeHeight()
- if !ok {
- return
- }
- newMinIndex := safe - s.minHeight + 1
- for _, b := range s.blocks[:newMinIndex] {
- purged = append(purged, b.Hash)
- }
- s.blocks = s.blocks[newMinIndex:]
- s.minHeight = safe + 1
- return
-}
-
-// nextPosition returns a valid position for new block in this chain.
-func (s *chainStatus) nextPosition() types.Position {
- return types.Position{
- ChainID: s.ID,
- Height: s.minHeight + uint64(len(s.blocks)),
- }
-}
-
// Lattice represents a unit to produce a global ordering from multiple chains.
type Lattice struct {
lock sync.RWMutex
diff --git a/core/lattice_test.go b/core/lattice_test.go
index 329d698..2115d1a 100644
--- a/core/lattice_test.go
+++ b/core/lattice_test.go
@@ -19,7 +19,6 @@ package core
import (
"math/rand"
- "sort"
"testing"
"time"
@@ -119,675 +118,6 @@ func (s *LatticeTestSuite) newTestLatticeMgr(
db)}
}
-// hashBlock is a helper to hash a block and check if any error.
-func (s *LatticeTestSuite) hashBlock(b *types.Block) {
- var err error
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
-}
-
-func (s *LatticeTestSuite) prepareGenesisBlock(
- chainID uint32) (b *types.Block) {
-
- b = &types.Block{
- ParentHash: common.Hash{},
- Position: types.Position{
- ChainID: chainID,
- Height: 0,
- },
- Acks: common.NewSortedHashes(common.Hashes{}),
- Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- return
-}
-
-// genTestCase1 generates test case 1,
-// 3
-// |
-// 2
-// | \
-// 1 | 1
-// | | |
-// 0 0 0 0 (block height)
-// 0 1 2 3 (validator)
-func (s *LatticeTestSuite) genTestCase1() (data *latticeData) {
- // Create new reliableBroadcast instance with 4 validators
- var (
- round uint64
- b *types.Block
- delivered []*types.Block
- h common.Hash
- chainNum uint32 = 4
- req = s.Require()
- err error
- )
-
- data = newLatticeData(round, chainNum, 2*time.Nanosecond, 1000*time.Second)
- // Add genesis blocks.
- for i := uint32(0); i < chainNum; i++ {
- b = s.prepareGenesisBlock(i)
- delivered, err = data.addBlock(b)
- // Genesis blocks are safe to be added to DAG, they acks no one.
- req.Len(delivered, 1)
- req.Nil(err)
- }
-
- // Add block 0-1 which acks 0-0.
- h = data.chains[0].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Hash: common.NewRandomHash(),
- Timestamp: time.Now().UTC(),
- Position: types.Position{
- ChainID: 0,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- Witness: types.Witness{
- Height: 1,
- },
- }
- s.hashBlock(b)
- delivered, err = data.addBlock(b)
- req.Len(delivered, 1)
- req.Equal(delivered[0].Hash, b.Hash)
- req.Nil(err)
- req.NotNil(data.chains[0].getBlockByHeight(1))
-
- // Add block 0-2 which acks 0-1 and 1-0.
- h = data.chains[0].getBlockByHeight(1).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 0,
- Height: 2,
- },
- Timestamp: time.Now().UTC(),
- Acks: common.NewSortedHashes(common.Hashes{
- h,
- data.chains[1].getBlockByHeight(0).Hash,
- }),
- Witness: types.Witness{
- Height: 2,
- },
- }
- s.hashBlock(b)
- delivered, err = data.addBlock(b)
- req.Len(delivered, 1)
- req.Equal(delivered[0].Hash, b.Hash)
- req.Nil(err)
- req.NotNil(data.chains[0].getBlockByHeight(2))
-
- // Add block 0-3 which acks 0-2.
- h = data.chains[0].getBlockByHeight(2).Hash
- b = &types.Block{
- ParentHash: h,
- Hash: common.NewRandomHash(),
- Timestamp: time.Now().UTC(),
- Position: types.Position{
- ChainID: 0,
- Height: 3,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- Witness: types.Witness{
- Height: 3,
- },
- }
- s.hashBlock(b)
- delivered, err = data.addBlock(b)
- req.Len(delivered, 1)
- req.Equal(delivered[0].Hash, b.Hash)
- req.Nil(err)
- req.NotNil(data.chains[0].getBlockByHeight(3))
-
- // Add block 3-1 which acks 3-0.
- h = data.chains[3].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Hash: common.NewRandomHash(),
- Timestamp: time.Now().UTC(),
- Position: types.Position{
- ChainID: 3,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- Witness: types.Witness{
- Height: 1,
- },
- }
- s.hashBlock(b)
- delivered, err = data.addBlock(b)
- req.Len(delivered, 1)
- req.Equal(delivered[0].Hash, b.Hash)
- req.Nil(err)
- req.NotNil(data.chains[3].getBlockByHeight(0))
- return
-}
-
-func (s *LatticeTestSuite) TestSanityCheckInDataLayer() {
- var (
- b *types.Block
- h common.Hash
- data = s.genTestCase1()
- req = s.Require()
- err error
- )
-
- // Non-genesis block with no ack, should get error.
- b = &types.Block{
- ParentHash: common.NewRandomHash(),
- Position: types.Position{
- ChainID: 0,
- Height: 10,
- },
- Acks: common.NewSortedHashes(common.Hashes{}),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(ErrNotAckParent.Error(), err.Error())
-
- // Non-genesis block which acks its parent but the height is invalid.
- h = data.chains[1].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 1,
- Height: 2,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(ErrInvalidBlockHeight.Error(), err.Error())
-
- // Invalid chain ID.
- h = data.chains[1].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 100,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(ErrInvalidChainID.Error(), err.Error())
-
- // Fork block.
- h = data.chains[0].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 0,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(ErrForkBlock.Error(), err.Error())
-
- // Replicated ack.
- h = data.chains[0].getBlockByHeight(3).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 0,
- Height: 4,
- },
- Acks: common.NewSortedHashes(common.Hashes{
- h,
- data.chains[1].getBlockByHeight(0).Hash,
- }),
- Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(ErrDoubleAck.Error(), err.Error())
-
- // Acking block doesn't exists.
- h = data.chains[1].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 1,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{
- h,
- common.NewRandomHash(),
- }),
- Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err.Error(), ErrAckingBlockNotExists.Error())
-
- // Parent block on different chain.
- h = data.chains[1].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 2,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{
- h,
- data.chains[2].getBlockByHeight(0).Hash,
- }),
- Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err.Error(), ErrInvalidParentChain.Error())
-
- // Ack two blocks on the same chain.
- h = data.chains[2].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 2,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{
- h,
- data.chains[0].getBlockByHeight(0).Hash,
- data.chains[0].getBlockByHeight(1).Hash,
- }),
- Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err.Error(), ErrDuplicatedAckOnOneChain.Error())
-
- // Witness height decreases.
- h = data.chains[0].getBlockByHeight(3).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 0,
- Height: 4,
- },
- Timestamp: time.Now().UTC(),
- Acks: common.NewSortedHashes(common.Hashes{
- h,
- }),
- Witness: types.Witness{
- Height: 2,
- },
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err.Error(), ErrInvalidWitness.Error())
-
- // Add block 3-1 which acks 3-0, and violet reasonable block time interval.
- h = data.chains[2].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Hash: common.NewRandomHash(),
- Timestamp: time.Now().UTC().Add(data.maxBlockTimeInterval),
- Position: types.Position{
- ChainID: 2,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err, ErrIncorrectBlockTime)
- // Violet minimum block time interval.
- b.Timestamp =
- data.chains[2].getBlockByHeight(0).Timestamp.Add(1 * time.Nanosecond)
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err, ErrIncorrectBlockTime)
-
- // Normal block.
- h = data.chains[1].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 1,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- req.Nil(data.sanityCheck(b))
-}
-
-func (s *LatticeTestSuite) TestAreAllAcksInLattice() {
- var (
- b *types.Block
- data = s.genTestCase1()
- req = s.Require()
- )
-
- // Empty ack should get true, although won't pass sanity check.
- b = &types.Block{
- Acks: common.NewSortedHashes(common.Hashes{}),
- }
- req.True(data.areAllAcksInLattice(b))
-
- // Acks blocks in lattice
- b = &types.Block{
- Acks: common.NewSortedHashes(common.Hashes{
- data.chains[0].getBlockByHeight(0).Hash,
- data.chains[0].getBlockByHeight(1).Hash,
- }),
- }
- req.True(data.areAllAcksInLattice(b))
-
- // Acks random block hash.
- b = &types.Block{
- Acks: common.NewSortedHashes(common.Hashes{common.NewRandomHash()}),
- }
- req.False(data.areAllAcksInLattice(b))
-}
-
-func (s *LatticeTestSuite) TestRandomIntensiveAcking() {
- var (
- round uint64
- chainNum uint32 = 19
- data = newLatticeData(round, chainNum, 0, 1000*time.Second)
- req = s.Require()
- delivered []*types.Block
- extracted []*types.Block
- b *types.Block
- err error
- )
-
- // Generate genesis blocks.
- for i := uint32(0); i < chainNum; i++ {
- b = s.prepareGenesisBlock(i)
- delivered, err = data.addBlock(b)
- req.Len(delivered, 1)
- req.Nil(err)
- }
-
- for i := 0; i < 5000; i++ {
- b := &types.Block{
- Position: types.Position{
- ChainID: uint32(rand.Intn(int(chainNum))),
- },
- Timestamp: time.Now().UTC(),
- }
- data.prepareBlock(b)
- s.hashBlock(b)
- delivered, err = data.addBlock(b)
- req.Nil(err)
- extracted = append(extracted, delivered...)
- }
-
- // The len of array extractedBlocks should be about 5000.
- req.True(len(extracted) > 4500)
- // The len of data.blockInfos should be small if deleting mechanism works.
- req.True(len(data.blockByHash) < 500)
-}
-
-func (s *LatticeTestSuite) TestRandomlyGeneratedBlocks() {
- var (
- round uint64
- chainNum uint32 = 19
- blockNum = 50
- repeat = 20
- delivered []*types.Block
- err error
- req = s.Require()
- datum []*latticeData
- )
-
- if testing.Short() {
- chainNum = 7
- repeat = 3
- }
-
- // Prepare a randomly generated blocks.
- db, err := blockdb.NewMemBackedBlockDB()
- req.Nil(err)
- gen := test.NewBlocksGenerator(nil, hashBlock)
- _, err = gen.Generate(chainNum, blockNum, nil, db)
- req.Nil(err)
- iter, err := db.GetAll()
- req.Nil(err)
- // Setup a revealer that would reveal blocks randomly but still form
- // valid DAG without holes.
- revealer, err := test.NewRandomDAGRevealer(iter)
- req.Nil(err)
-
- revealedHashesAsString := map[string]struct{}{}
- deliveredHashesAsString := map[string]struct{}{}
- for i := 0; i < repeat; i++ {
- data := newLatticeData(round, chainNum, 0, 1000*time.Second)
- deliveredHashes := common.Hashes{}
- revealedHashes := common.Hashes{}
- revealer.Reset()
- for {
- // Reveal next block.
- b, err := revealer.Next()
- if err != nil {
- if err == blockdb.ErrIterationFinished {
- err = nil
- break
- }
- }
- s.Require().Nil(err)
- revealedHashes = append(revealedHashes, b.Hash)
-
- // Pass blocks to lattice.
- delivered, err = data.addBlock(&b)
- req.Nil(err)
- for _, b := range delivered {
- deliveredHashes = append(deliveredHashes, b.Hash)
- }
- }
- // To make it easier to check, sort hashes of
- // strongly acked blocks, and concatenate them into
- // a string.
- sort.Sort(deliveredHashes)
- asString := ""
- for _, h := range deliveredHashes {
- asString += h.String() + ","
- }
- deliveredHashesAsString[asString] = struct{}{}
- // Compose revealing hash sequense to string.
- asString = ""
- for _, h := range revealedHashes {
- asString += h.String() + ","
- }
- revealedHashesAsString[asString] = struct{}{}
- datum = append(datum, data)
- }
- // Make sure concatenated hashes of strongly acked blocks are identical.
- req.Len(deliveredHashesAsString, 1)
- for h := range deliveredHashesAsString {
- // Make sure at least some blocks are strongly acked.
- req.True(len(h) > 0)
- }
- // Make sure we test for more than 1 revealing sequence.
- req.True(len(revealedHashesAsString) > 1)
- // Make sure each latticeData instance have identical working set.
- req.True(len(datum) >= repeat)
- for i, bI := range datum {
- for j, bJ := range datum {
- if i == j {
- continue
- }
- for chainID, statusI := range bI.chains {
- req.Equal(statusI.minHeight, bJ.chains[chainID].minHeight)
- req.Equal(statusI.nextOutput, bJ.chains[chainID].nextOutput)
- req.Equal(len(statusI.blocks), len(bJ.chains[chainID].blocks))
- // Check nextAck.
- for x, ackI := range statusI.nextAck {
- req.Equal(ackI, bJ.chains[chainID].nextAck[x])
- }
- // Check blocks.
- if len(statusI.blocks) > 0 {
- req.Equal(statusI.blocks[0], bJ.chains[chainID].blocks[0])
- }
- }
- // Check blockByHash.
- req.Equal(bI.blockByHash, bJ.blockByHash)
- }
- }
-}
-
-func (s *LatticeTestSuite) TestPrepareBlock() {
- var (
- round uint64
- chainNum uint32 = 4
- req = s.Require()
- minInterval = 50 * time.Millisecond
- delivered []*types.Block
- err error
- data = newLatticeData(
- round, chainNum, 0, 3000*time.Second)
- )
- // Setup genesis blocks.
- b00 := s.prepareGenesisBlock(0)
- time.Sleep(minInterval)
- b10 := s.prepareGenesisBlock(1)
- time.Sleep(minInterval)
- b20 := s.prepareGenesisBlock(2)
- time.Sleep(minInterval)
- b30 := s.prepareGenesisBlock(3)
- // Submit these blocks to lattice.
- delivered, err = data.addBlock(b00)
- req.Len(delivered, 1)
- req.Nil(err)
- delivered, err = data.addBlock(b10)
- req.Len(delivered, 1)
- req.Nil(err)
- delivered, err = data.addBlock(b20)
- req.Len(delivered, 1)
- req.Nil(err)
- delivered, err = data.addBlock(b30)
- req.Len(delivered, 1)
- req.Nil(err)
- // We should be able to collect all 4 genesis blocks by calling
- // prepareBlock.
- b11 := &types.Block{
- Position: types.Position{
- ChainID: 1,
- },
- Timestamp: time.Now().UTC(),
- }
- data.prepareBlock(b11)
- s.hashBlock(b11)
- req.Contains(b11.Acks, b00.Hash)
- req.Contains(b11.Acks, b10.Hash)
- req.Contains(b11.Acks, b20.Hash)
- req.Contains(b11.Acks, b30.Hash)
- req.Equal(b11.ParentHash, b10.Hash)
- req.Equal(b11.Position.Height, uint64(1))
- delivered, err = data.addBlock(b11)
- req.Len(delivered, 1)
- req.Nil(err)
- // Propose/Process a block based on collected info.
- b12 := &types.Block{
- Position: types.Position{
- ChainID: 1,
- },
- Timestamp: time.Now().UTC(),
- }
- data.prepareBlock(b12)
- s.hashBlock(b12)
- // This time we only need to ack b11.
- req.Len(b12.Acks, 1)
- req.Contains(b12.Acks, b11.Hash)
- req.Equal(b12.ParentHash, b11.Hash)
- req.Equal(b12.Position.Height, uint64(2))
- // When calling with other validator ID, we should be able to
- // get 4 blocks to ack.
- b01 := &types.Block{
- Position: types.Position{
- ChainID: 0,
- },
- }
- data.prepareBlock(b01)
- s.hashBlock(b01)
- req.Len(b01.Acks, 4)
- req.Contains(b01.Acks, b00.Hash)
- req.Contains(b01.Acks, b11.Hash)
- req.Contains(b01.Acks, b20.Hash)
- req.Contains(b01.Acks, b30.Hash)
- req.Equal(b01.ParentHash, b00.Hash)
- req.Equal(b01.Position.Height, uint64(1))
-}
-
-func (s *LatticeTestSuite) TestCalcPurgeHeight() {
- // Test chainStatus.calcPurgeHeight, we don't have
- // to prepare blocks to test it.
- var req = s.Require()
- chain := &chainStatus{
- minHeight: 0,
- nextOutput: 0,
- nextAck: []uint64{1, 1, 1, 1},
- }
- // When calculated safe is underflow, nok.
- safe, ok := chain.calcPurgeHeight()
- req.False(ok)
- // height=1 is outputed, and acked by everyone else.
- chain.nextOutput = 1
- safe, ok = chain.calcPurgeHeight()
- req.True(ok)
- req.Equal(safe, uint64(0))
- // Should take nextAck's height into consideration.
- chain.nextOutput = 2
- safe, ok = chain.calcPurgeHeight()
- req.True(ok)
- req.Equal(safe, uint64(0))
- // When minHeight is large that safe height, return nok.
- chain.minHeight = 1
- chain.nextOutput = 1
- safe, ok = chain.calcPurgeHeight()
- req.False(ok)
-}
-
-func (s *LatticeTestSuite) TestPurge() {
- // Make a simplest test case to test chainStatus.purge.
- // Make sure status after purge 1 block expected.
- b00 := &types.Block{Hash: common.NewRandomHash()}
- b01 := &types.Block{Hash: common.NewRandomHash()}
- b02 := &types.Block{Hash: common.NewRandomHash()}
- chain := &chainStatus{
- blocks: []*types.Block{b00, b01, b02},
- nextAck: []uint64{1, 1, 1, 1},
- nextOutput: 1,
- }
- hashes := chain.purge()
- s.Equal(hashes, common.Hashes{b00.Hash})
- s.Equal(chain.minHeight, uint64(1))
- s.Require().Len(chain.blocks, 2)
- s.Equal(chain.blocks[0].Hash, b01.Hash)
- s.Equal(chain.blocks[1].Hash, b02.Hash)
-}
-
-func (s *LatticeTestSuite) TestNextPosition() {
- // Test 'NextPosition' method when lattice is ready.
- data := s.genTestCase1()
- s.Equal(data.nextPosition(0), types.Position{ChainID: 0, Height: 4})
-
- // Test 'NextPosition' method when lattice is empty.
- data = newLatticeData(0, 4, 0, 1000*time.Second)
- s.Equal(data.nextPosition(0), types.Position{ChainID: 0, Height: 0})
-}
-
func (s *LatticeTestSuite) TestBasicUsage() {
// One Lattice prepare blocks on chains randomly selected each time
// and process it. Those generated blocks and kept into a buffer, and