Diffstat (limited to 'core')
-rw-r--r--  core/consensus.go                                                22
-rw-r--r--  core/consensus_test.go                                            2
-rw-r--r--  core/lattice.go (renamed from core/blocklattice.go)              445
-rw-r--r--  core/lattice_test.go (renamed from core/blocklattice_test.go)    397
-rw-r--r--  core/shard.go                                                    198
-rw-r--r--  core/shard_test.go                                               225
6 files changed, 613 insertions, 676 deletions
diff --git a/core/consensus.go b/core/consensus.go
index e7b5ec7..6cc0937 100644
--- a/core/consensus.go
+++ b/core/consensus.go
@@ -48,7 +48,7 @@ var (
// consensusBAReceiver implements agreementReceiver.
type consensusBAReceiver struct {
- // TODO(mission): consensus would be replaced by shard and network.
+ // TODO(mission): consensus would be replaced by lattice and network.
consensus *Consensus
agreementModule *agreement
chainID uint32
@@ -169,8 +169,8 @@ type Consensus struct {
cfgModule *configurationChain
// Dexon consensus v1's modules.
- shardModule *Shard
- ccModule *compactionChain
+ lattice *Lattice
+ ccModule *compactionChain
// Interfaces.
db blockdb.BlockDatabase
@@ -198,7 +198,7 @@ func NewConsensus(
var round uint64
config := gov.Configuration(round)
// TODO(w): notarySet is different for each chain, need to write a
- // GetNotarySetForChain(nodeSet, shardID, chainID, crs) function to get the
+ // GetNotarySetForChain(nodeSet, chainID, crs) function to get the
// correct notary set for a given chain.
nodeSetCache := NewNodeSetCache(gov)
crs := gov.CRS(round)
@@ -215,8 +215,8 @@ func NewConsensus(
debugApp, _ := app.(Debug)
// Setup nonblocking module.
nbModule := newNonBlocking(app, debugApp)
- // Init shard.
- shardModule := NewShard(config, authModule, nbModule, nbModule, db)
+ // Init lattice.
+ lattice := NewLattice(config, authModule, nbModule, nbModule, db)
// Init configuration chain.
ID := types.NewNodeID(prv.PublicKey())
cfgModule := newConfigurationChain(
@@ -237,7 +237,7 @@ func NewConsensus(
ID: ID,
currentConfig: config,
ccModule: newCompactionChain(db),
- shardModule: shardModule,
+ lattice: lattice,
nbModule: nbModule,
gov: gov,
db: db,
@@ -335,7 +335,7 @@ BALoop:
nIDs = nodes.GetSubSet(con.gov.Configuration(con.round).NumNotarySet,
types.NewNotarySetTarget(con.gov.CRS(con.round), chainID))
}
- agreement.restart(nIDs, con.shardModule.NextPosition(chainID))
+ agreement.restart(nIDs, con.lattice.NextPosition(chainID))
default:
}
err := agreement.nextState()
@@ -498,7 +498,7 @@ func (con *Consensus) ProcessVote(vote *types.Vote) (err error) {
// preProcessBlock performs Byzantine Agreement on the block.
func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
- if err = con.shardModule.SanityCheck(b); err != nil {
+ if err = con.lattice.SanityCheck(b); err != nil {
return
}
if err = con.baModules[b.Position.ChainID].processBlock(b); err != nil {
@@ -509,7 +509,7 @@ func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
// processBlock is the entry point to submit one block to a Consensus instance.
func (con *Consensus) processBlock(block *types.Block) (err error) {
- verifiedBlocks, deliveredBlocks, err := con.shardModule.ProcessBlock(block)
+ verifiedBlocks, deliveredBlocks, err := con.lattice.ProcessBlock(block)
if err != nil {
return
}
@@ -540,7 +540,7 @@ func (con *Consensus) processBlock(block *types.Block) (err error) {
// PrepareBlock would setup header fields of block based on its ProposerID.
func (con *Consensus) prepareBlock(b *types.Block,
proposeTime time.Time) (err error) {
- if err = con.shardModule.PrepareBlock(b, proposeTime); err != nil {
+ if err = con.lattice.PrepareBlock(b, proposeTime); err != nil {
return
}
// TODO(mission): decide CRS by block's round, which could be determined by
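
Taken together, Consensus now talks to the renamed module at two points: the lattice vets a block before Byzantine Agreement, and orders it once it is confirmed. A condensed, illustrative restatement of the hunks above (the *Sketch method names are not part of this change):

// Before Byzantine Agreement: lattice-level sanity check, then BA.
func (con *Consensus) preProcessBlockSketch(b *types.Block) error {
	if err := con.lattice.SanityCheck(b); err != nil {
		return err
	}
	return con.baModules[b.Position.ChainID].processBlock(b)
}

// After confirmation: insert into the lattice and collect deliverables.
func (con *Consensus) processBlockSketch(b *types.Block) error {
	verifiedBlocks, deliveredBlocks, err := con.lattice.ProcessBlock(b)
	if err != nil {
		return err
	}
	// The remainder of the original hunk (not shown here) consumes
	// verifiedBlocks and deliveredBlocks.
	_, _ = verifiedBlocks, deliveredBlocks
	return nil
}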
diff --git a/core/consensus_test.go b/core/consensus_test.go
index 71163e7..bb9e7de 100644
--- a/core/consensus_test.go
+++ b/core/consensus_test.go
@@ -103,7 +103,7 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
//
// - This test case only works for Total Ordering with K=0.
// - Byzantine Agreement layer is not taken into consideration, every
- // block is passed to shard module directly.
+ // block is passed to lattice module directly.
var (
gov, err = test.NewGovernance(4, time.Second)
minInterval = gov.Configuration(0).MinBlockInterval
diff --git a/core/blocklattice.go b/core/lattice.go
index 59adaf2..2da32ba 100644
--- a/core/blocklattice.go
+++ b/core/lattice.go
@@ -19,9 +19,12 @@ package core
import (
"fmt"
+ "sync"
"time"
"github.com/dexon-foundation/dexon-consensus-core/common"
+ "github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
+ "github.com/dexon-foundation/dexon-consensus-core/core/crypto"
"github.com/dexon-foundation/dexon-consensus-core/core/types"
)
@@ -42,9 +45,9 @@ var (
ErrIncorrectBlockTime = fmt.Errorf("block timestamp is incorrect")
)
-// blockLattice is a module for storing blocklattice.
-type blockLattice struct {
- // lattice stores chains' blocks and other info.
+// latticeData is a module for storing lattice.
+type latticeData struct {
+ // chains stores chains' blocks and other info.
chains []*chainStatus
// blockByHash stores blocks, indexed by block hash.
@@ -56,114 +59,19 @@ type blockLattice struct {
maxBlockTimeInterval time.Duration
}
-type chainStatus struct {
- // ID keeps the chainID of this chain status.
- ID uint32
-
- // blocks stores blocks proposed for this chain, sorted by height.
- blocks []*types.Block
-
- // minHeight keeps minimum height in blocks.
- minHeight uint64
-
- // nextAck stores the height of next height that should be acked, i.e. last
- // acked height + 1. Initialized to 0.
- // being acked. For example, rb.chains[vid1].nextAck[vid2] - 1 is the last
- // acked height by vid2 acking vid1.
- nextAck []uint64
-
- // nextOutput is the next output height of block, default to 0.
- nextOutput uint64
-}
-
-func (s *chainStatus) getBlockByHeight(height uint64) (b *types.Block) {
- if height < s.minHeight {
- return
- }
- idx := int(height - s.minHeight)
- if idx >= len(s.blocks) {
- return
- }
- b = s.blocks[idx]
- return
-}
-
-func (s *chainStatus) addBlock(b *types.Block) error {
- if len(s.blocks) > 0 {
- // Make sure the height of incoming block should be
- // plus one to current latest blocks if exists.
- if s.blocks[len(s.blocks)-1].Position.Height != b.Position.Height-1 {
- return ErrChainStatusCorrupt
- }
- } else {
- if b.Position.Height != 0 {
- return ErrChainStatusCorrupt
- }
- }
- s.blocks = append(s.blocks, b)
- return nil
-}
-
-func (s *chainStatus) calcPurgeHeight() (safe uint64, ok bool) {
- // blocks with height less than min(nextOutput, nextAck...)
- // are safe to be purged.
- safe = s.nextOutput
- for _, ackedHeight := range s.nextAck {
- if safe > ackedHeight {
- safe = ackedHeight
- }
- }
- // Both 'nextOutput' and 'nextAck' represents some block to be
- // outputed/acked. To find a block already outputed/acked, the height
- // needs to be minus 1.
- if safe == 0 {
- // Avoid underflow.
- return
- }
- safe--
- if safe < s.minHeight {
- return
- }
- ok = true
- return
-}
-
-// purge blocks if they are safe to be deleted from working set.
-func (s *chainStatus) purge() (purged common.Hashes) {
- safe, ok := s.calcPurgeHeight()
- if !ok {
- return
- }
- newMinIndex := safe - s.minHeight + 1
- for _, b := range s.blocks[:newMinIndex] {
- purged = append(purged, b.Hash)
- }
- s.blocks = s.blocks[newMinIndex:]
- s.minHeight = safe + 1
- return
-}
-
-// nextPosition returns a valid position for new block in this chain.
-func (s *chainStatus) nextPosition() types.Position {
- return types.Position{
- ChainID: s.ID,
- Height: s.minHeight + uint64(len(s.blocks)),
- }
-}
-
-// newBlockLattice creates a new blockLattice struct.
-func newBlockLattice(
+// newLatticeData creates a new latticeData struct.
+func newLatticeData(
chainNum uint32,
minBlockTimeInterval time.Duration,
- maxBlockTimeInterval time.Duration) (bl *blockLattice) {
- bl = &blockLattice{
+ maxBlockTimeInterval time.Duration) (data *latticeData) {
+ data = &latticeData{
chains: make([]*chainStatus, chainNum),
blockByHash: make(map[common.Hash]*types.Block),
minBlockTimeInterval: minBlockTimeInterval,
maxBlockTimeInterval: maxBlockTimeInterval,
}
- for i := range bl.chains {
- bl.chains[i] = &chainStatus{
+ for i := range data.chains {
+ data.chains[i] = &chainStatus{
ID: uint32(i),
blocks: []*types.Block{},
nextAck: make([]uint64, chainNum),
@@ -172,17 +80,17 @@ func newBlockLattice(
return
}
-func (bl *blockLattice) sanityCheck(b *types.Block) error {
+func (data *latticeData) sanityCheck(b *types.Block) error {
// Check if the chain id is valid.
- if b.Position.ChainID >= uint32(len(bl.chains)) {
+ if b.Position.ChainID >= uint32(len(data.chains)) {
return ErrInvalidChainID
}
// TODO(mission): Check if its proposer is in validator set somewhere,
- // blocklattice doesn't have to know about node set.
+ // lattice doesn't have to know about node set.
// Check if it forks
- if bInLattice := bl.chains[b.Position.ChainID].getBlockByHeight(
+ if bInLattice := data.chains[b.Position.ChainID].getBlockByHeight(
b.Position.Height); bInLattice != nil {
if b.Hash != bInLattice.Hash {
@@ -194,11 +102,11 @@ func (bl *blockLattice) sanityCheck(b *types.Block) error {
// doesn't exist because forking is serious.
// Check if it acks older blocks.
- acksByChainID := make(map[uint32]struct{}, len(bl.chains))
+ acksByChainID := make(map[uint32]struct{}, len(data.chains))
for _, hash := range b.Acks {
- if bAck, exist := bl.blockByHash[hash]; exist {
+ if bAck, exist := data.blockByHash[hash]; exist {
if bAck.Position.Height <
- bl.chains[bAck.Position.ChainID].nextAck[b.Position.ChainID] {
+ data.chains[bAck.Position.ChainID].nextAck[b.Position.ChainID] {
return ErrDoubleAck
}
// Check if ack two blocks on the same chain. This would need
@@ -218,7 +126,7 @@ func (bl *blockLattice) sanityCheck(b *types.Block) error {
if !b.IsAcking(b.ParentHash) {
return ErrNotAckParent
}
- bParent := bl.blockByHash[b.ParentHash]
+ bParent := data.blockByHash[b.ParentHash]
if bParent.Position.ChainID != b.Position.ChainID {
return ErrInvalidParentChain
}
@@ -230,8 +138,8 @@ func (bl *blockLattice) sanityCheck(b *types.Block) error {
return ErrInvalidTimestamp
}
// Check if its timestamp is in expected range.
- if b.Timestamp.Before(bParent.Timestamp.Add(bl.minBlockTimeInterval)) ||
- b.Timestamp.After(bParent.Timestamp.Add(bl.maxBlockTimeInterval)) {
+ if b.Timestamp.Before(bParent.Timestamp.Add(data.minBlockTimeInterval)) ||
+ b.Timestamp.After(bParent.Timestamp.Add(data.maxBlockTimeInterval)) {
return ErrIncorrectBlockTime
}
@@ -240,19 +148,19 @@ func (bl *blockLattice) sanityCheck(b *types.Block) error {
}
// areAllAcksReceived checks if all ack blocks of a block are all in lattice,
-// blockLattice would make sure all blocks not acked by some chain would be kept
+// we would make sure all blocks not acked by some chain would be kept
// in working set.
-func (bl *blockLattice) areAllAcksInLattice(b *types.Block) bool {
+func (data *latticeData) areAllAcksInLattice(b *types.Block) bool {
for _, h := range b.Acks {
- bAck, exist := bl.blockByHash[h]
+ bAck, exist := data.blockByHash[h]
if !exist {
return false
}
- if bAckInLattice := bl.chains[bAck.Position.ChainID].getBlockByHeight(
+ if bAckInLattice := data.chains[bAck.Position.ChainID].getBlockByHeight(
bAck.Position.Height); bAckInLattice != nil {
if bAckInLattice.Hash != bAck.Hash {
- panic("areAllAcksInLattice: blockLattice.chains has corrupted")
+ panic("areAllAcksInLattice: latticeData.chains has corrupted")
}
} else {
return false
@@ -263,7 +171,7 @@ func (bl *blockLattice) areAllAcksInLattice(b *types.Block) bool {
// addBlock processes block, it does sanity check, inserts block into
// lattice and deletes blocks which will not be used.
-func (bl *blockLattice) addBlock(
+func (data *latticeData) addBlock(
block *types.Block) (deliverable []*types.Block, err error) {
var (
@@ -272,17 +180,17 @@ func (bl *blockLattice) addBlock(
)
// TODO(mission): sanity check twice, might hurt performance.
// If a block does not pass sanity check, report error.
- if err = bl.sanityCheck(block); err != nil {
+ if err = data.sanityCheck(block); err != nil {
return
}
- if err = bl.chains[block.Position.ChainID].addBlock(block); err != nil {
+ if err = data.chains[block.Position.ChainID].addBlock(block); err != nil {
return
}
- bl.blockByHash[block.Hash] = block
+ data.blockByHash[block.Hash] = block
// Update nextAcks.
for _, ack := range block.Acks {
- bAck = bl.blockByHash[ack]
- bl.chains[bAck.Position.ChainID].nextAck[block.Position.ChainID] =
+ bAck = data.blockByHash[ack]
+ data.chains[bAck.Position.ChainID].nextAck[block.Position.ChainID] =
bAck.Position.Height + 1
}
// Extract blocks that deliverable to total ordering.
@@ -290,18 +198,18 @@ func (bl *blockLattice) addBlock(
// - All its acking blocks are delivered to total ordering.
for {
updated = false
- for _, status := range bl.chains {
+ for _, status := range data.chains {
tip := status.getBlockByHeight(status.nextOutput)
if tip == nil {
continue
}
allAckingBlockDelivered := true
for _, ack := range tip.Acks {
- bAck, exists := bl.blockByHash[ack]
+ bAck, exists := data.blockByHash[ack]
if !exists {
continue
}
- if bl.chains[bAck.Position.ChainID].nextOutput >
+ if data.chains[bAck.Position.ChainID].nextOutput >
bAck.Position.Height {
continue
@@ -329,9 +237,9 @@ func (bl *blockLattice) addBlock(
//
// This works because blocks of height below this minimum are not going to be
// acked anymore, the ackings of these blocks are illegal.
- for _, status := range bl.chains {
+ for _, status := range data.chains {
for _, h := range status.purge() {
- delete(bl.blockByHash, h)
+ delete(data.blockByHash, h)
}
}
return
@@ -343,19 +251,19 @@ func (bl *blockLattice) addBlock(
// acked by this proposer before.
// - Set 'ParentHash' and 'Height' from parent block, if we can't find a
// parent, these fields would be setup like a genesis block.
-func (bl *blockLattice) prepareBlock(block *types.Block) {
+func (data *latticeData) prepareBlock(block *types.Block) {
// Reset fields to make sure we got these information from parent block.
block.Position.Height = 0
block.ParentHash = common.Hash{}
acks := common.Hashes{}
- for chainID := range bl.chains {
+ for chainID := range data.chains {
// find height of the latest block for that validator.
var (
curBlock *types.Block
- nextHeight = bl.chains[chainID].nextAck[block.Position.ChainID]
+ nextHeight = data.chains[chainID].nextAck[block.Position.ChainID]
)
for {
- tmpBlock := bl.chains[chainID].getBlockByHeight(nextHeight)
+ tmpBlock := data.chains[chainID].getBlockByHeight(nextHeight)
if tmpBlock == nil {
break
}
@@ -377,6 +285,271 @@ func (bl *blockLattice) prepareBlock(block *types.Block) {
// TODO(mission): make more abstraction for this method.
// nextHeight returns the next height for the chain.
-func (bl *blockLattice) nextPosition(chainID uint32) types.Position {
- return bl.chains[chainID].nextPosition()
+func (data *latticeData) nextPosition(chainID uint32) types.Position {
+ return data.chains[chainID].nextPosition()
+}
+
+type chainStatus struct {
+ // ID keeps the chainID of this chain status.
+ ID uint32
+
+ // blocks stores blocks proposed for this chain, sorted by height.
+ blocks []*types.Block
+
+ // minHeight keeps minimum height in blocks.
+ minHeight uint64
+
+ // nextAck stores, for each chain, the next height on this chain that
+ // should be acked, i.e. the last acked height + 1. Initialized to 0.
+ // For example, chains[vid1].nextAck[vid2] - 1 is the last height of
+ // vid1 acked by vid2.
+ nextAck []uint64
+
+ // nextOutput is the next output height of block, default to 0.
+ nextOutput uint64
+}
+
+func (s *chainStatus) getBlockByHeight(height uint64) (b *types.Block) {
+ if height < s.minHeight {
+ return
+ }
+ idx := int(height - s.minHeight)
+ if idx >= len(s.blocks) {
+ return
+ }
+ b = s.blocks[idx]
+ return
+}
+
+func (s *chainStatus) addBlock(b *types.Block) error {
+ if len(s.blocks) > 0 {
+ // Make sure the height of the incoming block is exactly one more than
+ // the current latest block, if one exists.
+ if s.blocks[len(s.blocks)-1].Position.Height != b.Position.Height-1 {
+ return ErrChainStatusCorrupt
+ }
+ } else {
+ if b.Position.Height != 0 {
+ return ErrChainStatusCorrupt
+ }
+ }
+ s.blocks = append(s.blocks, b)
+ return nil
+}
+
+func (s *chainStatus) calcPurgeHeight() (safe uint64, ok bool) {
+ // blocks with height less than min(nextOutput, nextAck...)
+ // are safe to be purged.
+ safe = s.nextOutput
+ for _, ackedHeight := range s.nextAck {
+ if safe > ackedHeight {
+ safe = ackedHeight
+ }
+ }
+ // Both 'nextOutput' and 'nextAck' represent some block yet to be
+ // output/acked. To find a block already output/acked, the height
+ // needs to be decreased by 1.
+ if safe == 0 {
+ // Avoid underflow.
+ return
+ }
+ safe--
+ if safe < s.minHeight {
+ return
+ }
+ ok = true
+ return
+}
+
+// purge blocks if they are safe to be deleted from working set.
+func (s *chainStatus) purge() (purged common.Hashes) {
+ safe, ok := s.calcPurgeHeight()
+ if !ok {
+ return
+ }
+ newMinIndex := safe - s.minHeight + 1
+ for _, b := range s.blocks[:newMinIndex] {
+ purged = append(purged, b.Hash)
+ }
+ s.blocks = s.blocks[newMinIndex:]
+ s.minHeight = safe + 1
+ return
+}
+
+// nextPosition returns a valid position for new block in this chain.
+func (s *chainStatus) nextPosition() types.Position {
+ return types.Position{
+ ChainID: s.ID,
+ Height: s.minHeight + uint64(len(s.blocks)),
+ }
+}
+
+// Lattice represents a unit to produce a global ordering from multiple chains.
+type Lattice struct {
+ lock sync.RWMutex
+ authModule *Authenticator
+ chainNum uint32
+ app Application
+ debug Debug
+ db blockdb.BlockDatabase
+ pool blockPool
+ data *latticeData
+ toModule *totalOrdering
+ ctModule *consensusTimestamp
+}
+
+// NewLattice constructs a Lattice instance.
+func NewLattice(
+ cfg *types.Config,
+ authModule *Authenticator,
+ app Application,
+ debug Debug,
+ db blockdb.BlockDatabase) (s *Lattice) {
+ data := newLatticeData(
+ cfg.NumChains,
+ cfg.MinBlockInterval,
+ cfg.MaxBlockInterval)
+ s = &Lattice{
+ authModule: authModule,
+ chainNum: cfg.NumChains,
+ app: app,
+ debug: debug,
+ db: db,
+ pool: newBlockPool(cfg.NumChains),
+ data: data,
+ toModule: newTotalOrdering(
+ uint64(cfg.K),
+ uint64(float32(cfg.NumChains-1)*cfg.PhiRatio+1),
+ cfg.NumChains),
+ ctModule: newConsensusTimestamp(),
+ }
+ return
+}
+
+// PrepareBlock sets up the block's fields based on the current lattice status.
+func (s *Lattice) PrepareBlock(
+ b *types.Block, proposeTime time.Time) (err error) {
+
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+
+ s.data.prepareBlock(b)
+ // TODO(mission): the proposeTime might be earlier than tip block of
+ // that chain. We should let latticeData suggest the time.
+ b.Timestamp = proposeTime
+ b.Payload, b.Witness.Data = s.app.PrepareBlock(b.Position)
+ if err = s.authModule.SignBlock(b); err != nil {
+ return
+ }
+ return
+}
+
+// SanityCheck checks if a block is valid based on the current lattice status.
+//
+// If some acking blocks don't exist yet, Lattice caches this block and
+// rechecks it when the lattice is updated in Lattice.ProcessBlock.
+func (s *Lattice) SanityCheck(b *types.Block) (err error) {
+ // Check the hash of block.
+ hash, err := hashBlock(b)
+ if err != nil || hash != b.Hash {
+ err = ErrIncorrectHash
+ return
+ }
+ // Check the signer.
+ pubKey, err := crypto.SigToPub(b.Hash, b.Signature)
+ if err != nil {
+ return
+ }
+ if !b.ProposerID.Equal(crypto.Keccak256Hash(pubKey.Bytes())) {
+ err = ErrIncorrectSignature
+ return
+ }
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+ if err = s.data.sanityCheck(b); err != nil {
+ // Add to block pool, once the lattice updated,
+ // would be checked again.
+ if err == ErrAckingBlockNotExists {
+ s.pool.addBlock(b)
+ }
+ return
+ }
+ return
+}
+
+// ProcessBlock adds a block into the lattice and delivers ordered blocks.
+// If any blocks pass the sanity check after this block is added into the
+// lattice, they would be returned, too.
+//
+// NOTE: assume the block passed sanity check.
+func (s *Lattice) ProcessBlock(
+ input *types.Block) (verified, delivered []*types.Block, err error) {
+
+ var (
+ tip, b *types.Block
+ toDelivered []*types.Block
+ inLattice []*types.Block
+ earlyDelivered bool
+ )
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ if inLattice, err = s.data.addBlock(input); err != nil {
+ return
+ }
+ if err = s.db.Put(*input); err != nil {
+ return
+ }
+ // TODO(mission): remove this hack, BA related stuffs should not
+ // be done here.
+ if s.debug != nil {
+ s.debug.StronglyAcked(input.Hash)
+ s.debug.BlockConfirmed(input.Hash)
+ }
+ // Purge blocks in pool with the same chainID and lower height.
+ s.pool.purgeBlocks(input.Position.ChainID, input.Position.Height)
+ // Replay tips in pool to check their validity.
+ for i := uint32(0); i < s.chainNum; i++ {
+ if tip = s.pool.tip(i); tip == nil {
+ continue
+ }
+ err = s.data.sanityCheck(tip)
+ if err == nil {
+ verified = append(verified, tip)
+ }
+ if err == ErrAckingBlockNotExists {
+ continue
+ }
+ s.pool.removeTip(i)
+ }
+ // Perform total ordering for each block added to lattice.
+ for _, b = range inLattice {
+ toDelivered, earlyDelivered, err = s.toModule.processBlock(b)
+ if err != nil {
+ return
+ }
+ if len(toDelivered) == 0 {
+ continue
+ }
+ hashes := make(common.Hashes, len(toDelivered))
+ for idx := range toDelivered {
+ hashes[idx] = toDelivered[idx].Hash
+ }
+ if s.debug != nil {
+ s.debug.TotalOrderingDelivered(hashes, earlyDelivered)
+ }
+ // Perform timestamp generation.
+ if err = s.ctModule.processBlocks(toDelivered); err != nil {
+ return
+ }
+ delivered = append(delivered, toDelivered...)
+ }
+ return
+}
+
+// NextPosition returns expected position of incoming block for that chain.
+func (s *Lattice) NextPosition(chainID uint32) types.Position {
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+
+ return s.data.nextPosition(chainID)
}
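
For reference, a minimal usage sketch of the public Lattice API introduced above (PrepareBlock, SanityCheck, ProcessBlock, NextPosition). The helper name driveLattice and its wiring are illustrative only and not part of this change:

// driveLattice proposes one block on the given chain and feeds it back into
// the lattice, returning whatever becomes deliverable. Illustrative only.
func driveLattice(lattice *Lattice, chainID uint32) ([]*types.Block, error) {
	b := &types.Block{Position: types.Position{ChainID: chainID}}
	// Fill in acks, parent hash, height, payload and signature.
	if err := lattice.PrepareBlock(b, time.Now().UTC()); err != nil {
		return nil, err
	}
	// Verify hash/signature and lattice constraints; ErrAckingBlockNotExists
	// means the block is cached and retried on later ProcessBlock calls.
	if err := lattice.SanityCheck(b); err != nil {
		return nil, err
	}
	// Insert into the lattice; delivered blocks come out totally ordered
	// with consensus timestamps already assigned.
	_, delivered, err := lattice.ProcessBlock(b)
	if err != nil {
		return nil, err
	}
	return delivered, nil
}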
diff --git a/core/blocklattice_test.go b/core/lattice_test.go
index 72cde7d..2da17e4 100644
--- a/core/blocklattice_test.go
+++ b/core/lattice_test.go
@@ -23,26 +23,106 @@ import (
"testing"
"time"
- "github.com/stretchr/testify/suite"
-
"github.com/dexon-foundation/dexon-consensus-core/common"
"github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
+ "github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa"
"github.com/dexon-foundation/dexon-consensus-core/core/test"
"github.com/dexon-foundation/dexon-consensus-core/core/types"
+ "github.com/stretchr/testify/suite"
)
-type BlockLatticeTest struct {
+// testLatticeMgr wraps compaction chain and lattice.
+type testLatticeMgr struct {
+ lattice *Lattice
+ ccModule *compactionChain
+ app *test.App
+ db blockdb.BlockDatabase
+}
+
+func (mgr *testLatticeMgr) prepareBlock(
+ chainID uint32) (b *types.Block, err error) {
+
+ b = &types.Block{
+ Position: types.Position{
+ ChainID: chainID,
+ }}
+ err = mgr.lattice.PrepareBlock(b, time.Now().UTC())
+ return
+}
+
+// Process describes the usage of Lattice.ProcessBlock.
+func (mgr *testLatticeMgr) processBlock(b *types.Block) (err error) {
+ var (
+ delivered []*types.Block
+ verified []*types.Block
+ pendings = []*types.Block{b}
+ )
+ if err = mgr.lattice.SanityCheck(b); err != nil {
+ if err == ErrAckingBlockNotExists {
+ err = nil
+ }
+ return
+ }
+ for {
+ if len(pendings) == 0 {
+ break
+ }
+ b, pendings = pendings[0], pendings[1:]
+ if verified, delivered, err = mgr.lattice.ProcessBlock(b); err != nil {
+ return
+ }
+ // Deliver blocks.
+ for _, b = range delivered {
+ if err = mgr.ccModule.processBlock(b); err != nil {
+ return
+ }
+ if err = mgr.db.Update(*b); err != nil {
+ return
+ }
+ mgr.app.BlockDelivered(*b)
+ }
+ // Update pending blocks for verified block (pass sanity check).
+ pendings = append(pendings, verified...)
+ }
+ return
+}
+
+type LatticeTestSuite struct {
suite.Suite
}
+func (s *LatticeTestSuite) newTestLatticeMgr(
+ cfg *types.Config) *testLatticeMgr {
+ var req = s.Require()
+ // Setup private key.
+ prvKey, err := ecdsa.NewPrivateKey()
+ req.Nil(err)
+ // Setup blockdb.
+ db, err := blockdb.NewMemBackedBlockDB()
+ req.Nil(err)
+ // Setup application.
+ app := test.NewApp()
+ // Setup lattice.
+ return &testLatticeMgr{
+ ccModule: newCompactionChain(db),
+ app: app,
+ db: db,
+ lattice: NewLattice(
+ cfg,
+ NewAuthenticator(prvKey),
+ app,
+ app,
+ db)}
+}
+
// hashBlock is a helper to hash a block and check if any error.
-func (s *BlockLatticeTest) hashBlock(b *types.Block) {
+func (s *LatticeTestSuite) hashBlock(b *types.Block) {
var err error
b.Hash, err = hashBlock(b)
s.Require().Nil(err)
}
-func (s *BlockLatticeTest) prepareGenesisBlock(
+func (s *LatticeTestSuite) prepareGenesisBlock(
chainID uint32) (b *types.Block) {
b = &types.Block{
@@ -67,7 +147,7 @@ func (s *BlockLatticeTest) prepareGenesisBlock(
// | | |
// 0 0 0 0 (block height)
// 0 1 2 3 (validator)
-func (s *BlockLatticeTest) genTestCase1() (bl *blockLattice) {
+func (s *LatticeTestSuite) genTestCase1() (data *latticeData) {
// Create new reliableBroadcast instance with 4 validators
var (
b *types.Block
@@ -78,18 +158,18 @@ func (s *BlockLatticeTest) genTestCase1() (bl *blockLattice) {
err error
)
- bl = newBlockLattice(chainNum, 2*time.Nanosecond, 1000*time.Second)
+ data = newLatticeData(chainNum, 2*time.Nanosecond, 1000*time.Second)
// Add genesis blocks.
for i := uint32(0); i < chainNum; i++ {
b = s.prepareGenesisBlock(i)
- delivered, err = bl.addBlock(b)
+ delivered, err = data.addBlock(b)
// Genesis blocks are safe to be added to DAG, they acks no one.
req.Len(delivered, 1)
req.Nil(err)
}
// Add block 0-1 which acks 0-0.
- h = bl.chains[0].getBlockByHeight(0).Hash
+ h = data.chains[0].getBlockByHeight(0).Hash
b = &types.Block{
ParentHash: h,
Hash: common.NewRandomHash(),
@@ -101,14 +181,14 @@ func (s *BlockLatticeTest) genTestCase1() (bl *blockLattice) {
Acks: common.NewSortedHashes(common.Hashes{h}),
}
s.hashBlock(b)
- delivered, err = bl.addBlock(b)
+ delivered, err = data.addBlock(b)
req.Len(delivered, 1)
req.Equal(delivered[0].Hash, b.Hash)
req.Nil(err)
- req.NotNil(bl.chains[0].getBlockByHeight(1))
+ req.NotNil(data.chains[0].getBlockByHeight(1))
// Add block 0-2 which acks 0-1 and 1-0.
- h = bl.chains[0].getBlockByHeight(1).Hash
+ h = data.chains[0].getBlockByHeight(1).Hash
b = &types.Block{
ParentHash: h,
Position: types.Position{
@@ -118,18 +198,18 @@ func (s *BlockLatticeTest) genTestCase1() (bl *blockLattice) {
Timestamp: time.Now().UTC(),
Acks: common.NewSortedHashes(common.Hashes{
h,
- bl.chains[1].getBlockByHeight(0).Hash,
+ data.chains[1].getBlockByHeight(0).Hash,
}),
}
s.hashBlock(b)
- delivered, err = bl.addBlock(b)
+ delivered, err = data.addBlock(b)
req.Len(delivered, 1)
req.Equal(delivered[0].Hash, b.Hash)
req.Nil(err)
- req.NotNil(bl.chains[0].getBlockByHeight(2))
+ req.NotNil(data.chains[0].getBlockByHeight(2))
// Add block 0-3 which acks 0-2.
- h = bl.chains[0].getBlockByHeight(2).Hash
+ h = data.chains[0].getBlockByHeight(2).Hash
b = &types.Block{
ParentHash: h,
Hash: common.NewRandomHash(),
@@ -141,14 +221,14 @@ func (s *BlockLatticeTest) genTestCase1() (bl *blockLattice) {
Acks: common.NewSortedHashes(common.Hashes{h}),
}
s.hashBlock(b)
- delivered, err = bl.addBlock(b)
+ delivered, err = data.addBlock(b)
req.Len(delivered, 1)
req.Equal(delivered[0].Hash, b.Hash)
req.Nil(err)
- req.NotNil(bl.chains[0].getBlockByHeight(3))
+ req.NotNil(data.chains[0].getBlockByHeight(3))
// Add block 3-1 which acks 3-0.
- h = bl.chains[3].getBlockByHeight(0).Hash
+ h = data.chains[3].getBlockByHeight(0).Hash
b = &types.Block{
ParentHash: h,
Hash: common.NewRandomHash(),
@@ -160,21 +240,21 @@ func (s *BlockLatticeTest) genTestCase1() (bl *blockLattice) {
Acks: common.NewSortedHashes(common.Hashes{h}),
}
s.hashBlock(b)
- delivered, err = bl.addBlock(b)
+ delivered, err = data.addBlock(b)
req.Len(delivered, 1)
req.Equal(delivered[0].Hash, b.Hash)
req.Nil(err)
- req.NotNil(bl.chains[3].getBlockByHeight(0))
+ req.NotNil(data.chains[3].getBlockByHeight(0))
return
}
-func (s *BlockLatticeTest) TestSanityCheck() {
+func (s *LatticeTestSuite) TestSanityCheckInDataLayer() {
var (
- b *types.Block
- h common.Hash
- bl = s.genTestCase1()
- req = s.Require()
- err error
+ b *types.Block
+ h common.Hash
+ data = s.genTestCase1()
+ req = s.Require()
+ err error
)
// Non-genesis block with no ack, should get error.
@@ -187,12 +267,12 @@ func (s *BlockLatticeTest) TestSanityCheck() {
Acks: common.NewSortedHashes(common.Hashes{}),
}
s.hashBlock(b)
- err = bl.sanityCheck(b)
+ err = data.sanityCheck(b)
req.NotNil(err)
req.Equal(ErrNotAckParent.Error(), err.Error())
// Non-genesis block which acks its parent but the height is invalid.
- h = bl.chains[1].getBlockByHeight(0).Hash
+ h = data.chains[1].getBlockByHeight(0).Hash
b = &types.Block{
ParentHash: h,
Position: types.Position{
@@ -202,12 +282,12 @@ func (s *BlockLatticeTest) TestSanityCheck() {
Acks: common.NewSortedHashes(common.Hashes{h}),
}
s.hashBlock(b)
- err = bl.sanityCheck(b)
+ err = data.sanityCheck(b)
req.NotNil(err)
req.Equal(ErrInvalidBlockHeight.Error(), err.Error())
// Invalid chain ID.
- h = bl.chains[1].getBlockByHeight(0).Hash
+ h = data.chains[1].getBlockByHeight(0).Hash
b = &types.Block{
ParentHash: h,
Position: types.Position{
@@ -217,12 +297,12 @@ func (s *BlockLatticeTest) TestSanityCheck() {
Acks: common.NewSortedHashes(common.Hashes{h}),
}
s.hashBlock(b)
- err = bl.sanityCheck(b)
+ err = data.sanityCheck(b)
req.NotNil(err)
req.Equal(ErrInvalidChainID.Error(), err.Error())
// Fork block.
- h = bl.chains[0].getBlockByHeight(0).Hash
+ h = data.chains[0].getBlockByHeight(0).Hash
b = &types.Block{
ParentHash: h,
Position: types.Position{
@@ -233,12 +313,12 @@ func (s *BlockLatticeTest) TestSanityCheck() {
Timestamp: time.Now().UTC(),
}
s.hashBlock(b)
- err = bl.sanityCheck(b)
+ err = data.sanityCheck(b)
req.NotNil(err)
req.Equal(ErrForkBlock.Error(), err.Error())
// Replicated ack.
- h = bl.chains[0].getBlockByHeight(3).Hash
+ h = data.chains[0].getBlockByHeight(3).Hash
b = &types.Block{
ParentHash: h,
Position: types.Position{
@@ -247,17 +327,17 @@ func (s *BlockLatticeTest) TestSanityCheck() {
},
Acks: common.NewSortedHashes(common.Hashes{
h,
- bl.chains[1].getBlockByHeight(0).Hash,
+ data.chains[1].getBlockByHeight(0).Hash,
}),
Timestamp: time.Now().UTC(),
}
s.hashBlock(b)
- err = bl.sanityCheck(b)
+ err = data.sanityCheck(b)
req.NotNil(err)
req.Equal(ErrDoubleAck.Error(), err.Error())
// Acking block doesn't exists.
- h = bl.chains[1].getBlockByHeight(0).Hash
+ h = data.chains[1].getBlockByHeight(0).Hash
b = &types.Block{
ParentHash: h,
Position: types.Position{
@@ -271,12 +351,12 @@ func (s *BlockLatticeTest) TestSanityCheck() {
Timestamp: time.Now().UTC(),
}
s.hashBlock(b)
- err = bl.sanityCheck(b)
+ err = data.sanityCheck(b)
req.NotNil(err)
req.Equal(err.Error(), ErrAckingBlockNotExists.Error())
// Parent block on different chain.
- h = bl.chains[1].getBlockByHeight(0).Hash
+ h = data.chains[1].getBlockByHeight(0).Hash
b = &types.Block{
ParentHash: h,
Position: types.Position{
@@ -285,17 +365,17 @@ func (s *BlockLatticeTest) TestSanityCheck() {
},
Acks: common.NewSortedHashes(common.Hashes{
h,
- bl.chains[2].getBlockByHeight(0).Hash,
+ data.chains[2].getBlockByHeight(0).Hash,
}),
Timestamp: time.Now().UTC(),
}
s.hashBlock(b)
- err = bl.sanityCheck(b)
+ err = data.sanityCheck(b)
req.NotNil(err)
req.Equal(err.Error(), ErrInvalidParentChain.Error())
// Ack two blocks on the same chain.
- h = bl.chains[2].getBlockByHeight(0).Hash
+ h = data.chains[2].getBlockByHeight(0).Hash
b = &types.Block{
ParentHash: h,
Position: types.Position{
@@ -304,22 +384,22 @@ func (s *BlockLatticeTest) TestSanityCheck() {
},
Acks: common.NewSortedHashes(common.Hashes{
h,
- bl.chains[0].getBlockByHeight(0).Hash,
- bl.chains[0].getBlockByHeight(1).Hash,
+ data.chains[0].getBlockByHeight(0).Hash,
+ data.chains[0].getBlockByHeight(1).Hash,
}),
Timestamp: time.Now().UTC(),
}
s.hashBlock(b)
- err = bl.sanityCheck(b)
+ err = data.sanityCheck(b)
req.NotNil(err)
req.Equal(err.Error(), ErrDuplicatedAckOnOneChain.Error())
// Add block 3-1 which acks 3-0, and violates the reasonable block time interval.
- h = bl.chains[2].getBlockByHeight(0).Hash
+ h = data.chains[2].getBlockByHeight(0).Hash
b = &types.Block{
ParentHash: h,
Hash: common.NewRandomHash(),
- Timestamp: time.Now().UTC().Add(bl.maxBlockTimeInterval),
+ Timestamp: time.Now().UTC().Add(data.maxBlockTimeInterval),
Position: types.Position{
ChainID: 2,
Height: 1,
@@ -327,19 +407,19 @@ func (s *BlockLatticeTest) TestSanityCheck() {
Acks: common.NewSortedHashes(common.Hashes{h}),
}
s.hashBlock(b)
- err = bl.sanityCheck(b)
+ err = data.sanityCheck(b)
req.NotNil(err)
req.Equal(err, ErrIncorrectBlockTime)
// Violate the minimum block time interval.
b.Timestamp =
- bl.chains[2].getBlockByHeight(0).Timestamp.Add(1 * time.Nanosecond)
+ data.chains[2].getBlockByHeight(0).Timestamp.Add(1 * time.Nanosecond)
s.hashBlock(b)
- err = bl.sanityCheck(b)
+ err = data.sanityCheck(b)
req.NotNil(err)
req.Equal(err, ErrIncorrectBlockTime)
// Normal block.
- h = bl.chains[1].getBlockByHeight(0).Hash
+ h = data.chains[1].getBlockByHeight(0).Hash
b = &types.Block{
ParentHash: h,
Position: types.Position{
@@ -350,42 +430,42 @@ func (s *BlockLatticeTest) TestSanityCheck() {
Timestamp: time.Now().UTC(),
}
s.hashBlock(b)
- req.Nil(bl.sanityCheck(b))
+ req.Nil(data.sanityCheck(b))
}
-func (s *BlockLatticeTest) TestAreAllAcksInLattice() {
+func (s *LatticeTestSuite) TestAreAllAcksInLattice() {
var (
- b *types.Block
- bl = s.genTestCase1()
- req = s.Require()
+ b *types.Block
+ data = s.genTestCase1()
+ req = s.Require()
)
// Empty ack should get true, although won't pass sanity check.
b = &types.Block{
Acks: common.NewSortedHashes(common.Hashes{}),
}
- req.True(bl.areAllAcksInLattice(b))
+ req.True(data.areAllAcksInLattice(b))
// Acks blocks in lattice
b = &types.Block{
Acks: common.NewSortedHashes(common.Hashes{
- bl.chains[0].getBlockByHeight(0).Hash,
- bl.chains[0].getBlockByHeight(1).Hash,
+ data.chains[0].getBlockByHeight(0).Hash,
+ data.chains[0].getBlockByHeight(1).Hash,
}),
}
- req.True(bl.areAllAcksInLattice(b))
+ req.True(data.areAllAcksInLattice(b))
// Acks random block hash.
b = &types.Block{
Acks: common.NewSortedHashes(common.Hashes{common.NewRandomHash()}),
}
- req.False(bl.areAllAcksInLattice(b))
+ req.False(data.areAllAcksInLattice(b))
}
-func (s *BlockLatticeTest) TestRandomIntensiveAcking() {
+func (s *LatticeTestSuite) TestRandomIntensiveAcking() {
var (
chainNum uint32 = 19
- bl = newBlockLattice(chainNum, 0, 1000*time.Second)
+ data = newLatticeData(chainNum, 0, 1000*time.Second)
req = s.Require()
delivered []*types.Block
extracted []*types.Block
@@ -396,7 +476,7 @@ func (s *BlockLatticeTest) TestRandomIntensiveAcking() {
// Generate genesis blocks.
for i := uint32(0); i < chainNum; i++ {
b = s.prepareGenesisBlock(i)
- delivered, err = bl.addBlock(b)
+ delivered, err = data.addBlock(b)
req.Len(delivered, 1)
req.Nil(err)
}
@@ -408,28 +488,28 @@ func (s *BlockLatticeTest) TestRandomIntensiveAcking() {
},
Timestamp: time.Now().UTC(),
}
- bl.prepareBlock(b)
+ data.prepareBlock(b)
s.hashBlock(b)
- delivered, err = bl.addBlock(b)
+ delivered, err = data.addBlock(b)
req.Nil(err)
extracted = append(extracted, delivered...)
}
// The len of array extractedBlocks should be about 5000.
req.True(len(extracted) > 4500)
- // The len of bl.blockInfos should be small if deleting mechanism works.
- req.True(len(bl.blockByHash) < 500)
+ // The len of data.blockByHash should be small if the deleting mechanism works.
+ req.True(len(data.blockByHash) < 500)
}
-func (s *BlockLatticeTest) TestRandomlyGeneratedBlocks() {
+func (s *LatticeTestSuite) TestRandomlyGeneratedBlocks() {
var (
- chainNum uint32 = 19
- blockNum = 50
- repeat = 20
- delivered []*types.Block
- err error
- req = s.Require()
- blocklattices []*blockLattice
+ chainNum uint32 = 19
+ blockNum = 50
+ repeat = 20
+ delivered []*types.Block
+ err error
+ req = s.Require()
+ datum []*latticeData
)
// Prepare a randomly generated blocks.
@@ -448,7 +528,7 @@ func (s *BlockLatticeTest) TestRandomlyGeneratedBlocks() {
revealedHashesAsString := map[string]struct{}{}
deliveredHashesAsString := map[string]struct{}{}
for i := 0; i < repeat; i++ {
- bl := newBlockLattice(chainNum, 0, 1000*time.Second)
+ data := newLatticeData(chainNum, 0, 1000*time.Second)
deliveredHashes := common.Hashes{}
revealedHashes := common.Hashes{}
revealer.Reset()
@@ -464,8 +544,8 @@ func (s *BlockLatticeTest) TestRandomlyGeneratedBlocks() {
s.Require().Nil(err)
revealedHashes = append(revealedHashes, b.Hash)
- // Pass blocks to blocklattice.
- delivered, err = bl.addBlock(&b)
+ // Pass blocks to lattice.
+ delivered, err = data.addBlock(&b)
req.Nil(err)
for _, b := range delivered {
deliveredHashes = append(deliveredHashes, b.Hash)
@@ -486,7 +566,7 @@ func (s *BlockLatticeTest) TestRandomlyGeneratedBlocks() {
asString += h.String() + ","
}
revealedHashesAsString[asString] = struct{}{}
- blocklattices = append(blocklattices, bl)
+ datum = append(datum, data)
}
// Make sure concatenated hashes of strongly acked blocks are identical.
req.Len(deliveredHashesAsString, 1)
@@ -496,10 +576,10 @@ func (s *BlockLatticeTest) TestRandomlyGeneratedBlocks() {
}
// Make sure we test for more than 1 revealing sequence.
req.True(len(revealedHashesAsString) > 1)
- // Make sure each blocklattice instance have identical working set.
- req.True(len(blocklattices) >= repeat)
- for i, bI := range blocklattices {
- for j, bJ := range blocklattices {
+ // Make sure each latticeData instance has an identical working set.
+ req.True(len(datum) >= repeat)
+ for i, bI := range datum {
+ for j, bJ := range datum {
if i == j {
continue
}
@@ -522,11 +602,11 @@ func (s *BlockLatticeTest) TestRandomlyGeneratedBlocks() {
}
}
-func (s *BlockLatticeTest) TestPrepareBlock() {
+func (s *LatticeTestSuite) TestPrepareBlock() {
var (
chainNum uint32 = 4
req = s.Require()
- bl = newBlockLattice(chainNum, 0, 3000*time.Second)
+ data = newLatticeData(chainNum, 0, 3000*time.Second)
minInterval = 50 * time.Millisecond
delivered []*types.Block
err error
@@ -539,17 +619,17 @@ func (s *BlockLatticeTest) TestPrepareBlock() {
b20 := s.prepareGenesisBlock(2)
time.Sleep(minInterval)
b30 := s.prepareGenesisBlock(3)
- // Submit these blocks to blocklattice.
- delivered, err = bl.addBlock(b00)
+ // Submit these blocks to lattice.
+ delivered, err = data.addBlock(b00)
req.Len(delivered, 1)
req.Nil(err)
- delivered, err = bl.addBlock(b10)
+ delivered, err = data.addBlock(b10)
req.Len(delivered, 1)
req.Nil(err)
- delivered, err = bl.addBlock(b20)
+ delivered, err = data.addBlock(b20)
req.Len(delivered, 1)
req.Nil(err)
- delivered, err = bl.addBlock(b30)
+ delivered, err = data.addBlock(b30)
req.Len(delivered, 1)
req.Nil(err)
// We should be able to collect all 4 genesis blocks by calling
@@ -560,7 +640,7 @@ func (s *BlockLatticeTest) TestPrepareBlock() {
},
Timestamp: time.Now().UTC(),
}
- bl.prepareBlock(b11)
+ data.prepareBlock(b11)
s.hashBlock(b11)
req.Contains(b11.Acks, b00.Hash)
req.Contains(b11.Acks, b10.Hash)
@@ -568,7 +648,7 @@ func (s *BlockLatticeTest) TestPrepareBlock() {
req.Contains(b11.Acks, b30.Hash)
req.Equal(b11.ParentHash, b10.Hash)
req.Equal(b11.Position.Height, uint64(1))
- delivered, err = bl.addBlock(b11)
+ delivered, err = data.addBlock(b11)
req.Len(delivered, 1)
req.Nil(err)
// Propose/Process a block based on collected info.
@@ -578,7 +658,7 @@ func (s *BlockLatticeTest) TestPrepareBlock() {
},
Timestamp: time.Now().UTC(),
}
- bl.prepareBlock(b12)
+ data.prepareBlock(b12)
s.hashBlock(b12)
// This time we only need to ack b11.
req.Len(b12.Acks, 1)
@@ -592,7 +672,7 @@ func (s *BlockLatticeTest) TestPrepareBlock() {
ChainID: 0,
},
}
- bl.prepareBlock(b01)
+ data.prepareBlock(b01)
s.hashBlock(b01)
req.Len(b01.Acks, 4)
req.Contains(b01.Acks, b00.Hash)
@@ -603,7 +683,7 @@ func (s *BlockLatticeTest) TestPrepareBlock() {
req.Equal(b01.Position.Height, uint64(1))
}
-func (s *BlockLatticeTest) TestCalcPurgeHeight() {
+func (s *LatticeTestSuite) TestCalcPurgeHeight() {
// Test chainStatus.calcPurgeHeight, we don't have
// to prepare blocks to test it.
var req = s.Require()
@@ -632,7 +712,7 @@ func (s *BlockLatticeTest) TestCalcPurgeHeight() {
req.False(ok)
}
-func (s *BlockLatticeTest) TestPurge() {
+func (s *LatticeTestSuite) TestPurge() {
// Make a simplest test case to test chainStatus.purge.
// Make sure status after purge 1 block expected.
b00 := &types.Block{Hash: common.NewRandomHash()}
@@ -651,16 +731,123 @@ func (s *BlockLatticeTest) TestPurge() {
s.Equal(chain.blocks[1].Hash, b02.Hash)
}
-func (s *BlockLatticeTest) TestNextPosition() {
+func (s *LatticeTestSuite) TestNextPosition() {
// Test 'NextPosition' method when lattice is ready.
- bl := s.genTestCase1()
- s.Equal(bl.nextPosition(0), types.Position{ChainID: 0, Height: 4})
+ data := s.genTestCase1()
+ s.Equal(data.nextPosition(0), types.Position{ChainID: 0, Height: 4})
// Test 'NextPosition' method when lattice is empty.
- bl = newBlockLattice(4, 0, 1000*time.Second)
- s.Equal(bl.nextPosition(0), types.Position{ChainID: 0, Height: 0})
+ data = newLatticeData(4, 0, 1000*time.Second)
+ s.Equal(data.nextPosition(0), types.Position{ChainID: 0, Height: 0})
+}
+
+func (s *LatticeTestSuite) TestBasicUsage() {
+ // One Lattice prepares blocks on randomly selected chains and processes
+ // them. The generated blocks are kept in a buffer and then processed by
+ // other Lattice instances in random order.
+ var (
+ blockNum = 100
+ chainNum = uint32(19)
+ otherLatticeNum = 20
+ req = s.Require()
+ err error
+ cfg = types.Config{
+ NumChains: chainNum,
+ PhiRatio: float32(2) / float32(3),
+ K: 0,
+ MinBlockInterval: 0,
+ MaxBlockInterval: 3000 * time.Second,
+ }
+ master = s.newTestLatticeMgr(&cfg)
+ apps = []*test.App{master.app}
+ revealSeq = map[string]struct{}{}
+ )
+ // Master-lattice generates blocks.
+ for i := uint32(0); i < chainNum; i++ {
+ // Genesis blocks should be delivered before all other blocks,
+ // or the consensus time would be wrong.
+ b, err := master.prepareBlock(i)
+ req.NotNil(b)
+ req.Nil(err)
+ // We've ignored the error for "acking blocks don't exist".
+ req.Nil(master.processBlock(b))
+ }
+ for i := 0; i < (blockNum - int(chainNum)); i++ {
+ b, err := master.prepareBlock(uint32(rand.Intn(int(chainNum))))
+ req.NotNil(b)
+ req.Nil(err)
+ // We've ignored the error for "acking blocks don't exist".
+ req.Nil(master.processBlock(b))
+ }
+ // Now we have some blocks, replay them on different lattices.
+ iter, err := master.db.GetAll()
+ req.Nil(err)
+ revealer, err := test.NewRandomRevealer(iter)
+ req.Nil(err)
+ for i := 0; i < otherLatticeNum; i++ {
+ revealer.Reset()
+ revealed := ""
+ other := s.newTestLatticeMgr(&cfg)
+ for {
+ b, err := revealer.Next()
+ if err != nil {
+ if err == blockdb.ErrIterationFinished {
+ err = nil
+ break
+ }
+ }
+ req.Nil(err)
+ req.Nil(other.processBlock(&b))
+ revealed += b.Hash.String() + ","
+ revealSeq[revealed] = struct{}{}
+ }
+ apps = append(apps, other.app)
+ }
+ // Make sure there is more than one revealing sequence.
+ req.True(len(revealSeq) > 1)
+ // Make sure nothing goes wrong.
+ for i, app := range apps {
+ req.Nil(app.Verify())
+ for j, otherApp := range apps {
+ if i >= j {
+ continue
+ }
+ req.Nil(app.Compare(otherApp))
+ }
+ }
+}
+
+func (s *LatticeTestSuite) TestSanityCheck() {
+ // This sanity check focuses on hash/signature part.
+ var (
+ chainNum = uint32(19)
+ cfg = types.Config{
+ NumChains: chainNum,
+ PhiRatio: float32(2) / float32(3),
+ K: 0,
+ MinBlockInterval: 0,
+ MaxBlockInterval: 3000 * time.Second,
+ }
+ lattice = s.newTestLatticeMgr(&cfg).lattice
+ auth = lattice.authModule // Steal auth module from lattice, :(
+ req = s.Require()
+ err error
+ )
+ // A block properly signed should pass sanity check.
+ b := &types.Block{
+ Position: types.Position{ChainID: 0},
+ }
+ req.NoError(auth.SignBlock(b))
+ req.NoError(lattice.SanityCheck(b))
+ // A block with incorrect signature should not pass sanity check.
+ b.Signature, err = auth.prvKey.Sign(common.NewRandomHash())
+ req.NoError(err)
+ req.Equal(lattice.SanityCheck(b), ErrIncorrectSignature)
+ // A block with incorrect hash should not pass sanity check.
+ b.Hash = common.NewRandomHash()
+ req.Equal(lattice.SanityCheck(b), ErrIncorrectHash)
}
-func TestBlockLattice(t *testing.T) {
- suite.Run(t, new(BlockLatticeTest))
+func TestLattice(t *testing.T) {
+ suite.Run(t, new(LatticeTestSuite))
}
diff --git a/core/shard.go b/core/shard.go
deleted file mode 100644
index 7085872..0000000
--- a/core/shard.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2018 The dexon-consensus-core Authors
-// This file is part of the dexon-consensus-core library.
-//
-// The dexon-consensus-core library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus-core library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus-core library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "sync"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus-core/common"
- "github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
- "github.com/dexon-foundation/dexon-consensus-core/core/crypto"
- "github.com/dexon-foundation/dexon-consensus-core/core/types"
-)
-
-// Shard represents a unit to produce a global ordering from multiple chains.
-type Shard struct {
- lock sync.RWMutex
- authModule *Authenticator
- chainNum uint32
- app Application
- debug Debug
- db blockdb.BlockDatabase
- pool blockPool
- lattice *blockLattice
- toModule *totalOrdering
- ctModule *consensusTimestamp
-}
-
-// NewShard constructs an Shard instance.
-func NewShard(
- cfg *types.Config,
- authModule *Authenticator,
- app Application,
- debug Debug,
- db blockdb.BlockDatabase) (s *Shard) {
- lattice := newBlockLattice(
- cfg.NumChains,
- cfg.MinBlockInterval,
- cfg.MaxBlockInterval)
- s = &Shard{
- authModule: authModule,
- chainNum: cfg.NumChains,
- app: app,
- debug: debug,
- db: db,
- pool: newBlockPool(cfg.NumChains),
- lattice: lattice,
- toModule: newTotalOrdering(
- uint64(cfg.K),
- uint64(float32(cfg.NumChains-1)*cfg.PhiRatio+1),
- cfg.NumChains),
- ctModule: newConsensusTimestamp(),
- }
- return
-}
-
-// PrepareBlock setup block's field based on current lattice status.
-func (s *Shard) PrepareBlock(
- b *types.Block, proposeTime time.Time) (err error) {
-
- s.lock.RLock()
- defer s.lock.RUnlock()
-
- s.lattice.prepareBlock(b)
- // TODO(mission): the proposeTime might be earlier than tip block of
- // that chain. We should let blockLattice suggest the time.
- b.Timestamp = proposeTime
- b.Payload, b.Witness.Data = s.app.PrepareBlock(b.Position)
- if err = s.authModule.SignBlock(b); err != nil {
- return
- }
- return
-}
-
-// SanityCheck check if a block is valid based on current lattice status.
-//
-// If some acking blocks don't exists, Shard would help to cache this block
-// and retry when lattice updated in Shard.ProcessBlock.
-func (s *Shard) SanityCheck(b *types.Block) (err error) {
- // Check the hash of block.
- hash, err := hashBlock(b)
- if err != nil || hash != b.Hash {
- err = ErrIncorrectHash
- return
- }
- // Check the signer.
- pubKey, err := crypto.SigToPub(b.Hash, b.Signature)
- if err != nil {
- return
- }
- if !b.ProposerID.Equal(crypto.Keccak256Hash(pubKey.Bytes())) {
- err = ErrIncorrectSignature
- return
- }
- s.lock.RLock()
- defer s.lock.RUnlock()
- if err = s.lattice.sanityCheck(b); err != nil {
- // Add to block pool, once the lattice updated,
- // would be checked again.
- if err == ErrAckingBlockNotExists {
- s.pool.addBlock(b)
- }
- return
- }
- return
-}
-
-// ProcessBlock adds a block into lattice, and deliver ordered blocks.
-// If any block pass sanity check after this block add into lattice, they
-// would be returned, too.
-//
-// NOTE: assume the block passed sanity check.
-func (s *Shard) ProcessBlock(
- input *types.Block) (verified, delivered []*types.Block, err error) {
-
- var (
- tip, b *types.Block
- toDelivered []*types.Block
- inLattice []*types.Block
- earlyDelivered bool
- )
- s.lock.Lock()
- defer s.lock.Unlock()
- if inLattice, err = s.lattice.addBlock(input); err != nil {
- return
- }
- if err = s.db.Put(*input); err != nil {
- return
- }
- // TODO(mission): remove this hack, BA related stuffs should not
- // be done here.
- if s.debug != nil {
- s.debug.StronglyAcked(input.Hash)
- s.debug.BlockConfirmed(input.Hash)
- }
- // Purge blocks in pool with the same chainID and lower height.
- s.pool.purgeBlocks(input.Position.ChainID, input.Position.Height)
- // Replay tips in pool to check their validity.
- for i := uint32(0); i < s.chainNum; i++ {
- if tip = s.pool.tip(i); tip == nil {
- continue
- }
- err = s.lattice.sanityCheck(tip)
- if err == nil {
- verified = append(verified, tip)
- }
- if err == ErrAckingBlockNotExists {
- continue
- }
- s.pool.removeTip(i)
- }
- // Perform total ordering for each block added to lattice.
- for _, b = range inLattice {
- toDelivered, earlyDelivered, err = s.toModule.processBlock(b)
- if err != nil {
- return
- }
- if len(toDelivered) == 0 {
- continue
- }
- hashes := make(common.Hashes, len(toDelivered))
- for idx := range toDelivered {
- hashes[idx] = toDelivered[idx].Hash
- }
- if s.debug != nil {
- s.debug.TotalOrderingDelivered(hashes, earlyDelivered)
- }
- // Perform timestamp generation.
- if err = s.ctModule.processBlocks(toDelivered); err != nil {
- return
- }
- delivered = append(delivered, toDelivered...)
- }
- return
-}
-
-// NextPosition returns expected position of incoming block for that chain.
-func (s *Shard) NextPosition(chainID uint32) types.Position {
- s.lock.RLock()
- defer s.lock.RUnlock()
-
- return s.lattice.nextPosition(chainID)
-}
diff --git a/core/shard_test.go b/core/shard_test.go
deleted file mode 100644
index 84f230b..0000000
--- a/core/shard_test.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2018 The dexon-consensus-core Authors
-// This file is part of the dexon-consensus-core library.
-//
-// The dexon-consensus-core library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus-core library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus-core library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "math/rand"
- "testing"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus-core/common"
- "github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
- "github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa"
- "github.com/dexon-foundation/dexon-consensus-core/core/test"
- "github.com/dexon-foundation/dexon-consensus-core/core/types"
- "github.com/stretchr/testify/suite"
-)
-
-// testShardMgr wraps compaction chain and shard.
-type testShardMgr struct {
- shard *Shard
- ccModule *compactionChain
- app *test.App
- db blockdb.BlockDatabase
-}
-
-func (mgr *testShardMgr) prepareBlock(
- chainID uint32) (b *types.Block, err error) {
-
- b = &types.Block{
- Position: types.Position{
- ChainID: chainID,
- }}
- err = mgr.shard.PrepareBlock(b, time.Now().UTC())
- return
-}
-
-// Process describes the usage of Shard.ProcessBlock.
-func (mgr *testShardMgr) processBlock(b *types.Block) (err error) {
- var (
- delivered []*types.Block
- verified []*types.Block
- pendings = []*types.Block{b}
- )
- if err = mgr.shard.SanityCheck(b); err != nil {
- if err == ErrAckingBlockNotExists {
- err = nil
- }
- return
- }
- for {
- if len(pendings) == 0 {
- break
- }
- b, pendings = pendings[0], pendings[1:]
- if verified, delivered, err = mgr.shard.ProcessBlock(b); err != nil {
- return
- }
- // Deliver blocks.
- for _, b = range delivered {
- if err = mgr.ccModule.processBlock(b); err != nil {
- return
- }
- if err = mgr.db.Update(*b); err != nil {
- return
- }
- mgr.app.BlockDelivered(*b)
- }
- // Update pending blocks for verified block (pass sanity check).
- pendings = append(pendings, verified...)
- }
- return
-}
-
-type ShardTestSuite struct {
- suite.Suite
-}
-
-func (s *ShardTestSuite) newTestShardMgr(cfg *types.Config) *testShardMgr {
- var req = s.Require()
- // Setup private key.
- prvKey, err := ecdsa.NewPrivateKey()
- req.Nil(err)
- // Setup blockdb.
- db, err := blockdb.NewMemBackedBlockDB()
- req.Nil(err)
- // Setup application.
- app := test.NewApp()
- // Setup shard.
- return &testShardMgr{
- ccModule: newCompactionChain(db),
- app: app,
- db: db,
- shard: NewShard(
- cfg,
- NewAuthenticator(prvKey),
- app,
- app,
- db)}
-}
-
-func (s *ShardTestSuite) TestBasicUsage() {
- // One shard prepare blocks on chains randomly selected each time
- // and process it. Those generated blocks and kept into a buffer, and
- // process by other shard instances with random order.
- var (
- blockNum = 100
- chainNum = uint32(19)
- otherShardNum = 20
- req = s.Require()
- err error
- cfg = types.Config{
- NumChains: chainNum,
- PhiRatio: float32(2) / float32(3),
- K: 0,
- MinBlockInterval: 0,
- MaxBlockInterval: 3000 * time.Second,
- }
- master = s.newTestShardMgr(&cfg)
- apps = []*test.App{master.app}
- revealSeq = map[string]struct{}{}
- )
- // Master-shard generates blocks.
- for i := uint32(0); i < chainNum; i++ {
- // Produce genesis blocks should be delivered before all other blocks,
- // or the consensus time would be wrong.
- b, err := master.prepareBlock(i)
- req.NotNil(b)
- req.Nil(err)
- // We've ignored the error for "acking blocks don't exist".
- req.Nil(master.processBlock(b))
- }
- for i := 0; i < (blockNum - int(chainNum)); i++ {
- b, err := master.prepareBlock(uint32(rand.Intn(int(chainNum))))
- req.NotNil(b)
- req.Nil(err)
- // We've ignored the error for "acking blocks don't exist".
- req.Nil(master.processBlock(b))
- }
- // Now we have some blocks, replay them on different shards.
- iter, err := master.db.GetAll()
- req.Nil(err)
- revealer, err := test.NewRandomRevealer(iter)
- req.Nil(err)
- for i := 0; i < otherShardNum; i++ {
- revealer.Reset()
- revealed := ""
- other := s.newTestShardMgr(&cfg)
- for {
- b, err := revealer.Next()
- if err != nil {
- if err == blockdb.ErrIterationFinished {
- err = nil
- break
- }
- }
- req.Nil(err)
- req.Nil(other.processBlock(&b))
- revealed += b.Hash.String() + ","
- revealSeq[revealed] = struct{}{}
- }
- apps = append(apps, other.app)
- }
- // Make sure not only one revealing sequence.
- req.True(len(revealSeq) > 1)
- // Make sure nothing goes wrong.
- for i, app := range apps {
- req.Nil(app.Verify())
- for j, otherApp := range apps {
- if i >= j {
- continue
- }
- req.Nil(app.Compare(otherApp))
- }
- }
-}
-
-func (s *ShardTestSuite) TestSanityCheck() {
- // This sanity check focuses on hash/signature part.
- var (
- chainNum = uint32(19)
- cfg = types.Config{
- NumChains: chainNum,
- PhiRatio: float32(2) / float32(3),
- K: 0,
- MinBlockInterval: 0,
- MaxBlockInterval: 3000 * time.Second,
- }
- shard = s.newTestShardMgr(&cfg).shard
- auth = shard.authModule // Steal auth module from shard, :(
- req = s.Require()
- err error
- )
- // A block properly signed should pass sanity check.
- b := &types.Block{
- Position: types.Position{ChainID: 0},
- }
- req.NoError(auth.SignBlock(b))
- req.NoError(shard.SanityCheck(b))
- // A block with incorrect signature should not pass sanity check.
- b.Signature, err = auth.prvKey.Sign(common.NewRandomHash())
- req.NoError(err)
- req.Equal(shard.SanityCheck(b), ErrIncorrectSignature)
- // A block with incorrect hash should not pass sanity check.
- b.Hash = common.NewRandomHash()
- req.Equal(shard.SanityCheck(b), ErrIncorrectHash)
-}
-
-func TestShard(t *testing.T) {
- suite.Run(t, new(ShardTestSuite))
-}