author    Mission Liao <mission.liao@dexon.org>  2018-10-02 15:45:29 +0800
committer GitHub <noreply@github.com>            2018-10-02 15:45:29 +0800
commit    fb27745f2ca4eaf66f53f48740cbd148ee15bbdf (patch)
tree      1b706c5a93a4f09f27d2bc729cf55c5d4f0b5aaa
parent    d7f6db871180b53548aed6a5450e1c5879c90b04 (diff)
core: replace reliable-broadcast with shard (#159)
-rw-r--r--  core/blocklattice.go             |  39
-rw-r--r--  core/blocklattice_test.go        |  42
-rw-r--r--  core/consensus.go                | 221
-rw-r--r--  core/consensus_test.go           | 155
-rw-r--r--  core/interfaces.go               |  14
-rw-r--r--  core/nodeset-cache.go            |   2
-rw-r--r--  core/nodeset-cache_test.go       |   8
-rw-r--r--  core/reliable-broadcast.go       | 436
-rw-r--r--  core/reliable-broadcast_test.go  | 702
-rw-r--r--  core/shard.go                    |  18
-rw-r--r--  core/shard_test.go               |  41
-rw-r--r--  core/test/governance.go          |  18
-rw-r--r--  core/ticker.go                   |   6
-rw-r--r--  integration_test/node.go         |   6
-rw-r--r--  integration_test/utils.go        |   2
-rw-r--r--  simulation/governance.go         |  12
16 files changed, 264 insertions(+), 1458 deletions(-)
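The hunks below replace the reliableBroadcast/totalOrdering/consensusTimestamp trio inside Consensus with a single shard module. Inferred purely from the call sites in core/consensus.go in this diff (core/shard.go itself is not shown here), the surface Consensus now relies on looks roughly like the sketch below; the signatures are assumptions reconstructed from usage, not copied from core/shard.go.

package core

import (
	"time"

	"github.com/dexon-foundation/dexon-consensus-core/core/types"
)

// shardSurface is a sketch of the Shard methods exercised by core/consensus.go
// after this commit; the concrete instance is built in NewConsensus via
// NewShard(config, authModule, nbModule, nbModule, db).
type shardSurface interface {
	// SanityCheck validates a block before it is handed to the per-chain
	// Byzantine Agreement module (see preProcessBlock).
	SanityCheck(b *types.Block) error
	// ProcessBlock returns blocks that passed sanity check (fed back to the
	// BA modules) and blocks whose total order is decided (fed to the
	// compaction chain, the database and the application).
	ProcessBlock(b *types.Block) (verified, delivered []*types.Block, err error)
	// PrepareBlock fills in lattice-related fields (position, parent hash,
	// acks, payload, timestamp) of a block this node is about to propose.
	PrepareBlock(b *types.Block, proposeTime time.Time) error
	// NextPosition reports the position the next block on a chain would
	// occupy; used when restarting agreement on that chain.
	NextPosition(chainID uint32) types.Position
}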
diff --git a/core/blocklattice.go b/core/blocklattice.go
index 3fa0736..59adaf2 100644
--- a/core/blocklattice.go
+++ b/core/blocklattice.go
@@ -19,6 +19,7 @@ package core
import (
"fmt"
+ "time"
"github.com/dexon-foundation/dexon-consensus-core/common"
"github.com/dexon-foundation/dexon-consensus-core/core/types"
@@ -30,6 +31,15 @@ var (
ErrInvalidParentChain = fmt.Errorf("invalid parent chain")
ErrDuplicatedAckOnOneChain = fmt.Errorf("duplicated ack on one chain")
ErrChainStatusCorrupt = fmt.Errorf("chain status corrupt")
+ ErrInvalidChainID = fmt.Errorf("invalid chain id")
+ ErrInvalidProposerID = fmt.Errorf("invalid proposer id")
+ ErrInvalidTimestamp = fmt.Errorf("invalid timestamp")
+ ErrForkBlock = fmt.Errorf("fork block")
+ ErrNotAckParent = fmt.Errorf("not ack parent")
+ ErrDoubleAck = fmt.Errorf("double ack")
+ ErrInvalidBlockHeight = fmt.Errorf("invalid block height")
+ ErrAlreadyInLattice = fmt.Errorf("block already in lattice")
+ ErrIncorrectBlockTime = fmt.Errorf("block timestamp is incorrect")
)
// blockLattice is a module for storing blocklattice.
@@ -40,8 +50,10 @@ type blockLattice struct {
// blockByHash stores blocks, indexed by block hash.
blockByHash map[common.Hash]*types.Block
- // shardID caches which shard I belongs to.
- shardID uint32
+ // Block interval specifies reasonable time difference between
+ // parent/child blocks.
+ minBlockTimeInterval time.Duration
+ maxBlockTimeInterval time.Duration
}
type chainStatus struct {
@@ -132,7 +144,7 @@ func (s *chainStatus) purge() (purged common.Hashes) {
}
// nextPosition returns a valid position for a new block in this chain.
-func (s *chainStatus) nextPosition(shardID uint32) types.Position {
+func (s *chainStatus) nextPosition() types.Position {
return types.Position{
ChainID: s.ID,
Height: s.minHeight + uint64(len(s.blocks)),
@@ -140,11 +152,15 @@ func (s *chainStatus) nextPosition(shardID uint32) types.Position {
}
// newBlockLattice creates a new blockLattice struct.
-func newBlockLattice(shardID, chainNum uint32) (bl *blockLattice) {
+func newBlockLattice(
+ chainNum uint32,
+ minBlockTimeInterval time.Duration,
+ maxBlockTimeInterval time.Duration) (bl *blockLattice) {
bl = &blockLattice{
- shardID: shardID,
- chains: make([]*chainStatus, chainNum),
- blockByHash: make(map[common.Hash]*types.Block),
+ chains: make([]*chainStatus, chainNum),
+ blockByHash: make(map[common.Hash]*types.Block),
+ minBlockTimeInterval: minBlockTimeInterval,
+ maxBlockTimeInterval: maxBlockTimeInterval,
}
for i := range bl.chains {
bl.chains[i] = &chainStatus{
@@ -213,6 +229,12 @@ func (bl *blockLattice) sanityCheck(b *types.Block) error {
if !b.Timestamp.After(bParent.Timestamp) {
return ErrInvalidTimestamp
}
+ // Check if its timestamp is in the expected range.
+ if b.Timestamp.Before(bParent.Timestamp.Add(bl.minBlockTimeInterval)) ||
+ b.Timestamp.After(bParent.Timestamp.Add(bl.maxBlockTimeInterval)) {
+
+ return ErrIncorrectBlockTime
+ }
}
return nil
}
@@ -248,6 +270,7 @@ func (bl *blockLattice) addBlock(
bAck *types.Block
updated bool
)
+ // TODO(mission): sanity check twice, might hurt performance.
// If a block does not pass sanity check, report error.
if err = bl.sanityCheck(block); err != nil {
return
@@ -355,5 +378,5 @@ func (bl *blockLattice) prepareBlock(block *types.Block) {
// TODO(mission): make more abstraction for this method.
// nextPosition returns the next position for the chain.
func (bl *blockLattice) nextPosition(chainID uint32) types.Position {
- return bl.chains[chainID].nextPosition(bl.shardID)
+ return bl.chains[chainID].nextPosition()
}
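The timestamp rule added to sanityCheck above says a child block must be at least minBlockTimeInterval and at most maxBlockTimeInterval later than its parent. A standalone sketch of just that comparison (plain Go; names outside the diff are illustrative only):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errIncorrectBlockTime = errors.New("block timestamp is incorrect")

// checkBlockTime mirrors the interval check added to blockLattice.sanityCheck:
// the child timestamp must lie within [parent+min, parent+max].
func checkBlockTime(parent, child time.Time, min, max time.Duration) error {
	if child.Before(parent.Add(min)) || child.After(parent.Add(max)) {
		return errIncorrectBlockTime
	}
	return nil
}

func main() {
	parent := time.Now().UTC()
	min, max := 50*time.Millisecond, 10*time.Second
	fmt.Println(checkBlockTime(parent, parent.Add(time.Second), min, max))     // in range: <nil>
	fmt.Println(checkBlockTime(parent, parent.Add(time.Nanosecond), min, max)) // too early
	fmt.Println(checkBlockTime(parent, parent.Add(time.Minute), min, max))     // too late
}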
diff --git a/core/blocklattice_test.go b/core/blocklattice_test.go
index 76e5129..72cde7d 100644
--- a/core/blocklattice_test.go
+++ b/core/blocklattice_test.go
@@ -78,7 +78,7 @@ func (s *BlockLatticeTest) genTestCase1() (bl *blockLattice) {
err error
)
- bl = newBlockLattice(0, chainNum)
+ bl = newBlockLattice(chainNum, 2*time.Nanosecond, 1000*time.Second)
// Add genesis blocks.
for i := uint32(0); i < chainNum; i++ {
b = s.prepareGenesisBlock(i)
@@ -314,6 +314,30 @@ func (s *BlockLatticeTest) TestSanityCheck() {
req.NotNil(err)
req.Equal(err.Error(), ErrDuplicatedAckOnOneChain.Error())
+ // Add block 3-1 which acks 3-0 and violates the reasonable block time interval.
+ h = bl.chains[2].getBlockByHeight(0).Hash
+ b = &types.Block{
+ ParentHash: h,
+ Hash: common.NewRandomHash(),
+ Timestamp: time.Now().UTC().Add(bl.maxBlockTimeInterval),
+ Position: types.Position{
+ ChainID: 2,
+ Height: 1,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{h}),
+ }
+ s.hashBlock(b)
+ err = bl.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(err, ErrIncorrectBlockTime)
+ // Violate the minimum block time interval.
+ b.Timestamp =
+ bl.chains[2].getBlockByHeight(0).Timestamp.Add(1 * time.Nanosecond)
+ s.hashBlock(b)
+ err = bl.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(err, ErrIncorrectBlockTime)
+
// Normal block.
h = bl.chains[1].getBlockByHeight(0).Hash
b = &types.Block{
@@ -361,7 +385,7 @@ func (s *BlockLatticeTest) TestAreAllAcksInLattice() {
func (s *BlockLatticeTest) TestRandomIntensiveAcking() {
var (
chainNum uint32 = 19
- bl = newBlockLattice(0, chainNum)
+ bl = newBlockLattice(chainNum, 0, 1000*time.Second)
req = s.Require()
delivered []*types.Block
extracted []*types.Block
@@ -424,7 +448,7 @@ func (s *BlockLatticeTest) TestRandomlyGeneratedBlocks() {
revealedHashesAsString := map[string]struct{}{}
deliveredHashesAsString := map[string]struct{}{}
for i := 0; i < repeat; i++ {
- bl := newBlockLattice(0, chainNum)
+ bl := newBlockLattice(chainNum, 0, 1000*time.Second)
deliveredHashes := common.Hashes{}
revealedHashes := common.Hashes{}
revealer.Reset()
@@ -502,7 +526,7 @@ func (s *BlockLatticeTest) TestPrepareBlock() {
var (
chainNum uint32 = 4
req = s.Require()
- bl = newBlockLattice(0, chainNum)
+ bl = newBlockLattice(chainNum, 0, 3000*time.Second)
minInterval = 50 * time.Millisecond
delivered []*types.Block
err error
@@ -627,6 +651,16 @@ func (s *BlockLatticeTest) TestPurge() {
s.Equal(chain.blocks[1].Hash, b02.Hash)
}
+func (s *BlockLatticeTest) TestNextPosition() {
+ // Test 'NextPosition' method when lattice is ready.
+ bl := s.genTestCase1()
+ s.Equal(bl.nextPosition(0), types.Position{ChainID: 0, Height: 4})
+
+ // Test 'NextPosition' method when lattice is empty.
+ bl = newBlockLattice(4, 0, 1000*time.Second)
+ s.Equal(bl.nextPosition(0), types.Position{ChainID: 0, Height: 0})
+}
+
func TestBlockLattice(t *testing.T) {
suite.Run(t, new(BlockLatticeTest))
}
diff --git a/core/consensus.go b/core/consensus.go
index a1642df..e7b5ec7 100644
--- a/core/consensus.go
+++ b/core/consensus.go
@@ -30,16 +30,6 @@ import (
"github.com/dexon-foundation/dexon-consensus-core/core/types"
)
-// ErrMissingBlockInfo would be reported if some information is missing when
-// calling PrepareBlock. It implements error interface.
-type ErrMissingBlockInfo struct {
- MissingField string
-}
-
-func (e *ErrMissingBlockInfo) Error() string {
- return "missing " + e.MissingField + " in block"
-}
-
// Errors for consensus core.
var (
ErrProposerNotInNodeSet = fmt.Errorf(
@@ -54,10 +44,6 @@ var (
"unknown block is proposed")
ErrUnknownBlockConfirmed = fmt.Errorf(
"unknown block is confirmed")
- ErrIncorrectBlockPosition = fmt.Errorf(
- "position of block is incorrect")
- ErrIncorrectBlockTime = fmt.Errorf(
- "block timestampe is incorrect")
)
// consensusBAReceiver implements agreementReceiver.
@@ -182,11 +168,9 @@ type Consensus struct {
dkgReady *sync.Cond
cfgModule *configurationChain
- // Dexon consensus modules.
- rbModule *reliableBroadcast
- toModule *totalOrdering
- ctModule *consensusTimestamp
- ccModule *compactionChain
+ // Dexon consensus v1's modules.
+ shardModule *Shard
+ ccModule *compactionChain
// Interfaces.
db blockdb.BlockDatabase
@@ -212,33 +196,29 @@ func NewConsensus(
// TODO(w): load latest blockHeight from DB, and use config at that height.
var round uint64
- config := gov.GetConfiguration(round)
+ config := gov.Configuration(round)
// TODO(w): notarySet is different for each chain, need to write a
// GetNotarySetForChain(nodeSet, shardID, chainID, crs) function to get the
// correct notary set for a given chain.
nodeSetCache := NewNodeSetCache(gov)
- crs := gov.GetCRS(round)
+ crs := gov.CRS(round)
// Setup acking by information returned from Governance.
nodes, err := nodeSetCache.GetNodeSet(0)
if err != nil {
panic(err)
}
- rb := newReliableBroadcast()
- rb.setChainNum(config.NumChains)
- for nID := range nodes.IDs {
- rb.addNode(nID)
- }
// Setup context.
ctx, ctxCancel := context.WithCancel(context.Background())
-
- // Setup sequencer by information returned from Governace.
- to := newTotalOrdering(
- uint64(config.K),
- uint64(float32(len(nodes.IDs)-1)*config.PhiRatio+1),
- config.NumChains)
-
- ID := types.NewNodeID(prv.PublicKey())
+ // Setup auth module.
authModule := NewAuthenticator(prv)
+ // Check if the application implements the Debug interface.
+ debugApp, _ := app.(Debug)
+ // Setup nonblocking module.
+ nbModule := newNonBlocking(app, debugApp)
+ // Init shard.
+ shardModule := NewShard(config, authModule, nbModule, nbModule, db)
+ // Init configuration chain.
+ ID := types.NewNodeID(prv.PublicKey())
cfgModule := newConfigurationChain(
ID,
&consensusDKGReceiver{
@@ -252,17 +232,13 @@ func NewConsensus(
// Register DKG for the initial round. This is a temporary function call for
// simulation.
cfgModule.registerDKG(0, config.NumDKGSet/3)
-
- // Check if the application implement Debug interface.
- debug, _ := app.(Debug)
+ // Construct Consensus instance.
con := &Consensus{
ID: ID,
currentConfig: config,
- rbModule: rb,
- toModule: to,
- ctModule: newConsensusTimestamp(),
ccModule: newCompactionChain(db),
- nbModule: newNonBlocking(app, debug),
+ shardModule: shardModule,
+ nbModule: nbModule,
gov: gov,
db: db,
network: network,
@@ -356,14 +332,10 @@ BALoop:
if err != nil {
panic(err)
}
- nIDs = nodes.GetSubSet(con.gov.GetConfiguration(con.round).NumNotarySet,
- types.NewNotarySetTarget(con.gov.GetCRS(con.round), chainID))
- }
- aID := types.Position{
- ChainID: chainID,
- Height: con.rbModule.nextHeight(chainID),
+ nIDs = nodes.GetSubSet(con.gov.Configuration(con.round).NumNotarySet,
+ types.NewNotarySetTarget(con.gov.CRS(con.round), chainID))
}
- agreement.restart(nIDs, aID)
+ agreement.restart(nIDs, con.shardModule.NextPosition(chainID))
default:
}
err := agreement.nextState()
@@ -406,7 +378,7 @@ func (con *Consensus) runDKGTSIG() {
}
hash := HashConfigurationBlock(
nodes.IDs,
- con.gov.GetConfiguration(round),
+ con.gov.Configuration(round),
common.Hash{},
con.cfgModule.prevHash)
psig, err := con.cfgModule.preparePartialSignature(
@@ -438,7 +410,7 @@ func (con *Consensus) runCRS() {
<-ticker.Tick()
// Start running next round CRS.
psig, err := con.cfgModule.preparePartialSignature(
- con.round, con.gov.GetCRS(con.round), types.TSigCRS)
+ con.round, con.gov.CRS(con.round), types.TSigCRS)
if err != nil {
log.Println(err)
} else if err = con.authModule.SignDKGPartialSignature(psig); err != nil {
@@ -447,7 +419,7 @@ func (con *Consensus) runCRS() {
log.Println(err)
} else {
con.network.BroadcastDKGPartialSignature(psig)
- crs, err := con.cfgModule.runCRSTSig(con.round, con.gov.GetCRS(con.round))
+ crs, err := con.cfgModule.runCRSTSig(con.round, con.gov.CRS(con.round))
if err != nil {
log.Println(err)
} else {
@@ -458,7 +430,7 @@ func (con *Consensus) runCRS() {
<-ticker.Tick()
// Change round.
con.round++
- con.currentConfig = con.gov.GetConfiguration(con.round)
+ con.currentConfig = con.gov.Configuration(con.round)
func() {
con.dkgReady.L.Lock()
defer con.dkgReady.L.Unlock()
@@ -506,23 +478,14 @@ func (con *Consensus) processMsg(msgChan <-chan interface{}) {
func (con *Consensus) proposeBlock(chainID uint32) *types.Block {
block := &types.Block{
- ProposerID: con.ID,
Position: types.Position{
ChainID: chainID,
- Height: con.rbModule.nextHeight(chainID),
},
}
if err := con.prepareBlock(block, time.Now().UTC()); err != nil {
log.Println(err)
return nil
}
- // TODO(mission): decide CRS by block's round, which could be determined by
- // block's info (ex. position, timestamp).
- if err := con.authModule.SignCRS(
- block, con.gov.GetCRS(0)); err != nil {
- log.Println(err)
- return nil
- }
return block
}
@@ -533,43 +496,12 @@ func (con *Consensus) ProcessVote(vote *types.Vote) (err error) {
return err
}
-// sanityCheck checks if the block is a valid block
-func (con *Consensus) sanityCheck(b *types.Block) (err error) {
- // Check block.Position.
- if b.Position.ChainID >= con.rbModule.chainNum() {
- return ErrIncorrectBlockPosition
- }
- // Check the timestamp of block.
- if !b.IsGenesis() {
- chainTime := con.rbModule.chainTime(b.Position.ChainID)
- if b.Timestamp.Before(chainTime.Add(con.currentConfig.MinBlockInterval)) ||
- b.Timestamp.After(chainTime.Add(con.currentConfig.MaxBlockInterval)) {
- return ErrIncorrectBlockTime
- }
- }
- // Check the hash of block.
- hash, err := hashBlock(b)
- if err != nil || hash != b.Hash {
- return ErrIncorrectHash
- }
-
- // Check the signer.
- pubKey, err := crypto.SigToPub(b.Hash, b.Signature)
- if err != nil {
- return err
- }
- if !b.ProposerID.Equal(crypto.Keccak256Hash(pubKey.Bytes())) {
- return ErrIncorrectSignature
- }
- return nil
-}
-
// preProcessBlock performs Byzantine Agreement on the block.
func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
- if err := con.sanityCheck(b); err != nil {
- return err
+ if err = con.shardModule.SanityCheck(b); err != nil {
+ return
}
- if err := con.baModules[b.Position.ChainID].processBlock(b); err != nil {
+ if err = con.baModules[b.Position.ChainID].processBlock(b); err != nil {
return err
}
return
@@ -577,73 +509,30 @@ func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
// processBlock is the entry point to submit one block to a Consensus instance.
func (con *Consensus) processBlock(block *types.Block) (err error) {
- if err := con.sanityCheck(block); err != nil {
- return err
+ verifiedBlocks, deliveredBlocks, err := con.shardModule.ProcessBlock(block)
+ if err != nil {
+ return
}
- var (
- deliveredBlocks []*types.Block
- earlyDelivered bool
- )
- // To avoid application layer modify the content of block during
- // processing, we should always operate based on the cloned one.
- b := block.Clone()
-
- con.lock.Lock()
- defer con.lock.Unlock()
- // Perform reliable broadcast checking.
- if err = con.rbModule.processBlock(b); err != nil {
- return err
+ // Pass verified blocks (those that passed sanity check) back to the BA module.
+ for _, b := range verifiedBlocks {
+ if err :=
+ con.baModules[b.Position.ChainID].processBlock(b); err != nil {
+ return err
+ }
}
- con.nbModule.BlockConfirmed(block.Hash)
- for _, b := range con.rbModule.extractBlocks() {
- // Notify application layer that some block is strongly acked.
- con.nbModule.StronglyAcked(b.Hash)
- // Perform total ordering.
- deliveredBlocks, earlyDelivered, err = con.toModule.processBlock(b)
- if err != nil {
+ // Pass delivered blocks to compaction chain.
+ for _, b := range deliveredBlocks {
+ if err = con.ccModule.processBlock(b); err != nil {
return
}
- if len(deliveredBlocks) == 0 {
- continue
- }
- for _, b := range deliveredBlocks {
- if err = con.db.Put(*b); err != nil {
- return
- }
- }
- // TODO(mission): handle membership events here.
- hashes := make(common.Hashes, len(deliveredBlocks))
- for idx := range deliveredBlocks {
- hashes[idx] = deliveredBlocks[idx].Hash
- }
- con.nbModule.TotalOrderingDelivered(hashes, earlyDelivered)
- // Perform timestamp generation.
- err = con.ctModule.processBlocks(deliveredBlocks)
- if err != nil {
+ if err = con.db.Update(*b); err != nil {
return
}
- for _, b := range deliveredBlocks {
- if err = con.ccModule.processBlock(b); err != nil {
- return
- }
- if err = con.db.Update(*b); err != nil {
- return
- }
- con.nbModule.BlockDelivered(*b)
- // TODO(mission): Find a way to safely recycle the block.
- // We should deliver block directly to
- // nonBlocking and let them recycle the
- // block.
- }
- }
- return
-}
-
-func (con *Consensus) checkPrepareBlock(
- b *types.Block, proposeTime time.Time) (err error) {
- if (b.ProposerID == types.NodeID{}) {
- err = &ErrMissingBlockInfo{MissingField: "ProposerID"}
- return
+ con.nbModule.BlockDelivered(*b)
+ // TODO(mission): Find a way to safely recycle the block.
+ // We should deliver block directly to
+ // nonBlocking and let them recycle the
+ // block.
}
return
}
@@ -651,16 +540,12 @@ func (con *Consensus) checkPrepareBlock(
// PrepareBlock would setup header fields of block based on its ProposerID.
func (con *Consensus) prepareBlock(b *types.Block,
proposeTime time.Time) (err error) {
- if err = con.checkPrepareBlock(b, proposeTime); err != nil {
+ if err = con.shardModule.PrepareBlock(b, proposeTime); err != nil {
return
}
- con.lock.RLock()
- defer con.lock.RUnlock()
-
- con.rbModule.prepareBlock(b)
- b.Timestamp = proposeTime
- b.Payload, b.Witness.Data = con.nbModule.PrepareBlock(b.Position)
- if err = con.authModule.SignBlock(b); err != nil {
+ // TODO(mission): decide CRS by block's round, which could be determined by
+ // block's info (ex. position, timestamp).
+ if err = con.authModule.SignCRS(b, con.gov.CRS(0)); err != nil {
return
}
return
@@ -669,18 +554,12 @@ func (con *Consensus) prepareBlock(b *types.Block,
// PrepareGenesisBlock would setup header fields for genesis block.
func (con *Consensus) PrepareGenesisBlock(b *types.Block,
proposeTime time.Time) (err error) {
- if err = con.checkPrepareBlock(b, proposeTime); err != nil {
+ if err = con.prepareBlock(b, proposeTime); err != nil {
return
}
if len(b.Payload) != 0 {
err = ErrGenesisBlockNotEmpty
return
}
- b.Position.Height = 0
- b.ParentHash = common.Hash{}
- b.Timestamp = proposeTime
- if err = con.authModule.SignBlock(b); err != nil {
- return
- }
return
}
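In summary, Consensus.processBlock is now a thin pipeline around the shard module: sanity-checked ("verified") blocks are re-fed to the per-chain BA modules, and totally-ordered ("delivered") blocks flow into the compaction chain, the database, and the application. The condensed restatement below is a reading aid only; it assumes the package-core context and the field names exactly as they appear in this diff.

package core

import "github.com/dexon-foundation/dexon-consensus-core/core/types"

// processBlockFlow restates the body of Consensus.processBlock added above,
// with the hunk noise removed and the error plumbing kept identical.
func processBlockFlow(con *Consensus, block *types.Block) error {
	// Sanity checking and lattice bookkeeping are delegated to the shard.
	verified, delivered, err := con.shardModule.ProcessBlock(block)
	if err != nil {
		return err
	}
	// Blocks that passed sanity check go back to per-chain agreement.
	for _, b := range verified {
		if err := con.baModules[b.Position.ChainID].processBlock(b); err != nil {
			return err
		}
	}
	// Totally-ordered blocks reach the compaction chain, the database and
	// the application, in that order.
	for _, b := range delivered {
		if err := con.ccModule.processBlock(b); err != nil {
			return err
		}
		if err := con.db.Update(*b); err != nil {
			return err
		}
		con.nbModule.BlockDelivered(*b)
	}
	return nil
}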
diff --git a/core/consensus_test.go b/core/consensus_test.go
index 583a2e5..71163e7 100644
--- a/core/consensus_test.go
+++ b/core/consensus_test.go
@@ -67,18 +67,16 @@ type ConsensusTestSuite struct {
}
func (s *ConsensusTestSuite) prepareGenesisBlock(
- proposerID types.NodeID,
chainID uint32,
con *Consensus) *types.Block {
block := &types.Block{
- ProposerID: proposerID,
Position: types.Position{
ChainID: chainID,
},
}
err := con.PrepareGenesisBlock(block, time.Now().UTC())
- s.Require().Nil(err)
+ s.Require().NoError(err)
return block
}
@@ -103,12 +101,14 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
// o o o o <- genesis blocks
// 0 1 2 3 <- index of node ID
//
- // This test case only works for Total Ordering with K=0.
+ // - This test case only works for Total Ordering with K=0.
+ // - The Byzantine Agreement layer is not taken into consideration; every
+ // block is passed to the shard module directly.
var (
gov, err = test.NewGovernance(4, time.Second)
- minInterval = gov.GetConfiguration(0).MinBlockInterval
+ minInterval = gov.Configuration(0).MinBlockInterval
req = s.Require()
- prvKeys = gov.GetPrivateKeys()
+ prvKeys = gov.PrivateKeys()
nodes []types.NodeID
)
s.Require().Nil(err)
@@ -134,24 +134,22 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
}
}
// Genesis blocks
- b00 := s.prepareGenesisBlock(nodes[0], 0, objs[nodes[0]].con)
- b10 := s.prepareGenesisBlock(nodes[1], 1, objs[nodes[1]].con)
- b20 := s.prepareGenesisBlock(nodes[2], 2, objs[nodes[2]].con)
- b30 := s.prepareGenesisBlock(nodes[3], 3, objs[nodes[3]].con)
+ b00 := s.prepareGenesisBlock(0, objs[nodes[0]].con)
+ b10 := s.prepareGenesisBlock(1, objs[nodes[1]].con)
+ b20 := s.prepareGenesisBlock(2, objs[nodes[2]].con)
+ b30 := s.prepareGenesisBlock(3, objs[nodes[3]].con)
broadcast(b00)
broadcast(b10)
broadcast(b20)
broadcast(b30)
// Setup b11.
b11 := &types.Block{
- ProposerID: nodes[1],
Position: types.Position{
ChainID: 1,
},
}
- b11.Hash, err = hashBlock(b11)
- s.Require().Nil(err)
- req.Nil(objs[nodes[1]].con.prepareBlock(b11, b10.Timestamp.Add(minInterval)))
+ req.NoError(
+ objs[nodes[1]].con.prepareBlock(b11, b10.Timestamp.Add(minInterval)))
req.Len(b11.Acks, 4)
req.Contains(b11.Acks, b00.Hash)
req.Contains(b11.Acks, b10.Hash)
@@ -160,37 +158,43 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
broadcast(b11)
// Setup b01.
b01 := &types.Block{
- ProposerID: nodes[0],
Position: types.Position{
ChainID: 0,
},
- Hash: common.NewRandomHash(),
}
- req.Nil(objs[nodes[0]].con.prepareBlock(b01, b00.Timestamp.Add(minInterval)))
+ req.NoError(
+ objs[nodes[0]].con.prepareBlock(b01, b00.Timestamp.Add(minInterval)))
req.Len(b01.Acks, 4)
+ req.Contains(b01.Acks, b00.Hash)
req.Contains(b01.Acks, b11.Hash)
+ req.Contains(b01.Acks, b20.Hash)
+ req.Contains(b01.Acks, b30.Hash)
// Setup b21.
b21 := &types.Block{
- ProposerID: nodes[2],
Position: types.Position{
ChainID: 2,
},
- Hash: common.NewRandomHash(),
}
- req.Nil(objs[nodes[2]].con.prepareBlock(b21, b20.Timestamp.Add(minInterval)))
+ req.NoError(
+ objs[nodes[2]].con.prepareBlock(b21, b20.Timestamp.Add(minInterval)))
req.Len(b21.Acks, 4)
+ req.Contains(b21.Acks, b00.Hash)
req.Contains(b21.Acks, b11.Hash)
+ req.Contains(b21.Acks, b20.Hash)
+ req.Contains(b21.Acks, b30.Hash)
// Setup b31.
b31 := &types.Block{
- ProposerID: nodes[3],
Position: types.Position{
ChainID: 3,
},
- Hash: common.NewRandomHash(),
}
- req.Nil(objs[nodes[3]].con.prepareBlock(b31, b30.Timestamp.Add(minInterval)))
+ req.NoError(
+ objs[nodes[3]].con.prepareBlock(b31, b30.Timestamp.Add(minInterval)))
req.Len(b31.Acks, 4)
+ req.Contains(b31.Acks, b00.Hash)
req.Contains(b31.Acks, b11.Hash)
+ req.Contains(b31.Acks, b20.Hash)
+ req.Contains(b31.Acks, b30.Hash)
// Broadcast other height=1 blocks.
broadcast(b01)
broadcast(b21)
@@ -198,26 +202,24 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
// Setup height=2 blocks.
// Setup b02.
b02 := &types.Block{
- ProposerID: nodes[0],
Position: types.Position{
ChainID: 0,
},
- Hash: common.NewRandomHash(),
}
- req.Nil(objs[nodes[0]].con.prepareBlock(b02, b01.Timestamp.Add(minInterval)))
+ req.NoError(
+ objs[nodes[0]].con.prepareBlock(b02, b01.Timestamp.Add(minInterval)))
req.Len(b02.Acks, 3)
req.Contains(b02.Acks, b01.Hash)
req.Contains(b02.Acks, b21.Hash)
req.Contains(b02.Acks, b31.Hash)
// Setup b12.
b12 := &types.Block{
- ProposerID: nodes[1],
Position: types.Position{
ChainID: 1,
},
- Hash: common.NewRandomHash(),
}
- req.Nil(objs[nodes[1]].con.prepareBlock(b12, b11.Timestamp.Add(minInterval)))
+ req.NoError(
+ objs[nodes[1]].con.prepareBlock(b12, b11.Timestamp.Add(minInterval)))
req.Len(b12.Acks, 4)
req.Contains(b12.Acks, b01.Hash)
req.Contains(b12.Acks, b11.Hash)
@@ -225,26 +227,24 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
req.Contains(b12.Acks, b31.Hash)
// Setup b22.
b22 := &types.Block{
- ProposerID: nodes[2],
Position: types.Position{
ChainID: 2,
},
- Hash: common.NewRandomHash(),
}
- req.Nil(objs[nodes[2]].con.prepareBlock(b22, b21.Timestamp.Add(minInterval)))
+ req.NoError(
+ objs[nodes[2]].con.prepareBlock(b22, b21.Timestamp.Add(minInterval)))
req.Len(b22.Acks, 3)
req.Contains(b22.Acks, b01.Hash)
req.Contains(b22.Acks, b21.Hash)
req.Contains(b22.Acks, b31.Hash)
// Setup b32.
b32 := &types.Block{
- ProposerID: nodes[3],
Position: types.Position{
ChainID: 3,
},
- Hash: common.NewRandomHash(),
}
- req.Nil(objs[nodes[3]].con.prepareBlock(b32, b31.Timestamp.Add(minInterval)))
+ req.NoError(
+ objs[nodes[3]].con.prepareBlock(b32, b31.Timestamp.Add(minInterval)))
req.Len(b32.Acks, 3)
req.Contains(b32.Acks, b01.Hash)
req.Contains(b32.Acks, b21.Hash)
@@ -269,7 +269,7 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
// Genesis blocks are delivered by total ordering as a set.
delivered0 := common.Hashes{b00.Hash, b10.Hash, b20.Hash, b30.Hash}
sort.Sort(delivered0)
- req.Len(app.TotalOrdered, 2)
+ req.Len(app.TotalOrdered, 4)
req.Equal(app.TotalOrdered[0].BlockHashes, delivered0)
req.False(app.TotalOrdered[0].Early)
// b11 is the second set delivered by total ordering.
@@ -277,6 +277,16 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
sort.Sort(delivered1)
req.Equal(app.TotalOrdered[1].BlockHashes, delivered1)
req.False(app.TotalOrdered[1].Early)
+ // b01, b21, b31 are the third set delivered by total ordering.
+ delivered2 := common.Hashes{b01.Hash, b21.Hash, b31.Hash}
+ sort.Sort(delivered2)
+ req.Equal(app.TotalOrdered[2].BlockHashes, delivered2)
+ req.False(app.TotalOrdered[2].Early)
+ // b02, b12, b22, b32 are the fourth set delivered by total ordering.
+ delivered3 := common.Hashes{b02.Hash, b12.Hash, b22.Hash, b32.Hash}
+ sort.Sort(delivered3)
+ req.Equal(app.TotalOrdered[3].BlockHashes, delivered3)
+ req.False(app.TotalOrdered[3].Early)
// Check generated timestamps.
req.Contains(app.Delivered, b00.Hash)
req.Contains(app.Delivered, b10.Hash)
@@ -292,7 +302,7 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
timestamps[2] = b20.Timestamp
timestamps[3] = b30.Timestamp
t, err := getMedianTime(timestamps)
- req.Nil(err)
+ req.NoError(err)
req.Equal(t, app.Delivered[b11.Hash].ConsensusTime)
}
for _, obj := range objs {
@@ -313,7 +323,7 @@ func (s *ConsensusTestSuite) TestPrepareBlock() {
gov, err = test.NewGovernance(4, time.Second)
req = s.Require()
nodes []types.NodeID
- prvKeys = gov.GetPrivateKeys()
+ prvKeys = gov.PrivateKeys()
)
s.Require().Nil(err)
// Setup core.Consensus and test.App.
@@ -324,10 +334,10 @@ func (s *ConsensusTestSuite) TestPrepareBlock() {
cons[nID] = con
nodes = append(nodes, nID)
}
- b00 := s.prepareGenesisBlock(nodes[0], 0, cons[nodes[0]])
- b10 := s.prepareGenesisBlock(nodes[1], 1, cons[nodes[1]])
- b20 := s.prepareGenesisBlock(nodes[2], 2, cons[nodes[2]])
- b30 := s.prepareGenesisBlock(nodes[3], 3, cons[nodes[3]])
+ b00 := s.prepareGenesisBlock(0, cons[nodes[0]])
+ b10 := s.prepareGenesisBlock(1, cons[nodes[1]])
+ b20 := s.prepareGenesisBlock(2, cons[nodes[2]])
+ b30 := s.prepareGenesisBlock(3, cons[nodes[3]])
for _, con := range cons {
req.Nil(con.processBlock(b00))
req.Nil(con.processBlock(b10))
@@ -335,74 +345,33 @@ func (s *ConsensusTestSuite) TestPrepareBlock() {
req.Nil(con.processBlock(b30))
}
b11 := &types.Block{
- ProposerID: nodes[1],
+ Position: types.Position{ChainID: b10.Position.ChainID},
}
- interval := gov.GetConfiguration(0).MinBlockInterval
+ interval := gov.Configuration(0).MinBlockInterval
req.Nil(cons[nodes[1]].prepareBlock(b11, b10.Timestamp.Add(interval)))
for _, con := range cons {
+ req.Nil(con.preProcessBlock(b11))
req.Nil(con.processBlock(b11))
}
b12 := &types.Block{
- ProposerID: nodes[1],
+ Position: types.Position{ChainID: b11.Position.ChainID},
}
- req.Nil(cons[nodes[1]].prepareBlock(b12, b10.Timestamp.Add(interval)))
+ req.Nil(cons[nodes[1]].prepareBlock(b12, b11.Timestamp.Add(interval)))
req.Len(b12.Acks, 1)
req.Contains(b12.Acks, b11.Hash)
}
func (s *ConsensusTestSuite) TestPrepareGenesisBlock() {
gov, err := test.NewGovernance(4, time.Second)
- s.Require().Nil(err)
- prvKey := gov.GetPrivateKeys()[0]
+ s.Require().NoError(err)
+ prvKey := gov.PrivateKeys()[0]
_, con := s.prepareConsensus(gov, prvKey)
block := &types.Block{
- ProposerID: types.NewNodeID(prvKey.PublicKey()),
+ Position: types.Position{ChainID: 0},
}
- con.PrepareGenesisBlock(block, time.Now().UTC())
+ s.Require().NoError(con.PrepareGenesisBlock(block, time.Now().UTC()))
s.True(block.IsGenesis())
- s.Nil(con.sanityCheck(block))
-}
-
-func (s *ConsensusTestSuite) TestSanityCheck() {
- gov, err := test.NewGovernance(4, time.Second)
- s.Require().Nil(err)
- prvKey := gov.GetPrivateKeys()[0]
- _, con := s.prepareConsensus(gov, prvKey)
- nID := types.NewNodeID(prvKey.PublicKey())
- b0 := s.prepareGenesisBlock(nID, 0, con)
- s.Require().NoError(con.processBlock(b0))
- invalidChain := &types.Block{
- ProposerID: nID,
- Position: types.Position{
- ChainID: 1000,
- },
- }
- s.Equal(ErrIncorrectBlockPosition, con.sanityCheck(invalidChain))
- invalidTimestamp := &types.Block{
- ProposerID: nID,
- }
- s.Require().NoError(con.prepareBlock(invalidTimestamp, b0.Timestamp))
- s.Equal(ErrIncorrectBlockTime, con.sanityCheck(invalidTimestamp))
- s.Require().NoError(con.prepareBlock(invalidTimestamp, b0.Timestamp.Add(
- gov.GetConfiguration(0).MaxBlockInterval).Add(1*time.Second)))
- s.Equal(ErrIncorrectBlockTime, con.sanityCheck(invalidTimestamp))
-
- ts := b0.Timestamp.Add(gov.GetConfiguration(0).MinBlockInterval)
-
- invalidHash := &types.Block{
- ProposerID: nID,
- }
- s.Require().NoError(con.prepareBlock(invalidHash, ts))
- invalidHash.Hash = common.NewRandomHash()
- s.Equal(ErrIncorrectHash, con.sanityCheck(invalidHash))
-
- invalidSignature := &types.Block{
- ProposerID: nID,
- }
- s.Require().NoError(con.prepareBlock(invalidSignature, ts))
- invalidSignature.Signature, err = prvKey.Sign(common.NewRandomHash())
- s.Require().NoError(err)
- s.Equal(ErrIncorrectSignature, con.sanityCheck(invalidSignature))
+ s.NoError(con.preProcessBlock(block))
}
func TestConsensus(t *testing.T) {
diff --git a/core/interfaces.go b/core/interfaces.go
index d1868a5..fa593eb 100644
--- a/core/interfaces.go
+++ b/core/interfaces.go
@@ -79,27 +79,27 @@ type Network interface {
// Note that there are a lot more methods in the governance contract, that this
// interface only define those that are required to run the consensus algorithm.
type Governance interface {
- // GetConfiguration returns the configuration at a given round.
+ // Configuration returns the configuration at a given round.
// Return the genesis configuration if round == 0.
- GetConfiguration(round uint64) *types.Config
+ Configuration(round uint64) *types.Config
- // GetCRS returns the CRS for a given round.
+ // CRS returns the CRS for a given round.
// Return the genesis CRS if round == 0.
- GetCRS(round uint64) common.Hash
+ CRS(round uint64) common.Hash
// Propose a CRS of round.
ProposeCRS(round uint64, signedCRS []byte)
- // GetNodeSet returns the node set at a given round.
+ // NodeSet returns the node set at a given round.
// Return the genesis node set if round == 0.
- GetNodeSet(round uint64) []crypto.PublicKey
+ NodeSet(round uint64) []crypto.PublicKey
//// DKG-related methods.
// AddDKGComplaint adds a DKGComplaint.
AddDKGComplaint(complaint *types.DKGComplaint)
- // GetDKGComplaints gets all the DKGComplaints of round.
+ // DKGComplaints gets all the DKGComplaints of round.
DKGComplaints(round uint64) []*types.DKGComplaint
// AddDKGMasterPublicKey adds a DKGMasterPublicKey.
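The Governance getters above drop their Get prefix, which reads better at call sites (gov.Configuration(round), gov.CRS(round), gov.NodeSet(round)). A do-nothing stub covering just the getters renamed in this hunk, in the spirit of the testGov update in core/nodeset-cache_test.go further below; it is a sketch, not a full Governance implementation (the DKG add/get methods are omitted), and it assumes the same common/crypto/types imports used by core/interfaces.go.

// governanceStub satisfies only the methods renamed by this commit, plus
// ProposeCRS for completeness.
type governanceStub struct{}

func (governanceStub) Configuration(round uint64) *types.Config { return &types.Config{} }
func (governanceStub) CRS(round uint64) common.Hash              { return common.Hash{} }
func (governanceStub) ProposeCRS(round uint64, signedCRS []byte) {}
func (governanceStub) NodeSet(round uint64) []crypto.PublicKey   { return nil }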
diff --git a/core/nodeset-cache.go b/core/nodeset-cache.go
index 610131b..49521ab 100644
--- a/core/nodeset-cache.go
+++ b/core/nodeset-cache.go
@@ -106,7 +106,7 @@ func (cache *NodeSetCache) update(
defer cache.lock.Unlock()
// Get the requested round from governance contract.
- keySet := cache.gov.GetNodeSet(round)
+ keySet := cache.gov.NodeSet(round)
if keySet == nil {
// That round is not ready yet.
err = ErrRoundNotReady
diff --git a/core/nodeset-cache_test.go b/core/nodeset-cache_test.go
index 84e5d54..d7e7dba 100644
--- a/core/nodeset-cache_test.go
+++ b/core/nodeset-cache_test.go
@@ -32,10 +32,10 @@ type testGov struct {
curKeys []crypto.PublicKey
}
-func (g *testGov) GetConfiguration(round uint64) (cfg *types.Config) { return }
-func (g *testGov) GetCRS(round uint64) (b common.Hash) { return }
-func (g *testGov) ProposeCRS(uint64, []byte) {}
-func (g *testGov) GetNodeSet(round uint64) []crypto.PublicKey {
+func (g *testGov) Configuration(round uint64) (cfg *types.Config) { return }
+func (g *testGov) CRS(round uint64) (b common.Hash) { return }
+func (g *testGov) ProposeCRS(uint64, []byte) {}
+func (g *testGov) NodeSet(round uint64) []crypto.PublicKey {
// Randomly generate keys, and check them for verification.
g.curKeys = []crypto.PublicKey{}
for i := 0; i < 10; i++ {
diff --git a/core/reliable-broadcast.go b/core/reliable-broadcast.go
deleted file mode 100644
index e8c5ca4..0000000
--- a/core/reliable-broadcast.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// Copyright 2018 The dexon-consensus-core Authors
-// This file is part of the dexon-consensus-core library.
-//
-// The dexon-consensus-core library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus-core library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus-core library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "fmt"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus-core/common"
- "github.com/dexon-foundation/dexon-consensus-core/core/types"
-)
-
-// Status represents the block process state.
-type blockStatus int
-
-// Block Status.
-const (
- blockStatusInit blockStatus = iota
- blockStatusAcked
- blockStatusOrdering
- blockStatusFinal
-)
-
-// reliableBroadcast is a module for reliable broadcast.
-type reliableBroadcast struct {
- // lattice stores node's blocks and other info.
- lattice []*rbcNodeStatus
-
- // blockInfos stores block infos.
- blockInfos map[common.Hash]*rbcBlockInfo
-
- // receivedBlocks stores blocks which is received but its acks are not all
- // in lattice.
- receivedBlocks map[common.Hash]*types.Block
-
- // nodes stores node set.
- nodes map[types.NodeID]struct{}
-}
-
-type rbcNodeStatus struct {
- // blocks stores blocks proposed by specified node in map which key is
- // the height of the block.
- blocks map[uint64]*types.Block
-
- // nextAck stores the height of next height that should be acked, i.e. last
- // acked height + 1. Initialized to 0, when genesis blocks are still not
- // being acked. For example, rb.lattice[vid1].NextAck[vid2] - 1 is the last
- // acked height by vid1 acking vid2.
- nextAck []uint64
-
- // nextOutput is the next output height of block, default to 0.
- nextOutput uint64
-
- // nextHeight is the next height of block to be prepared.
- nextHeight uint64
-
- // timestamp of the chain.
- timestamp time.Time
-}
-
-type rbcBlockInfo struct {
- block *types.Block
- receivedTime time.Time
- status blockStatus
- ackedChain map[uint32]struct{}
-}
-
-// Errors for sanity check error.
-var (
- ErrInvalidChainID = fmt.Errorf("invalid chain id")
- ErrInvalidProposerID = fmt.Errorf("invalid proposer id")
- ErrInvalidTimestamp = fmt.Errorf("invalid timestamp")
- ErrForkBlock = fmt.Errorf("fork block")
- ErrNotAckParent = fmt.Errorf("not ack parent")
- ErrDoubleAck = fmt.Errorf("double ack")
- ErrInvalidBlockHeight = fmt.Errorf("invalid block height")
- ErrAlreadyInLattice = fmt.Errorf("block already in lattice")
-)
-
-// newReliableBroadcast creates a new reliableBroadcast struct.
-func newReliableBroadcast() *reliableBroadcast {
- return &reliableBroadcast{
- blockInfos: make(map[common.Hash]*rbcBlockInfo),
- receivedBlocks: make(map[common.Hash]*types.Block),
- nodes: make(map[types.NodeID]struct{}),
- }
-}
-
-func (rb *reliableBroadcast) sanityCheck(b *types.Block) error {
- // Check if the chain id is valid.
- if b.Position.ChainID >= uint32(len(rb.lattice)) {
- return ErrInvalidChainID
- }
-
- // Check if its proposer is in node set.
- if _, exist := rb.nodes[b.ProposerID]; !exist {
- return ErrInvalidProposerID
- }
-
- // Check if it forks.
- if bInLattice, exist :=
- rb.lattice[b.Position.ChainID].blocks[b.Position.Height]; exist {
- if b.Hash != bInLattice.Hash {
- return ErrForkBlock
- }
- return ErrAlreadyInLattice
- }
-
- // Check non-genesis blocks if it acks its parent.
- if b.Position.Height > 0 {
- if !b.IsAcking(b.ParentHash) {
- return ErrNotAckParent
- }
- bParentStat, exists := rb.blockInfos[b.ParentHash]
- if exists && bParentStat.block.Position.Height != b.Position.Height-1 {
- return ErrInvalidBlockHeight
- }
- }
-
- // Check if it acks older blocks.
- for _, hash := range b.Acks {
- if bAckStat, exist := rb.blockInfos[hash]; exist {
- bAck := bAckStat.block
- if bAck.Position.Height <
- rb.lattice[b.Position.ChainID].nextAck[bAck.Position.ChainID] {
- return ErrDoubleAck
- }
- }
- }
-
- // Check if its timestamp is valid.
- if bParent, exist :=
- rb.lattice[b.Position.ChainID].blocks[b.Position.Height-1]; exist {
- if !b.Timestamp.After(bParent.Timestamp) {
- return ErrInvalidTimestamp
- }
- }
-
- // TODO(haoping): application layer check of block's content
-
- return nil
-}
-
-// areAllAcksReceived checks if all ack blocks of a block are all in lattice.
-func (rb *reliableBroadcast) areAllAcksInLattice(b *types.Block) bool {
- for _, h := range b.Acks {
- bAckStat, exist := rb.blockInfos[h]
- if !exist {
- return false
- }
- bAck := bAckStat.block
-
- bAckInLattice, exist :=
- rb.lattice[bAck.Position.ChainID].blocks[bAck.Position.Height]
- if !exist {
- return false
- }
- if bAckInLattice.Hash != bAck.Hash {
- panic("areAllAcksInLattice: reliableBroadcast.lattice has corrupted")
- }
- }
- return true
-}
-
-// processBlock processes block, it does sanity check, inserts block into
-// lattice, handles strong acking and deletes blocks which will not be used.
-func (rb *reliableBroadcast) processBlock(block *types.Block) (err error) {
- // If a block does not pass sanity check, discard this block.
- if err = rb.sanityCheck(block); err != nil {
- return
- }
- rb.blockInfos[block.Hash] = &rbcBlockInfo{
- block: block,
- receivedTime: time.Now().UTC(),
- ackedChain: make(map[uint32]struct{}),
- }
- rb.receivedBlocks[block.Hash] = block
- if rb.lattice[block.Position.ChainID].nextHeight <= block.Position.Height {
- rb.lattice[block.Position.ChainID].nextHeight = block.Position.Height + 1
- }
- rb.lattice[block.Position.ChainID].timestamp = block.Timestamp
-
- // Check blocks in receivedBlocks if its acks are all in lattice. If a block's
- // acking blocks are all in lattice, execute sanity check and add the block
- // into lattice.
- blocksToAcked := map[common.Hash]*types.Block{}
- for {
- blocksToLattice := map[common.Hash]*types.Block{}
- for _, b := range rb.receivedBlocks {
- if rb.areAllAcksInLattice(b) {
- blocksToLattice[b.Hash] = b
- }
- }
- if len(blocksToLattice) == 0 {
- break
- }
- for _, b := range blocksToLattice {
- // Sanity check must been executed again here for the case that several
- // valid blocks with different content being added into blocksToLattice
- // in the same time. For example
- // B C Block B and C both ack A and are valid. B, C received first
- // \ / (added in receivedBlocks), and A comes, if sanity check is
- // A not being executed here, B and C will both be added in lattice
- if err = rb.sanityCheck(b); err != nil {
- delete(rb.blockInfos, b.Hash)
- delete(rb.receivedBlocks, b.Hash)
- continue
- // TODO(mission): how to return for multiple errors?
- }
- chainID := b.Position.ChainID
- rb.lattice[chainID].blocks[b.Position.Height] = b
- delete(rb.receivedBlocks, b.Hash)
- for _, h := range b.Acks {
- bAckStat := rb.blockInfos[h]
- // Update nextAck only when bAckStat.block.Position.Height + 1
- // is greater. A block might ack blocks proposed by same node with
- // different height.
- if rb.lattice[chainID].nextAck[bAckStat.block.Position.ChainID] <
- bAckStat.block.Position.Height+1 {
- rb.lattice[chainID].nextAck[bAckStat.block.Position.ChainID] =
- bAckStat.block.Position.Height + 1
- }
- // Update ackedChain for each ack blocks and its parents.
- for {
- if _, exist := bAckStat.ackedChain[chainID]; exist {
- break
- }
- if bAckStat.status > blockStatusInit {
- break
- }
- bAckStat.ackedChain[chainID] = struct{}{}
- // A block is strongly acked if it is acked by more than
- // 2 * (maximum number of byzatine nodes) unique nodes.
- if len(bAckStat.ackedChain) > 2*((len(rb.lattice)-1)/3) {
- blocksToAcked[bAckStat.block.Hash] = bAckStat.block
- }
- if bAckStat.block.Position.Height == 0 {
- break
- }
- bAckStat = rb.blockInfos[bAckStat.block.ParentHash]
- }
- }
- }
- }
-
- for _, b := range blocksToAcked {
- rb.blockInfos[b.Hash].status = blockStatusAcked
- }
-
- // Delete blocks in received array when it is received a long time ago.
- oldBlocks := []common.Hash{}
- for h, b := range rb.receivedBlocks {
- if time.Now().Sub(rb.blockInfos[b.Hash].receivedTime) >= 30*time.Second {
- oldBlocks = append(oldBlocks, h)
- }
- }
- for _, h := range oldBlocks {
- delete(rb.receivedBlocks, h)
- delete(rb.blockInfos, h)
- }
-
- // Delete old blocks in "lattice" and "blocks" for release memory space.
- // First, find the height that blocks below it can be deleted. This height
- // is defined by finding minimum of node's nextOutput and last acking
- // heights from other nodes, i.e. rb.lattice[v_other].nextAck[this_vid].
- // This works because blocks of height below this minimum are not going to be
- // acked anymore, the ackings of these blocks are illegal.
- for vid := range rb.lattice {
- // Find the minimum height that heights lesser can be deleted.
- min := rb.lattice[vid].nextOutput
- for vid2 := range rb.lattice {
- if rb.lattice[vid2].nextAck[vid] < min {
- min = rb.lattice[vid2].nextAck[vid]
- }
- }
- // "min" is the height of "next" last acked, min - 1 is the last height.
- // Delete blocks from min - 2 which will never be acked.
- if min < 3 {
- continue
- }
- min -= 2
- for {
- b, exist := rb.lattice[vid].blocks[min]
- if !exist {
- break
- }
- if rb.blockInfos[b.Hash].status >= blockStatusOrdering {
- delete(rb.lattice[vid].blocks, b.Position.Height)
- delete(rb.blockInfos, b.Hash)
- }
- if min == 0 {
- break
- }
- min--
- }
- }
- return
-}
-
-// extractBlocks returns all blocks that can be inserted into total ordering's
-// DAG. This function changes the status of blocks from blockStatusAcked to
-// blockStatusOrdering.
-func (rb *reliableBroadcast) extractBlocks() []*types.Block {
- ret := []*types.Block{}
- for {
- updated := false
- for vid := range rb.lattice {
- b, exist := rb.lattice[vid].blocks[rb.lattice[vid].nextOutput]
- if !exist || rb.blockInfos[b.Hash].status < blockStatusAcked {
- continue
- }
- allAcksInOrderingStatus := true
- // Check if all acks are in ordering or above status. If a block of an ack
- // does not exist means that it deleted but its status is definitely Acked
- // or ordering.
- for _, ackHash := range b.Acks {
- bAckStat, exist := rb.blockInfos[ackHash]
- if !exist {
- continue
- }
- if bAckStat.status < blockStatusOrdering {
- allAcksInOrderingStatus = false
- break
- }
- }
- if !allAcksInOrderingStatus {
- continue
- }
- updated = true
- rb.blockInfos[b.Hash].status = blockStatusOrdering
- ret = append(ret, b)
- rb.lattice[vid].nextOutput++
- }
- if !updated {
- break
- }
- }
- return ret
-}
-
-// prepareBlock helps to setup fields of block based on its ProposerID,
-// including:
-// - Set 'Acks' and 'Timestamps' for the highest block of each node not
-// acked by this proposer before.
-// - Set 'ParentHash' and 'Height' from parent block, if we can't find a
-// parent, these fields would be setup like a genesis block.
-func (rb *reliableBroadcast) prepareBlock(block *types.Block) {
- // Reset fields to make sure we got these information from parent block.
- block.Position.Height = 0
- block.ParentHash = common.Hash{}
- acks := common.Hashes{}
- for chainID := range rb.lattice {
- // find height of the latest block for that node.
- var (
- curBlock *types.Block
- nextHeight = rb.lattice[block.Position.ChainID].nextAck[chainID]
- )
-
- for {
- tmpBlock, exists := rb.lattice[chainID].blocks[nextHeight]
- if !exists {
- break
- }
- curBlock = tmpBlock
- nextHeight++
- }
- if curBlock == nil {
- continue
- }
- acks = append(acks, curBlock.Hash)
- if uint32(chainID) == block.Position.ChainID {
- block.ParentHash = curBlock.Hash
- if block.Timestamp.Before(curBlock.Timestamp) {
- // TODO (mission): make epslon configurable.
- block.Timestamp = curBlock.Timestamp.Add(1 * time.Millisecond)
- }
- if block.Position.Height == 0 {
- block.Position.Height = curBlock.Position.Height + 1
- }
- }
- }
- block.Acks = common.NewSortedHashes(acks)
- return
-}
-
-// addNode adds node in the node set.
-func (rb *reliableBroadcast) addNode(h types.NodeID) {
- rb.nodes[h] = struct{}{}
-}
-
-// deleteNode deletes node in node set.
-func (rb *reliableBroadcast) deleteNode(h types.NodeID) {
- delete(rb.nodes, h)
-}
-
-// setChainNum set the number of chains.
-func (rb *reliableBroadcast) setChainNum(num uint32) {
- rb.lattice = make([]*rbcNodeStatus, num)
- for i := range rb.lattice {
- rb.lattice[i] = &rbcNodeStatus{
- blocks: make(map[uint64]*types.Block),
- nextAck: make([]uint64, num),
- nextOutput: 0,
- nextHeight: 0,
- }
- }
-}
-
-func (rb *reliableBroadcast) chainNum() uint32 {
- return uint32(len(rb.lattice))
-}
-
-// nextHeight returns the next height for the chain.
-func (rb *reliableBroadcast) nextHeight(chainID uint32) uint64 {
- return rb.lattice[chainID].nextHeight
-}
-
-// chainTime returnes the latest time for the chain.
-func (rb *reliableBroadcast) chainTime(chainID uint32) time.Time {
- return rb.lattice[chainID].timestamp
-}
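For reference while reading the deletion above: the strong-ack rule the removed module used is that a block becomes strongly acked once blocks from more than 2*floor((N-1)/3) distinct chains ack it (directly or through descendants), where N is the number of chains, i.e. more than twice the tolerated number of Byzantine nodes. A tiny standalone illustration of that threshold (not part of the new shard code):

package main

import "fmt"

// strongAckThreshold reproduces the bound from the deleted processBlock:
// len(ackedChain) must exceed 2 * ((numChains - 1) / 3) for a block to be
// marked blockStatusAcked.
func strongAckThreshold(numChains int) int {
	return 2 * ((numChains - 1) / 3)
}

func main() {
	for _, n := range []int{4, 7, 10} {
		t := strongAckThreshold(n)
		fmt.Printf("chains=%d: strongly acked when acked by more than %d chains (>= %d)\n",
			n, t, t+1)
	}
}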
diff --git a/core/reliable-broadcast_test.go b/core/reliable-broadcast_test.go
deleted file mode 100644
index 0e627d1..0000000
--- a/core/reliable-broadcast_test.go
+++ /dev/null
@@ -1,702 +0,0 @@
-// Copyright 2018 The dexon-consensus-core Authors
-// This file is part of the dexon-consensus-core library.
-//
-// The dexon-consensus-core library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus-core library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus-core library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-// TODO(mission): we should check the return value from processBlock.
-
-package core
-
-import (
- "math/rand"
- "sort"
- "testing"
- "time"
-
- "github.com/stretchr/testify/suite"
-
- "github.com/dexon-foundation/dexon-consensus-core/common"
- "github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
- "github.com/dexon-foundation/dexon-consensus-core/core/test"
- "github.com/dexon-foundation/dexon-consensus-core/core/types"
-)
-
-type ReliableBroadcastTest struct {
- suite.Suite
-}
-
-func (s *ReliableBroadcastTest) SetupSuite() {
-
-}
-
-func (s *ReliableBroadcastTest) SetupTest() {
-
-}
-
-func (s *ReliableBroadcastTest) prepareGenesisBlock(
- proposerID types.NodeID,
- nodeIDs []types.NodeID) (b *types.Block) {
-
- b = &types.Block{
- ProposerID: proposerID,
- ParentHash: common.Hash{},
- Position: types.Position{
- Height: 0,
- },
- Acks: common.NewSortedHashes(common.Hashes{}),
- Timestamp: time.Now().UTC(),
- }
- for i, vID := range nodeIDs {
- if proposerID == vID {
- b.Position.ChainID = uint32(i)
- break
- }
- }
- b.Timestamp = time.Now().UTC()
- var err error
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- return
-}
-
-// genTestCase1 generates test case 1,
-// 3
-// |
-// 2
-// | \
-// 1 | 1
-// | | |
-// 0 0 0 0 (block height)
-// 0 1 2 3 (node)
-func genTestCase1(s *ReliableBroadcastTest, rb *reliableBroadcast) []types.NodeID {
- // Create new reliableBroadcast instance with 4 nodes
- var b *types.Block
- var h common.Hash
-
- vids := []types.NodeID{}
- for i := 0; i < 4; i++ {
- vid := types.NodeID{Hash: common.NewRandomHash()}
- rb.addNode(vid)
- vids = append(vids, vid)
- }
- rb.setChainNum(uint32(len(vids)))
- // Add genesis blocks.
- for _, vid := range vids {
- b = s.prepareGenesisBlock(vid, vids)
- s.Require().Nil(rb.processBlock(b))
- }
-
- // Add block 0-1 which acks 0-0.
- h = rb.lattice[0].blocks[0].Hash
- b = &types.Block{
- ProposerID: vids[0],
- ParentHash: h,
- Hash: common.NewRandomHash(),
- Timestamp: time.Now().UTC(),
- Position: types.Position{
- ChainID: 0,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- var err error
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- s.Require().Nil(rb.processBlock(b))
- s.Require().NotNil(rb.lattice[0].blocks[1])
-
- // Add block 0-2 which acks 0-1 and 1-0.
- h = rb.lattice[0].blocks[1].Hash
- b = &types.Block{
- ProposerID: vids[0],
- ParentHash: h,
- Position: types.Position{
- ChainID: 0,
- Height: 2,
- },
- Timestamp: time.Now().UTC(),
- Acks: common.NewSortedHashes(common.Hashes{
- h,
- rb.lattice[1].blocks[0].Hash,
- }),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- s.Require().Nil(rb.processBlock(b))
- s.Require().NotNil(rb.lattice[0].blocks[2])
-
- // Add block 0-3 which acks 0-2.
- h = rb.lattice[0].blocks[2].Hash
- b = &types.Block{
- ProposerID: vids[0],
- ParentHash: h,
- Hash: common.NewRandomHash(),
- Timestamp: time.Now().UTC(),
- Position: types.Position{
- ChainID: 0,
- Height: 3,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- s.Require().Nil(rb.processBlock(b))
- s.Require().NotNil(rb.lattice[0].blocks[3])
-
- // Add block 3-1 which acks 3-0.
- h = rb.lattice[3].blocks[0].Hash
- b = &types.Block{
- ProposerID: vids[3],
- ParentHash: h,
- Hash: common.NewRandomHash(),
- Timestamp: time.Now().UTC(),
- Position: types.Position{
- ChainID: 3,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- s.Require().Nil(rb.processBlock(b))
- s.Require().NotNil(rb.lattice[3].blocks[0])
-
- return vids
-}
-
-func (s *ReliableBroadcastTest) TestAddNode() {
- rb := newReliableBroadcast()
- s.Require().Equal(len(rb.lattice), 0)
- vids := genTestCase1(s, rb)
- s.Require().Equal(len(rb.lattice), 4)
- for _, vid := range vids {
- rb.deleteNode(vid)
- }
-}
-
-func (s *ReliableBroadcastTest) TestSanityCheck() {
- var b *types.Block
- var h common.Hash
- var vids []types.NodeID
- var err error
- rb := newReliableBroadcast()
- vids = genTestCase1(s, rb)
-
- // Non-genesis block with no ack, should get error.
- b = &types.Block{
- ProposerID: vids[0],
- ParentHash: common.NewRandomHash(),
- Position: types.Position{
- ChainID: 0,
- Height: 10,
- },
- Acks: common.NewSortedHashes(common.Hashes{}),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- err = rb.sanityCheck(b)
- s.Require().NotNil(err)
- s.Require().Equal(ErrNotAckParent.Error(), err.Error())
-
- // Non-genesis block which does not ack its parent.
- b = &types.Block{
- ProposerID: vids[1],
- ParentHash: common.NewRandomHash(),
- Position: types.Position{
- ChainID: 1,
- Height: 1,
- },
- Acks: common.NewSortedHashes(
- common.Hashes{rb.lattice[2].blocks[0].Hash}),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- err = rb.sanityCheck(b)
- s.Require().NotNil(err)
- s.Require().Equal(ErrNotAckParent.Error(), err.Error())
-
- // Non-genesis block which acks its parent but the height is invalid.
- h = rb.lattice[1].blocks[0].Hash
- b = &types.Block{
- ProposerID: vids[1],
- ParentHash: h,
- Position: types.Position{
- ChainID: 1,
- Height: 2,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- err = rb.sanityCheck(b)
- s.Require().NotNil(err)
- s.Require().Equal(ErrInvalidBlockHeight.Error(), err.Error())
-
- // Invalid proposer ID.
- h = rb.lattice[1].blocks[0].Hash
- b = &types.Block{
- ProposerID: types.NodeID{Hash: common.NewRandomHash()},
- ParentHash: h,
- Position: types.Position{
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- err = rb.sanityCheck(b)
- s.Require().NotNil(err)
- s.Require().Equal(ErrInvalidProposerID.Error(), err.Error())
-
- // Invalid chain ID.
- h = rb.lattice[1].blocks[0].Hash
- b = &types.Block{
- ProposerID: vids[1],
- ParentHash: h,
- Position: types.Position{
- ChainID: 100,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- err = rb.sanityCheck(b)
- s.Require().NotNil(err)
- s.Require().Equal(ErrInvalidChainID.Error(), err.Error())
-
- // Fork block.
- h = rb.lattice[0].blocks[0].Hash
- b = &types.Block{
- ProposerID: vids[0],
- ParentHash: h,
- Position: types.Position{
- ChainID: 0,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
- Timestamp: time.Now().UTC(),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- err = rb.sanityCheck(b)
- s.Require().NotNil(err)
- s.Require().Equal(ErrForkBlock.Error(), err.Error())
-
- // Replicated ack.
- h = rb.lattice[0].blocks[3].Hash
- b = &types.Block{
- ProposerID: vids[0],
- ParentHash: h,
- Position: types.Position{
- ChainID: 0,
- Height: 4,
- },
- Acks: common.NewSortedHashes(common.Hashes{
- h,
- rb.lattice[1].blocks[0].Hash,
- }),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- err = rb.sanityCheck(b)
- s.Require().NotNil(err)
- s.Require().Equal(ErrDoubleAck.Error(), err.Error())
-
- // Normal block.
- h = rb.lattice[1].blocks[0].Hash
- b = &types.Block{
- ProposerID: vids[1],
- ParentHash: h,
- Position: types.Position{
- ChainID: 1,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{
- h,
- common.NewRandomHash(),
- }),
- Timestamp: time.Now().UTC(),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- err = rb.sanityCheck(b)
- s.Nil(err)
-}
-
-func (s *ReliableBroadcastTest) TestAreAllAcksInLattice() {
- var b *types.Block
- rb := newReliableBroadcast()
- genTestCase1(s, rb)
-
- // Empty ack should get true, although won't pass sanity check.
- b = &types.Block{
- Acks: common.NewSortedHashes(common.Hashes{}),
- }
- s.Require().True(rb.areAllAcksInLattice(b))
-
- // Acks blocks in lattice
- b = &types.Block{
- Acks: common.NewSortedHashes(common.Hashes{
- rb.lattice[0].blocks[0].Hash,
- rb.lattice[0].blocks[1].Hash,
- }),
- }
- s.Require().True(rb.areAllAcksInLattice(b))
-
- // Acks random block hash.
- b = &types.Block{
- Acks: common.NewSortedHashes(common.Hashes{common.NewRandomHash()}),
- }
- s.Require().False(rb.areAllAcksInLattice(b))
-}
-
-func (s *ReliableBroadcastTest) TestStrongAck() {
- var b *types.Block
- var vids []types.NodeID
-
- rb := newReliableBroadcast()
- vids = genTestCase1(s, rb)
-
- // Check block 0-0 to 0-3 before adding 1-1 and 2-1.
- for i := uint64(0); i < 4; i++ {
- s.Require().Equal(blockStatusInit, rb.blockInfos[rb.lattice[0].blocks[i].Hash].status)
- }
-
- // Add block 1-1 which acks 1-0 and 0-2, and block 0-0 to 0-3 are still
- // in blockStatusInit, because they are not strongly acked.
- b = &types.Block{
- ProposerID: vids[1],
- ParentHash: rb.lattice[1].blocks[0].Hash,
- Hash: common.NewRandomHash(),
- Position: types.Position{
- ChainID: 1,
- Height: 1,
- },
- Timestamp: time.Now().UTC(),
- Acks: common.NewSortedHashes(common.Hashes{
- rb.lattice[0].blocks[2].Hash,
- rb.lattice[1].blocks[0].Hash,
- }),
- }
- var err error
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- s.Require().Nil(rb.processBlock(b))
- s.Require().NotNil(rb.lattice[1].blocks[1])
- for i := uint64(0); i < 4; i++ {
- h := rb.lattice[0].blocks[i].Hash
- s.Require().Equal(blockStatusInit, rb.blockInfos[h].status)
- }
-
- // Add block 2-1 which acks 0-2 and 2-0, block 0-0 to 0-2 are strongly acked but
- // 0-3 is still not.
- b = &types.Block{
- ProposerID: vids[2],
- ParentHash: rb.lattice[2].blocks[0].Hash,
- Hash: common.NewRandomHash(),
- Position: types.Position{
- ChainID: 2,
- Height: 1,
- },
- Timestamp: time.Now().UTC(),
- Acks: common.NewSortedHashes(common.Hashes{
- rb.lattice[0].blocks[2].Hash,
- rb.lattice[2].blocks[0].Hash,
- }),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- s.Require().Nil(rb.processBlock(b))
-
- s.Require().Equal(blockStatusAcked, rb.blockInfos[rb.lattice[0].blocks[0].Hash].status)
- s.Require().Equal(blockStatusAcked, rb.blockInfos[rb.lattice[0].blocks[1].Hash].status)
- s.Require().Equal(blockStatusAcked, rb.blockInfos[rb.lattice[0].blocks[2].Hash].status)
- s.Require().Equal(blockStatusInit, rb.blockInfos[rb.lattice[0].blocks[3].Hash].status)
-}
-
-func (s *ReliableBroadcastTest) TestExtractBlocks() {
- var b *types.Block
- rb := newReliableBroadcast()
- vids := genTestCase1(s, rb)
-
- // Add block 1-1 which acks 1-0, 0-2, 3-0.
- b = &types.Block{
- ProposerID: vids[1],
- ParentHash: rb.lattice[1].blocks[0].Hash,
- Hash: common.NewRandomHash(),
- Position: types.Position{
- ChainID: 1,
- Height: 1,
- },
- Timestamp: time.Now().UTC(),
- Acks: common.NewSortedHashes(common.Hashes{
- rb.lattice[0].blocks[2].Hash,
- rb.lattice[1].blocks[0].Hash,
- rb.lattice[3].blocks[0].Hash,
- }),
- }
- var err error
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- s.Require().Nil(rb.processBlock(b))
-
- // Add block 2-1 which acks 0-2, 2-0, 3-0.
- b = &types.Block{
- ProposerID: vids[2],
- ParentHash: rb.lattice[2].blocks[0].Hash,
- Hash: common.NewRandomHash(),
- Position: types.Position{
- ChainID: 2,
- Height: 1,
- },
- Timestamp: time.Now().UTC(),
- Acks: common.NewSortedHashes(common.Hashes{
- rb.lattice[0].blocks[2].Hash,
- rb.lattice[2].blocks[0].Hash,
- rb.lattice[3].blocks[0].Hash,
- }),
- }
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- s.Require().Nil(rb.processBlock(b))
-
- hashes := []common.Hash{
- rb.lattice[0].blocks[0].Hash,
- rb.lattice[0].blocks[1].Hash,
- rb.lattice[3].blocks[0].Hash,
- }
- hashExtracted := map[common.Hash]*types.Block{}
- for _, b := range rb.extractBlocks() {
- hashExtracted[b.Hash] = b
- s.Require().Equal(blockStatusOrdering, rb.blockInfos[b.Hash].status)
- }
- for _, h := range hashes {
- _, exist := hashExtracted[h]
- s.Require().True(exist)
- }
-}
-
-func (s *ReliableBroadcastTest) TestRandomIntensiveAcking() {
- rb := newReliableBroadcast()
- vids := test.GenerateRandomNodeIDs(4)
- heights := map[types.NodeID]uint64{}
- extractedBlocks := []*types.Block{}
-
- // Generate nodes.
- for _, vid := range vids {
- rb.addNode(vid)
- }
- rb.setChainNum(uint32(len(vids)))
- // Generate genesis blocks.
- for _, vid := range vids {
- b := s.prepareGenesisBlock(vid, vids)
- s.Require().Nil(rb.processBlock(b))
- heights[vid] = 1
- }
-
- for i := 0; i < 5000; i++ {
- id := rand.Int() % len(vids)
- vid := vids[id]
- height := heights[vid]
- heights[vid]++
- parentHash := rb.lattice[id].blocks[height-1].Hash
- acks := common.Hashes{}
- for id2 := range vids {
- if b, exist := rb.lattice[id2].blocks[rb.lattice[id].nextAck[id2]]; exist {
- acks = append(acks, b.Hash)
- }
- }
- b := &types.Block{
- ProposerID: vid,
- ParentHash: parentHash,
- Position: types.Position{
- ChainID: uint32(id),
- Height: height,
- },
- Timestamp: time.Now().UTC(),
- Acks: common.NewSortedHashes(acks),
- }
- var err error
- b.Hash, err = hashBlock(b)
- s.Require().Nil(err)
- s.Require().Nil(rb.processBlock(b))
- extractedBlocks = append(extractedBlocks, rb.extractBlocks()...)
- }
-
- extractedBlocks = append(extractedBlocks, rb.extractBlocks()...)
- // The len of array extractedBlocks should be about 5000.
- s.Require().True(len(extractedBlocks) > 4500)
- // The len of rb.blockInfos should be small if deleting mechanism works.
- // s.True(len(rb.blockInfos) < 500)
-}
-
-func (s *ReliableBroadcastTest) TestRandomlyGeneratedBlocks() {
- var (
- chainNum = uint32(19)
- blockNum = 50
- repeat = 20
- )
-
- // Prepare a randomly generated blocks.
- db, err := blockdb.NewMemBackedBlockDB("test-reliable-broadcast-random.blockdb")
- s.Require().Nil(err)
- defer func() {
- // If the test fails, keep the block database for troubleshooting.
- if s.T().Failed() {
- s.Nil(db.Close())
- }
- }()
- gen := test.NewBlocksGenerator(nil, hashBlock)
- _, err = gen.Generate(chainNum, blockNum, nil, db)
- s.Require().Nil(err)
- iter, err := db.GetAll()
- s.Require().Nil(err)
- // Setup a revealer that would reveal blocks randomly.
- revealer, err := test.NewRandomRevealer(iter)
- s.Require().Nil(err)
-
- stronglyAckedHashesAsString := map[string]struct{}{}
- for i := 0; i < repeat; i++ {
- nodes := map[types.NodeID]struct{}{}
- rb := newReliableBroadcast()
- rb.setChainNum(chainNum)
- stronglyAckedHashes := common.Hashes{}
- revealer.Reset()
-
- for {
- // Reveal next block.
- b, err := revealer.Next()
- if err != nil {
- if err == blockdb.ErrIterationFinished {
- err = nil
- break
- }
- }
- s.Require().Nil(err)
-
- // It's a hack to add node to reliableBroadcast module.
- if _, added := nodes[b.ProposerID]; !added {
- rb.addNode(b.ProposerID)
- nodes[b.ProposerID] = struct{}{}
- }
- // Perform reliable broadcast process.
- s.Require().Nil(rb.processBlock(&b))
- for _, b := range rb.extractBlocks() {
- stronglyAckedHashes = append(stronglyAckedHashes, b.Hash)
- }
- }
- // To make it easier to check, sort hashes of
- // strongly acked blocks, and concatenate them into
- // a string.
- sort.Sort(stronglyAckedHashes)
- asString := ""
- for _, h := range stronglyAckedHashes {
- asString += h.String() + ","
- }
- stronglyAckedHashesAsString[asString] = struct{}{}
- }
- // Make sure concatenated hashes of strongly acked blocks are identical.
- s.Require().Len(stronglyAckedHashesAsString, 1)
- for h := range stronglyAckedHashesAsString {
- // Make sure at least some blocks are strongly acked.
- s.True(len(h) > 0)
- }
-}
-
-func (s *ReliableBroadcastTest) TestPrepareBlock() {
- var (
- req = s.Require()
- rb = newReliableBroadcast()
- minInterval = 50 * time.Millisecond
- nodes = test.GenerateRandomNodeIDs(4)
- )
- // Prepare node IDs.
- for _, vID := range nodes {
- rb.addNode(vID)
- }
- rb.setChainNum(uint32(len(nodes)))
- // Setup genesis blocks.
- b00 := s.prepareGenesisBlock(nodes[0], nodes)
- time.Sleep(minInterval)
- b10 := s.prepareGenesisBlock(nodes[1], nodes)
- time.Sleep(minInterval)
- b20 := s.prepareGenesisBlock(nodes[2], nodes)
- time.Sleep(minInterval)
- b30 := s.prepareGenesisBlock(nodes[3], nodes)
- // Submit these blocks to reliableBroadcast instance.
- s.Require().Nil(rb.processBlock(b00))
- s.Require().Nil(rb.processBlock(b10))
- s.Require().Nil(rb.processBlock(b20))
- s.Require().Nil(rb.processBlock(b30))
- // We should be able to collect all 4 genesis blocks by calling
- // prepareBlock.
- b11 := &types.Block{
- ProposerID: nodes[1],
- Position: types.Position{
- ChainID: 1,
- },
- }
- rb.prepareBlock(b11)
- var err error
- b11.Hash, err = hashBlock(b11)
- s.Require().Nil(err)
- req.Contains(b11.Acks, b00.Hash)
- req.Contains(b11.Acks, b10.Hash)
- req.Contains(b11.Acks, b20.Hash)
- req.Contains(b11.Acks, b30.Hash)
- req.Equal(b11.Timestamp,
- b10.Timestamp.Add(time.Millisecond))
- req.Equal(b11.ParentHash, b10.Hash)
- req.Equal(b11.Position.Height, uint64(1))
- s.Require().Nil(rb.processBlock(b11))
- // Propose/Process a block based on collected info.
- b12 := &types.Block{
- ProposerID: nodes[1],
- Position: types.Position{
- ChainID: 1,
- },
- }
- rb.prepareBlock(b12)
- b12.Hash, err = hashBlock(b12)
- s.Require().Nil(err)
- // This time we only need to ack b11.
- req.Len(b12.Acks, 1)
- req.Contains(b12.Acks, b11.Hash)
- req.Equal(b12.ParentHash, b11.Hash)
- req.Equal(b12.Position.Height, uint64(2))
- // When calling with other node ID, we should be able to
- // get 4 blocks to ack.
- b01 := &types.Block{
- ProposerID: nodes[0],
- Position: types.Position{
- ChainID: 0,
- },
- }
- rb.prepareBlock(b01)
- b01.Hash, err = hashBlock(b01)
- s.Require().Nil(err)
- req.Len(b01.Acks, 4)
- req.Contains(b01.Acks, b00.Hash)
- req.Contains(b01.Acks, b11.Hash)
- req.Contains(b01.Acks, b20.Hash)
- req.Contains(b01.Acks, b30.Hash)
- req.Equal(b01.ParentHash, b00.Hash)
- req.Equal(b01.Position.Height, uint64(1))
-}
-
-func TestReliableBroadcast(t *testing.T) {
- suite.Run(t, new(ReliableBroadcastTest))
-}
diff --git a/core/shard.go b/core/shard.go
index 32f1b79..7085872 100644
--- a/core/shard.go
+++ b/core/shard.go
@@ -30,7 +30,6 @@ import (
// Shard represents a unit to produce a global ordering from multiple chains.
type Shard struct {
lock sync.RWMutex
- ID uint32
authModule *Authenticator
chainNum uint32
app Application
@@ -44,22 +43,23 @@ type Shard struct {
 // NewShard constructs a Shard instance.
func NewShard(
- ID uint32,
cfg *types.Config,
authModule *Authenticator,
app Application,
debug Debug,
db blockdb.BlockDatabase) (s *Shard) {
-
+ lattice := newBlockLattice(
+ cfg.NumChains,
+ cfg.MinBlockInterval,
+ cfg.MaxBlockInterval)
s = &Shard{
- ID: ID,
authModule: authModule,
chainNum: cfg.NumChains,
app: app,
debug: debug,
db: db,
pool: newBlockPool(cfg.NumChains),
- lattice: newBlockLattice(ID, cfg.NumChains),
+ lattice: lattice,
toModule: newTotalOrdering(
uint64(cfg.K),
uint64(float32(cfg.NumChains-1)*cfg.PhiRatio+1),
@@ -188,3 +188,11 @@ func (s *Shard) ProcessBlock(
}
return
}
+
+// NextPosition returns the expected position of the next incoming block on the given chain.
+func (s *Shard) NextPosition(chainID uint32) types.Position {
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+
+ return s.lattice.nextPosition(chainID)
+}
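The new NextPosition accessor lets a caller ask the shard which position the next block on a given chain is expected to occupy before assembling that block. Below is a minimal usage sketch under that assumption; the proposeNext helper and its package are hypothetical, and only Shard.NextPosition and the types shown in this patch are taken from the repository.

package example

import (
    "github.com/dexon-foundation/dexon-consensus-core/core"
    "github.com/dexon-foundation/dexon-consensus-core/core/types"
)

// proposeNext (hypothetical) seeds a fresh block with the position the
// shard's lattice expects for the given chain; acks, timestamps and the
// signature would still be filled in by the usual preparation path.
func proposeNext(shard *core.Shard, chainID uint32) *types.Block {
    b := &types.Block{}
    // NextPosition takes the shard's read lock internally, so no extra
    // synchronization is needed here.
    b.Position = shard.NextPosition(chainID)
    return b
}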
diff --git a/core/shard_test.go b/core/shard_test.go
index d4ceffd..84f230b 100644
--- a/core/shard_test.go
+++ b/core/shard_test.go
@@ -22,6 +22,7 @@ import (
"testing"
"time"
+ "github.com/dexon-foundation/dexon-consensus-core/common"
"github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
"github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa"
"github.com/dexon-foundation/dexon-consensus-core/core/test"
@@ -105,7 +106,6 @@ func (s *ShardTestSuite) newTestShardMgr(cfg *types.Config) *testShardMgr {
app: app,
db: db,
shard: NewShard(
- uint32(0),
cfg,
NewAuthenticator(prvKey),
app,
@@ -124,9 +124,11 @@ func (s *ShardTestSuite) TestBasicUsage() {
req = s.Require()
err error
cfg = types.Config{
- NumChains: chainNum,
- PhiRatio: float32(2) / float32(3),
- K: 0,
+ NumChains: chainNum,
+ PhiRatio: float32(2) / float32(3),
+ K: 0,
+ MinBlockInterval: 0,
+ MaxBlockInterval: 3000 * time.Second,
}
master = s.newTestShardMgr(&cfg)
apps = []*test.App{master.app}
@@ -187,6 +189,37 @@ func (s *ShardTestSuite) TestBasicUsage() {
}
}
+func (s *ShardTestSuite) TestSanityCheck() {
+ // This test focuses on the hash/signature part of the sanity check.
+ var (
+ chainNum = uint32(19)
+ cfg = types.Config{
+ NumChains: chainNum,
+ PhiRatio: float32(2) / float32(3),
+ K: 0,
+ MinBlockInterval: 0,
+ MaxBlockInterval: 3000 * time.Second,
+ }
+ shard = s.newTestShardMgr(&cfg).shard
+ auth = shard.authModule // Borrow the shard's auth module to sign test blocks.
+ req = s.Require()
+ err error
+ )
+ // A block properly signed should pass sanity check.
+ b := &types.Block{
+ Position: types.Position{ChainID: 0},
+ }
+ req.NoError(auth.SignBlock(b))
+ req.NoError(shard.SanityCheck(b))
+ // A block with incorrect signature should not pass sanity check.
+ b.Signature, err = auth.prvKey.Sign(common.NewRandomHash())
+ req.NoError(err)
+ req.Equal(shard.SanityCheck(b), ErrIncorrectSignature)
+ // A block with incorrect hash should not pass sanity check.
+ b.Hash = common.NewRandomHash()
+ req.Equal(shard.SanityCheck(b), ErrIncorrectHash)
+}
+
func TestShard(t *testing.T) {
suite.Run(t, new(ShardTestSuite))
}
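TestSanityCheck above pins down the contract of the exported check: a block signed through the Authenticator passes, a wrong signature is reported as ErrIncorrectSignature, and a tampered hash as ErrIncorrectHash. The following is a minimal sketch of that sign-then-verify flow from outside the core package, using only exported identifiers that appear in this patch; the verifySigned helper itself is hypothetical.

package example

import (
    "github.com/dexon-foundation/dexon-consensus-core/core"
    "github.com/dexon-foundation/dexon-consensus-core/core/types"
)

// verifySigned signs an empty block on chain 0 and runs it through the
// shard's sanity check; nil is expected for an untampered block, while
// core.ErrIncorrectSignature or core.ErrIncorrectHash would indicate
// that the signature or hash no longer matches the block's content.
func verifySigned(shard *core.Shard, auth *core.Authenticator) error {
    b := &types.Block{Position: types.Position{ChainID: 0}}
    if err := auth.SignBlock(b); err != nil {
        return err
    }
    return shard.SanityCheck(b)
}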
diff --git a/core/test/governance.go b/core/test/governance.go
index 6ae2462..ee4491f 100644
--- a/core/test/governance.go
+++ b/core/test/governance.go
@@ -62,7 +62,7 @@ func NewGovernance(nodeCount int, lambda time.Duration) (
DKGComplaint: make(map[uint64][]*types.DKGComplaint),
DKGMasterPublicKey: make(map[uint64][]*types.DKGMasterPublicKey),
RoundInterval: 365 * 86400 * time.Second,
- MinBlockInterval: lambda * 3,
+ MinBlockInterval: 1 * time.Millisecond,
MaxBlockInterval: lambda * 8,
}
for i := 0; i < nodeCount; i++ {
@@ -76,9 +76,9 @@ func NewGovernance(nodeCount int, lambda time.Duration) (
return
}
-// GetNodeSet implements Governance interface to return current
+// NodeSet implements the Governance interface to return the current
// notary set.
-func (g *Governance) GetNodeSet(_ uint64) (
+func (g *Governance) NodeSet(_ uint64) (
ret []crypto.PublicKey) {
for _, key := range g.privateKeys {
ret = append(ret, key.PublicKey())
@@ -86,8 +86,8 @@ func (g *Governance) GetNodeSet(_ uint64) (
return
}
-// GetConfiguration returns the configuration at a given block height.
-func (g *Governance) GetConfiguration(_ uint64) *types.Config {
+// Configuration returns the configuration at a given block height.
+func (g *Governance) Configuration(_ uint64) *types.Config {
return &types.Config{
NumShards: 1,
NumChains: uint32(len(g.privateKeys)),
@@ -104,8 +104,8 @@ func (g *Governance) GetConfiguration(_ uint64) *types.Config {
}
}
-// GetCRS returns the CRS for a given round.
-func (g *Governance) GetCRS(round uint64) common.Hash {
+// CRS returns the CRS for a given round.
+func (g *Governance) CRS(round uint64) common.Hash {
return g.crs[round]
}
@@ -114,9 +114,9 @@ func (g *Governance) ProposeCRS(round uint64, signedCRS []byte) {
g.crs[round] = crypto.Keccak256Hash(signedCRS)
}
-// GetPrivateKeys return the private key for that node, this function
+// PrivateKeys returns the private keys of the nodes; this function
// is a test utility and not a general Governance interface.
-func (g *Governance) GetPrivateKeys() (keys []crypto.PrivateKey) {
+func (g *Governance) PrivateKeys() (keys []crypto.PrivateKey) {
for _, k := range g.privateKeys {
keys = append(keys, k)
}
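Dropping the Get prefix follows Go's convention for accessor names. Taken together, the renamed methods suggest a Governance shape roughly like the sketch below; the authoritative definition lives in core/interfaces.go (also touched by this commit) and may carry additional methods such as ProposeCRS and DKG-related hooks, and the import paths are assumed from those used elsewhere in the tree.

package example

import (
    "github.com/dexon-foundation/dexon-consensus-core/common"
    "github.com/dexon-foundation/dexon-consensus-core/core/crypto"
    "github.com/dexon-foundation/dexon-consensus-core/core/types"
)

// Governance, as implied by the renamed test/simulation implementations.
type Governance interface {
    // NodeSet returns the public keys of the node set for a round.
    NodeSet(round uint64) []crypto.PublicKey
    // Configuration returns the consensus parameters for a round.
    Configuration(round uint64) *types.Config
    // CRS returns the common reference string for a round.
    CRS(round uint64) common.Hash
}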
diff --git a/core/ticker.go b/core/ticker.go
index eac80de..0d2e433 100644
--- a/core/ticker.go
+++ b/core/ticker.go
@@ -65,11 +65,11 @@ func newTicker(gov Governance, round uint64, tickerType TickerType) (t Ticker) {
var duration time.Duration
switch tickerType {
case TickerBA:
- duration = gov.GetConfiguration(round).LambdaBA
+ duration = gov.Configuration(round).LambdaBA
case TickerDKG:
- duration = gov.GetConfiguration(round).LambdaDKG
+ duration = gov.Configuration(round).LambdaDKG
case TickerCRS:
- duration = gov.GetConfiguration(round).RoundInterval / 2
+ duration = gov.Configuration(round).RoundInterval / 2
}
t = newDefaultTicker(duration)
}
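newTicker now resolves its duration through the renamed Configuration accessor (LambdaBA, LambdaDKG, or half of RoundInterval) and hands it to newDefaultTicker, whose body is not part of this patch. As a rough, self-contained stand-in for such a duration-driven ticker, built only on the standard library and with entirely hypothetical names:

package example

import "time"

// sketchTicker mirrors the role newDefaultTicker(duration) plays:
// it fires at a fixed interval until stopped.
type sketchTicker struct {
    ticker *time.Ticker
}

func newSketchTicker(d time.Duration) *sketchTicker {
    // time.NewTicker panics on non-positive durations, so the caller is
    // expected to pass a sane LambdaBA/LambdaDKG/RoundInterval value.
    return &sketchTicker{ticker: time.NewTicker(d)}
}

// Tick exposes the underlying tick channel.
func (t *sketchTicker) Tick() <-chan time.Time { return t.ticker.C }

// Stop releases the ticker's resources.
func (t *sketchTicker) Stop() { t.ticker.Stop() }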
diff --git a/integration_test/node.go b/integration_test/node.go
index bbd604a..2fa3eb6 100644
--- a/integration_test/node.go
+++ b/integration_test/node.go
@@ -85,10 +85,9 @@ func NewNode(
proposingLatency test.LatencyModel) *Node {
var (
- shardID = uint32(0)
chainID = uint32(math.MaxUint32)
- governanceConfig = gov.GetConfiguration(0)
- nodeSetKeys = gov.GetNodeSet(0)
+ governanceConfig = gov.Configuration(0)
+ nodeSetKeys = gov.NodeSet(0)
nodeID = types.NewNodeID(privateKey.PublicKey())
)
broadcastTargets := make(map[types.NodeID]struct{})
@@ -116,7 +115,6 @@ func NewNode(
app: app,
db: db,
shard: core.NewShard(
- shardID,
governanceConfig,
core.NewAuthenticator(privateKey),
app,
diff --git a/integration_test/utils.go b/integration_test/utils.go
index 3e33362..6c665ad 100644
--- a/integration_test/utils.go
+++ b/integration_test/utils.go
@@ -25,7 +25,7 @@ func PrepareNodes(
if err != nil {
return
}
- for _, prvKey := range gov.GetPrivateKeys() {
+ for _, prvKey := range gov.PrivateKeys() {
nID := types.NewNodeID(prvKey.PublicKey())
apps[nID] = test.NewApp()
dbs[nID], err = blockdb.NewMemBackedBlockDB()
diff --git a/simulation/governance.go b/simulation/governance.go
index 8b6c3e5..8a83feb 100644
--- a/simulation/governance.go
+++ b/simulation/governance.go
@@ -78,8 +78,8 @@ func (g *simGovernance) setNetwork(network *network) {
g.network = network
}
-// GetNodeSet returns the current notary set.
-func (g *simGovernance) GetNodeSet(round uint64) (ret []crypto.PublicKey) {
+// NodeSet returns the current notary set.
+func (g *simGovernance) NodeSet(round uint64) (ret []crypto.PublicKey) {
g.lock.RLock()
defer g.lock.RUnlock()
@@ -89,8 +89,8 @@ func (g *simGovernance) GetNodeSet(round uint64) (ret []crypto.PublicKey) {
return
}
-// GetConfiguration returns the configuration at a given round.
-func (g *simGovernance) GetConfiguration(round uint64) *types.Config {
+// Configuration returns the configuration at a given round.
+func (g *simGovernance) Configuration(round uint64) *types.Config {
return &types.Config{
NumShards: 1,
NumChains: g.chainNum,
@@ -107,8 +107,8 @@ func (g *simGovernance) GetConfiguration(round uint64) *types.Config {
}
}
-// GetCRS returns the CRS for a given round.
-func (g *simGovernance) GetCRS(round uint64) common.Hash {
+// CRS returns the CRS for a given round.
+func (g *simGovernance) CRS(round uint64) common.Hash {
return g.crs[round]
}