aboutsummaryrefslogtreecommitdiffstats
path: root/core/test
diff options
context:
space:
mode:
Diffstat (limited to 'core/test')
-rw-r--r--core/test/app.go152
-rw-r--r--core/test/app_test.go220
-rw-r--r--core/test/block-revealer.go214
-rw-r--r--core/test/block-revealer_test.go131
-rw-r--r--core/test/blocks-generator.go379
-rw-r--r--core/test/blocks-generator_test.go323
-rw-r--r--core/test/interface.go16
-rw-r--r--core/test/network.go31
-rw-r--r--core/test/scheduler-event.go79
-rw-r--r--core/test/scheduler.go215
-rw-r--r--core/test/scheduler_test.go176
-rw-r--r--core/test/stopper.go140
-rw-r--r--core/test/stopper_test.go182
13 files changed, 124 insertions, 2134 deletions
diff --git a/core/test/app.go b/core/test/app.go
index 515ed23..1ce5b84 100644
--- a/core/test/app.go
+++ b/core/test/app.go
@@ -50,10 +50,6 @@ var (
// ErrDeliveredBlockNotConfirmed means some block was delivered but never
// confirmed.
ErrDeliveredBlockNotConfirmed = fmt.Errorf("delivered block not confirmed")
- // ErrMismatchTotalOrderingAndDelivered mean the sequence of total ordering
- // and delivered are different.
- ErrMismatchTotalOrderingAndDelivered = fmt.Errorf(
- "mismatch total ordering and delivered sequence")
// ErrAckingBlockNotDelivered means the delivered sequence does not form a
// DAG.
ErrAckingBlockNotDelivered = fmt.Errorf("acking block not delivered")
@@ -70,26 +66,6 @@ var (
ErrParentBlockNotDelivered = fmt.Errorf("parent block not delivered")
)
-// This definition is copied from core package.
-const (
- // TotalOrderingModeError returns mode error.
- TotalOrderingModeError uint32 = iota
- // TotalOrderingModeNormal returns mode normal.
- TotalOrderingModeNormal
- // TotalOrderingModeEarly returns mode early.
- TotalOrderingModeEarly
- // TotalOrderingModeFlush returns mode flush.
- TotalOrderingModeFlush
-)
-
-// AppTotalOrderRecord caches information when this application received
-// a total-ordering deliver notification.
-type AppTotalOrderRecord struct {
- BlockHashes common.Hashes
- Mode uint32
- When time.Time
-}
-
// AppDeliveredRecord caches information when this application received
// a block delivered notification.
type AppDeliveredRecord struct {
@@ -103,9 +79,6 @@ type App struct {
Confirmed map[common.Hash]*types.Block
LastConfirmedHeights map[uint32]uint64
confirmedLock sync.RWMutex
- TotalOrdered []*AppTotalOrderRecord
- TotalOrderedByHash map[common.Hash]*AppTotalOrderRecord
- totalOrderedLock sync.RWMutex
Delivered map[common.Hash]*AppDeliveredRecord
DeliverSequence common.Hashes
deliveredLock sync.RWMutex
@@ -121,8 +94,6 @@ func NewApp(initRound uint64, gov *Governance) (app *App) {
app = &App{
Confirmed: make(map[common.Hash]*types.Block),
LastConfirmedHeights: make(map[uint32]uint64),
- TotalOrdered: []*AppTotalOrderRecord{},
- TotalOrderedByHash: make(map[common.Hash]*AppTotalOrderRecord),
Delivered: make(map[common.Hash]*AppDeliveredRecord),
DeliverSequence: common.Hashes{},
gov: gov,
@@ -223,25 +194,6 @@ func (app *App) BlockConfirmed(b types.Block) {
app.LastConfirmedHeights[b.Position.ChainID] = b.Position.Height
}
-// TotalOrderingDelivered implements Application interface.
-func (app *App) TotalOrderingDelivered(blockHashes common.Hashes, mode uint32) {
- app.totalOrderedLock.Lock()
- defer app.totalOrderedLock.Unlock()
-
- rec := &AppTotalOrderRecord{
- BlockHashes: blockHashes,
- Mode: mode,
- When: time.Now().UTC(),
- }
- app.TotalOrdered = append(app.TotalOrdered, rec)
- for _, h := range blockHashes {
- if _, exists := app.TotalOrderedByHash[h]; exists {
- panic(fmt.Errorf("deliver duplicated blocks from total ordering"))
- }
- app.TotalOrderedByHash[h] = rec
- }
-}
-
// BlockDelivered implements Application interface.
func (app *App) BlockDelivered(
blockHash common.Hash, pos types.Position, result types.FinalizationResult) {
@@ -307,30 +259,33 @@ func (app *App) GetLatestDeliveredPosition() types.Position {
// and return errors if not passed:
// - deliver sequence by comparing block hashes.
// - consensus timestamp of each block are equal.
-func (app *App) Compare(other *App) error {
- app.deliveredLock.RLock()
- defer app.deliveredLock.RUnlock()
- other.deliveredLock.RLock()
- defer other.deliveredLock.RUnlock()
-
- minLength := len(app.DeliverSequence)
- if minLength > len(other.DeliverSequence) {
- minLength = len(other.DeliverSequence)
- }
- if minLength == 0 {
- return ErrEmptyDeliverSequence
- }
- for idx, h := range app.DeliverSequence[:minLength] {
- hOther := other.DeliverSequence[idx]
- if hOther != h {
- return ErrMismatchBlockHashSequence
- }
- if app.Delivered[h].Result.Timestamp !=
- other.Delivered[h].Result.Timestamp {
- return ErrMismatchConsensusTime
- }
- }
- return nil
+func (app *App) Compare(other *App) (err error) {
+ app.WithLock(func(app *App) {
+ other.WithLock(func(other *App) {
+ minLength := len(app.DeliverSequence)
+ if minLength > len(other.DeliverSequence) {
+ minLength = len(other.DeliverSequence)
+ }
+ if minLength == 0 {
+ err = ErrEmptyDeliverSequence
+ return
+ }
+			// Here we assume both Apps begin from the same height.
+ for idx, h := range app.DeliverSequence[:minLength] {
+ hOther := other.DeliverSequence[idx]
+ if hOther != h {
+ err = ErrMismatchBlockHashSequence
+ return
+ }
+ if app.Delivered[h].Result.Timestamp !=
+ other.Delivered[h].Result.Timestamp {
+ err = ErrMismatchConsensusTime
+ return
+ }
+ }
+ })
+ })
+ return
}
// Verify checks the integrity of date received by this App instance.
@@ -371,57 +326,6 @@ func (app *App) Verify() error {
}
expectHeight++
}
- // Check causality.
- revealedDAG := make(map[common.Hash]struct{})
- for _, toDeliver := range app.TotalOrdered {
- for _, h := range toDeliver.BlockHashes {
- b, exists := app.Confirmed[h]
- if !exists {
- return ErrDeliveredBlockNotConfirmed
- }
- for _, ack := range b.Acks {
- if _, ackingBlockExists := revealedDAG[ack]; !ackingBlockExists {
- return ErrAckingBlockNotDelivered
- }
- }
- if toDeliver.Mode == TotalOrderingModeFlush {
- // For blocks delivered by flushing, the acking relations would
- // exist in one deliver set, however, only later block would
- // ack previous block, not backward.
- revealedDAG[h] = struct{}{}
- }
- }
- // For blocks not delivered by flushing, the acking relations only exist
- // between deliver sets.
- if toDeliver.Mode != TotalOrderingModeFlush {
- for _, h := range toDeliver.BlockHashes {
- revealedDAG[h] = struct{}{}
- }
- }
- }
- // Make sure the order of delivered and total ordering are the same by
- // comparing the concated string.
- app.totalOrderedLock.RLock()
- defer app.totalOrderedLock.RUnlock()
-
- hashSequenceIdx := 0
-Loop:
- for _, rec := range app.TotalOrdered {
- for _, h := range rec.BlockHashes {
- if hashSequenceIdx >= len(app.DeliverSequence) {
- break Loop
- }
- if h != app.DeliverSequence[hashSequenceIdx] {
- return ErrMismatchTotalOrderingAndDelivered
- }
- hashSequenceIdx++
- }
- }
- if hashSequenceIdx != len(app.DeliverSequence) {
- // The count of delivered blocks should be larger than those delivered
- // by total ordering.
- return ErrMismatchTotalOrderingAndDelivered
- }
return nil
}
@@ -435,8 +339,6 @@ func (app *App) BlockReady(hash common.Hash) {}
func (app *App) WithLock(function func(*App)) {
app.confirmedLock.RLock()
defer app.confirmedLock.RUnlock()
- app.totalOrderedLock.RLock()
- defer app.totalOrderedLock.RUnlock()
app.deliveredLock.RLock()
defer app.deliveredLock.RUnlock()
app.lastPendingHeightLock.RLock()
diff --git a/core/test/app_test.go b/core/test/app_test.go
index 5ed562a..79518ea 100644
--- a/core/test/app_test.go
+++ b/core/test/app_test.go
@@ -23,52 +23,12 @@ import (
"time"
"github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core"
"github.com/dexon-foundation/dexon-consensus/core/types"
"github.com/stretchr/testify/suite"
)
type AppTestSuite struct {
suite.Suite
-
- to1, to2, to3 *AppTotalOrderRecord
-}
-
-func (s *AppTestSuite) SetupSuite() {
- s.to1 = &AppTotalOrderRecord{
- BlockHashes: common.Hashes{
- common.NewRandomHash(),
- common.NewRandomHash(),
- },
- Mode: core.TotalOrderingModeNormal,
- }
- s.to2 = &AppTotalOrderRecord{
- BlockHashes: common.Hashes{
- common.NewRandomHash(),
- common.NewRandomHash(),
- common.NewRandomHash(),
- },
- Mode: core.TotalOrderingModeNormal,
- }
- s.to3 = &AppTotalOrderRecord{
- BlockHashes: common.Hashes{
- common.NewRandomHash(),
- },
- Mode: core.TotalOrderingModeNormal,
- }
-}
-
-func (s *AppTestSuite) setupAppByTotalOrderDeliver(
- app *App, to *AppTotalOrderRecord) {
- for _, h := range to.BlockHashes {
- app.BlockConfirmed(types.Block{Hash: h})
- }
- app.TotalOrderingDelivered(to.BlockHashes, to.Mode)
- for _, h := range to.BlockHashes {
- // To make it simpler, use the index of hash sequence
- // as the time.
- s.deliverBlockWithTimeFromSequenceLength(app, h)
- }
}
func (s *AppTestSuite) deliverBlockWithTimeFromSequenceLength(
@@ -89,112 +49,94 @@ func (s *AppTestSuite) deliverBlock(
}
func (s *AppTestSuite) TestCompare() {
- req := s.Require()
-
+ var (
+ now = time.Now().UTC()
+ b0 = types.Block{Hash: common.Hash{}}
+ b1 = types.Block{
+ Hash: common.NewRandomHash(),
+ Position: types.Position{Height: 1},
+ }
+ )
+ // Prepare an OK App instance.
app1 := NewApp(0, nil)
- s.setupAppByTotalOrderDeliver(app1, s.to1)
- s.setupAppByTotalOrderDeliver(app1, s.to2)
- s.setupAppByTotalOrderDeliver(app1, s.to3)
- // An App with different deliver sequence.
+ app1.BlockConfirmed(b0)
+ app1.BlockConfirmed(b1)
+ app1.BlockDelivered(b0.Hash, b0.Position, types.FinalizationResult{
+ Height: 1,
+ Timestamp: now,
+ })
+ app1.BlockDelivered(b1.Hash, b1.Position, types.FinalizationResult{
+ Height: 2,
+ Timestamp: now.Add(1 * time.Second),
+ })
app2 := NewApp(0, nil)
- s.setupAppByTotalOrderDeliver(app2, s.to1)
- s.setupAppByTotalOrderDeliver(app2, s.to2)
- hash := common.NewRandomHash()
- app2.BlockConfirmed(types.Block{Hash: hash})
- app2.TotalOrderingDelivered(common.Hashes{hash}, core.TotalOrderingModeNormal)
- s.deliverBlockWithTimeFromSequenceLength(app2, hash)
- req.Equal(ErrMismatchBlockHashSequence, app1.Compare(app2))
- // An App with different consensus time for the same block.
- app3 := NewApp(0, nil)
- s.setupAppByTotalOrderDeliver(app3, s.to1)
- s.setupAppByTotalOrderDeliver(app3, s.to2)
- for _, h := range s.to3.BlockHashes {
- app3.BlockConfirmed(types.Block{Hash: h})
+ s.Require().Equal(ErrEmptyDeliverSequence.Error(),
+ app1.Compare(app2).Error())
+ app2.BlockConfirmed(b0)
+ app2.BlockDelivered(b0.Hash, b0.Position, types.FinalizationResult{
+ Height: 1,
+ Timestamp: now,
+ })
+ b1Bad := types.Block{
+ Hash: common.NewRandomHash(),
+ Position: types.Position{Height: 1},
}
- app3.TotalOrderingDelivered(s.to3.BlockHashes, s.to3.Mode)
- wrongTime := time.Time{}.Add(
- time.Duration(len(app3.DeliverSequence)) * time.Second)
- wrongTime = wrongTime.Add(1 * time.Second)
- s.deliverBlock(app3, s.to3.BlockHashes[0], wrongTime,
- uint64(len(app3.DeliverSequence)+1))
- req.Equal(ErrMismatchConsensusTime, app1.Compare(app3))
- req.Equal(ErrMismatchConsensusTime, app3.Compare(app1))
- // An App without any delivered blocks.
- app4 := NewApp(0, nil)
- req.Equal(ErrEmptyDeliverSequence, app4.Compare(app1))
- req.Equal(ErrEmptyDeliverSequence, app1.Compare(app4))
+ app2.BlockConfirmed(b1Bad)
+ app2.BlockDelivered(b1Bad.Hash, b1Bad.Position, types.FinalizationResult{
+ Height: 1,
+ Timestamp: now,
+ })
+ s.Require().Equal(ErrMismatchBlockHashSequence.Error(),
+ app1.Compare(app2).Error())
+ app2 = NewApp(0, nil)
+ app2.BlockConfirmed(b0)
+ app2.BlockDelivered(b0.Hash, b0.Position, types.FinalizationResult{
+ Height: 1,
+ Timestamp: now.Add(1 * time.Second),
+ })
+ s.Require().Equal(ErrMismatchConsensusTime.Error(),
+ app1.Compare(app2).Error())
}
func (s *AppTestSuite) TestVerify() {
- req := s.Require()
-
- // An OK App instance.
- app1 := NewApp(0, nil)
- s.setupAppByTotalOrderDeliver(app1, s.to1)
- s.setupAppByTotalOrderDeliver(app1, s.to2)
- s.setupAppByTotalOrderDeliver(app1, s.to3)
- req.NoError(app1.Verify())
- // A delivered block without strongly ack
- s.deliverBlock(app1, common.NewRandomHash(), time.Time{},
- uint64(len(app1.DeliverSequence)))
- req.Equal(ErrDeliveredBlockNotConfirmed, app1.Verify())
- // The consensus time is out of order.
- app2 := NewApp(0, nil)
- s.setupAppByTotalOrderDeliver(app2, s.to1)
- for _, h := range s.to2.BlockHashes {
- app2.BlockConfirmed(types.Block{Hash: h})
- }
- app2.TotalOrderingDelivered(s.to2.BlockHashes, s.to2.Mode)
- s.deliverBlock(app2, s.to2.BlockHashes[0], time.Time{},
- uint64(len(app2.DeliverSequence)+1))
- req.Equal(ErrConsensusTimestampOutOfOrder, app2.Verify())
- // A delivered block is not found in total ordering delivers.
- app3 := NewApp(0, nil)
- s.setupAppByTotalOrderDeliver(app3, s.to1)
- hash := common.NewRandomHash()
- app3.BlockConfirmed(types.Block{Hash: hash})
- s.deliverBlockWithTimeFromSequenceLength(app3, hash)
- req.Equal(ErrMismatchTotalOrderingAndDelivered, app3.Verify())
- // A delivered block is not found in total ordering delivers.
- app4 := NewApp(0, nil)
- s.setupAppByTotalOrderDeliver(app4, s.to1)
- for _, h := range s.to2.BlockHashes {
- app4.BlockConfirmed(types.Block{Hash: h})
- }
- app4.TotalOrderingDelivered(s.to2.BlockHashes, s.to2.Mode)
- hash = common.NewRandomHash()
- app4.BlockConfirmed(types.Block{Hash: hash})
- app4.TotalOrderingDelivered(common.Hashes{hash}, core.TotalOrderingModeNormal)
- s.deliverBlockWithTimeFromSequenceLength(app4, hash)
- // Witness ack on unknown block.
- app5 := NewApp(0, nil)
- s.setupAppByTotalOrderDeliver(app5, s.to1)
- // The conensus height is out of order.
- app6 := NewApp(0, nil)
- s.setupAppByTotalOrderDeliver(app6, s.to1)
- for _, h := range s.to2.BlockHashes {
- app6.BlockConfirmed(types.Block{Hash: h})
- }
- app6.TotalOrderingDelivered(s.to2.BlockHashes, s.to2.Mode)
- s.deliverBlock(app6, s.to2.BlockHashes[0], time.Time{}.Add(
- time.Duration(len(app6.DeliverSequence))*time.Second),
- uint64(len(app6.DeliverSequence)+2))
- req.Equal(ErrConsensusHeightOutOfOrder, app6.Verify())
- // Test the acking block doesn't delivered.
- app7 := NewApp(0, nil)
- // Patch a block's acks.
- b7 := &types.Block{
- Hash: common.NewRandomHash(),
- Acks: common.NewSortedHashes(common.Hashes{common.NewRandomHash()}),
- }
- app7.BlockConfirmed(*b7)
- app7.TotalOrderingDelivered(
- common.Hashes{b7.Hash}, core.TotalOrderingModeNormal)
- app7.BlockDelivered(b7.Hash, types.Position{}, types.FinalizationResult{
- Timestamp: time.Now(),
+ var (
+ now = time.Now().UTC()
+ b0 = types.Block{Hash: common.Hash{}}
+ b1 = types.Block{
+ Hash: common.NewRandomHash(),
+ Position: types.Position{Height: 1},
+ }
+ )
+ app := NewApp(0, nil)
+ s.Require().Equal(ErrEmptyDeliverSequence.Error(), app.Verify().Error())
+ app.BlockDelivered(b0.Hash, b0.Position, types.FinalizationResult{})
+ app.BlockDelivered(b1.Hash, b1.Position, types.FinalizationResult{Height: 1})
+ s.Require().Equal(
+ ErrDeliveredBlockNotConfirmed.Error(), app.Verify().Error())
+ app = NewApp(0, nil)
+ app.BlockConfirmed(b0)
+ app.BlockDelivered(b0.Hash, b0.Position, types.FinalizationResult{
+ Height: 1,
+ Timestamp: now,
+ })
+ app.BlockConfirmed(b1)
+ app.BlockDelivered(b1.Hash, b1.Position, types.FinalizationResult{
+ Height: 2,
+ Timestamp: now.Add(-1 * time.Second),
+ })
+ s.Require().Equal(ErrConsensusTimestampOutOfOrder.Error(),
+ app.Verify().Error())
+ app = NewApp(0, nil)
+ app.BlockConfirmed(b0)
+ app.BlockConfirmed(b1)
+ app.BlockDelivered(b0.Hash, b0.Position, types.FinalizationResult{
+ Height: 1,
+ Timestamp: now,
+ })
+ app.BlockDelivered(b1.Hash, b1.Position, types.FinalizationResult{
Height: 1,
+ Timestamp: now.Add(1 * time.Second),
})
- req.Equal(ErrAckingBlockNotDelivered, app7.Verify())
}
func (s *AppTestSuite) TestWitness() {
@@ -255,7 +197,7 @@ func (s *AppTestSuite) TestWitness() {
s.Require().Equal(app.LastPendingHeight, uint64(3))
// We can only prepare witness for what've delivered.
_, err := app.PrepareWitness(4)
- s.Require().IsType(err, ErrLowerPendingHeight)
+ s.Require().Equal(err.Error(), ErrLowerPendingHeight.Error())
// It should be ok to prepare for height that already delivered.
w, err := app.PrepareWitness(3)
s.Require().NoError(err)
diff --git a/core/test/block-revealer.go b/core/test/block-revealer.go
index ebd2e35..90b3d3e 100644
--- a/core/test/block-revealer.go
+++ b/core/test/block-revealer.go
@@ -19,9 +19,7 @@ package test
import (
"errors"
- "math/rand"
"sort"
- "time"
"github.com/dexon-foundation/dexon-consensus/common"
"github.com/dexon-foundation/dexon-consensus/core/db"
@@ -65,217 +63,11 @@ func loadAllBlocks(iter db.BlockIterator) (
return
}
-// RandomDAGBlockRevealer implements BlockRevealer interface, which would load
-// all blocks from db, and randomly pick one block to reveal if it still forms
-// a valid DAG in revealed blocks.
-type RandomDAGBlockRevealer struct {
- // blocksByChain group all blocks by chains and sorting
- // them by height.
- blocksByChain map[uint32][]*types.Block
- // tipIndexes store the height of next block from one chain
- // to check if is candidate.
- tipIndexes map[uint32]int
- // candidate are blocks that forms valid DAG with
- // current revealed blocks.
- candidates []*types.Block
- candidateChains map[uint32]struct{}
- // revealed stores block hashes of current revealed blocks.
- revealed map[common.Hash]struct{}
- randGen *rand.Rand
-}
-
-// NewRandomDAGBlockRevealer constructs RandomDAGBlockRevealer.
-func NewRandomDAGBlockRevealer(
- iter db.BlockIterator) (r *RandomDAGBlockRevealer, err error) {
-
- blocks, err := loadAllBlocks(iter)
- if err != nil {
- return
- }
-
- // Rearrange blocks by nodes and height.
- blocksByChain := make(map[uint32][]*types.Block)
- for _, block := range blocks {
- blocksByChain[block.Position.ChainID] =
- append(blocksByChain[block.Position.ChainID], block)
- }
- // Make sure blocks are sorted by block heights, from lower to higher.
- for chainID := range blocksByChain {
- sort.Sort(types.ByPosition(blocksByChain[chainID]))
- }
- r = &RandomDAGBlockRevealer{
- blocksByChain: blocksByChain,
- randGen: rand.New(rand.NewSource(time.Now().UnixNano())),
- candidateChains: make(map[uint32]struct{}),
- }
- // Make sure this revealer is ready to use.
- r.Reset()
- return
-}
-
-// pickCandidates is a helper function to pick candidates from current tips.
-func (r *RandomDAGBlockRevealer) pickCandidates() {
- for chainID, tip := range r.tipIndexes {
- if _, isPicked := r.candidateChains[chainID]; isPicked {
- continue
- }
- blocks, exists := r.blocksByChain[chainID]
- if !exists {
- continue
- }
- if tip >= len(blocks) {
- continue
- }
- block := blocks[tip]
- if isAllAckingBlockRevealed(block, r.revealed) {
- r.tipIndexes[chainID]++
- r.candidates = append(r.candidates, block)
- r.candidateChains[chainID] = struct{}{}
- }
- }
-}
-
-// NextBlock implement Revealer.Next method, which would reveal blocks
-// forming valid DAGs.
-func (r *RandomDAGBlockRevealer) NextBlock() (types.Block, error) {
- if len(r.candidates) == 0 {
- r.pickCandidates()
- if len(r.candidates) == 0 {
- return types.Block{}, db.ErrIterationFinished
- }
- }
-
- // Pick next block to be revealed.
- picked := r.randGen.Intn(len(r.candidates))
- block := r.candidates[picked]
- r.candidates =
- append(r.candidates[:picked], r.candidates[picked+1:]...)
- delete(r.candidateChains, block.Position.ChainID)
- r.revealed[block.Hash] = struct{}{}
- r.pickCandidates()
- return *block, nil
-}
-
-// Reset implement Revealer.Reset method, which would reset the revealing.
-func (r *RandomDAGBlockRevealer) Reset() {
- r.tipIndexes = make(map[uint32]int)
- for chainID := range r.blocksByChain {
- r.tipIndexes[chainID] = 0
- }
- r.revealed = make(map[common.Hash]struct{})
- r.candidates = []*types.Block{}
-}
-
-// RandomBlockRevealer implements BlockRevealer interface, which would load
-// all blocks from db, and randomly pick one block to reveal.
-type RandomBlockRevealer struct {
- blocks map[common.Hash]*types.Block
- remains common.Hashes
- randGen *rand.Rand
-}
-
-// NewRandomBlockRevealer constructs RandomBlockRevealer.
-func NewRandomBlockRevealer(
- iter db.BlockIterator) (r *RandomBlockRevealer, err error) {
-
- blocks, err := loadAllBlocks(iter)
- if err != nil {
- return
- }
- r = &RandomBlockRevealer{
- blocks: blocks,
- randGen: rand.New(rand.NewSource(time.Now().UnixNano())),
- }
- r.Reset()
- return
-}
-
-// NextBlock implements Revealer.NextBlock method, which would reveal blocks
-// randomly.
-func (r *RandomBlockRevealer) NextBlock() (types.Block, error) {
- if len(r.remains) == 0 {
- return types.Block{}, db.ErrIterationFinished
- }
-
- picked := r.randGen.Intn(len(r.remains))
- block := r.blocks[r.remains[picked]]
- r.remains =
- append(r.remains[:picked], r.remains[picked+1:]...)
- return *block, nil
-}
-
-// Reset implement Revealer.Reset method, which would reset revealing.
-func (r *RandomBlockRevealer) Reset() {
- hashes := common.Hashes{}
- for hash := range r.blocks {
- hashes = append(hashes, hash)
- }
- r.remains = hashes
-}
-
-// RandomTipBlockRevealer implements BlockRevealer interface, which would load
-// all blocks from db, and randomly pick one chain's tip to reveal.
-type RandomTipBlockRevealer struct {
- chainsBlock []map[uint64]*types.Block
- chainTip []uint64
- chainRevealSeq []uint32
- revealed int
- randGen *rand.Rand
-}
-
-// NewRandomTipBlockRevealer constructs RandomTipBlockRevealer.
-func NewRandomTipBlockRevealer(
- iter db.BlockIterator) (r *RandomTipBlockRevealer, err error) {
-
- blocks, err := loadAllBlocks(iter)
- if err != nil {
- return
- }
- r = &RandomTipBlockRevealer{
- randGen: rand.New(rand.NewSource(time.Now().UnixNano())),
- }
- for _, b := range blocks {
- for b.Position.ChainID >= uint32(len(r.chainsBlock)) {
- r.chainsBlock = append(r.chainsBlock, make(map[uint64]*types.Block))
- r.chainTip = append(r.chainTip, 0)
- }
- r.chainsBlock[b.Position.ChainID][b.Position.Height] = b
- r.chainRevealSeq = append(r.chainRevealSeq, b.Position.ChainID)
- }
- r.Reset()
- return
-}
-
-// NextBlock implements Revealer.Next method, which would reveal blocks randomly.
-func (r *RandomTipBlockRevealer) NextBlock() (types.Block, error) {
- if len(r.chainRevealSeq) == r.revealed {
- return types.Block{}, db.ErrIterationFinished
- }
-
- picked := r.chainRevealSeq[r.revealed]
- r.revealed++
- block := r.chainsBlock[picked][r.chainTip[picked]]
- r.chainTip[picked]++
- return *block, nil
-}
-
-// Reset implement Revealer.Reset method, which would reset revealing.
-func (r *RandomTipBlockRevealer) Reset() {
- r.revealed = 0
- r.randGen.Shuffle(len(r.chainRevealSeq), func(i, j int) {
- r.chainRevealSeq[i], r.chainRevealSeq[j] =
- r.chainRevealSeq[j], r.chainRevealSeq[i]
- })
- for i := range r.chainTip {
- r.chainTip[i] = 0
- }
-}
-
// CompactionChainBlockRevealer implements BlockRevealer interface, which would
// load all blocks from db, reveal them in the order of compaction chain,
// from the genesis block to the latest one.
type CompactionChainBlockRevealer struct {
- blocks types.ByFinalizationHeight
+ blocks types.BlocksByFinalizationHeight
nextRevealIndex int
}
@@ -290,14 +82,14 @@ func NewCompactionChainBlockRevealer(iter db.BlockIterator,
if startHeight == 0 {
startHeight = 1
}
- blocks := types.ByFinalizationHeight{}
+ blocks := types.BlocksByFinalizationHeight{}
for _, b := range blocksByHash {
if b.Finalization.Height < startHeight {
continue
}
blocks = append(blocks, b)
}
- sort.Sort(types.ByFinalizationHeight(blocks))
+ sort.Sort(types.BlocksByFinalizationHeight(blocks))
// Make sure the finalization height of blocks are incremental with step 1.
for idx, b := range blocks {
if idx == 0 {
diff --git a/core/test/block-revealer_test.go b/core/test/block-revealer_test.go
index 0e56eea..54432e8 100644
--- a/core/test/block-revealer_test.go
+++ b/core/test/block-revealer_test.go
@@ -19,7 +19,6 @@ package test
import (
"testing"
- "time"
"github.com/dexon-foundation/dexon-consensus/common"
"github.com/dexon-foundation/dexon-consensus/core/db"
@@ -29,132 +28,6 @@ import (
type BlockRevealerTestSuite struct {
suite.Suite
-
- db db.Database
- totalBlockCount int
-}
-
-func (s *BlockRevealerTestSuite) SetupSuite() {
- var (
- err error
- genesisTime = time.Now().UTC()
- )
- // Setup block database.
- s.db, err = db.NewMemBackedDB()
- s.Require().NoError(err)
-
- // Randomly generate blocks.
- config := &BlocksGeneratorConfig{
- NumChains: 19,
- MinBlockTimeInterval: 250 * time.Millisecond,
- }
- gen := NewBlocksGenerator(config, nil)
- s.Require().NoError(gen.Generate(
- 0,
- genesisTime,
- genesisTime.Add(30*time.Second),
- s.db))
- // Cache the count of total generated block.
- iter, err := s.db.GetAllBlocks()
- s.Require().NoError(err)
- blocks, err := loadAllBlocks(iter)
- s.Require().NoError(err)
- s.totalBlockCount = len(blocks)
-}
-
-func (s *BlockRevealerTestSuite) baseTest(
- revealer BlockRevealer,
- repeat int,
- checkFunc func(*types.Block, map[common.Hash]struct{})) {
-
- revealingSequence := map[string]struct{}{}
- for i := 0; i < repeat; i++ {
- revealed := map[common.Hash]struct{}{}
- sequence := ""
- for {
- b, err := revealer.NextBlock()
- if err != nil {
- if err == db.ErrIterationFinished {
- err = nil
- break
- }
- s.Require().NotNil(err)
- }
- checkFunc(&b, revealed)
- revealed[b.Hash] = struct{}{}
- sequence += b.Hash.String() + ","
- }
- s.Len(revealed, s.totalBlockCount)
- revealingSequence[sequence] = struct{}{}
- revealer.Reset()
- }
- // It should be reasonable to reveal at least two
- // different sequence.
- s.True(len(revealingSequence) > 1)
-
-}
-
-func (s *BlockRevealerTestSuite) TestRandomBlockReveal() {
- // This test case would make sure we could at least generate
- // two different revealing sequence when revealing more than
- // 10 times.
- iter, err := s.db.GetAllBlocks()
- s.Require().Nil(err)
- revealer, err := NewRandomBlockRevealer(iter)
- s.Require().Nil(err)
-
- checkFunc := func(b *types.Block, revealed map[common.Hash]struct{}) {
- // Make sure the revealer won't reveal the same block twice.
- _, alreadyRevealed := revealed[b.Hash]
- s.False(alreadyRevealed)
- }
- s.baseTest(revealer, 10, checkFunc)
-}
-
-func (s *BlockRevealerTestSuite) TestRandomDAGBlockReveal() {
- // This test case would make sure we could at least generate
- // two different revealing sequence when revealing more than
- // 10 times, and each of them would form valid DAGs during
- // revealing.
-
- iter, err := s.db.GetAllBlocks()
- s.Require().Nil(err)
- revealer, err := NewRandomDAGBlockRevealer(iter)
- s.Require().Nil(err)
-
- checkFunc := func(b *types.Block, revealed map[common.Hash]struct{}) {
- // Make sure this revealer won't reveal
- // the same block twice.
- _, alreadyRevealed := revealed[b.Hash]
- s.False(alreadyRevealed)
- // Make sure the newly revealed block would still
- // form a valid DAG after added to revealed blocks.
- s.True(isAllAckingBlockRevealed(b, revealed))
- }
- s.baseTest(revealer, 10, checkFunc)
-}
-
-func (s *BlockRevealerTestSuite) TestRandomTipBlockReveal() {
- // This test case would make sure we could at least generate
- // two different revealing sequence when revealing more than
- // 10 times.
- iter, err := s.db.GetAllBlocks()
- s.Require().Nil(err)
- revealer, err := NewRandomTipBlockRevealer(iter)
- s.Require().Nil(err)
-
- checkFunc := func(b *types.Block, revealed map[common.Hash]struct{}) {
- // Make sure the revealer won't reveal the same block twice.
- _, alreadyRevealed := revealed[b.Hash]
- s.False(alreadyRevealed)
- // Make sure the parent is already revealed.
- if b.Position.Height == 0 {
- return
- }
- _, alreadyRevealed = revealed[b.ParentHash]
- s.True(alreadyRevealed)
- }
- s.baseTest(revealer, 10, checkFunc)
}
func (s *BlockRevealerTestSuite) TestCompactionChainBlockReveal() {
@@ -186,7 +59,7 @@ func (s *BlockRevealerTestSuite) TestCompactionChainBlockReveal() {
// instance successfully.
r, err := NewCompactionChainBlockRevealer(iter, 0)
s.Require().Nil(r)
- s.Require().IsType(ErrNotValidCompactionChain, err)
+ s.Require().Equal(ErrNotValidCompactionChain.Error(), err.Error())
// Put a block to make the compaction chain complete.
s.Require().NoError(dbInst.PutBlock(*b2))
// We can construct that revealer now.
@@ -206,7 +79,7 @@ func (s *BlockRevealerTestSuite) TestCompactionChainBlockReveal() {
chk(3)
// Iteration should be finished
_, err = r.NextBlock()
- s.Require().IsType(db.ErrIterationFinished, err)
+ s.Require().Equal(db.ErrIterationFinished.Error(), err.Error())
// Test 'startHeight' parameter.
iter, err = dbInst.GetAllBlocks()
s.Require().NoError(err)
diff --git a/core/test/blocks-generator.go b/core/test/blocks-generator.go
deleted file mode 100644
index 5f1dbea..0000000
--- a/core/test/blocks-generator.go
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package test
-
-import (
- "errors"
- "math"
- "math/rand"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
- "github.com/dexon-foundation/dexon-consensus/core/db"
- "github.com/dexon-foundation/dexon-consensus/core/types"
- "github.com/dexon-foundation/dexon-consensus/core/utils"
-)
-
-// ErrParentNotAcked would be raised when some block doesn't
-// ack its parent block.
-var ErrParentNotAcked = errors.New("parent is not acked")
-
-// nodeStatus is a state holder for each node
-// during generating blocks.
-type nodeStatus struct {
- blocks []*types.Block
- genesisTime time.Time
- signer *utils.Signer
- tip *types.Block
- nextAckingIndex map[types.NodeID]uint64
-}
-
-// getAckedBlockHash would randomly pick one block between
-// last acked one to current head.
-func (ns *nodeStatus) getAckedBlockHash(
- ackedNID types.NodeID,
- ackedNode *nodeStatus,
- randGen *rand.Rand) (
- hash common.Hash, ok bool) {
- baseAckingIndex := ns.nextAckingIndex[ackedNID]
- totalBlockCount := uint64(len(ackedNode.blocks))
- if totalBlockCount <= baseAckingIndex {
- // There is no new block to ack.
- return
- }
- ackableRange := totalBlockCount - baseAckingIndex
- idx := uint64((randGen.Uint64() % ackableRange) + baseAckingIndex)
- ns.nextAckingIndex[ackedNID] = idx + 1
- hash = ackedNode.blocks[idx].Hash
- ok = true
- return
-}
-
-func (ns *nodeStatus) getNextBlockTime(
- timePicker func(time.Time) time.Time) time.Time {
- if ns.tip == nil {
- return timePicker(ns.genesisTime)
- }
- return timePicker(ns.tip.Timestamp)
-}
-
-// nodeSetStatus is a state holder for all nodes
-// during generating blocks.
-type nodeSetStatus struct {
- round uint64
- status map[types.NodeID]*nodeStatus
- proposerChain map[types.NodeID]uint32
- endTime time.Time
- nIDs []types.NodeID
- randGen *rand.Rand
- timePicker func(time.Time) time.Time
-}
-
-func newNodeSetStatus(
- numChains uint32,
- tips map[uint32]*types.Block,
- round uint64,
- genesisTime, endTime time.Time,
- timePicker func(time.Time) time.Time) *nodeSetStatus {
- var (
- status = make(map[types.NodeID]*nodeStatus)
- proposerChain = make(map[types.NodeID]uint32)
- nIDs = []types.NodeID{}
- )
- for i := uint32(0); i < numChains; i++ {
- prvKey, err := ecdsa.NewPrivateKey()
- if err != nil {
- panic(err)
- }
- nID := types.NewNodeID(prvKey.PublicKey())
- nIDs = append(nIDs, nID)
- status[nID] = &nodeStatus{
- blocks: []*types.Block{},
- genesisTime: genesisTime,
- signer: utils.NewSigner(prvKey),
- tip: tips[i],
- nextAckingIndex: make(map[types.NodeID]uint64),
- }
- proposerChain[nID] = i
- }
- return &nodeSetStatus{
- round: round,
- status: status,
- proposerChain: proposerChain,
- endTime: endTime,
- nIDs: nIDs,
- randGen: rand.New(rand.NewSource(time.Now().UnixNano())),
- timePicker: timePicker,
- }
-}
-
-// findIncompleteNodes is a helper to check which node doesn't generate
-// enough blocks.
-func (ns *nodeSetStatus) findIncompleteNodes() (nIDs []types.NodeID) {
- for nID, status := range ns.status {
- if status.tip == nil {
- nIDs = append(nIDs, nID)
- continue
- }
- if status.tip.Timestamp.After(ns.endTime) {
- continue
- }
- nIDs = append(nIDs, nID)
- }
- return
-}
-
-// prepareAcksForNewBlock collects acks for one block.
-func (ns *nodeSetStatus) prepareAcksForNewBlock(
- proposerID types.NodeID, ackingCount int) (
- acks common.Hashes, err error) {
- acks = common.Hashes{}
- if len(ns.status[proposerID].blocks) == 0 {
- // The 'Acks' filed of genesis blocks would always be empty.
- return
- }
- // Pick nodeIDs to be acked.
- ackingNIDs := map[types.NodeID]struct{}{}
- if ackingCount > 0 {
- ackingCount-- // We would always include ack to parent block.
- }
- for _, i := range ns.randGen.Perm(len(ns.nIDs))[:ackingCount] {
- ackingNIDs[ns.nIDs[i]] = struct{}{}
- }
- // Generate acks.
- for nID := range ackingNIDs {
- if nID == proposerID {
- continue
- }
- ack, ok := ns.status[proposerID].getAckedBlockHash(
- nID, ns.status[nID], ns.randGen)
- if !ok {
- if nID == proposerID {
- err = ErrParentNotAcked
- }
- continue
- }
- acks = append(acks, ack)
- }
- return
-}
-
-// proposeBlock propose new block and update node status.
-func (ns *nodeSetStatus) proposeBlock(
- proposerID types.NodeID, acks common.Hashes) (*types.Block, error) {
- status := ns.status[proposerID]
- parentHash := common.Hash{}
- blockHeight := uint64(0)
- if status.tip != nil {
- parentHash = status.tip.Hash
- blockHeight = status.tip.Position.Height + 1
- acks = append(acks, parentHash)
- }
- chainID := ns.proposerChain[proposerID]
- newBlock := &types.Block{
- ParentHash: parentHash,
- Position: types.Position{
- Round: ns.round,
- Height: blockHeight,
- ChainID: chainID,
- },
- Timestamp: status.getNextBlockTime(ns.timePicker),
- }
- newBlock.Acks = common.NewSortedHashes(acks)
- if err := status.signer.SignBlock(newBlock); err != nil {
- return nil, err
- }
- status.blocks = append(status.blocks, newBlock)
- status.tip = newBlock
- return newBlock, nil
-}
-
-// normalAckingCountGenerator would randomly pick acking count
-// by a normal distribution.
-func normalAckingCountGenerator(
- chainNum uint32, mean, deviation float64) func() int {
- return func() int {
- var expected float64
- for {
- expected = rand.NormFloat64()*deviation + mean
- if expected >= 0 && expected <= float64(chainNum) {
- break
- }
- }
- return int(math.Ceil(expected))
- }
-}
-
-// MaxAckingCountGenerator return generator which returns
-// fixed maximum acking count.
-func MaxAckingCountGenerator(count uint32) func() int {
- return func() int { return int(count) }
-}
-
-// generateNodePicker is a function generator, which would generate
-// a function to randomly pick one node ID from a slice of node ID.
-func generateNodePicker() func([]types.NodeID) types.NodeID {
- privateRand := rand.New(rand.NewSource(time.Now().UnixNano()))
- return func(nIDs []types.NodeID) types.NodeID {
- return nIDs[privateRand.Intn(len(nIDs))]
- }
-}
-
-// defaultTimePicker would pick a time based on reference time plus min.
-func generateTimePicker(min time.Duration) (f func(time.Time) time.Time) {
- privateRand := rand.New(rand.NewSource(time.Now().UnixNano()))
- return func(ref time.Time) time.Time {
- return ref.Add(min + time.Duration(
- privateRand.Int63n(int64(500*time.Millisecond))))
- }
-}
-
-// BlocksGeneratorConfig is the configuration for BlocksGenerator.
-type BlocksGeneratorConfig struct {
- NumChains uint32
- MinBlockTimeInterval time.Duration
-}
-
-// NewBlocksGeneratorConfig construct a BlocksGeneratorConfig instance.
-func NewBlocksGeneratorConfig(c *types.Config) *BlocksGeneratorConfig {
- return &BlocksGeneratorConfig{
- NumChains: c.NumChains,
- MinBlockTimeInterval: c.MinBlockInterval,
- }
-}
-
-// BlocksGenerator could generate blocks forming valid DAGs.
-type BlocksGenerator struct {
- config *BlocksGeneratorConfig
- nodePicker func([]types.NodeID) types.NodeID
- timePicker func(time.Time) time.Time
- ackingCountGenerator func() int
-}
-
-// NewBlocksGenerator constructs BlockGenerator.
-//
-// The caller is responsible to provide a function to generate count of
-// acked block for each new block. The prototype of ackingCountGenerator is
-// a function returning 'int'. For example, if you need to generate a group of
-// blocks and each of them has maximum 2 acks.
-// func () int { return 2 }
-// The default ackingCountGenerator would randomly pick a number based on
-// the nodeCount you provided with a normal distribution.
-func NewBlocksGenerator(
- config *BlocksGeneratorConfig,
- ackingCountGenerator func() int) *BlocksGenerator {
- if config.MinBlockTimeInterval == time.Duration(0) {
- panic(errors.New("min block interval cannot be 0"))
- }
- if ackingCountGenerator == nil {
- ackingCountGenerator = normalAckingCountGenerator(
- config.NumChains,
- float64(config.NumChains/5),
- float64(config.NumChains/7+1))
- }
- timePicker := generateTimePicker(config.MinBlockTimeInterval)
- return &BlocksGenerator{
- config: config,
- nodePicker: generateNodePicker(),
- timePicker: timePicker,
- ackingCountGenerator: ackingCountGenerator,
- }
-}
-
-// Generate is the entry point to generate blocks in one round.
-func (gen *BlocksGenerator) Generate(
- roundID uint64,
- roundBegin, roundEnd time.Time,
- dbInst db.Database) (err error) {
- // Find tips of previous round if available.
- tips := make(map[uint32]*types.Block)
- if roundID > 0 {
- tips, err = gen.findTips(roundID-1, dbInst)
- if err != nil {
- return
- }
- }
- status := newNodeSetStatus(gen.config.NumChains, tips, roundID,
- roundBegin, roundEnd, gen.timePicker)
- // We would record the smallest height of block that could be acked
- // from each node's point-of-view.
- toAck := make(map[types.NodeID]map[types.NodeID]uint64)
- for _, nID := range status.nIDs {
- toAck[nID] = make(map[types.NodeID]uint64)
- }
- for {
- // Find nodes that doesn't propose enough blocks and
- // pick one from them randomly.
- notYet := status.findIncompleteNodes()
- if len(notYet) == 0 {
- break
- }
- // Propose a new block.
- var (
- proposerID = gen.nodePicker(notYet)
- acks common.Hashes
- )
- if acks, err = status.prepareAcksForNewBlock(
- proposerID, gen.ackingCountGenerator()); err != nil {
- return
- }
- var newBlock *types.Block
- if newBlock, err = status.proposeBlock(proposerID, acks); err != nil {
- return
- }
- // Persist block to db.
- if err = dbInst.PutBlock(*newBlock); err != nil {
- return
- }
- }
- return
-}
-
-// findTips is an utility to find tips of each chain in that round in db.
-func (gen *BlocksGenerator) findTips(round uint64, dbInst db.Reader) (
- tips map[uint32]*types.Block, err error) {
- iter, err := dbInst.GetAllBlocks()
- if err != nil {
- return
- }
- revealer, err := NewRandomBlockRevealer(iter)
- if err != nil {
- return
- }
- tips = make(map[uint32]*types.Block)
- for {
- var b types.Block
- if b, err = revealer.NextBlock(); err != nil {
- if err == db.ErrIterationFinished {
- err = nil
- break
- }
- return
- }
- if b.Position.Round != round {
- continue
- }
- tip, exists := tips[b.Position.ChainID]
- if exists && tip.Position.Height > b.Position.Height {
- continue
- }
- tips[b.Position.ChainID] = &b
- }
- return
-}
diff --git a/core/test/blocks-generator_test.go b/core/test/blocks-generator_test.go
deleted file mode 100644
index bcbd749..0000000
--- a/core/test/blocks-generator_test.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package test
-
-import (
- "sort"
- "testing"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/db"
- "github.com/dexon-foundation/dexon-consensus/core/types"
- "github.com/stretchr/testify/suite"
-)
-
-type BlocksGeneratorTestSuite struct {
- suite.Suite
-}
-
-func (s *BlocksGeneratorTestSuite) TestGenerate() {
- // This test case is to make sure the generated blocks are legimate.
- var (
- config = &BlocksGeneratorConfig{
- NumChains: 19,
- MinBlockTimeInterval: 200 * time.Millisecond,
- }
- gen = NewBlocksGenerator(config, nil)
- req = s.Require()
- beginTime = time.Now().UTC()
- endTime = beginTime.Add(time.Minute)
- )
- dbInst, err := db.NewMemBackedDB()
- req.NoError(err)
- req.NoError(gen.Generate(1, beginTime, endTime, dbInst))
- // Load all blocks in that database for further checking.
- iter, err := dbInst.GetAllBlocks()
- req.NoError(err)
- blocksByChain := make(map[uint32][]*types.Block)
- blocksByHash := make(map[common.Hash]*types.Block)
- for {
- block, err := iter.NextBlock()
- if err == db.ErrIterationFinished {
- break
- }
- req.NoError(err)
- // TODO(mission): Make sure each block is correctly signed once
- // we have a way to access core.hashBlock.
- req.NotEqual(block.Hash, common.Hash{})
- if !block.IsEmpty() {
- req.NotEmpty(block.Signature)
- }
- req.Equal(block.Position.Round, uint64(1))
- blocksByChain[block.Position.ChainID] =
- append(blocksByChain[block.Position.ChainID], &block)
- sort.Sort(types.ByPosition(blocksByChain[block.Position.ChainID]))
- blocksByHash[block.Hash] = &block
- }
- // Make sure these two rules are hold for these blocks:
- // - No backward acking: the later block should only ack new blocks
- // compared to its parent block.
- // - Parent Ack: always ack its parent block.
- // - Timestamp: timestamp are increasing, and with valid interval to
- // previous block.
- // - The last block of each chain should pass endTime.
- // - No Acks in genesis bloc
- for _, blocks := range blocksByChain {
- lastAckingHeights := map[uint32]uint64{}
- req.NotEmpty(blocks)
- // Check genesis block.
- genesisBlock := blocks[0]
- req.Equal(genesisBlock.ParentHash, common.Hash{})
- req.Equal(genesisBlock.Position.Height, uint64(0))
- req.Empty(genesisBlock.Acks)
- // Check normal blocks.
- for index, block := range blocks[1:] {
- parentAcked := false
- for _, ack := range block.Acks {
- if ack == block.ParentHash {
- parentAcked = true
- }
- ackedBlock := blocksByHash[ack]
- req.NotNil(ackedBlock)
- prevAckingHeight, exists :=
- lastAckingHeights[ackedBlock.Position.ChainID]
- if exists {
- s.True(prevAckingHeight < ackedBlock.Position.Height)
- }
- lastAckingHeights[ackedBlock.Position.ChainID] =
- ackedBlock.Position.Height
- // Block Height should always incremental by 1.
- //
- // Because we iterate blocks slice from 1,
- // we need to add 1 to the index.
- req.Equal(block.Position.Height, uint64(index+1))
- }
- req.True(parentAcked)
- }
- // The block time of the last block should be after end time.
- req.True(blocks[len(blocks)-1].Timestamp.After(endTime))
- }
-}
-
-func (s *BlocksGeneratorTestSuite) TestGenerateWithMaxAckCount() {
- var (
- config = &BlocksGeneratorConfig{
- NumChains: 13,
- MinBlockTimeInterval: 250 * time.Millisecond,
- }
- req = s.Require()
- totalAckingCount = 0
- totalBlockCount = 0
- genesisTime = time.Now().UTC()
- )
- // Generate with 0 acks.
- dbInst, err := db.NewMemBackedDB()
- req.NoError(err)
- gen := NewBlocksGenerator(config, MaxAckingCountGenerator(0))
- req.NoError(gen.Generate(
- 0,
- genesisTime,
- genesisTime.Add(50*time.Second),
- dbInst))
- // Load blocks to check their acking count.
- iter, err := dbInst.GetAllBlocks()
- req.NoError(err)
- for {
- block, err := iter.NextBlock()
- if err == db.ErrIterationFinished {
- break
- }
- req.NoError(err)
- if block.IsGenesis() {
- continue
- }
- req.Len(block.Acks, 1)
- }
- // Generate with acks as many as possible.
- dbInst, err = db.NewMemBackedDB()
- req.NoError(err)
- gen = NewBlocksGenerator(config, MaxAckingCountGenerator(config.NumChains))
- req.NoError(gen.Generate(
- 0,
- genesisTime,
- genesisTime.Add(50*time.Second),
- dbInst))
- // Load blocks to verify the average acking count.
- iter, err = dbInst.GetAllBlocks()
- req.NoError(err)
- for {
- block, err := iter.NextBlock()
- if err == db.ErrIterationFinished {
- break
- }
- req.NoError(err)
- if block.IsGenesis() {
- continue
- }
- totalAckingCount += len(block.Acks)
- totalBlockCount++
- }
- req.NotZero(totalBlockCount)
- req.True((totalAckingCount / totalBlockCount) >= int(config.NumChains/2))
-}
-
-// TestFindTips make sure findTips works as expected.
-func (s *BlocksGeneratorTestSuite) TestFindTips() {
- var (
- config = &BlocksGeneratorConfig{
- NumChains: 10,
- MinBlockTimeInterval: 250 * time.Millisecond,
- }
- req = s.Require()
- genesisTime = time.Now().UTC()
- endTime = genesisTime.Add(100 * time.Second)
- )
- gen := NewBlocksGenerator(config, nil)
- dbInst, err := db.NewMemBackedDB()
- req.NoError(err)
- req.NoError(gen.Generate(
- 0,
- genesisTime,
- endTime,
- dbInst))
- tips, err := gen.findTips(0, dbInst)
- req.NoError(err)
- req.Len(tips, int(config.NumChains))
- for _, b := range tips {
- req.True(b.Timestamp.After(endTime))
- }
-}
-
-func (s *BlocksGeneratorTestSuite) TestConcateBlocksFromRounds() {
- // This test case run these steps:
- // - generate blocks by round but sharing one db.
- // - if those rounds are continuous, they should be concated.
- var (
- req = s.Require()
- genesisTime = time.Now().UTC()
- )
- dbInst, err := db.NewMemBackedDB()
- req.NoError(err)
- // Generate round 0 blocks.
- gen := NewBlocksGenerator(&BlocksGeneratorConfig{
- NumChains: 4,
- MinBlockTimeInterval: 250 * time.Millisecond,
- }, MaxAckingCountGenerator(4))
- req.NoError(gen.Generate(
- 0,
- genesisTime,
- genesisTime.Add(10*time.Second),
- dbInst))
- tips0, err := gen.findTips(0, dbInst)
- req.NoError(err)
- req.Len(tips0, 4)
- // Generate round 1 blocks.
- gen = NewBlocksGenerator(&BlocksGeneratorConfig{
- NumChains: 10,
- MinBlockTimeInterval: 250 * time.Millisecond,
- }, MaxAckingCountGenerator(10))
- req.NoError(gen.Generate(
- 1,
- genesisTime.Add(10*time.Second),
- genesisTime.Add(20*time.Second),
- dbInst))
- tips1, err := gen.findTips(1, dbInst)
- req.NoError(err)
- req.Len(tips1, 10)
- // Generate round 2 blocks.
- gen = NewBlocksGenerator(&BlocksGeneratorConfig{
- NumChains: 7,
- MinBlockTimeInterval: 250 * time.Millisecond,
- }, MaxAckingCountGenerator(7))
- req.NoError(gen.Generate(
- 2,
- genesisTime.Add(20*time.Second),
- genesisTime.Add(30*time.Second),
- dbInst))
- tips2, err := gen.findTips(2, dbInst)
- req.NoError(err)
- req.Len(tips2, 7)
- // Check results, make sure tips0, tips1 are acked by correct blocks.
- iter, err := dbInst.GetAllBlocks()
- req.NoError(err)
- revealer, err := NewRandomBlockRevealer(iter)
- req.NoError(err)
- removeTip := func(tips map[uint32]*types.Block, b *types.Block) {
- toRemove := []uint32{}
- for chainID, tip := range tips {
- if b.ParentHash == tip.Hash {
- req.Equal(b.Position.Height, tip.Position.Height+1)
- req.Equal(b.Position.Round, tip.Position.Round+1)
- req.True(b.IsAcking(tip.Hash))
- toRemove = append(toRemove, chainID)
- }
- }
- for _, ID := range toRemove {
- delete(tips, ID)
- }
- }
- // Make sure all tips are acked by loading blocks from db
- // and check them one by one.
- for {
- b, err := revealer.NextBlock()
- if err != nil {
- if err == db.ErrIterationFinished {
- err = nil
- break
- }
- req.NoError(err)
- }
- switch b.Position.Round {
- case 1:
- removeTip(tips0, &b)
- case 2:
- removeTip(tips1, &b)
- }
- }
- req.Empty(tips0)
- req.Len(tips1, 3)
- req.Contains(tips1, uint32(7))
- req.Contains(tips1, uint32(8))
- req.Contains(tips1, uint32(9))
- // Check the acking frequency of last round, it might be wrong.
- totalBlockCount := 0
- totalAckCount := 0
- revealer.Reset()
- for {
- b, err := revealer.NextBlock()
- if err != nil {
- if err == db.ErrIterationFinished {
- err = nil
- break
- }
- req.NoError(err)
- }
- if b.Position.Round != 2 {
- continue
- }
- totalBlockCount++
- totalAckCount += len(b.Acks)
- }
- // At least all blocks can ack some non-parent block.
- req.True(totalAckCount/totalBlockCount >= 2)
-}
-
-func TestBlocksGenerator(t *testing.T) {
- suite.Run(t, new(BlocksGeneratorTestSuite))
-}
diff --git a/core/test/interface.go b/core/test/interface.go
index 000b835..0712ad5 100644
--- a/core/test/interface.go
+++ b/core/test/interface.go
@@ -34,22 +34,6 @@ type BlockRevealer interface {
Reset()
}
-// Stopper defines an interface for Scheduler to tell when to stop execution.
-type Stopper interface {
- // ShouldStop is provided with the ID of the handler just finishes an event.
- // It's thread-safe to access internal/shared state of the handler at this
- // moment.
- // The Stopper should check state of that handler and return 'true'
- // if the execution could be stopped.
- ShouldStop(nID types.NodeID) bool
-}
-
-// EventHandler defines an interface to handle a Scheduler event.
-type EventHandler interface {
- // Handle the event belongs to this handler, and return derivated events.
- Handle(*Event) []*Event
-}
-
// TransportPeerType defines the type of peer, either 'peer' or 'server'.
type TransportPeerType string
diff --git a/core/test/network.go b/core/test/network.go
index 000037f..f5d1c6e 100644
--- a/core/test/network.go
+++ b/core/test/network.go
@@ -166,7 +166,7 @@ type Network struct {
unreceivedRandomness map[common.Hash]chan<- common.Hash
cache *utils.NodeSetCache
notarySetCachesLock sync.Mutex
- notarySetCaches map[uint64]map[uint32]map[types.NodeID]struct{}
+ notarySetCaches map[uint64]map[types.NodeID]struct{}
dkgSetCachesLock sync.Mutex
dkgSetCaches map[uint64]map[types.NodeID]struct{}
}
@@ -188,9 +188,8 @@ func NewNetwork(pubKey crypto.PublicKey, config NetworkConfig) (
unreceivedBlocks: make(map[common.Hash]chan<- common.Hash),
unreceivedRandomness: make(map[common.Hash]chan<- common.Hash),
peers: make(map[types.NodeID]struct{}),
- notarySetCaches: make(
- map[uint64]map[uint32]map[types.NodeID]struct{}),
- dkgSetCaches: make(map[uint64]map[types.NodeID]struct{}),
+ notarySetCaches: make(map[uint64]map[types.NodeID]struct{}),
+ dkgSetCaches: make(map[uint64]map[types.NodeID]struct{}),
voteCache: make(
map[types.Position]map[types.VoteHeader]*types.Vote),
}
@@ -226,8 +225,7 @@ func (n *Network) PullRandomness(hashes common.Hashes) {
// BroadcastVote implements core.Network interface.
func (n *Network) BroadcastVote(vote *types.Vote) {
- if err := n.trans.Broadcast(
- n.getNotarySet(vote.Position.Round, vote.Position.ChainID),
+ if err := n.trans.Broadcast(n.getNotarySet(vote.Position.Round),
n.config.DirectLatency, vote); err != nil {
panic(err)
}
@@ -238,7 +236,7 @@ func (n *Network) BroadcastVote(vote *types.Vote) {
func (n *Network) BroadcastBlock(block *types.Block) {
// Avoid data race in fake transport.
block = n.cloneForFake(block).(*types.Block)
- notarySet := n.getNotarySet(block.Position.Round, block.Position.ChainID)
+ notarySet := n.getNotarySet(block.Position.Round)
if err := n.trans.Broadcast(
notarySet, n.config.DirectLatency, block); err != nil {
panic(err)
@@ -276,8 +274,7 @@ func (n *Network) BroadcastRandomnessResult(
return
}
// Send to notary set first.
- notarySet := n.getNotarySet(
- randResult.Position.Round, randResult.Position.ChainID)
+ notarySet := n.getNotarySet(randResult.Position.Round)
if err := n.trans.Broadcast(
notarySet, n.config.DirectLatency, randResult); err != nil {
panic(err)
@@ -568,7 +565,7 @@ func (n *Network) pullVotesAsync(pos types.Position) {
Identity: pos,
}
// Get corresponding notary set.
- notarySet := n.getNotarySet(pos.Round, pos.ChainID)
+ notarySet := n.getNotarySet(pos.Round)
// Randomly select one peer from notary set and send a pull request.
sentCount := 0
for nID := range notarySet {
@@ -727,8 +724,7 @@ func (n *Network) cloneForFake(v interface{}) interface{} {
}
// getNotarySet gets notary set for that (round, chain) from cache.
-func (n *Network) getNotarySet(
- round uint64, chain uint32) map[types.NodeID]struct{} {
+func (n *Network) getNotarySet(round uint64) map[types.NodeID]struct{} {
if n.cache == nil {
// Default behavior is to broadcast to all peers, which makes it easier
// to be used in simple test cases.
@@ -736,19 +732,14 @@ func (n *Network) getNotarySet(
}
n.notarySetCachesLock.Lock()
defer n.notarySetCachesLock.Unlock()
- roundSets, exists := n.notarySetCaches[round]
- if !exists {
- roundSets = make(map[uint32]map[types.NodeID]struct{})
- n.notarySetCaches[round] = roundSets
- }
- set, exists := roundSets[chain]
+ set, exists := n.notarySetCaches[round]
if !exists {
var err error
- set, err = n.cache.GetNotarySet(round, chain)
+ set, err = n.cache.GetNotarySet(round, 0)
if err != nil {
panic(err)
}
- roundSets[chain] = set
+ n.notarySetCaches[round] = set
}
return set
}
diff --git a/core/test/scheduler-event.go b/core/test/scheduler-event.go
deleted file mode 100644
index 180ed07..0000000
--- a/core/test/scheduler-event.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package test
-
-import (
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// Event defines a scheduler event.
-type Event struct {
- // HistoryIndex is the index of this event in history.
- HistoryIndex int
- // NodeID is the ID of handler that this event deginated to.
- NodeID types.NodeID
- // Time is the expected execution time of this event.
- Time time.Time
- // ExecError record the error when handling this event.
- ExecError error
- // Payload is application specific data carried by this event.
- Payload interface{}
- // ParentHistoryIndex is the index of parent event in history.
- ParentHistoryIndex int
- // ExecInterval is the latency to execute this event
- ExecInterval time.Duration
-}
-
-// eventQueue implements heap.Interface.
-type eventQueue []*Event
-
-func (eq eventQueue) Len() int { return len(eq) }
-
-func (eq eventQueue) Less(i, j int) bool {
- return eq[i].Time.Before(eq[j].Time)
-}
-
-func (eq eventQueue) Swap(i, j int) {
- eq[i], eq[j] = eq[j], eq[i]
-}
-
-func (eq *eventQueue) Push(x interface{}) {
- *eq = append(*eq, x.(*Event))
-}
-
-func (eq *eventQueue) Pop() interface{} {
- pos := len(*eq) - 1
- item := (*eq)[pos]
- *eq = (*eq)[0:pos]
- return item
-}
-
-// NewEvent is the constructor for Event.
-func NewEvent(
- nID types.NodeID, when time.Time, payload interface{}) *Event {
-
- return &Event{
- HistoryIndex: -1,
- ParentHistoryIndex: -1,
- NodeID: nID,
- Time: when,
- Payload: payload,
- }
-}
diff --git a/core/test/scheduler.go b/core/test/scheduler.go
deleted file mode 100644
index f6c7eed..0000000
--- a/core/test/scheduler.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package test
-
-import (
- "container/heap"
- "context"
- "fmt"
- "sync"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-var (
- // ErrSchedulerAlreadyStarted means callers attempt to insert some
- // seed events after calling 'Run'.
- ErrSchedulerAlreadyStarted = fmt.Errorf("scheduler already started")
- // errNilEventWhenNotified is an internal error which means a worker routine
- // can't get an event when notified.
- errNilEventWhenNotified = fmt.Errorf("nil event when notified")
-)
-
-type schedulerHandlerRecord struct {
- handler EventHandler
- lock sync.Mutex
-}
-
-// Scheduler is an event scheduler.
-type Scheduler struct {
- events eventQueue
- eventsLock sync.Mutex
- history []*Event
- historyLock sync.RWMutex
- isStarted bool
- handlers map[types.NodeID]*schedulerHandlerRecord
- handlersLock sync.RWMutex
- eventNotification chan struct{}
- ctx context.Context
- cancelFunc context.CancelFunc
- stopper Stopper
-}
-
-// NewScheduler constructs an Scheduler instance.
-func NewScheduler(stopper Stopper) *Scheduler {
- ctx, cancel := context.WithCancel(context.Background())
- return &Scheduler{
- events: eventQueue{},
- history: []*Event{},
- handlers: make(map[types.NodeID]*schedulerHandlerRecord),
- eventNotification: make(chan struct{}, 100000),
- ctx: ctx,
- cancelFunc: cancel,
- stopper: stopper,
- }
-}
-
-// Run would run the scheduler. If you need strict incrememtal execution order
-// of events based on their 'Time' field, assign 'numWorkers' as 1. If you need
-// faster execution, assign 'numWorkers' a larger number.
-func (sch *Scheduler) Run(numWorkers int) {
- var wg sync.WaitGroup
-
- sch.isStarted = true
- for i := 0; i < numWorkers; i++ {
- wg.Add(1)
- go sch.workerRoutine(&wg)
- }
- // Blocks until all routines are finished.
- wg.Wait()
-}
-
-// Seed is used to provide the scheduler some seed events.
-func (sch *Scheduler) Seed(e *Event) error {
- sch.eventsLock.Lock()
- defer sch.eventsLock.Unlock()
-
- if sch.isStarted {
- return ErrSchedulerAlreadyStarted
- }
- sch.addEvent(e)
- return nil
-}
-
-// RegisterEventHandler register an event handler by providing ID of
-// corresponding node.
-func (sch *Scheduler) RegisterEventHandler(
- nID types.NodeID,
- handler EventHandler) {
-
- sch.handlersLock.Lock()
- defer sch.handlersLock.Unlock()
-
- sch.handlers[nID] = &schedulerHandlerRecord{handler: handler}
-}
-
-// nextTick would pick the oldest event from eventQueue.
-func (sch *Scheduler) nextTick() (e *Event) {
- sch.eventsLock.Lock()
- defer sch.eventsLock.Unlock()
-
- if len(sch.events) == 0 {
- return nil
- }
- return heap.Pop(&sch.events).(*Event)
-}
-
-// addEvent is an helper function to add events into eventQueue sorted by
-// their 'Time' field.
-func (sch *Scheduler) addEvent(e *Event) {
- // Perform sorted insertion.
- heap.Push(&sch.events, e)
- sch.eventNotification <- struct{}{}
-}
-
-// CloneExecutionHistory returns a cloned event execution history.
-func (sch *Scheduler) CloneExecutionHistory() (cloned []*Event) {
- sch.historyLock.RLock()
- defer sch.historyLock.RUnlock()
-
- cloned = make([]*Event, len(sch.history))
- copy(cloned, sch.history)
- return
-}
-
-// workerRoutine is the mainloop when handling events.
-func (sch *Scheduler) workerRoutine(wg *sync.WaitGroup) {
- defer wg.Done()
-
- handleEvent := func(e *Event) {
- // Find correspond handler record.
- hRec := func(nID types.NodeID) *schedulerHandlerRecord {
- sch.handlersLock.RLock()
- defer sch.handlersLock.RUnlock()
-
- return sch.handlers[nID]
- }(e.NodeID)
-
- newEvents := func() []*Event {
- // This lock makes sure there would be no concurrent access
- // against each handler.
- hRec.lock.Lock()
- defer hRec.lock.Unlock()
-
- // Handle incoming event, and record its execution time.
- beforeExecution := time.Now().UTC()
- newEvents := hRec.handler.Handle(e)
- e.ExecInterval = time.Now().UTC().Sub(beforeExecution)
- // It's safe to check status of that node under 'hRec.lock'.
- if sch.stopper.ShouldStop(e.NodeID) {
- sch.cancelFunc()
- }
- return newEvents
- }()
- // Record executed events as history.
- func() {
- sch.historyLock.Lock()
- defer sch.historyLock.Unlock()
-
- e.HistoryIndex = len(sch.history)
- sch.history = append(sch.history, e)
- }()
- // Include the execution interval of parent event to the expected time
- // to execute child events.
- for _, newEvent := range newEvents {
- newEvent.ParentHistoryIndex = e.HistoryIndex
- newEvent.Time = newEvent.Time.Add(e.ExecInterval)
- }
- // Add derivated events back to event queue.
- func() {
- sch.eventsLock.Lock()
- defer sch.eventsLock.Unlock()
-
- for _, newEvent := range newEvents {
- sch.addEvent(newEvent)
- }
- }()
- }
-
-Done:
- for {
- // We favor scheduler-shutdown signal than other events.
- select {
- case <-sch.ctx.Done():
- break Done
- default:
- }
- // Block until new event arrival or scheduler shutdown.
- select {
- case <-sch.eventNotification:
- e := sch.nextTick()
- if e == nil {
- panic(errNilEventWhenNotified)
- }
- handleEvent(e)
- case <-sch.ctx.Done():
- break Done
- }
- }
-}
diff --git a/core/test/scheduler_test.go b/core/test/scheduler_test.go
deleted file mode 100644
index 4c39f95..0000000
--- a/core/test/scheduler_test.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package test
-
-import (
- "sync"
- "testing"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/types"
- "github.com/stretchr/testify/suite"
-)
-
-type SchedulerTestSuite struct {
- suite.Suite
-}
-
-type simpleStopper struct {
- lock sync.Mutex
- touched map[types.NodeID]int
- touchedCount int
-}
-
-func newSimpleStopper(
- nodes []types.NodeID, touchedCount int) *simpleStopper {
-
- touched := make(map[types.NodeID]int)
- for _, nID := range nodes {
- touched[nID] = 0
- }
- return &simpleStopper{
- touched: touched,
- touchedCount: touchedCount,
- }
-}
-
-func (stopper *simpleStopper) ShouldStop(nID types.NodeID) bool {
- stopper.lock.Lock()
- defer stopper.lock.Unlock()
-
- stopper.touched[nID] = stopper.touched[nID] + 1
- for _, count := range stopper.touched {
- if count < stopper.touchedCount {
- return false
- }
- }
- return true
-}
-
-type simpleHandler struct {
- count int
- nID types.NodeID
-}
-
-func (handler *simpleHandler) Handle(e *Event) (events []*Event) {
- if e.NodeID == handler.nID {
- handler.count++
- }
- return
-}
-
-type fixedLatencyHandler struct {
- nID types.NodeID
-}
-
-func (handler *fixedLatencyHandler) Handle(e *Event) (events []*Event) {
- // Simulate execution time.
- time.Sleep(500 * time.Millisecond)
- return []*Event{&Event{
- NodeID: handler.nID,
- Time: e.Time.Add(800 * time.Millisecond),
- }}
-}
-
-func (s *SchedulerTestSuite) TestEventSequence() {
- // This test case makes sure the event sequence is correctly increment
- // by their timestamps in 'Time' field.
- var (
- sch = NewScheduler(nil)
- req = s.Require()
- )
-
- req.NotNil(sch)
- now := time.Now()
- req.Nil(sch.Seed(&Event{Time: now.Add(100 * time.Second), Payload: 1}))
- req.Nil(sch.Seed(&Event{Time: now.Add(99 * time.Second), Payload: 2}))
- req.Nil(sch.Seed(&Event{Time: now.Add(98 * time.Second), Payload: 3}))
- req.Nil(sch.Seed(&Event{Time: now.Add(97 * time.Second), Payload: 4}))
- req.Nil(sch.Seed(&Event{Time: now.Add(96 * time.Second), Payload: 5}))
-
- req.Equal(sch.nextTick().Payload.(int), 5)
- req.Equal(sch.nextTick().Payload.(int), 4)
- req.Equal(sch.nextTick().Payload.(int), 3)
- req.Equal(sch.nextTick().Payload.(int), 2)
- req.Equal(sch.nextTick().Payload.(int), 1)
- req.Nil(sch.nextTick())
-}
-
-func (s *SchedulerTestSuite) TestBasicRound() {
- // This test case makes sure these facts:
- // - event is dispatched by NodeID attached to each handler.
- // - stopper can stop the execution when condition is met.
- var (
- req = s.Require()
- nodes = GenerateRandomNodeIDs(3)
- stopper = newSimpleStopper(nodes, 2)
- sch = NewScheduler(stopper)
- handlers = make(map[types.NodeID]*simpleHandler)
- )
-
- for _, nID := range nodes {
- handler := &simpleHandler{nID: nID}
- handlers[nID] = handler
- sch.RegisterEventHandler(nID, handler)
- req.Nil(sch.Seed(&Event{NodeID: nID}))
- req.Nil(sch.Seed(&Event{NodeID: nID}))
- }
- sch.Run(10)
- // Verify result.
- for _, h := range handlers {
- req.Equal(h.count, 2)
- }
-}
-
-func (s *SchedulerTestSuite) TestChildEvent() {
- // This test case makes sure these fields of child events are
- // assigned correctly.
- var (
- req = s.Require()
- nID = types.NodeID{Hash: common.NewRandomHash()}
- stopper = newSimpleStopper(types.NodeIDs{nID}, 3)
- handler = &fixedLatencyHandler{nID: nID}
- sch = NewScheduler(stopper)
- )
-
- sch.RegisterEventHandler(nID, handler)
- req.Nil(sch.Seed(&Event{
- NodeID: nID,
- Time: time.Now().UTC(),
- }))
- sch.Run(1)
- // Verify result.
- history := sch.CloneExecutionHistory()
- req.Len(history, 3)
- curEvent := history[0]
- for _, e := range history[1:] {
- // Make sure the time difference between events are more than
- // 1.3 second.
- req.True(e.Time.Sub(curEvent.Time) >= 1300*time.Millisecond)
- // Make sure ParentTime field is set and is equal to parent event's
- // time.
- req.NotEqual(-1, e.ParentHistoryIndex)
- req.Equal(e.ParentHistoryIndex, curEvent.HistoryIndex)
- curEvent = e
- }
-}
-
-func TestScheduler(t *testing.T) {
- suite.Run(t, new(SchedulerTestSuite))
-}
diff --git a/core/test/stopper.go b/core/test/stopper.go
deleted file mode 100644
index 2ba31d3..0000000
--- a/core/test/stopper.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package test
-
-import (
- "sync"
-
- "github.com/dexon-foundation/dexon-consensus/core/db"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// StopByConfirmedBlocks would make sure each nodes confirms
-// at least X blocks proposed by itself.
-type StopByConfirmedBlocks struct {
- apps map[types.NodeID]*App
- dbs map[types.NodeID]db.Database
- lastCheckDelivered map[types.NodeID]int
- confirmedBlocks map[types.NodeID]int
- blockCount int
- lock sync.Mutex
-}
-
-// NewStopByConfirmedBlocks construct an StopByConfirmedBlocks instance.
-func NewStopByConfirmedBlocks(
- blockCount int,
- apps map[types.NodeID]*App,
- dbs map[types.NodeID]db.Database) *StopByConfirmedBlocks {
- confirmedBlocks := make(map[types.NodeID]int)
- for nID := range apps {
- confirmedBlocks[nID] = 0
- }
- return &StopByConfirmedBlocks{
- apps: apps,
- dbs: dbs,
- lastCheckDelivered: make(map[types.NodeID]int),
- confirmedBlocks: confirmedBlocks,
- blockCount: blockCount,
- }
-}
-
-// ShouldStop implements Stopper interface.
-func (s *StopByConfirmedBlocks) ShouldStop(nID types.NodeID) bool {
- s.lock.Lock()
- defer s.lock.Unlock()
- // Accumulate confirmed blocks proposed by this node in this round.
- lastChecked := s.lastCheckDelivered[nID]
- currentConfirmedBlocks := s.confirmedBlocks[nID]
- dbInst := s.dbs[nID]
- s.apps[nID].WithLock(func(app *App) {
- for _, h := range app.DeliverSequence[lastChecked:] {
- b, err := dbInst.GetBlock(h)
- if err != nil {
- panic(err)
- }
- if b.ProposerID == nID {
- currentConfirmedBlocks++
- }
- }
- s.lastCheckDelivered[nID] = len(app.DeliverSequence)
- })
- s.confirmedBlocks[nID] = currentConfirmedBlocks
- // Check if all nodes confirmed at least 'blockCount' blocks.
- for _, v := range s.confirmedBlocks {
- if v < s.blockCount {
- return false
- }
- }
- return true
-}
-
-// StopByRound would make sure at least one block at round R is delivered
-// at each node.
-type StopByRound struct {
- untilRound uint64
- currentRounds map[types.NodeID]uint64
- lastCheckDelivered map[types.NodeID]int
- apps map[types.NodeID]*App
- dbs map[types.NodeID]db.Database
- lock sync.Mutex
-}
-
-// NewStopByRound constructs an StopByRound instance.
-func NewStopByRound(
- round uint64,
- apps map[types.NodeID]*App,
- dbs map[types.NodeID]db.Database) *StopByRound {
- return &StopByRound{
- untilRound: round,
- currentRounds: make(map[types.NodeID]uint64),
- lastCheckDelivered: make(map[types.NodeID]int),
- apps: apps,
- dbs: dbs,
- }
-}
-
-// ShouldStop implements Stopper interface.
-func (s *StopByRound) ShouldStop(nID types.NodeID) bool {
- s.lock.Lock()
- defer s.lock.Unlock()
- // Cache latest round of this node.
- if curRound := s.currentRounds[nID]; curRound < s.untilRound {
- lastChecked := s.lastCheckDelivered[nID]
- dbInst := s.dbs[nID]
- s.apps[nID].WithLock(func(app *App) {
- for _, h := range app.DeliverSequence[lastChecked:] {
- b, err := dbInst.GetBlock(h)
- if err != nil {
- panic(err)
- }
- if b.Position.Round > curRound {
- curRound = b.Position.Round
- }
- }
- s.lastCheckDelivered[nID] = len(app.DeliverSequence)
- s.currentRounds[nID] = curRound
- })
- }
- // Check if latest round on each node is later than untilRound.
- for _, round := range s.currentRounds {
- if round < s.untilRound {
- return false
- }
- }
- return true
-}
diff --git a/core/test/stopper_test.go b/core/test/stopper_test.go
deleted file mode 100644
index 758a0e4..0000000
--- a/core/test/stopper_test.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package test
-
-import (
- "testing"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core"
- "github.com/dexon-foundation/dexon-consensus/core/db"
- "github.com/dexon-foundation/dexon-consensus/core/types"
- "github.com/stretchr/testify/suite"
-)
-
-type StopperTestSuite struct {
- suite.Suite
-}
-
-func (s *StopperTestSuite) deliver(
- blocks []*types.Block, app *App, dbInst db.Database) {
- hashes := common.Hashes{}
- for _, b := range blocks {
- hashes = append(hashes, b.Hash)
- s.Require().NoError(dbInst.PutBlock(*b))
- }
- for _, h := range hashes {
- app.BlockConfirmed(types.Block{Hash: h})
- }
- app.TotalOrderingDelivered(hashes, core.TotalOrderingModeNormal)
- for _, h := range hashes {
- app.BlockDelivered(h, types.Position{}, types.FinalizationResult{
- Timestamp: time.Time{},
- })
- }
-}
-
-func (s *StopperTestSuite) deliverToAllNodes(
- blocks []*types.Block,
- apps map[types.NodeID]*App,
- dbInsts map[types.NodeID]db.Database) {
- for nID := range apps {
- s.deliver(blocks, apps[nID], dbInsts[nID])
- }
-}
-
-func (s *StopperTestSuite) TestStopByConfirmedBlocks() {
- // This test case makes sure this stopper would stop when
- // all nodes confirmed at least 'x' count of blocks produced
- // by themselves.
- var (
- req = s.Require()
- apps = make(map[types.NodeID]*App)
- dbInsts = make(map[types.NodeID]db.Database)
- nodes = GenerateRandomNodeIDs(2)
- )
- for _, nID := range nodes {
- apps[nID] = NewApp(0, nil)
- dbInst, err := db.NewMemBackedDB()
- req.NoError(err)
- dbInsts[nID] = dbInst
- }
- stopper := NewStopByConfirmedBlocks(2, apps, dbInsts)
- b00 := &types.Block{
- ProposerID: nodes[0],
- Hash: common.NewRandomHash(),
- }
- s.deliverToAllNodes([]*types.Block{b00}, apps, dbInsts)
- b10 := &types.Block{
- ProposerID: nodes[1],
- Hash: common.NewRandomHash(),
- }
- b11 := &types.Block{
- ProposerID: nodes[1],
- ParentHash: b10.Hash,
- Hash: common.NewRandomHash(),
- }
- s.deliverToAllNodes([]*types.Block{b10, b11}, apps, dbInsts)
- req.False(stopper.ShouldStop(nodes[1]))
- b12 := &types.Block{
- ProposerID: nodes[1],
- ParentHash: b11.Hash,
- Hash: common.NewRandomHash(),
- }
- s.deliverToAllNodes([]*types.Block{b12}, apps, dbInsts)
- req.False(stopper.ShouldStop(nodes[1]))
- b01 := &types.Block{
- ProposerID: nodes[0],
- ParentHash: b00.Hash,
- Hash: common.NewRandomHash(),
- }
- s.deliverToAllNodes([]*types.Block{b01}, apps, dbInsts)
- req.True(stopper.ShouldStop(nodes[0]))
-}
-
-func (s *StopperTestSuite) TestStopByRound() {
- // This test case make sure at least one block from round R
- // is delivered by each node.
- var (
- req = s.Require()
- apps = make(map[types.NodeID]*App)
- dbInsts = make(map[types.NodeID]db.Database)
- nodes = GenerateRandomNodeIDs(2)
- )
- for _, nID := range nodes {
- apps[nID] = NewApp(0, nil)
- dbInst, err := db.NewMemBackedDB()
- req.NoError(err)
- dbInsts[nID] = dbInst
- }
- stopper := NewStopByRound(10, apps, dbInsts)
- b00 := &types.Block{
- ProposerID: nodes[0],
- Position: types.Position{
- Round: 0,
- ChainID: 0,
- Height: 0,
- },
- Hash: common.NewRandomHash(),
- }
- s.deliverToAllNodes([]*types.Block{b00}, apps, dbInsts)
- b10 := &types.Block{
- ProposerID: nodes[1],
- Position: types.Position{
- Round: 0,
- ChainID: 1,
- Height: 0,
- },
- Hash: common.NewRandomHash(),
- }
- b11 := &types.Block{
- ProposerID: nodes[1],
- ParentHash: b10.Hash,
- Position: types.Position{
- Round: 0,
- ChainID: 1,
- Height: 1,
- },
- Hash: common.NewRandomHash(),
- }
- s.deliverToAllNodes([]*types.Block{b10, b11}, apps, dbInsts)
- req.False(stopper.ShouldStop(nodes[0]))
- req.False(stopper.ShouldStop(nodes[1]))
- // Deliver one block at round 10 to node0
- b12 := &types.Block{
- ProposerID: nodes[1],
- ParentHash: b11.Hash,
- Position: types.Position{
- Round: 10,
- ChainID: 1,
- Height: 2,
- },
- Hash: common.NewRandomHash(),
- }
- // None should stop when only one node reach that round.
- s.deliver([]*types.Block{b12}, apps[nodes[0]], dbInsts[nodes[0]])
- req.False(stopper.ShouldStop(nodes[0]))
- req.False(stopper.ShouldStop(nodes[1]))
- // Everyone should stop now.
- s.deliver([]*types.Block{b12}, apps[nodes[1]], dbInsts[nodes[1]])
- req.True(stopper.ShouldStop(nodes[1]))
- req.True(stopper.ShouldStop(nodes[0]))
-}
-
-func TestStopper(t *testing.T) {
- suite.Run(t, new(StopperTestSuite))
-}