author     Mission Liao <mission.liao@dexon.org>  2018-07-30 14:55:42 +0800
committer  Wei-Ning Huang <aitjcize@gmail.com>    2018-07-30 14:55:42 +0800
commit     9261a3b9f3711ba736f2a143456e0b73bdfcfab8 (patch)
tree       b2fc027de52b6f8809748025f8a96c3af741cb95 /core
parent     aaeaebaf5dec3a9a17d0a626cb19e76834f9e4cb (diff)
Implement DEXON total ordering algorithm (#16)
Implement the K-level total ordering algorithm. Besides the algorithm itself, this change also covers the following points:
- The candidate set and the ackingStatusVector of each candidate are not recalculated upon receiving each block.
- The component that calculates the total ordering is made more self-contained.
- Access to block status is only required when receiving a new block.
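A minimal usage sketch of the new sequencer (not part of this change; the 'blocks' slice and its ordering are hypothetical, and blocks are assumed to arrive in an order that keeps the DAG valid):

    seq := newSequencer(1, 3, 5) // k = 1, phi = 3, 5 validators
    for _, b := range blocks {   // blocks: a hypothetical []*types.Block in DAG order
        delivered, early, err := seq.processBlock(b)
        if err != nil {
            // e.g. ErrNotValidDAG when the block breaks the DAG constraint
            break
        }
        _ = delivered // sorted hashes delivered by total ordering (may be empty)
        _ = early     // true when the early-deliver criteria were met
    }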
Diffstat (limited to 'core')
-rw-r--r--  core/sequencer.go       522
-rw-r--r--  core/sequencer_test.go  874
2 files changed, 1396 insertions, 0 deletions
diff --git a/core/sequencer.go b/core/sequencer.go
new file mode 100644
index 0000000..36c1383
--- /dev/null
+++ b/core/sequencer.go
@@ -0,0 +1,522 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/dexon-foundation/dexon-consensus-core/common"
+ "github.com/dexon-foundation/dexon-consensus-core/core/types"
+)
+
+// ErrNotValidDAG would be reported when a block submitted to the sequencer
+// does not form a valid DAG.
+var ErrNotValidDAG = fmt.Errorf("not a valid dag")
+
+// ackingStatusVector describes the acking status, either globally or just
+// for one candidate.
+//
+// When block A acks block B, all blocks proposed by the same proposer
+// as block A with a higher height also ack block B. Therefore,
+// we just need to record:
+// - the minimum height of an acking block from that proposer
+// - the count of acking blocks from that proposer
+// to represent the acking status of block B.
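+//
+// For example (illustrative only): if blocks of heights 3, 4 and 5 from
+// proposer P all ack the target block, the record for P is
+// {minHeight: 3, count: 3}.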
+type ackingStatusVector map[types.ValidatorID]*struct{ minHeight, count uint64 }
+
+// addBlock updates the ackingStatusVector. It's the caller's duty
+// to make sure the input block actually acks the target block.
+func (v ackingStatusVector) addBlock(b *types.Block) (err error) {
+ rec, exists := v[b.ProposerID]
+ if !exists {
+ v[b.ProposerID] = &struct {
+ minHeight, count uint64
+ }{
+ minHeight: b.Height,
+ count: 1,
+ }
+ } else {
+ if b.Height < rec.minHeight {
+ err = ErrNotValidDAG
+ return
+ }
+ rec.count++
+ }
+ return
+}
+
+// getAckingNodeSet would generate the Acking Node Set.
+// Only acking blocks with height not less than
+//
+//    global minimum height + k
+//
+// would be taken into consideration, e.g.
+//
+// For some validator X:
+// - the global minimum acking height = 1
+// - k = 1
+// then only acking blocks with height >= 2 would add X to the acking node set.
+func (v ackingStatusVector) getAckingNodeSet(
+ global ackingStatusVector, k uint64) map[types.ValidatorID]struct{} {
+
+ ret := make(map[types.ValidatorID]struct{})
+ for vID, gRec := range global {
+ rec, exists := v[vID]
+ if !exists {
+ continue
+ }
+
+ // This line checks whether these two ranges overlap:
+ // - [global minimum height + k, infinity)
+ // - [local minimum height, local minimum height + count - 1]
+ if rec.minHeight+rec.count-1 >= gRec.minHeight+k {
+ ret[vID] = struct{}{}
+ }
+ }
+ return ret
+}
+
+// getAckingHeightVector would convert an 'ackingStatusVector' to an
+// Acking Height Vector.
+//
+// Only the block with height equal to (global minimum block height + k)
+// is taken into consideration.
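+//
+// For example (illustrative only): with global minimum height = 1 and k = 2,
+// only height 3 matters; if the local record's minimum height is at most 3,
+// the result for that validator is 3, otherwise it is infinity.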
+func (v ackingStatusVector) getAckingHeightVector(
+ global ackingStatusVector, k uint64) map[types.ValidatorID]uint64 {
+
+ ret := make(map[types.ValidatorID]uint64)
+ for vID, gRec := range global {
+ rec, exists := v[vID]
+
+ if gRec.count <= k {
+ continue
+ } else if !exists {
+ ret[vID] = infinity
+ } else if rec.minHeight <= gRec.minHeight+k {
+ // This check is sufficient to make sure the block height:
+ //
+ // gRec.minHeight + k
+ //
+ // would be included in this ackingStatusVector.
+ ret[vID] = gRec.minHeight + k
+ } else {
+ ret[vID] = infinity
+ }
+ }
+ return ret
+}
+
+// blockVector stores all blocks grouped by their proposers and
+// sorted by their block height.
+type blockVector map[types.ValidatorID][]*types.Block
+
+func (v blockVector) addBlock(b *types.Block) (err error) {
+ blocksFromProposer := v[b.ProposerID]
+ if len(blocksFromProposer) > 0 {
+ lastBlock := blocksFromProposer[len(blocksFromProposer)-1]
+ if b.Height-lastBlock.Height != 1 {
+ err = ErrNotValidDAG
+ return
+ }
+ }
+ v[b.ProposerID] = append(blocksFromProposer, b)
+ return
+}
+
+// getHeightVector would convert a blockVector to an
+// ackingStatusVector.
+func (v blockVector) getHeightVector() ackingStatusVector {
+ ret := ackingStatusVector{}
+ for vID, vec := range v {
+ if len(vec) == 0 {
+ continue
+ }
+ ret[vID] = &struct {
+ minHeight, count uint64
+ }{
+ minHeight: vec[0].Height,
+ count: uint64(len(vec)),
+ }
+ }
+ return ret
+}
+
+// sequencer is a processing unit that handles the total ordering
+// of blocks.
+type sequencer struct {
+ // pendings stores blocks awaiting total ordering.
+ pendings map[common.Hash]*types.Block
+
+ // k represents the k in 'k-level total ordering'.
+ // In short, only the block with height equal to (global minimum height + k)
+ // is taken into consideration.
+ k uint64
+
+ // phi is a constant that controls how strong the leading preceding block
+ // must be.
+ phi uint64
+
+ // validatorCount is the size of the validator set.
+ validatorCount uint64
+
+ // globalVector groups all pending blocks by proposer and
+ // sorts them by block height. This structure is helpful for:
+ //
+ // - building the global height vector
+ // - picking candidates for the next round
+ globalVector blockVector
+
+ // candidateAckingStatusVectors caches ackingStatusVector of candidates.
+ candidateAckingStatusVectors map[common.Hash]ackingStatusVector
+
+ // acked caches the 'block A is acked by block B' relation by
+ // keeping a record at acked[A.Hash][B.Hash].
+ acked map[common.Hash]map[common.Hash]struct{}
+}
+
+func newSequencer(k, phi, validatorCount uint64) *sequencer {
+ return &sequencer{
+ candidateAckingStatusVectors: make(map[common.Hash]ackingStatusVector),
+ pendings: make(map[common.Hash]*types.Block),
+ k: k,
+ phi: phi,
+ validatorCount: validatorCount,
+ globalVector: blockVector{},
+ acked: make(map[common.Hash]map[common.Hash]struct{}),
+ }
+}
+
+// buildBlockRelation populates the acked map according to the acking
+// relationships of the input block.
+func (s *sequencer) buildBlockRelation(b *types.Block) {
+ // populateAcked recursively updates the records of all blocks
+ // implicitly acked by the input block.
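+ // The 'alreadyPopulated' check below also guarantees termination
+ // even if the acking relation contains a cycle (see TestCycleDetection
+ // in sequencer_test.go).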
+ var populateAcked func(bx, target *types.Block)
+ populateAcked = func(bx, target *types.Block) {
+ for ack := range bx.Acks {
+ acked, exists := s.acked[ack]
+ if !exists {
+ acked = make(map[common.Hash]struct{})
+ s.acked[ack] = acked
+ }
+
+ // This means we've walked this block already.
+ if _, alreadyPopulated := acked[target.Hash]; alreadyPopulated {
+ continue
+ }
+ acked[target.Hash] = struct{}{}
+
+ // See if we need to go forward.
+ if nextBlock, exists := s.pendings[ack]; !exists {
+ continue
+ } else {
+ populateAcked(nextBlock, target)
+ }
+ }
+ }
+ populateAcked(b, b)
+}
+
+// clean would remove a block from the working set. This
+// prevents our memory usage from growing without bound.
+func (s *sequencer) clean(h common.Hash) {
+ delete(s.acked, h)
+ delete(s.pendings, h)
+ delete(s.candidateAckingStatusVectors, h)
+}
+
+// updateVectors is a helper function to update all cached vectors.
+func (s *sequencer) updateVectors(b *types.Block) (err error) {
+ // Update global height vector
+ err = s.globalVector.addBlock(b)
+ if err != nil {
+ return
+ }
+
+ // Update acking status of candidates.
+ for candidate, vector := range s.candidateAckingStatusVectors {
+ if _, acked := s.acked[candidate][b.Hash]; !acked {
+ continue
+ }
+ if err = vector.addBlock(b); err != nil {
+ return
+ }
+ }
+ return
+}
+
+// grade implements the 'grade' potential function described in the white paper.
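+//
+// A descriptive note on the return value (inferred from the code below):
+//  -  1: at least phi validators have a finite height in hvFrom while
+//        having infinity in hvTo.
+//  -  0: even counting validators not yet in globalAns, that count can
+//        no longer reach phi.
+//  - -1: not decided yet.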
+func (s *sequencer) grade(
+ hvFrom, hvTo map[types.ValidatorID]uint64,
+ globalAns map[types.ValidatorID]struct{}) int {
+
+ count := uint64(0)
+ for vID, hFrom := range hvFrom {
+ hTo, exists := hvTo[vID]
+ if !exists {
+ continue
+ }
+
+ if hFrom != infinity && hTo == infinity {
+ count++
+ }
+ }
+
+ if count >= s.phi {
+ return 1
+ } else if count < s.phi-s.validatorCount+uint64(len(globalAns)) {
+ return 0
+ } else {
+ return -1
+ }
+}
+
+// buildAckingStatusVectorForNewCandidate is a helper function to
+// build ackingStatusVector for new candidate.
+func (s *sequencer) buildAckingStatusVectorForNewCandidate(
+ candidate *types.Block) (hVec ackingStatusVector) {
+
+ blocks := s.globalVector[candidate.ProposerID]
+ hVec = ackingStatusVector{
+ candidate.ProposerID: &struct {
+ minHeight, count uint64
+ }{
+ minHeight: candidate.Height,
+ count: uint64(len(blocks)),
+ },
+ }
+
+ ackedsForCandidate, exists := s.acked[candidate.Hash]
+ if !exists {
+ // This candidate is acked by nobody.
+ return
+ }
+
+ for vID, blocks := range s.globalVector {
+ if vID == candidate.ProposerID {
+ continue
+ }
+
+ for i, b := range blocks {
+ if _, acked := ackedsForCandidate[b.Hash]; !acked {
+ continue
+ }
+
+ // If this block acks this candidate, all newer blocks
+ // from the same validator also 'indirectly' ack it.
+ hVec[vID] = &struct {
+ minHeight, count uint64
+ }{
+ minHeight: b.Height,
+ count: uint64(len(blocks) - i),
+ }
+ break
+ }
+ }
+ return
+}
+
+// isAckOnlyPrecedings is a helper function to check if a block
+// only contains acks to already delivered blocks.
+func (s *sequencer) isAckOnlyPrecedings(b *types.Block) bool {
+ for ack := range b.Acks {
+ if _, pending := s.pendings[ack]; pending {
+ return false
+ }
+ }
+ return true
+}
+
+// output is a helper function to finish the delivery of
+// a deliverable preceding set.
+func (s *sequencer) output(precedings map[common.Hash]struct{}) common.Hashes {
+ ret := common.Hashes{}
+ for p := range precedings {
+ ret = append(ret, p)
+
+ // Remove the first element from corresponding blockVector.
+ b := s.pendings[p]
+ s.globalVector[b.ProposerID] = s.globalVector[b.ProposerID][1:]
+
+ // Remove block relations.
+ s.clean(p)
+ }
+ sort.Sort(ret)
+
+ // Find new candidates from tip of globalVector of each validator.
+ // The complexity here is O(N^2logN).
+ for _, blocks := range s.globalVector {
+ if len(blocks) == 0 {
+ continue
+ }
+
+ tip := blocks[0]
+ if _, alreadyCandidate :=
+ s.candidateAckingStatusVectors[tip.Hash]; alreadyCandidate {
+ continue
+ }
+
+ if !s.isAckOnlyPrecedings(tip) {
+ continue
+ }
+
+ // Build ackingStatusVector for new candidate.
+ s.candidateAckingStatusVectors[tip.Hash] =
+ s.buildAckingStatusVectorForNewCandidate(tip)
+ }
+ return ret
+}
+
+// generateDeliverSet would:
+// - generate the preceding set
+// - check if the preceding set is deliverable by checking the potential function
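+//
+// A descriptive note (inferred from the code below): 'delivered' is nil when
+// no preceding set is deliverable yet; 'early' is true when the set is
+// delivered through the external stability checks (checkAHV and checkANS)
+// before blocks from every validator appear in the global acking node set.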
+func (s *sequencer) generateDeliverSet() (
+ delivered map[common.Hash]struct{}, early bool) {
+
+ globalHeightVector := s.globalVector.getHeightVector()
+ ahvs := map[common.Hash]map[types.ValidatorID]uint64{}
+ for candidate, v := range s.candidateAckingStatusVectors {
+ ahvs[candidate] = v.getAckingHeightVector(globalHeightVector, s.k)
+ }
+
+ globalAns := globalHeightVector.getAckingNodeSet(globalHeightVector, s.k)
+ precedings := make(map[common.Hash]struct{})
+
+CheckNextCandidateLoop:
+ for candidate := range s.candidateAckingStatusVectors {
+ for otherCandidate := range s.candidateAckingStatusVectors {
+ if candidate == otherCandidate {
+ continue
+ }
+ if s.grade(ahvs[otherCandidate], ahvs[candidate], globalAns) != 0 {
+ continue CheckNextCandidateLoop
+ }
+ }
+ precedings[candidate] = struct{}{}
+ }
+
+ if len(precedings) == 0 {
+ return
+ }
+
+ // internal is a helper function to verify internal stability.
+ internal := func() bool {
+ for candidate := range s.candidateAckingStatusVectors {
+ if _, isPreceding := precedings[candidate]; isPreceding {
+ continue
+ }
+
+ beaten := false
+ for p := range precedings {
+ if beaten =
+ s.grade(ahvs[p], ahvs[candidate], globalAns) == 1; beaten {
+ break
+ }
+ }
+ if !beaten {
+ return false
+ }
+ }
+ return true
+ }
+
+ // checkAHV is a helper function to verify external stability.
+ // It would make sure some preceding block is strong enough
+ // to lead the whole preceding set.
+ checkAHV := func() bool {
+ for p := range precedings {
+ count := uint64(0)
+ for _, v := range ahvs[p] {
+ if v != infinity {
+ count++
+ }
+ }
+
+ if count > s.phi {
+ return true
+ }
+ }
+ return false
+ }
+
+ // checkANS is a helper function to verify external stability.
+ // It would make sure all preceding blocks are strong enough
+ // to be delivered.
+ checkANS := func() bool {
+ for p := range precedings {
+ validatorAns := s.candidateAckingStatusVectors[p].getAckingNodeSet(
+ globalHeightVector, s.k)
+ if uint64(len(validatorAns)) < s.validatorCount-s.phi {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ // Check internal stability first.
+ if !internal() {
+ return
+ }
+
+ // If all validators have proposed enough blocks, we should force
+ // delivery since the whole picture of the DAG is revealed.
+ if uint64(len(globalAns)) != s.validatorCount {
+ // The whole picture is not ready yet; check whether
+ // external stability is met so that we can deliver earlier.
+ if checkAHV() && checkANS() {
+ early = true
+ } else {
+ return
+ }
+ }
+
+ delivered = precedings
+ return
+}
+
+// processBlock is the entry point of sequencer.
+func (s *sequencer) processBlock(b *types.Block) (
+ delivered common.Hashes, early bool, err error) {
+
+ // NOTE: We assume the block 'b' is already safe for total ordering.
+ // That means all blocks it acks are in or after the
+ // total ordering stage.
+
+ // Incremental part.
+ s.pendings[b.Hash] = b
+ s.buildBlockRelation(b)
+ if err = s.updateVectors(b); err != nil {
+ return
+ }
+ if s.isAckOnlyPrecedings(b) {
+ s.candidateAckingStatusVectors[b.Hash] =
+ s.buildAckingStatusVectorForNewCandidate(b)
+ }
+
+ // Not-Incremental part (yet).
+ // - generate ahv for each candidate
+ // - generate ans for each candidate
+ // - generate global ans
+ // - find preceding set
+ hashes, early := s.generateDeliverSet()
+
+ // output precedings
+ delivered = s.output(hashes)
+ return
+}
diff --git a/core/sequencer_test.go b/core/sequencer_test.go
new file mode 100644
index 0000000..fa8b461
--- /dev/null
+++ b/core/sequencer_test.go
@@ -0,0 +1,874 @@
+package core
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/dexon-foundation/dexon-consensus-core/common"
+ "github.com/dexon-foundation/dexon-consensus-core/core/types"
+ "github.com/stretchr/testify/suite"
+)
+
+type SequencerTestSuite struct {
+ suite.Suite
+}
+
+func (s *SequencerTestSuite) generateValidatorIDs(
+ count int) []types.ValidatorID {
+
+ validatorIDs := []types.ValidatorID{}
+ for i := 0; i < count; i++ {
+ validatorIDs = append(validatorIDs,
+ types.ValidatorID{Hash: common.NewRandomHash()})
+ }
+
+ return validatorIDs
+}
+
+func (s *SequencerTestSuite) genRootBlock(
+ vID types.ValidatorID, acks map[common.Hash]struct{}) *types.Block {
+
+ hash := common.NewRandomHash()
+ return &types.Block{
+ ProposerID: vID,
+ ParentHash: hash,
+ Hash: hash,
+ Height: 0,
+ Acks: acks,
+ }
+}
+
+func (s *SequencerTestSuite) checkNotDeliver(seq *sequencer, b *types.Block) {
+ hashes, early, err := seq.processBlock(b)
+ s.Empty(hashes)
+ s.False(early)
+ s.Nil(err)
+}
+
+func (s *SequencerTestSuite) checkNotInWorkingSet(
+ seq *sequencer, b *types.Block) {
+
+ s.NotContains(seq.pendings, b.Hash)
+ s.NotContains(seq.acked, b.Hash)
+}
+
+func (s *SequencerTestSuite) TestBlockRelation() {
+ // This test case verifies that the 'acking' and 'acked'
+ // relations are accumulated correctly.
+ //
+ // The DAG used below is:
+ // A <- B <- C
+
+ vID := types.ValidatorID{Hash: common.NewRandomHash()}
+
+ hash := common.NewRandomHash()
+ blockA := &types.Block{
+ ProposerID: vID,
+ ParentHash: hash,
+ Hash: hash,
+ Height: 0,
+ Acks: map[common.Hash]struct{}{},
+ }
+ blockB := &types.Block{
+ ProposerID: vID,
+ ParentHash: blockA.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ blockA.Hash: struct{}{},
+ },
+ }
+ blockC := &types.Block{
+ ProposerID: vID,
+ ParentHash: blockB.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 2,
+ Acks: map[common.Hash]struct{}{
+ blockB.Hash: struct{}{},
+ },
+ }
+
+ seq := newSequencer(1, 3, 5)
+ s.checkNotDeliver(seq, blockA)
+ s.checkNotDeliver(seq, blockB)
+ s.checkNotDeliver(seq, blockC)
+
+ // Check 'acked'.
+ ackedA := seq.acked[blockA.Hash]
+ s.Require().NotNil(ackedA)
+ s.Len(ackedA, 2)
+ s.Contains(ackedA, blockB.Hash)
+ s.Contains(ackedA, blockC.Hash)
+
+ ackedB := seq.acked[blockB.Hash]
+ s.Require().NotNil(ackedB)
+ s.Len(ackedB, 1)
+ s.Contains(ackedB, blockC.Hash)
+
+ s.Nil(seq.acked[blockC.Hash])
+}
+
+func (s *SequencerTestSuite) TestCreateAckingHeightVectorFromHeightVector() {
+ validators := s.generateValidatorIDs(5)
+ global := ackingStatusVector{
+ validators[0]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 5},
+ validators[1]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 5},
+ validators[2]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 5},
+ validators[3]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 5},
+ }
+
+ // Records that don't exist locally but exist globally
+ // should map to infinity.
+ ahv := ackingStatusVector{
+ validators[0]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 2},
+ }.getAckingHeightVector(global, 0)
+ s.Len(ahv, 4)
+ s.Equal(ahv[validators[0]], uint64(0))
+ s.Equal(ahv[validators[1]], infinity)
+ s.Equal(ahv[validators[2]], infinity)
+ s.Equal(ahv[validators[3]], infinity)
+
+ // When the local minimum height exceeds (global minimum height + k),
+ // the result should be infinity.
+ hv := ackingStatusVector{
+ validators[0]: &struct{ minHeight, count uint64 }{
+ minHeight: 3, count: 1},
+ }
+ ahv = hv.getAckingHeightVector(global, 2)
+ s.Equal(ahv[validators[0]], infinity)
+ ahv = hv.getAckingHeightVector(global, 3)
+ s.Equal(ahv[validators[0]], uint64(3))
+
+ ahv = ackingStatusVector{
+ validators[0]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 3},
+ validators[1]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 3},
+ }.getAckingHeightVector(global, 5)
+ s.Len(ahv, 0)
+}
+
+func (s *SequencerTestSuite) TestCreateAckingNodeSetFromHeightVector() {
+ validators := s.generateValidatorIDs(5)
+ global := ackingStatusVector{
+ validators[0]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 5},
+ validators[1]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 5},
+ validators[2]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 5},
+ validators[3]: &struct{ minHeight, count uint64 }{
+ minHeight: 0, count: 5},
+ }
+
+ local := ackingStatusVector{
+ validators[0]: &struct{ minHeight, count uint64 }{
+ minHeight: 1, count: 2},
+ }
+ s.Len(local.getAckingNodeSet(global, 1), 1)
+ s.Len(local.getAckingNodeSet(global, 2), 1)
+ s.Len(local.getAckingNodeSet(global, 3), 0)
+}
+
+func (s *SequencerTestSuite) TestGrade() {
+ validators := s.generateValidatorIDs(5)
+ seq := newSequencer(1, 3, 5) // K doesn't matter when calculating preceding.
+
+ ans := map[types.ValidatorID]struct{}{
+ validators[0]: struct{}{},
+ validators[1]: struct{}{},
+ validators[2]: struct{}{},
+ validators[3]: struct{}{},
+ }
+
+ ahv1 := map[types.ValidatorID]uint64{
+ validators[0]: 1,
+ validators[1]: infinity,
+ validators[2]: infinity,
+ validators[3]: infinity,
+ }
+ ahv2 := map[types.ValidatorID]uint64{
+ validators[0]: 1,
+ validators[1]: 1,
+ validators[2]: 1,
+ validators[3]: 1,
+ }
+ ahv3 := map[types.ValidatorID]uint64{
+ validators[0]: 1,
+ validators[1]: 1,
+ validators[2]: infinity,
+ validators[3]: infinity,
+ }
+ s.Equal(seq.grade(ahv2, ahv1, ans), 1)
+ s.Equal(seq.grade(ahv1, ahv2, ans), 0)
+ s.Equal(seq.grade(ahv2, ahv3, ans), -1)
+ s.Equal(seq.grade(ahv3, ahv2, ans), 0)
+}
+
+func (s *SequencerTestSuite) TestCycleDetection() {
+ // Make sure we don't hang when there is a cycle in
+ // the blocks' acks.
+ validators := s.generateValidatorIDs(5)
+
+ // Create blocks with a cycle in the acking relation.
+ cycledHash := common.NewRandomHash()
+ hash := common.NewRandomHash()
+ b00 := &types.Block{
+ ProposerID: validators[0],
+ ParentHash: hash,
+ Hash: hash,
+ Height: 0,
+ Acks: map[common.Hash]struct{}{
+ cycledHash: struct{}{},
+ },
+ }
+ b01 := &types.Block{
+ ProposerID: validators[0],
+ ParentHash: b00.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ b00.Hash: struct{}{},
+ },
+ }
+ b02 := &types.Block{
+ ProposerID: validators[0],
+ ParentHash: b01.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 2,
+ Acks: map[common.Hash]struct{}{
+ b01.Hash: struct{}{},
+ },
+ }
+ b03 := &types.Block{
+ ProposerID: validators[0],
+ ParentHash: b02.Hash,
+ Hash: cycledHash,
+ Height: 3,
+ Acks: map[common.Hash]struct{}{
+ b02.Hash: struct{}{},
+ },
+ }
+
+ // Create a block that acks itself.
+ hash = common.NewRandomHash()
+ b10 := &types.Block{
+ ProposerID: validators[1],
+ ParentHash: hash,
+ Hash: hash,
+ Height: 0,
+ Acks: map[common.Hash]struct{}{
+ hash: struct{}{},
+ },
+ }
+
+ // Make sure we won't hang when a cycle exists.
+ seq := newSequencer(1, 3, 5)
+ s.checkNotDeliver(seq, b00)
+ s.checkNotDeliver(seq, b01)
+ s.checkNotDeliver(seq, b02)
+
+ // Should not hang in this line.
+ s.checkNotDeliver(seq, b03)
+ // Should not hang in this line.
+ s.checkNotDeliver(seq, b10)
+}
+
+func (s *SequencerTestSuite) TestNotValidDAGDetection() {
+ validators := s.generateValidatorIDs(4)
+ seq := newSequencer(1, 3, 5)
+
+ hash := common.NewRandomHash()
+ b00 := &types.Block{
+ ProposerID: validators[0],
+ ParentHash: hash,
+ Hash: hash,
+ Height: 0,
+ Acks: map[common.Hash]struct{}{},
+ }
+ b01 := &types.Block{
+ ProposerID: validators[0],
+ ParentHash: b00.Hash,
+ Hash: common.NewRandomHash(),
+ }
+
+ // When a block with lower height is submitted to the sequencer,
+ // the caller should receive an error.
+ s.checkNotDeliver(seq, b01)
+ _, _, err := seq.processBlock(b00)
+ s.Equal(err, ErrNotValidDAG)
+}
+
+func (s *SequencerTestSuite) TestEarlyDeliver() {
+ // The test scenario:
+ //
+ // o o o o o
+ // : : : : : <- (K - 1) layers
+ // o o o o o
+ // \ v / |
+ // o o
+ // A B
+ // Even when B is not received, A should
+ // be able to be delivered.
+ seq := newSequencer(2, 3, 5)
+ validators := s.generateValidatorIDs(5)
+
+ genNextBlock := func(b *types.Block) *types.Block {
+ return &types.Block{
+ ProposerID: b.ProposerID,
+ ParentHash: b.Hash,
+ Hash: common.NewRandomHash(),
+ Height: b.Height + 1,
+ Acks: map[common.Hash]struct{}{
+ b.Hash: struct{}{},
+ },
+ }
+ }
+
+ b00 := s.genRootBlock(validators[0], map[common.Hash]struct{}{})
+ b01 := genNextBlock(b00)
+ b02 := genNextBlock(b01)
+
+ b10 := s.genRootBlock(validators[1], map[common.Hash]struct{}{
+ b00.Hash: struct{}{},
+ })
+ b11 := genNextBlock(b10)
+ b12 := genNextBlock(b11)
+
+ b20 := s.genRootBlock(validators[2], map[common.Hash]struct{}{
+ b00.Hash: struct{}{},
+ })
+ b21 := genNextBlock(b20)
+ b22 := genNextBlock(b21)
+
+ b30 := s.genRootBlock(validators[3], map[common.Hash]struct{}{
+ b00.Hash: struct{}{},
+ })
+ b31 := genNextBlock(b30)
+ b32 := genNextBlock(b31)
+
+ // It's a valid block sequence to feed to the
+ // total ordering algorithm: a DAG.
+ s.checkNotDeliver(seq, b00)
+ s.checkNotDeliver(seq, b01)
+ s.checkNotDeliver(seq, b02)
+
+ vec := seq.candidateAckingStatusVectors[b00.Hash]
+ s.Require().NotNil(vec)
+ s.Len(vec, 1)
+ s.Equal(vec[validators[0]].minHeight, b00.Height)
+ s.Equal(vec[validators[0]].count, uint64(3))
+
+ s.checkNotDeliver(seq, b10)
+ s.checkNotDeliver(seq, b11)
+ s.checkNotDeliver(seq, b12)
+ s.checkNotDeliver(seq, b20)
+ s.checkNotDeliver(seq, b21)
+ s.checkNotDeliver(seq, b22)
+ s.checkNotDeliver(seq, b30)
+ s.checkNotDeliver(seq, b31)
+
+ // Check the internal state before delivering.
+ s.Len(seq.candidateAckingStatusVectors, 1) // b00 is the only candidate.
+
+ vec = seq.candidateAckingStatusVectors[b00.Hash]
+ s.Require().NotNil(vec)
+ s.Len(vec, 4)
+ s.Equal(vec[validators[0]].minHeight, b00.Height)
+ s.Equal(vec[validators[0]].count, uint64(3))
+ s.Equal(vec[validators[1]].minHeight, b10.Height)
+ s.Equal(vec[validators[1]].count, uint64(3))
+ s.Equal(vec[validators[2]].minHeight, b20.Height)
+ s.Equal(vec[validators[2]].count, uint64(3))
+ s.Equal(vec[validators[3]].minHeight, b30.Height)
+ s.Equal(vec[validators[3]].count, uint64(2))
+
+ hashes, early, err := seq.processBlock(b32)
+ s.Require().Len(hashes, 1)
+ s.True(early)
+ s.Nil(err)
+ s.Equal(hashes[0], b00.Hash)
+
+ // Check the internal state after delivered.
+ s.Len(seq.candidateAckingStatusVectors, 4) // b01, b10, b20, b30 are candidates.
+
+ // Check b01.
+ vec = seq.candidateAckingStatusVectors[b01.Hash]
+ s.Require().NotNil(vec)
+ s.Len(vec, 1)
+ s.Equal(vec[validators[0]].minHeight, b01.Height)
+ s.Equal(vec[validators[0]].count, uint64(2))
+
+ // Check b10.
+ vec = seq.candidateAckingStatusVectors[b10.Hash]
+ s.Require().NotNil(vec)
+ s.Len(vec, 1)
+ s.Equal(vec[validators[1]].minHeight, b10.Height)
+ s.Equal(vec[validators[1]].count, uint64(3))
+
+ // Check b20.
+ vec = seq.candidateAckingStatusVectors[b20.Hash]
+ s.Require().NotNil(vec)
+ s.Len(vec, 1)
+ s.Equal(vec[validators[2]].minHeight, b20.Height)
+ s.Equal(vec[validators[2]].count, uint64(3))
+
+ // Check b30.
+ vec = seq.candidateAckingStatusVectors[b30.Hash]
+ s.Require().NotNil(vec)
+ s.Len(vec, 1)
+ s.Equal(vec[validators[3]].minHeight, b30.Height)
+ s.Equal(vec[validators[3]].count, uint64(3))
+
+ // Make sure b00 doesn't exist in current working set:
+ s.checkNotInWorkingSet(seq, b00)
+}
+
+func (s *SequencerTestSuite) TestBasicCaseForK2() {
+ // It's a handcrafted test case.
+ seq := newSequencer(2, 3, 5)
+ validators := s.generateValidatorIDs(5)
+
+ b00 := s.genRootBlock(validators[0], map[common.Hash]struct{}{})
+ b10 := s.genRootBlock(validators[1], map[common.Hash]struct{}{})
+ b20 := s.genRootBlock(
+ validators[2], map[common.Hash]struct{}{b10.Hash: struct{}{}})
+ b30 := s.genRootBlock(
+ validators[3], map[common.Hash]struct{}{b20.Hash: struct{}{}})
+ b40 := s.genRootBlock(validators[4], map[common.Hash]struct{}{})
+ b11 := &types.Block{
+ ProposerID: validators[1],
+ ParentHash: b10.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ b10.Hash: struct{}{},
+ b00.Hash: struct{}{},
+ },
+ }
+ b01 := &types.Block{
+ ProposerID: validators[0],
+ ParentHash: b00.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ b00.Hash: struct{}{},
+ b11.Hash: struct{}{},
+ },
+ }
+ b21 := &types.Block{
+ ProposerID: validators[2],
+ ParentHash: b20.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ b20.Hash: struct{}{},
+ b01.Hash: struct{}{},
+ },
+ }
+ b31 := &types.Block{
+ ProposerID: validators[3],
+ ParentHash: b30.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ b30.Hash: struct{}{},
+ b21.Hash: struct{}{},
+ },
+ }
+ b02 := &types.Block{
+ ProposerID: validators[0],
+ ParentHash: b01.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 2,
+ Acks: map[common.Hash]struct{}{
+ b01.Hash: struct{}{},
+ b21.Hash: struct{}{},
+ },
+ }
+ b12 := &types.Block{
+ ProposerID: validators[1],
+ ParentHash: b11.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 2,
+ Acks: map[common.Hash]struct{}{
+ b11.Hash: struct{}{},
+ b21.Hash: struct{}{},
+ },
+ }
+ b32 := &types.Block{
+ ProposerID: validators[3],
+ ParentHash: b31.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 2,
+ Acks: map[common.Hash]struct{}{
+ b31.Hash: struct{}{},
+ },
+ }
+ b22 := &types.Block{
+ ProposerID: validators[2],
+ ParentHash: b21.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 2,
+ Acks: map[common.Hash]struct{}{
+ b21.Hash: struct{}{},
+ b32.Hash: struct{}{},
+ },
+ }
+ b23 := &types.Block{
+ ProposerID: validators[2],
+ ParentHash: b22.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 3,
+ Acks: map[common.Hash]struct{}{
+ b22.Hash: struct{}{},
+ },
+ }
+ b03 := &types.Block{
+ ProposerID: validators[0],
+ ParentHash: b02.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 3,
+ Acks: map[common.Hash]struct{}{
+ b02.Hash: struct{}{},
+ b22.Hash: struct{}{},
+ },
+ }
+ b13 := &types.Block{
+ ProposerID: validators[1],
+ ParentHash: b12.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 3,
+ Acks: map[common.Hash]struct{}{
+ b12.Hash: struct{}{},
+ b22.Hash: struct{}{},
+ },
+ }
+ b14 := &types.Block{
+ ProposerID: validators[1],
+ ParentHash: b13.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 4,
+ Acks: map[common.Hash]struct{}{
+ b13.Hash: struct{}{},
+ },
+ }
+ b41 := &types.Block{
+ ProposerID: validators[4],
+ ParentHash: b40.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ b40.Hash: struct{}{},
+ },
+ }
+ b42 := &types.Block{
+ ProposerID: validators[4],
+ ParentHash: b41.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 2,
+ Acks: map[common.Hash]struct{}{
+ b41.Hash: struct{}{},
+ },
+ }
+
+ s.checkNotDeliver(seq, b00)
+ s.checkNotDeliver(seq, b10)
+ s.checkNotDeliver(seq, b11)
+ s.checkNotDeliver(seq, b01)
+ s.checkNotDeliver(seq, b20)
+ s.checkNotDeliver(seq, b30)
+ s.checkNotDeliver(seq, b21)
+ s.checkNotDeliver(seq, b31)
+ s.checkNotDeliver(seq, b32)
+ s.checkNotDeliver(seq, b22)
+ s.checkNotDeliver(seq, b12)
+
+ // Make sure 'acked' for current precedings is correct.
+ acked := seq.acked[b00.Hash]
+ s.Require().NotNil(acked)
+ s.Len(acked, 7)
+ s.Contains(acked, b01.Hash)
+ s.Contains(acked, b11.Hash)
+ s.Contains(acked, b12.Hash)
+ s.Contains(acked, b21.Hash)
+ s.Contains(acked, b22.Hash)
+ s.Contains(acked, b31.Hash)
+ s.Contains(acked, b32.Hash)
+
+ acked = seq.acked[b10.Hash]
+ s.Require().NotNil(acked)
+ s.Len(acked, 9)
+ s.Contains(acked, b01.Hash)
+ s.Contains(acked, b11.Hash)
+ s.Contains(acked, b12.Hash)
+ s.Contains(acked, b20.Hash)
+ s.Contains(acked, b21.Hash)
+ s.Contains(acked, b22.Hash)
+ s.Contains(acked, b30.Hash)
+ s.Contains(acked, b31.Hash)
+ s.Contains(acked, b32.Hash)
+
+ // Make sure there are 2 candidates.
+ s.Require().Len(seq.candidateAckingStatusVectors, 2)
+
+ // Check b00's height vector.
+ vec := seq.candidateAckingStatusVectors[b00.Hash]
+ s.Require().NotNil(vec)
+ s.NotContains(vec, validators[4])
+ s.Equal(vec[validators[0]].minHeight, b00.Height)
+ s.Equal(vec[validators[0]].count, uint64(2))
+ s.Equal(vec[validators[1]].minHeight, b11.Height)
+ s.Equal(vec[validators[1]].count, uint64(2))
+ s.Equal(vec[validators[2]].minHeight, b21.Height)
+ s.Equal(vec[validators[2]].count, uint64(2))
+ s.Equal(vec[validators[3]].minHeight, b31.Height)
+ s.Equal(vec[validators[3]].count, uint64(2))
+
+ // Check b10's height vector.
+ vec = seq.candidateAckingStatusVectors[b10.Hash]
+ s.Require().NotNil(vec)
+ s.NotContains(vec, validators[4])
+ s.Equal(vec[validators[0]].minHeight, b01.Height)
+ s.Equal(vec[validators[0]].count, uint64(1))
+ s.Equal(vec[validators[1]].minHeight, b10.Height)
+ s.Equal(vec[validators[1]].count, uint64(3))
+ s.Equal(vec[validators[2]].minHeight, b20.Height)
+ s.Equal(vec[validators[2]].count, uint64(3))
+ s.Equal(vec[validators[3]].minHeight, b30.Height)
+ s.Equal(vec[validators[3]].count, uint64(3))
+
+ // Check the first deliver.
+ hashes, early, err := seq.processBlock(b02)
+ s.True(early)
+ s.Nil(err)
+ expected := common.Hashes{b00.Hash, b10.Hash}
+ sort.Sort(expected)
+ s.Equal(hashes, expected)
+
+ // Make sure b00, b10 are removed from current working set.
+ s.checkNotInWorkingSet(seq, b00)
+ s.checkNotInWorkingSet(seq, b10)
+
+ // Check if candidates of next round are picked correctly.
+ s.Len(seq.candidateAckingStatusVectors, 2)
+
+ // Check b11's height vector.
+ vec = seq.candidateAckingStatusVectors[b11.Hash]
+ s.Require().NotNil(vec)
+ s.NotContains(vec, validators[4])
+ s.Equal(vec[validators[0]].minHeight, b01.Height)
+ s.Equal(vec[validators[0]].count, uint64(2))
+ s.Equal(vec[validators[1]].minHeight, b11.Height)
+ s.Equal(vec[validators[1]].count, uint64(2))
+ s.Equal(vec[validators[2]].minHeight, b21.Height)
+ s.Equal(vec[validators[2]].count, uint64(2))
+ s.Equal(vec[validators[3]].minHeight, b11.Height)
+ s.Equal(vec[validators[3]].count, uint64(2))
+
+ // Check b20's height vector.
+ vec = seq.candidateAckingStatusVectors[b20.Hash]
+ s.Require().NotNil(vec)
+ s.NotContains(vec, validators[4])
+ s.Equal(vec[validators[0]].minHeight, b02.Height)
+ s.Equal(vec[validators[0]].count, uint64(1))
+ s.Equal(vec[validators[1]].minHeight, b12.Height)
+ s.Equal(vec[validators[1]].count, uint64(1))
+ s.Equal(vec[validators[2]].minHeight, b20.Height)
+ s.Equal(vec[validators[2]].count, uint64(3))
+ s.Equal(vec[validators[3]].minHeight, b30.Height)
+ s.Equal(vec[validators[3]].count, uint64(3))
+
+ s.checkNotDeliver(seq, b13)
+
+ // Check the second deliver.
+ hashes, early, err = seq.processBlock(b03)
+ s.True(early)
+ s.Nil(err)
+ expected = common.Hashes{b11.Hash, b20.Hash}
+ sort.Sort(expected)
+ s.Equal(hashes, expected)
+
+ // Make sure b11, b20 are removed from current working set.
+ s.checkNotInWorkingSet(seq, b11)
+ s.checkNotInWorkingSet(seq, b20)
+
+ // Add b40, b41, b42 to pending set.
+ s.checkNotDeliver(seq, b40)
+ s.checkNotDeliver(seq, b41)
+ s.checkNotDeliver(seq, b42)
+ s.checkNotDeliver(seq, b14)
+
+ // Make sure b01, b30, b40 are candidates in the next round.
+ s.Len(seq.candidateAckingStatusVectors, 3)
+ vec = seq.candidateAckingStatusVectors[b01.Hash]
+ s.Require().NotNil(vec)
+ s.NotContains(vec, validators[4])
+ s.Equal(vec[validators[0]].minHeight, b01.Height)
+ s.Equal(vec[validators[0]].count, uint64(3))
+ s.Equal(vec[validators[1]].minHeight, b12.Height)
+ s.Equal(vec[validators[1]].count, uint64(3))
+ s.Equal(vec[validators[2]].minHeight, b21.Height)
+ s.Equal(vec[validators[2]].count, uint64(2))
+ s.Equal(vec[validators[3]].minHeight, b31.Height)
+ s.Equal(vec[validators[3]].count, uint64(2))
+
+ vec = seq.candidateAckingStatusVectors[b30.Hash]
+ s.Require().NotNil(vec)
+ s.NotContains(vec, validators[4])
+ s.Equal(vec[validators[0]].minHeight, b03.Height)
+ s.Equal(vec[validators[0]].count, uint64(1))
+ s.Equal(vec[validators[1]].minHeight, b13.Height)
+ s.Equal(vec[validators[1]].count, uint64(2))
+ s.Equal(vec[validators[2]].minHeight, b22.Height)
+ s.Equal(vec[validators[2]].count, uint64(1))
+ s.Equal(vec[validators[3]].minHeight, b30.Height)
+ s.Equal(vec[validators[3]].count, uint64(3))
+
+ vec = seq.candidateAckingStatusVectors[b40.Hash]
+ s.Require().NotNil(vec)
+ s.NotContains(vec, validators[0])
+ s.NotContains(vec, validators[1])
+ s.NotContains(vec, validators[2])
+ s.NotContains(vec, validators[3])
+ s.Equal(vec[validators[4]].minHeight, b40.Height)
+ s.Equal(vec[validators[4]].count, uint64(3))
+
+ // Make the 'Acking Node Set' contain blocks from all validators;
+ // this should trigger a non-early deliver.
+ hashes, early, err = seq.processBlock(b23)
+ s.False(early)
+ s.Nil(err)
+ expected = common.Hashes{b01.Hash, b30.Hash}
+ sort.Sort(expected)
+ s.Equal(expected, hashes)
+
+ // Make sure b01, b30 are no longer in the working set.
+ s.checkNotInWorkingSet(seq, b01)
+ s.checkNotInWorkingSet(seq, b30)
+
+ // Make sure b21, b40 are candidates of next round.
+ s.Contains(seq.candidateAckingStatusVectors, b21.Hash)
+ s.Contains(seq.candidateAckingStatusVectors, b40.Hash)
+}
+
+func (s *SequencerTestSuite) TestBasicCaseForK0() {
+ // This is a relatively simple test for K=0.
+ //
+ // 0 1 2 3 4
+ // -------------------
+ // . . . . .
+ // . . . . .
+ // o o o <- o <- o Height: 1
+ // | \ | \ | |
+ // v v v v
+ // o o o <- o Height: 0
+ seq := newSequencer(0, 3, 5)
+ validators := s.generateValidatorIDs(5)
+
+ b00 := s.genRootBlock(validators[0], map[common.Hash]struct{}{})
+ b10 := s.genRootBlock(validators[1], map[common.Hash]struct{}{})
+ b20 := s.genRootBlock(validators[2], map[common.Hash]struct{}{})
+ b30 := s.genRootBlock(validators[3], map[common.Hash]struct{}{
+ b20.Hash: struct{}{},
+ })
+ b01 := &types.Block{
+ ProposerID: validators[0],
+ ParentHash: b00.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ b00.Hash: struct{}{},
+ b10.Hash: struct{}{},
+ },
+ }
+ b11 := &types.Block{
+ ProposerID: validators[1],
+ ParentHash: b10.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ b10.Hash: struct{}{},
+ b20.Hash: struct{}{},
+ },
+ }
+ b21 := &types.Block{
+ ProposerID: validators[2],
+ ParentHash: b20.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ b20.Hash: struct{}{},
+ },
+ }
+ b31 := &types.Block{
+ ProposerID: validators[3],
+ ParentHash: b30.Hash,
+ Hash: common.NewRandomHash(),
+ Height: 1,
+ Acks: map[common.Hash]struct{}{
+ b21.Hash: struct{}{},
+ b30.Hash: struct{}{},
+ },
+ }
+ b40 := s.genRootBlock(validators[4], map[common.Hash]struct{}{
+ b31.Hash: struct{}{},
+ })
+
+ s.checkNotDeliver(seq, b00)
+ s.checkNotDeliver(seq, b10)
+ s.checkNotDeliver(seq, b20)
+ s.checkNotDeliver(seq, b30)
+ s.checkNotDeliver(seq, b01)
+ s.checkNotDeliver(seq, b11)
+ s.checkNotDeliver(seq, b21)
+ s.checkNotDeliver(seq, b31)
+
+ // Check status before delivering.
+ vec := seq.candidateAckingStatusVectors[b00.Hash]
+ s.Require().NotNil(vec)
+ s.Len(vec, 1)
+ s.Equal(vec[validators[0]].minHeight, b00.Height)
+ s.Equal(vec[validators[0]].count, uint64(2))
+
+ vec = seq.candidateAckingStatusVectors[b10.Hash]
+ s.Require().NotNil(vec)
+ s.Len(vec, 2)
+ s.Equal(vec[validators[0]].minHeight, b01.Height)
+ s.Equal(vec[validators[0]].count, uint64(1))
+ s.Equal(vec[validators[1]].minHeight, b10.Height)
+ s.Equal(vec[validators[1]].count, uint64(2))
+
+ vec = seq.candidateAckingStatusVectors[b20.Hash]
+ s.Require().NotNil(vec)
+ s.Len(vec, 3)
+ s.Equal(vec[validators[1]].minHeight, b11.Height)
+ s.Equal(vec[validators[1]].count, uint64(1))
+ s.Equal(vec[validators[2]].minHeight, b20.Height)
+ s.Equal(vec[validators[2]].count, uint64(2))
+ s.Equal(vec[validators[3]].minHeight, b30.Height)
+ s.Equal(vec[validators[3]].count, uint64(2))
+
+ // This new block should trigger non-early deliver.
+ hashes, early, err := seq.processBlock(b40)
+ s.False(early)
+ s.Nil(err)
+ expected := common.Hashes{b20.Hash}
+ sort.Sort(expected)
+ s.Equal(expected, hashes)
+
+ // Make sure b20 no longer exists in the working set.
+ s.checkNotInWorkingSet(seq, b20)
+
+ // Make sure b10, b30 are candidates for next round.
+ s.Contains(seq.candidateAckingStatusVectors, b10.Hash)
+ s.Contains(seq.candidateAckingStatusVectors, b30.Hash)
+}
+
+func TestSequencer(t *testing.T) {
+ suite.Run(t, new(SequencerTestSuite))
+}