From 75281fcfc95434672781785ee8f70a0f319470cf Mon Sep 17 00:00:00 2001 From: Mission Liao Date: Mon, 6 Aug 2018 14:36:07 +0800 Subject: Rename names of struct and files Rename these files: - core/sequencer[_test].go -> core/total-ordering[_test].go - core/acking[_test].go -> core/reliable-broadcast[_test].go - core/timestamp[_test].go -> core/consensus-timestamp[_test].go Rename these structs: - core.sequencer -> core.totalOrdering - core.acking -> core.reliableBroadcast - core.timestamp -> core.consensusTimestamp --- core/acking.go | 309 ------------ core/acking_test.go | 500 -------------------- core/consensus-timestamp.go | 151 ++++++ core/consensus-timestamp_test.go | 206 ++++++++ core/reliable-broadcast.go | 309 ++++++++++++ core/reliable-broadcast_test.go | 500 ++++++++++++++++++++ core/sequencer.go | 522 -------------------- core/sequencer_test.go | 996 --------------------------------------- core/timestamp.go | 151 ------ core/timestamp_test.go | 206 -------- core/total-ordering.go | 522 ++++++++++++++++++++ core/total-ordering_test.go | 996 +++++++++++++++++++++++++++++++++++++++ 12 files changed, 2684 insertions(+), 2684 deletions(-) delete mode 100644 core/acking.go delete mode 100644 core/acking_test.go create mode 100644 core/consensus-timestamp.go create mode 100644 core/consensus-timestamp_test.go create mode 100644 core/reliable-broadcast.go create mode 100644 core/reliable-broadcast_test.go delete mode 100644 core/sequencer.go delete mode 100644 core/sequencer_test.go delete mode 100644 core/timestamp.go delete mode 100644 core/timestamp_test.go create mode 100644 core/total-ordering.go create mode 100644 core/total-ordering_test.go diff --git a/core/acking.go b/core/acking.go deleted file mode 100644 index 47f13fb..0000000 --- a/core/acking.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2018 The dexon-consensus-core Authors -// This file is part of the dexon-consensus-core library. 
-// -// The dexon-consensus-core library is free software: you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus-core library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus-core library. If not, see -// . - -package core - -import ( - "fmt" - - "github.com/dexon-foundation/dexon-consensus-core/common" - "github.com/dexon-foundation/dexon-consensus-core/core/types" -) - -// acking is for acking module. -type acking struct { - // lattice stores blocks by its validator ID and height. - lattice map[types.ValidatorID]*ackingValidatorStatus - - // blocks stores the hash to block map. - blocks map[common.Hash]*types.Block - - // receivedBlocks stores blocks which is received but its acks are not all - // in lattice. - receivedBlocks map[common.Hash]*types.Block - - // ackedBlocks stores blocks in status types.BlockStatusAcked, which are - // strongly acked but not yet being output to total ordering module. - ackedBlocks map[common.Hash]*types.Block -} - -type ackingValidatorStatus struct { - // blocks stores blocks proposed by specified validator in map which key is - // the height of the block. - blocks map[uint64]*types.Block - - // nextAck stores the height of next height that should be acked, i.e. last - // acked height + 1. Initialized to 0, when genesis blocks are still not - // being acked. For example, a.lattice[vid1].NextAck[vid2] - 1 is the last - // acked height by vid1 acking vid2. 
- nextAck map[types.ValidatorID]uint64 - - // nextOutput is the next output height of block, default to 0. - nextOutput uint64 - - // restricted is the flag of a validator is in restricted mode or not. - restricted bool -} - -// Errors for sanity check error. -var ( - ErrInvalidProposerID = fmt.Errorf("invalid proposer id") - ErrForkBlock = fmt.Errorf("fork block") - ErrNotAckParent = fmt.Errorf("not ack parent") - ErrDoubleAck = fmt.Errorf("double ack") - ErrInvalidBlockHeight = fmt.Errorf("invalid block height") -) - -// newAcking creates a new acking struct. -func newAcking() *acking { - return &acking{ - lattice: make(map[types.ValidatorID]*ackingValidatorStatus), - blocks: make(map[common.Hash]*types.Block), - receivedBlocks: make(map[common.Hash]*types.Block), - ackedBlocks: make(map[common.Hash]*types.Block), - } -} - -func (a *acking) sanityCheck(b *types.Block) error { - // Check if its proposer is in validator set. - if _, exist := a.lattice[b.ProposerID]; !exist { - return ErrInvalidProposerID - } - - // Check if it forks. - if bInLattice, exist := a.lattice[b.ProposerID].blocks[b.Height]; exist { - if b.Hash != bInLattice.Hash { - return ErrForkBlock - } - } - - // Check non-genesis blocks if it acks its parent. - if b.Height > 0 { - if _, exist := b.Acks[b.ParentHash]; !exist { - return ErrNotAckParent - } - bParent, exists := a.blocks[b.ParentHash] - if exists && bParent.Height != b.Height-1 { - return ErrInvalidBlockHeight - } - } - - // Check if it acks older blocks. - for hash := range b.Acks { - if bAck, exist := a.blocks[hash]; exist { - if bAck.Height < a.lattice[b.ProposerID].nextAck[bAck.ProposerID] { - return ErrDoubleAck - } - } - } - - // TODO(haoping): application layer check of block's content - - return nil -} - -// areAllAcksReceived checks if all ack blocks of a block are all in lattice. 
-func (a *acking) areAllAcksInLattice(b *types.Block) bool { - for h := range b.Acks { - bAck, exist := a.blocks[h] - if !exist { - return false - } - bAckInLattice, exist := a.lattice[bAck.ProposerID].blocks[bAck.Height] - if !exist { - return false - } - if bAckInLattice.Hash != bAck.Hash { - panic("areAllAcksInLattice: acking.lattice has corrupted") - } - } - return true -} - -// processBlock processes block, it does sanity check, inserts block into -// lattice, handles strong acking and deletes blocks which will not be used. -func (a *acking) processBlock(block *types.Block) { - // If a block does not pass sanity check, discard this block. - if err := a.sanityCheck(block); err != nil { - return - } - a.blocks[block.Hash] = block - block.AckedValidators = make(map[types.ValidatorID]struct{}) - a.receivedBlocks[block.Hash] = block - - // Check blocks in receivedBlocks if its acks are all in lattice. If a block's - // acking blocks are all in lattice, execute sanity check and add the block - // into lattice. - blocksToAcked := map[common.Hash]*types.Block{} - for { - blocksToLattice := map[common.Hash]*types.Block{} - for _, b := range a.receivedBlocks { - if a.areAllAcksInLattice(b) { - blocksToLattice[b.Hash] = b - } - } - if len(blocksToLattice) == 0 { - break - } - for _, b := range blocksToLattice { - // Sanity check must been executed again here for the case that several - // valid blocks with different content being added into blocksToLattice - // in the same time. For example - // B C Block B and C both ack A and are valid. 
B, C received first - // \ / (added in receivedBlocks), and A comes, if sanity check is - // A not being executed here, B and C will both be added in lattice - if err := a.sanityCheck(b); err != nil { - delete(a.blocks, b.Hash) - delete(a.receivedBlocks, b.Hash) - continue - } - a.lattice[b.ProposerID].blocks[b.Height] = b - delete(a.receivedBlocks, b.Hash) - for h := range b.Acks { - bAck := a.blocks[h] - // Update nextAck only when bAck.Height + 1 is greater. A block might - // ack blocks proposed by same validator with different height. - if a.lattice[b.ProposerID].nextAck[bAck.ProposerID] < bAck.Height+1 { - a.lattice[b.ProposerID].nextAck[bAck.ProposerID] = bAck.Height + 1 - } - // Update AckedValidators for each ack blocks and its parents. - for { - if _, exist := bAck.AckedValidators[b.ProposerID]; exist { - break - } - if bAck.Status > types.BlockStatusInit { - break - } - bAck.AckedValidators[b.ProposerID] = struct{}{} - // A block is strongly acked if it is acked by more than - // 2 * (maximum number of byzatine validators) unique validators. - if len(bAck.AckedValidators) > 2*((len(a.lattice)-1)/3) { - blocksToAcked[bAck.Hash] = bAck - } - if bAck.Height == 0 { - break - } - bAck = a.blocks[bAck.ParentHash] - } - } - } - } - - for _, b := range blocksToAcked { - a.ackedBlocks[b.Hash] = b - b.Status = types.BlockStatusAcked - } - - // TODO(haoping): delete blocks in received array when it is received a long - // time ago - - // Delete old blocks in "lattice" and "blocks" for release memory space. - // First, find the height that blocks below it can be deleted. This height - // is defined by finding minimum of validator's nextOutput and last acking - // heights from other validators, i.e. a.lattice[v_other].nextAck[this_vid]. - // This works because blocks of height below this minimum are not going to be - // acked anymore, the ackings of these blocks are illegal. 
- for vid := range a.lattice { - // Find the minimum height that heights lesser can be deleted. - min := a.lattice[vid].nextOutput - for vid2 := range a.lattice { - if a.lattice[vid2].nextAck[vid] < min { - min = a.lattice[vid2].nextAck[vid] - } - } - // "min" is the height of "next" last acked, min - 1 is the last height. - // Delete blocks from min - 2 which will never be acked. - if min < 3 { - continue - } - min -= 2 - for { - b, exist := a.lattice[vid].blocks[min] - if !exist { - break - } - if b.Status >= types.BlockStatusOrdering { - delete(a.lattice[vid].blocks, b.Height) - delete(a.blocks, b.Hash) - } - if min == 0 { - break - } - min-- - } - } -} - -// extractBlocks returns all blocks that can be inserted into total ordering's -// DAG. This function changes the status of blocks from types.BlockStatusAcked -// to blockStatusOrdering. -func (a *acking) extractBlocks() []*types.Block { - ret := []*types.Block{} - for { - updated := false - for vid := range a.lattice { - b, exist := a.lattice[vid].blocks[a.lattice[vid].nextOutput] - if !exist || b.Status < types.BlockStatusAcked { - continue - } - allAcksInOrderingStatus := true - // Check if all acks are in ordering or above status. If a block of an ack - // does not exist means that it deleted but its status is definitely Acked - // or ordering. - for ackHash := range b.Acks { - bAck, exist := a.blocks[ackHash] - if !exist { - continue - } - if bAck.Status < types.BlockStatusOrdering { - allAcksInOrderingStatus = false - break - } - } - if !allAcksInOrderingStatus { - continue - } - updated = true - b.Status = types.BlockStatusOrdering - delete(a.ackedBlocks, b.Hash) - ret = append(ret, b) - a.lattice[vid].nextOutput++ - } - if !updated { - break - } - } - return ret -} - -// addValidator adds validator in the validator set. 
-func (a *acking) addValidator(h types.ValidatorID) { - a.lattice[h] = &ackingValidatorStatus{ - blocks: make(map[uint64]*types.Block), - nextAck: make(map[types.ValidatorID]uint64), - nextOutput: 0, - restricted: false, - } -} - -// deleteValidator deletes validator in validator set. -func (a *acking) deleteValidator(h types.ValidatorID) { - for h := range a.lattice { - delete(a.lattice[h].nextAck, h) - } - delete(a.lattice, h) -} diff --git a/core/acking_test.go b/core/acking_test.go deleted file mode 100644 index b1f26b2..0000000 --- a/core/acking_test.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright 2018 The dexon-consensus-core Authors -// This file is part of the dexon-consensus-core library. -// -// The dexon-consensus-core library is free software: you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus-core library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus-core library. If not, see -// . 
- -package core - -import ( - "math/rand" - "sort" - "testing" - - "github.com/stretchr/testify/suite" - - "github.com/dexon-foundation/dexon-consensus-core/blockdb" - "github.com/dexon-foundation/dexon-consensus-core/common" - "github.com/dexon-foundation/dexon-consensus-core/core/test" - "github.com/dexon-foundation/dexon-consensus-core/core/types" -) - -type AckingTest struct { - suite.Suite -} - -func (s *AckingTest) SetupSuite() { - -} - -func (s *AckingTest) SetupTest() { - -} - -// genTestCase1 generates test case 1, -// 3 -// | -// 2 -// | \ -// 1 | 1 -// | | | -// 0 0 0 0 (block height) -// 0 1 2 3 (validator) -func genTestCase1(s *AckingTest, a *acking) []types.ValidatorID { - // Create new acking instance with 4 validators - var b *types.Block - var h common.Hash - vids := []types.ValidatorID{} - for i := 0; i < 4; i++ { - vid := types.ValidatorID{Hash: common.NewRandomHash()} - a.addValidator(vid) - vids = append(vids, vid) - } - // Add genesis blocks. - for i := 0; i < 4; i++ { - h = common.NewRandomHash() - b = &types.Block{ - ProposerID: vids[i], - ParentHash: h, - Hash: h, - Height: 0, - Acks: map[common.Hash]struct{}{}, - } - a.processBlock(b) - } - - // Add block 0-1 which acks 0-0. - h = a.lattice[vids[0]].blocks[0].Hash - b = &types.Block{ - ProposerID: vids[0], - ParentHash: h, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - h: struct{}{}, - }, - } - a.processBlock(b) - s.NotNil(a.lattice[vids[0]].blocks[1]) - - // Add block 0-2 which acks 0-1 and 1-0. - h = a.lattice[vids[0]].blocks[1].Hash - b = &types.Block{ - ProposerID: vids[0], - ParentHash: h, - Hash: common.NewRandomHash(), - Height: 2, - Acks: map[common.Hash]struct{}{ - h: struct{}{}, - a.lattice[vids[1]].blocks[0].Hash: struct{}{}, - }, - } - a.processBlock(b) - s.NotNil(a.lattice[vids[0]].blocks[2]) - - // Add block 0-3 which acks 0-2. 
- h = a.lattice[vids[0]].blocks[2].Hash - b = &types.Block{ - ProposerID: vids[0], - ParentHash: h, - Hash: common.NewRandomHash(), - Height: 3, - Acks: map[common.Hash]struct{}{ - h: struct{}{}, - }, - } - a.processBlock(b) - s.NotNil(a.lattice[vids[0]].blocks[3]) - - // Add block 3-1 which acks 3-0. - h = a.lattice[vids[3]].blocks[0].Hash - b = &types.Block{ - ProposerID: vids[3], - ParentHash: h, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - h: struct{}{}, - }, - } - a.processBlock(b) - s.NotNil(a.lattice[vids[3]].blocks[0]) - - return vids -} - -func (s *AckingTest) TestAddValidator() { - a := newAcking() - s.Equal(len(a.lattice), 0) - genTestCase1(s, a) - s.Equal(len(a.lattice), 4) -} - -func (s *AckingTest) TestSanityCheck() { - var b *types.Block - var h common.Hash - var vids []types.ValidatorID - var err error - a := newAcking() - vids = genTestCase1(s, a) - - // Non-genesis block with no ack, should get error. - b = &types.Block{ - ProposerID: vids[0], - ParentHash: common.NewRandomHash(), - Height: 10, - Acks: make(map[common.Hash]struct{}), - } - err = a.sanityCheck(b) - s.NotNil(err) - s.Equal(ErrNotAckParent.Error(), err.Error()) - - // Non-genesis block which does not ack its parent. - b = &types.Block{ - ProposerID: vids[1], - ParentHash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - a.lattice[vids[2]].blocks[0].Hash: struct{}{}, - }, - } - err = a.sanityCheck(b) - s.NotNil(err) - s.Equal(ErrNotAckParent.Error(), err.Error()) - - // Non-genesis block which acks its parent but the height is invalid. - h = a.lattice[vids[1]].blocks[0].Hash - b = &types.Block{ - ProposerID: vids[1], - ParentHash: h, - Height: 2, - Acks: map[common.Hash]struct{}{ - h: struct{}{}, - }, - } - err = a.sanityCheck(b) - s.NotNil(err) - s.Equal(ErrInvalidBlockHeight.Error(), err.Error()) - - // Invalid proposer ID. 
- h = a.lattice[vids[1]].blocks[0].Hash - b = &types.Block{ - ProposerID: types.ValidatorID{Hash: common.NewRandomHash()}, - ParentHash: h, - Height: 1, - Acks: map[common.Hash]struct{}{ - h: struct{}{}, - }, - } - err = a.sanityCheck(b) - s.NotNil(err) - s.Equal(ErrInvalidProposerID.Error(), err.Error()) - - // Fork block. - h = a.lattice[vids[0]].blocks[0].Hash - b = &types.Block{ - ProposerID: vids[0], - ParentHash: h, - Height: 1, - Acks: map[common.Hash]struct{}{ - h: struct{}{}, - }, - } - err = a.sanityCheck(b) - s.NotNil(err) - s.Equal(ErrForkBlock.Error(), err.Error()) - - // Replicated ack. - h = a.lattice[vids[0]].blocks[3].Hash - b = &types.Block{ - ProposerID: vids[0], - ParentHash: h, - Height: 4, - Acks: map[common.Hash]struct{}{ - h: struct{}{}, - a.lattice[vids[1]].blocks[0].Hash: struct{}{}, - }, - } - err = a.sanityCheck(b) - s.NotNil(err) - s.Equal(ErrDoubleAck.Error(), err.Error()) - - // Normal block. - h = a.lattice[vids[1]].blocks[0].Hash - b = &types.Block{ - ProposerID: vids[1], - ParentHash: h, - Height: 1, - Acks: map[common.Hash]struct{}{ - h: struct{}{}, - common.NewRandomHash(): struct{}{}, - }, - } - err = a.sanityCheck(b) - s.Nil(err) -} - -func (s *AckingTest) TestAreAllAcksInLattice() { - var b *types.Block - var vids []types.ValidatorID - a := newAcking() - vids = genTestCase1(s, a) - - // Empty ack should get true, although won't pass sanity check. - b = &types.Block{ - Acks: map[common.Hash]struct{}{}, - } - s.True(a.areAllAcksInLattice(b)) - - // Acks blocks in lattice - b = &types.Block{ - Acks: map[common.Hash]struct{}{ - a.lattice[vids[0]].blocks[0].Hash: struct{}{}, - a.lattice[vids[0]].blocks[1].Hash: struct{}{}, - }, - } - s.True(a.areAllAcksInLattice(b)) - - // Acks random block hash. 
- b = &types.Block{ - Acks: map[common.Hash]struct{}{ - common.NewRandomHash(): struct{}{}, - }, - } - s.False(a.areAllAcksInLattice(b)) -} - -func (s *AckingTest) TestStrongAck() { - var b *types.Block - var vids []types.ValidatorID - a := newAcking() - vids = genTestCase1(s, a) - - // Check block 0-0 to 0-3 before adding 1-1 and 2-1. - for i := uint64(0); i < 4; i++ { - s.Equal(types.BlockStatusInit, a.lattice[vids[0]].blocks[i].Status) - } - - // Add block 1-1 which acks 1-0 and 0-2, and block 0-0 to 0-3 are still - // in BlockStatusInit, because they are not strongly acked. - b = &types.Block{ - ProposerID: vids[1], - ParentHash: a.lattice[vids[1]].blocks[0].Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - a.lattice[vids[0]].blocks[2].Hash: struct{}{}, - a.lattice[vids[1]].blocks[0].Hash: struct{}{}, - }, - } - a.processBlock(b) - s.NotNil(a.lattice[vids[1]].blocks[1]) - for i := uint64(0); i < 4; i++ { - s.Equal(types.BlockStatusInit, a.lattice[vids[0]].blocks[i].Status) - } - - // Add block 2-1 which acks 0-2 and 2-0, block 0-0 to 0-2 are strongly acked but - // 0-3 is still not. - b = &types.Block{ - ProposerID: vids[2], - ParentHash: a.lattice[vids[2]].blocks[0].Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - a.lattice[vids[0]].blocks[2].Hash: struct{}{}, - a.lattice[vids[2]].blocks[0].Hash: struct{}{}, - }, - } - a.processBlock(b) - s.Equal(types.BlockStatusAcked, a.lattice[vids[0]].blocks[0].Status) - s.Equal(types.BlockStatusAcked, a.lattice[vids[0]].blocks[1].Status) - s.Equal(types.BlockStatusAcked, a.lattice[vids[0]].blocks[2].Status) - s.Equal(types.BlockStatusInit, a.lattice[vids[0]].blocks[3].Status) -} - -func (s *AckingTest) TestExtractBlocks() { - var b *types.Block - a := newAcking() - vids := genTestCase1(s, a) - - // Add block 1-1 which acks 1-0, 0-2, 3-0. 
- b = &types.Block{ - ProposerID: vids[1], - ParentHash: a.lattice[vids[1]].blocks[0].Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - a.lattice[vids[0]].blocks[2].Hash: struct{}{}, - a.lattice[vids[1]].blocks[0].Hash: struct{}{}, - a.lattice[vids[3]].blocks[0].Hash: struct{}{}, - }, - } - a.processBlock(b) - - // Add block 2-1 which acks 0-2, 2-0, 3-0. - b = &types.Block{ - ProposerID: vids[2], - ParentHash: a.lattice[vids[2]].blocks[0].Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - a.lattice[vids[0]].blocks[2].Hash: struct{}{}, - a.lattice[vids[2]].blocks[0].Hash: struct{}{}, - a.lattice[vids[3]].blocks[0].Hash: struct{}{}, - }, - } - a.processBlock(b) - - hashs := []common.Hash{ - a.lattice[vids[0]].blocks[0].Hash, - a.lattice[vids[0]].blocks[1].Hash, - a.lattice[vids[3]].blocks[0].Hash, - } - hashExtracted := map[common.Hash]*types.Block{} - for _, b := range a.extractBlocks() { - hashExtracted[b.Hash] = b - s.Equal(types.BlockStatusOrdering, b.Status) - } - for _, h := range hashs { - _, exist := hashExtracted[h] - s.True(exist) - } -} - -func (s *AckingTest) TestRandomIntensiveAcking() { - a := newAcking() - vids := []types.ValidatorID{} - heights := map[types.ValidatorID]uint64{} - extractedBlocks := []*types.Block{} - - // Generate validators and genesis blocks. 
- for i := 0; i < 4; i++ { - vid := types.ValidatorID{Hash: common.NewRandomHash()} - a.addValidator(vid) - vids = append(vids, vid) - h := common.NewRandomHash() - b := &types.Block{ - Hash: h, - ParentHash: h, - Acks: map[common.Hash]struct{}{}, - Height: 0, - ProposerID: vid, - } - a.processBlock(b) - heights[vid] = 1 - } - - for i := 0; i < 5000; i++ { - vid := vids[rand.Int()%len(vids)] - height := heights[vid] - heights[vid]++ - parentHash := a.lattice[vid].blocks[height-1].Hash - acks := map[common.Hash]struct{}{} - for _, vid2 := range vids { - if b, exist := a.lattice[vid2].blocks[a.lattice[vid].nextAck[vid2]]; exist { - acks[b.Hash] = struct{}{} - } - } - b := &types.Block{ - ProposerID: vid, - Hash: common.NewRandomHash(), - ParentHash: parentHash, - Height: height, - Acks: acks, - } - a.processBlock(b) - extractedBlocks = append(extractedBlocks, a.extractBlocks()...) - } - - extractedBlocks = append(extractedBlocks, a.extractBlocks()...) - // The len of array extractedBlocks should be about 5000. - s.True(len(extractedBlocks) > 4500) - // The len of a.blocks should be small if deleting mechanism works. - s.True(len(a.blocks) < 500) -} - -func (s *AckingTest) TestRandomlyGeneratedBlocks() { - var ( - validatorCount = 19 - blockCount = 50 - repeat = 20 - ) - - // Prepare a randomly generated blocks. - db, err := blockdb.NewMemBackedBlockDB("test-acking-random.blockdb") - s.Require().Nil(err) - defer func() { - // If the test fails, keep the block database for troubleshooting. - if s.T().Failed() { - s.Nil(db.Close()) - } - }() - gen := test.NewBlocksGenerator(nil) - s.Require().Nil(gen.Generate(validatorCount, blockCount, nil, db)) - iter, err := db.GetAll() - s.Require().Nil(err) - // Setup a revealer that would reveal blocks randomly. 
- revealer, err := test.NewRandomRevealer(iter) - s.Require().Nil(err) - - stronglyAckedHashesAsString := map[string]struct{}{} - for i := 0; i < repeat; i++ { - validators := map[types.ValidatorID]struct{}{} - acking := newAcking() - stronglyAckedHashes := common.Hashes{} - revealer.Reset() - - for { - // Reveal next block. - b, err := revealer.Next() - if err != nil { - if err == blockdb.ErrIterationFinished { - err = nil - break - } - } - s.Require().Nil(err) - - // It's a hack to add validator to Acking module. - if _, added := validators[b.ProposerID]; !added { - acking.addValidator(b.ProposerID) - validators[b.ProposerID] = struct{}{} - } - // Perform reliable broadcast process. - acking.processBlock(&b) - for _, b := range acking.extractBlocks() { - stronglyAckedHashes = append(stronglyAckedHashes, b.Hash) - } - } - // To make it easier to check, sort hashes of - // strongly acked blocks, and concatenate them into - // a string. - sort.Sort(stronglyAckedHashes) - asString := "" - for _, h := range stronglyAckedHashes { - asString += h.String() + "," - } - stronglyAckedHashesAsString[asString] = struct{}{} - } - // Make sure concatenated hashes of strongly acked blocks are identical. - s.Require().Len(stronglyAckedHashesAsString, 1) - for h := range stronglyAckedHashesAsString { - // Make sure at least some blocks are strongly acked. - s.True(len(h) > 0) - } -} - -func TestAcking(t *testing.T) { - suite.Run(t, new(AckingTest)) -} diff --git a/core/consensus-timestamp.go b/core/consensus-timestamp.go new file mode 100644 index 0000000..e9b5cce --- /dev/null +++ b/core/consensus-timestamp.go @@ -0,0 +1,151 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. 
+// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// . + +package core + +import ( + "errors" + "sort" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// Timestamp is for Concensus Timestamp Algorithm. +type consensusTimestamp struct { + lastMainChainBlock *types.Block + blocksNotInMainChain []*types.Block +} + +var ( + // ErrInvalidMainChain would be reported if the invalid result from + // main chain selection algorithm is detected. + ErrInvalidMainChain = errors.New("invalid main chain") + // ErrEmptyTimestamps would be reported if Block.timestamps is empty. + ErrEmptyTimestamps = errors.New("timestamp vector should not be empty") +) + +// NewTimestamp create Timestamp object. +func newConsensusTimestamp() *consensusTimestamp { + return &consensusTimestamp{} +} + +// ProcessBlocks is the entry function. +func (ct *consensusTimestamp) processBlocks(blocks []*types.Block) ( + blocksWithTimestamp []*types.Block, mainChain []*types.Block, err error) { + if len(blocks) == 0 { + // TODO (jimmy-dexon): Remove this panic before release. + panic("Unexpected empty block list.") + } + outputFirstBlock := true + blocks = append(ct.blocksNotInMainChain, blocks...) 
+ if ct.lastMainChainBlock != nil { + // TODO (jimmy-dexon): The performance here can be optimized. + blocks = append([]*types.Block{ct.lastMainChainBlock}, blocks...) + outputFirstBlock = false + } + mainChain, nonMainChain := ct.selectMainChain(blocks) + ct.blocksNotInMainChain = nonMainChain + ct.lastMainChainBlock = mainChain[len(mainChain)-1] + blocksWithTimestamp = blocks[:len(blocks)-len(nonMainChain)] + leftMainChainIdx := 0 + rightMainChainIdx := 0 + idxMainChain := 0 + for idx, block := range blocksWithTimestamp { + if idxMainChain >= len(mainChain) { + err = ErrInvalidMainChain + return + } else if block.Hash == mainChain[idxMainChain].Hash { + rightMainChainIdx = idx + blocksWithTimestamp[idx].ConsensusTime, err = ct.getMedianTime(block) + if err != nil { + return + } + // Process Non-MainChain blocks. + if rightMainChainIdx > leftMainChainIdx { + for idx, timestamp := range interpoTime( + blocksWithTimestamp[leftMainChainIdx].ConsensusTime, + blocksWithTimestamp[rightMainChainIdx].ConsensusTime, + rightMainChainIdx-leftMainChainIdx-1) { + blocksWithTimestamp[leftMainChainIdx+idx+1].ConsensusTime = timestamp + } + } + leftMainChainIdx = idx + idxMainChain++ + } + } + if !outputFirstBlock { + blocksWithTimestamp = blocksWithTimestamp[1:] + } + return +} + +func (ct *consensusTimestamp) selectMainChain(blocks []*types.Block) ( + mainChain []*types.Block, nonMainChain []*types.Block) { + for _, block := range blocks { + if len(mainChain) != 0 { + if _, exists := block.Acks[mainChain[len(mainChain)-1].Hash]; !exists { + nonMainChain = append(nonMainChain, block) + continue + } + } + nonMainChain = []*types.Block{} + mainChain = append(mainChain, block) + } + return +} + +func (ct *consensusTimestamp) getMedianTime(block *types.Block) ( + timestamp time.Time, err error) { + timestamps := []time.Time{} + for _, timestamp := range block.Timestamps { + timestamps = append(timestamps, timestamp) + } + if len(timestamps) == 0 { + err = ErrEmptyTimestamps + return 
+ } + sort.Sort(common.ByTime(timestamps)) + if len(timestamps)%2 == 0 { + t1 := timestamps[len(timestamps)/2-1] + t2 := timestamps[len(timestamps)/2] + timestamp = interpoTime(t1, t2, 1)[0] + } else { + timestamp = timestamps[len(timestamps)/2] + } + return +} + +func interpoTime(t1 time.Time, t2 time.Time, sep int) []time.Time { + if sep == 0 { + return []time.Time{} + } + if t1.After(t2) { + return interpoTime(t2, t1, sep) + } + timestamps := make([]time.Time, sep) + duration := t2.Sub(t1) + period := time.Duration( + (duration.Nanoseconds() / int64(sep+1))) * time.Nanosecond + prevTime := t1 + for idx := range timestamps { + prevTime = prevTime.Add(period) + timestamps[idx] = prevTime + } + return timestamps +} diff --git a/core/consensus-timestamp_test.go b/core/consensus-timestamp_test.go new file mode 100644 index 0000000..8a82080 --- /dev/null +++ b/core/consensus-timestamp_test.go @@ -0,0 +1,206 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// . 
+ +package core + +import ( + "math" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +type ConsensusTimestampTest struct { + suite.Suite +} + +func generateBlocksWithAcks(blockNum, maxAcks int) []*types.Block { + chain := []*types.Block{ + &types.Block{ + Hash: common.NewRandomHash(), + Acks: make(map[common.Hash]struct{}), + }, + } + for i := 1; i < blockNum; i++ { + acks := make(map[common.Hash]struct{}) + ackNum := rand.Intn(maxAcks) + 1 + for j := 0; j < ackNum; j++ { + ack := rand.Intn(len(chain)) + acks[chain[ack].Hash] = struct{}{} + } + block := &types.Block{ + Hash: common.NewRandomHash(), + Acks: acks, + } + chain = append(chain, block) + } + return chain +} + +func fillBlocksTimestamps(blocks []*types.Block, validatorNum int, + step, sigma time.Duration) { + curTime := time.Now().UTC() + vIDs := make([]types.ValidatorID, validatorNum) + for i := 0; i < validatorNum; i++ { + vIDs[i] = types.ValidatorID{Hash: common.NewRandomHash()} + } + for _, block := range blocks { + block.Timestamps = make(map[types.ValidatorID]time.Time) + for _, vID := range vIDs { + diffSeconds := rand.NormFloat64() * sigma.Seconds() + diffSeconds = math.Min(diffSeconds, step.Seconds()/2) + diffSeconds = math.Max(diffSeconds, -step.Seconds()/2) + diffDuration := time.Duration(diffSeconds*1000) * time.Millisecond + block.Timestamps[vID] = curTime.Add(diffDuration) + } + curTime = curTime.Add(step) + } +} + +func extractTimestamps(blocks []*types.Block) []time.Time { + timestamps := make([]time.Time, len(blocks)) + for idx, block := range blocks { + timestamps[idx] = block.ConsensusTime + } + return timestamps +} + +func (s *ConsensusTimestampTest) TestMainChainSelection() { + ct := newConsensusTimestamp() + ct2 := newConsensusTimestamp() + blockNums := []int{50, 100, 30} + maxAcks := 5 + for _, blockNum := range blockNums { + 
chain := generateBlocksWithAcks(blockNum, maxAcks) + mainChain, _ := ct.selectMainChain(chain) + // Verify the selected main chain. + for i := 1; i < len(mainChain); i++ { + _, exists := mainChain[i].Acks[mainChain[i-1].Hash] + s.True(exists) + } + // Verify if selectMainChain is stable. + mainChain2, _ := ct2.selectMainChain(chain) + s.Equal(mainChain, mainChain2) + } +} + +func (s *ConsensusTimestampTest) TestTimestampPartition() { + blockNums := []int{50, 100, 30} + validatorNum := 19 + sigma := 100 * time.Millisecond + maxAcks := 5 + totalMainChain := make([]*types.Block, 1) + totalChain := make([]*types.Block, 0) + totalTimestamps := make([]time.Time, 0) + ct := newConsensusTimestamp() + var lastMainChainBlock *types.Block + for _, blockNum := range blockNums { + chain := generateBlocksWithAcks(blockNum, maxAcks) + fillBlocksTimestamps(chain, validatorNum, time.Second, sigma) + blocksWithTimestamps, mainChain, err := ct.processBlocks(chain) + s.Require().Nil(err) + timestamps := extractTimestamps(blocksWithTimestamps) + if lastMainChainBlock != nil { + s.Require().Equal(mainChain[0], lastMainChainBlock) + } + s.Require().Equal(mainChain[len(mainChain)-1], ct.lastMainChainBlock) + lastMainChainBlock = ct.lastMainChainBlock + totalMainChain = + append(totalMainChain[:len(totalMainChain)-1], mainChain...) + totalChain = append(totalChain, chain...) + totalTimestamps = append(totalTimestamps, timestamps...) 
+ } + ct2 := newConsensusTimestamp() + blocksWithTimestamps2, mainChain2, err := ct2.processBlocks(totalChain) + s.Require().Nil(err) + timestamps2 := extractTimestamps(blocksWithTimestamps2) + s.Equal(totalMainChain, mainChain2) + s.Equal(totalTimestamps, timestamps2) +} + +func timeDiffWithinTolerance(t1, t2 time.Time, tolerance time.Duration) bool { + if t1.After(t2) { + return timeDiffWithinTolerance(t2, t1, tolerance) + } + return t1.Add(tolerance).After(t2) +} + +func (s *ConsensusTimestampTest) TestTimestampIncrease() { + validatorNum := 19 + sigma := 100 * time.Millisecond + ct := newConsensusTimestamp() + chain := generateBlocksWithAcks(1000, 5) + fillBlocksTimestamps(chain, validatorNum, time.Second, sigma) + blocksWithTimestamps, _, err := ct.processBlocks(chain) + s.Require().Nil(err) + timestamps := extractTimestamps(blocksWithTimestamps) + for i := 1; i < len(timestamps); i++ { + s.True(timestamps[i].After(timestamps[i-1])) + } + // Test if the processBlocks is stable. + ct2 := newConsensusTimestamp() + blocksWithTimestamps2, _, err := ct2.processBlocks(chain) + s.Require().Nil(err) + timestamps2 := extractTimestamps(blocksWithTimestamps2) + s.Equal(timestamps, timestamps2) +} + +func (s *ConsensusTimestampTest) TestByzantineBiasTime() { + // Test that Byzantine node cannot bias the timestamps. + validatorNum := 19 + sigma := 100 * time.Millisecond + tolerance := 4 * sigma + ct := newConsensusTimestamp() + chain := generateBlocksWithAcks(1000, 5) + fillBlocksTimestamps(chain, validatorNum, time.Second, sigma) + blocksWithTimestamps, _, err := ct.processBlocks(chain) + s.Require().Nil(err) + timestamps := extractTimestamps(blocksWithTimestamps) + byzantine := validatorNum / 3 + validators := make([]types.ValidatorID, 0, validatorNum) + for vID := range chain[0].Timestamps { + validators = append(validators, vID) + } + // The number of Byzantine node is at most N/3. + for i := 0; i < byzantine; i++ { + // Pick one validator to be Byzantine node. 
+ // It is allowed to have the vID be duplicated, + // because the number of Byzantine node is between 1 and N/3. + vID := validators[rand.Intn(validatorNum)] + for _, block := range chain { + block.Timestamps[vID] = time.Time{} + } + } + ctByzantine := newConsensusTimestamp() + blocksWithTimestampsB, _, err := ctByzantine.processBlocks(chain) + s.Require().Nil(err) + timestampsWithByzantine := extractTimestamps(blocksWithTimestampsB) + for idx, timestamp := range timestamps { + timestampWithByzantine := timestampsWithByzantine[idx] + s.True(timeDiffWithinTolerance( + timestamp, timestampWithByzantine, tolerance)) + } +} + +func TestConsensusTimestamp(t *testing.T) { + suite.Run(t, new(ConsensusTimestampTest)) +} diff --git a/core/reliable-broadcast.go b/core/reliable-broadcast.go new file mode 100644 index 0000000..dd57241 --- /dev/null +++ b/core/reliable-broadcast.go @@ -0,0 +1,309 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// . + +package core + +import ( + "fmt" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// reliableBroadcast is a module for reliable broadcast. 
type reliableBroadcast struct {
	// lattice stores blocks by its validator ID and height.
	lattice map[types.ValidatorID]*ackingValidatorStatus

	// blocks stores the hash to block map.
	blocks map[common.Hash]*types.Block

	// receivedBlocks stores blocks which are received but whose acks are not
	// yet all in lattice. They are parked here until every acked block has
	// been inserted into lattice.
	receivedBlocks map[common.Hash]*types.Block

	// ackedBlocks stores blocks in status types.BlockStatusAcked, which are
	// strongly acked but not yet being output to total ordering module.
	ackedBlocks map[common.Hash]*types.Block
}

// ackingValidatorStatus is the per-validator bookkeeping kept inside
// reliableBroadcast.lattice.
type ackingValidatorStatus struct {
	// blocks stores blocks proposed by the specified validator in a map whose
	// key is the height of the block.
	blocks map[uint64]*types.Block

	// nextAck stores the height of the next height that should be acked, i.e.
	// last acked height + 1. Initialized to 0 when genesis blocks are still
	// not being acked. For example, rb.lattice[vid1].nextAck[vid2] - 1 is the
	// last height acked by vid1 acking vid2.
	nextAck map[types.ValidatorID]uint64

	// nextOutput is the next output height of block, default to 0.
	nextOutput uint64

	// restricted is the flag of whether a validator is in restricted mode.
	restricted bool
}

// Errors for sanity check error.
var (
	ErrInvalidProposerID  = fmt.Errorf("invalid proposer id")
	ErrForkBlock          = fmt.Errorf("fork block")
	ErrNotAckParent       = fmt.Errorf("not ack parent")
	ErrDoubleAck          = fmt.Errorf("double ack")
	ErrInvalidBlockHeight = fmt.Errorf("invalid block height")
)

// newReliableBroadcast creates a new reliableBroadcast struct.
func newReliableBroadcast() *reliableBroadcast {
	return &reliableBroadcast{
		lattice:        make(map[types.ValidatorID]*ackingValidatorStatus),
		blocks:         make(map[common.Hash]*types.Block),
		receivedBlocks: make(map[common.Hash]*types.Block),
		ackedBlocks:    make(map[common.Hash]*types.Block),
	}
}

// sanityCheck verifies that block b is safe to be inserted: its proposer is
// known, it does not fork an existing block at the same height, a non-genesis
// block acks its parent at the correct height, and it does not re-ack blocks
// below the already-acked height (double ack).
func (rb *reliableBroadcast) sanityCheck(b *types.Block) error {
	// Check if its proposer is in validator set.
	if _, exist := rb.lattice[b.ProposerID]; !exist {
		return ErrInvalidProposerID
	}

	// Check if it forks.
	if bInLattice, exist := rb.lattice[b.ProposerID].blocks[b.Height]; exist {
		if b.Hash != bInLattice.Hash {
			return ErrForkBlock
		}
	}

	// Check non-genesis blocks if it acks its parent.
	if b.Height > 0 {
		if _, exist := b.Acks[b.ParentHash]; !exist {
			return ErrNotAckParent
		}
		// If the parent is known, its height must be exactly one less.
		bParent, exists := rb.blocks[b.ParentHash]
		if exists && bParent.Height != b.Height-1 {
			return ErrInvalidBlockHeight
		}
	}

	// Check if it acks older blocks, i.e. blocks already acked before.
	for hash := range b.Acks {
		if bAck, exist := rb.blocks[hash]; exist {
			if bAck.Height < rb.lattice[b.ProposerID].nextAck[bAck.ProposerID] {
				return ErrDoubleAck
			}
		}
	}

	// TODO(haoping): application layer check of block's content

	return nil
}

// areAllAcksInLattice checks if all acked blocks of a block are in lattice.
// (Comment fixed: it previously referred to a non-existent
// "areAllAcksReceived"; Go doc comments should start with the function name.)
func (rb *reliableBroadcast) areAllAcksInLattice(b *types.Block) bool {
	for h := range b.Acks {
		bAck, exist := rb.blocks[h]
		if !exist {
			return false
		}
		bAckInLattice, exist := rb.lattice[bAck.ProposerID].blocks[bAck.Height]
		if !exist {
			return false
		}
		if bAckInLattice.Hash != bAck.Hash {
			panic("areAllAcksInLattice: reliableBroadcast.lattice has corrupted")
		}
	}
	return true
}

// processBlock processes block, it does sanity check, inserts block into
// lattice, handles strong acking and deletes blocks which will not be used.
func (rb *reliableBroadcast) processBlock(block *types.Block) {
	// If a block does not pass sanity check, discard this block.
	if err := rb.sanityCheck(block); err != nil {
		return
	}
	rb.blocks[block.Hash] = block
	block.AckedValidators = make(map[types.ValidatorID]struct{})
	rb.receivedBlocks[block.Hash] = block

	// Check blocks in receivedBlocks if their acks are all in lattice. If a
	// block's acking blocks are all in lattice, execute sanity check and add
	// the block into lattice. This loop runs to a fixpoint: inserting one
	// block may make further received blocks eligible.
	blocksToAcked := map[common.Hash]*types.Block{}
	for {
		blocksToLattice := map[common.Hash]*types.Block{}
		for _, b := range rb.receivedBlocks {
			if rb.areAllAcksInLattice(b) {
				blocksToLattice[b.Hash] = b
			}
		}
		if len(blocksToLattice) == 0 {
			break
		}
		for _, b := range blocksToLattice {
			// Sanity check must be executed again here for the case that several
			// valid blocks with different content are added into blocksToLattice
			// at the same time. For example
			// B C  Block B and C both ack A and are valid. B, C received first
			//  \ /  (added in receivedBlocks), and A comes, if sanity check is
			//   A   not being executed here, B and C will both be added in lattice
			if err := rb.sanityCheck(b); err != nil {
				delete(rb.blocks, b.Hash)
				delete(rb.receivedBlocks, b.Hash)
				continue
			}
			rb.lattice[b.ProposerID].blocks[b.Height] = b
			delete(rb.receivedBlocks, b.Hash)
			for h := range b.Acks {
				bAck := rb.blocks[h]
				// Update nextAck only when bAck.Height + 1 is greater. A block might
				// ack blocks proposed by same validator with different height.
				if rb.lattice[b.ProposerID].nextAck[bAck.ProposerID] < bAck.Height+1 {
					rb.lattice[b.ProposerID].nextAck[bAck.ProposerID] = bAck.Height + 1
				}
				// Update AckedValidators for each acked block and its parents,
				// walking the parent chain until an already-recorded or
				// already-acked block (or genesis) is reached.
				for {
					if _, exist := bAck.AckedValidators[b.ProposerID]; exist {
						break
					}
					if bAck.Status > types.BlockStatusInit {
						break
					}
					bAck.AckedValidators[b.ProposerID] = struct{}{}
					// A block is strongly acked if it is acked by more than
					// 2 * (maximum number of byzantine validators) unique validators.
					if len(bAck.AckedValidators) > 2*((len(rb.lattice)-1)/3) {
						blocksToAcked[bAck.Hash] = bAck
					}
					if bAck.Height == 0 {
						break
					}
					bAck = rb.blocks[bAck.ParentHash]
				}
			}
		}
	}

	for _, b := range blocksToAcked {
		rb.ackedBlocks[b.Hash] = b
		b.Status = types.BlockStatusAcked
	}

	// TODO(haoping): delete blocks in received array when it is received a long
	// time ago

	// Delete old blocks in "lattice" and "blocks" to release memory space.
	// First, find the height that blocks below it can be deleted. This height
	// is defined by finding minimum of validator's nextOutput and last acking
	// heights from other validators, i.e. rb.lattice[v_other].nextAck[this_vid].
	// This works because blocks of height below this minimum are not going to be
	// acked anymore, the ackings of these blocks are illegal.
	for vid := range rb.lattice {
		// Find the minimum height such that lesser heights can be deleted.
		min := rb.lattice[vid].nextOutput
		for vid2 := range rb.lattice {
			if rb.lattice[vid2].nextAck[vid] < min {
				min = rb.lattice[vid2].nextAck[vid]
			}
		}
		// "min" is the height of "next" last acked, min - 1 is the last height.
		// Delete blocks from min - 2 which will never be acked.
		if min < 3 {
			continue
		}
		min -= 2
		for {
			b, exist := rb.lattice[vid].blocks[min]
			if !exist {
				break
			}
			if b.Status >= types.BlockStatusOrdering {
				delete(rb.lattice[vid].blocks, b.Height)
				delete(rb.blocks, b.Hash)
			}
			if min == 0 {
				break
			}
			min--
		}
	}
}

// extractBlocks returns all blocks that can be inserted into total ordering's
This function changes the status of blocks from types.BlockStatusAcked +// to blockStatusOrdering. +func (rb *reliableBroadcast) extractBlocks() []*types.Block { + ret := []*types.Block{} + for { + updated := false + for vid := range rb.lattice { + b, exist := rb.lattice[vid].blocks[rb.lattice[vid].nextOutput] + if !exist || b.Status < types.BlockStatusAcked { + continue + } + allAcksInOrderingStatus := true + // Check if all acks are in ordering or above status. If a block of an ack + // does not exist means that it deleted but its status is definitely Acked + // or ordering. + for ackHash := range b.Acks { + bAck, exist := rb.blocks[ackHash] + if !exist { + continue + } + if bAck.Status < types.BlockStatusOrdering { + allAcksInOrderingStatus = false + break + } + } + if !allAcksInOrderingStatus { + continue + } + updated = true + b.Status = types.BlockStatusOrdering + delete(rb.ackedBlocks, b.Hash) + ret = append(ret, b) + rb.lattice[vid].nextOutput++ + } + if !updated { + break + } + } + return ret +} + +// addValidator adds validator in the validator set. +func (rb *reliableBroadcast) addValidator(h types.ValidatorID) { + rb.lattice[h] = &ackingValidatorStatus{ + blocks: make(map[uint64]*types.Block), + nextAck: make(map[types.ValidatorID]uint64), + nextOutput: 0, + restricted: false, + } +} + +// deleteValidator deletes validator in validator set. +func (rb *reliableBroadcast) deleteValidator(h types.ValidatorID) { + for h := range rb.lattice { + delete(rb.lattice[h].nextAck, h) + } + delete(rb.lattice, h) +} diff --git a/core/reliable-broadcast_test.go b/core/reliable-broadcast_test.go new file mode 100644 index 0000000..7c15fe5 --- /dev/null +++ b/core/reliable-broadcast_test.go @@ -0,0 +1,500 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. 
+// +// The dexon-consensus-core library is free software: you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// . + +package core + +import ( + "math/rand" + "sort" + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus-core/blockdb" + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/test" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +type ReliableBroadcastTest struct { + suite.Suite +} + +func (s *ReliableBroadcastTest) SetupSuite() { + +} + +func (s *ReliableBroadcastTest) SetupTest() { + +} + +// genTestCase1 generates test case 1, +// 3 +// | +// 2 +// | \ +// 1 | 1 +// | | | +// 0 0 0 0 (block height) +// 0 1 2 3 (validator) +func genTestCase1(s *ReliableBroadcastTest, r *reliableBroadcast) []types.ValidatorID { + // Create new reliableBroadcast instance with 4 validators + var b *types.Block + var h common.Hash + vids := []types.ValidatorID{} + for i := 0; i < 4; i++ { + vid := types.ValidatorID{Hash: common.NewRandomHash()} + r.addValidator(vid) + vids = append(vids, vid) + } + // Add genesis blocks. 
+ for i := 0; i < 4; i++ { + h = common.NewRandomHash() + b = &types.Block{ + ProposerID: vids[i], + ParentHash: h, + Hash: h, + Height: 0, + Acks: map[common.Hash]struct{}{}, + } + r.processBlock(b) + } + + // Add block 0-1 which acks 0-0. + h = r.lattice[vids[0]].blocks[0].Hash + b = &types.Block{ + ProposerID: vids[0], + ParentHash: h, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + h: struct{}{}, + }, + } + r.processBlock(b) + s.NotNil(r.lattice[vids[0]].blocks[1]) + + // Add block 0-2 which acks 0-1 and 1-0. + h = r.lattice[vids[0]].blocks[1].Hash + b = &types.Block{ + ProposerID: vids[0], + ParentHash: h, + Hash: common.NewRandomHash(), + Height: 2, + Acks: map[common.Hash]struct{}{ + h: struct{}{}, + r.lattice[vids[1]].blocks[0].Hash: struct{}{}, + }, + } + r.processBlock(b) + s.NotNil(r.lattice[vids[0]].blocks[2]) + + // Add block 0-3 which acks 0-2. + h = r.lattice[vids[0]].blocks[2].Hash + b = &types.Block{ + ProposerID: vids[0], + ParentHash: h, + Hash: common.NewRandomHash(), + Height: 3, + Acks: map[common.Hash]struct{}{ + h: struct{}{}, + }, + } + r.processBlock(b) + s.NotNil(r.lattice[vids[0]].blocks[3]) + + // Add block 3-1 which acks 3-0. + h = r.lattice[vids[3]].blocks[0].Hash + b = &types.Block{ + ProposerID: vids[3], + ParentHash: h, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + h: struct{}{}, + }, + } + r.processBlock(b) + s.NotNil(r.lattice[vids[3]].blocks[0]) + + return vids +} + +func (s *ReliableBroadcastTest) TestAddValidator() { + r := newReliableBroadcast() + s.Equal(len(r.lattice), 0) + genTestCase1(s, r) + s.Equal(len(r.lattice), 4) +} + +func (s *ReliableBroadcastTest) TestSanityCheck() { + var b *types.Block + var h common.Hash + var vids []types.ValidatorID + var err error + r := newReliableBroadcast() + vids = genTestCase1(s, r) + + // Non-genesis block with no ack, should get error. 
+ b = &types.Block{ + ProposerID: vids[0], + ParentHash: common.NewRandomHash(), + Height: 10, + Acks: make(map[common.Hash]struct{}), + } + err = r.sanityCheck(b) + s.NotNil(err) + s.Equal(ErrNotAckParent.Error(), err.Error()) + + // Non-genesis block which does not ack its parent. + b = &types.Block{ + ProposerID: vids[1], + ParentHash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + r.lattice[vids[2]].blocks[0].Hash: struct{}{}, + }, + } + err = r.sanityCheck(b) + s.NotNil(err) + s.Equal(ErrNotAckParent.Error(), err.Error()) + + // Non-genesis block which acks its parent but the height is invalid. + h = r.lattice[vids[1]].blocks[0].Hash + b = &types.Block{ + ProposerID: vids[1], + ParentHash: h, + Height: 2, + Acks: map[common.Hash]struct{}{ + h: struct{}{}, + }, + } + err = r.sanityCheck(b) + s.NotNil(err) + s.Equal(ErrInvalidBlockHeight.Error(), err.Error()) + + // Invalid proposer ID. + h = r.lattice[vids[1]].blocks[0].Hash + b = &types.Block{ + ProposerID: types.ValidatorID{Hash: common.NewRandomHash()}, + ParentHash: h, + Height: 1, + Acks: map[common.Hash]struct{}{ + h: struct{}{}, + }, + } + err = r.sanityCheck(b) + s.NotNil(err) + s.Equal(ErrInvalidProposerID.Error(), err.Error()) + + // Fork block. + h = r.lattice[vids[0]].blocks[0].Hash + b = &types.Block{ + ProposerID: vids[0], + ParentHash: h, + Height: 1, + Acks: map[common.Hash]struct{}{ + h: struct{}{}, + }, + } + err = r.sanityCheck(b) + s.NotNil(err) + s.Equal(ErrForkBlock.Error(), err.Error()) + + // Replicated ack. + h = r.lattice[vids[0]].blocks[3].Hash + b = &types.Block{ + ProposerID: vids[0], + ParentHash: h, + Height: 4, + Acks: map[common.Hash]struct{}{ + h: struct{}{}, + r.lattice[vids[1]].blocks[0].Hash: struct{}{}, + }, + } + err = r.sanityCheck(b) + s.NotNil(err) + s.Equal(ErrDoubleAck.Error(), err.Error()) + + // Normal block. 
+ h = r.lattice[vids[1]].blocks[0].Hash + b = &types.Block{ + ProposerID: vids[1], + ParentHash: h, + Height: 1, + Acks: map[common.Hash]struct{}{ + h: struct{}{}, + common.NewRandomHash(): struct{}{}, + }, + } + err = r.sanityCheck(b) + s.Nil(err) +} + +func (s *ReliableBroadcastTest) TestAreAllAcksInLattice() { + var b *types.Block + var vids []types.ValidatorID + r := newReliableBroadcast() + vids = genTestCase1(s, r) + + // Empty ack should get true, although won't pass sanity check. + b = &types.Block{ + Acks: map[common.Hash]struct{}{}, + } + s.True(r.areAllAcksInLattice(b)) + + // Acks blocks in lattice + b = &types.Block{ + Acks: map[common.Hash]struct{}{ + r.lattice[vids[0]].blocks[0].Hash: struct{}{}, + r.lattice[vids[0]].blocks[1].Hash: struct{}{}, + }, + } + s.True(r.areAllAcksInLattice(b)) + + // Acks random block hash. + b = &types.Block{ + Acks: map[common.Hash]struct{}{ + common.NewRandomHash(): struct{}{}, + }, + } + s.False(r.areAllAcksInLattice(b)) +} + +func (s *ReliableBroadcastTest) TestStrongAck() { + var b *types.Block + var vids []types.ValidatorID + r := newReliableBroadcast() + vids = genTestCase1(s, r) + + // Check block 0-0 to 0-3 before adding 1-1 and 2-1. + for i := uint64(0); i < 4; i++ { + s.Equal(types.BlockStatusInit, r.lattice[vids[0]].blocks[i].Status) + } + + // Add block 1-1 which acks 1-0 and 0-2, and block 0-0 to 0-3 are still + // in BlockStatusInit, because they are not strongly acked. 
+ b = &types.Block{ + ProposerID: vids[1], + ParentHash: r.lattice[vids[1]].blocks[0].Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + r.lattice[vids[0]].blocks[2].Hash: struct{}{}, + r.lattice[vids[1]].blocks[0].Hash: struct{}{}, + }, + } + r.processBlock(b) + s.NotNil(r.lattice[vids[1]].blocks[1]) + for i := uint64(0); i < 4; i++ { + s.Equal(types.BlockStatusInit, r.lattice[vids[0]].blocks[i].Status) + } + + // Add block 2-1 which acks 0-2 and 2-0, block 0-0 to 0-2 are strongly acked but + // 0-3 is still not. + b = &types.Block{ + ProposerID: vids[2], + ParentHash: r.lattice[vids[2]].blocks[0].Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + r.lattice[vids[0]].blocks[2].Hash: struct{}{}, + r.lattice[vids[2]].blocks[0].Hash: struct{}{}, + }, + } + r.processBlock(b) + s.Equal(types.BlockStatusAcked, r.lattice[vids[0]].blocks[0].Status) + s.Equal(types.BlockStatusAcked, r.lattice[vids[0]].blocks[1].Status) + s.Equal(types.BlockStatusAcked, r.lattice[vids[0]].blocks[2].Status) + s.Equal(types.BlockStatusInit, r.lattice[vids[0]].blocks[3].Status) +} + +func (s *ReliableBroadcastTest) TestExtractBlocks() { + var b *types.Block + r := newReliableBroadcast() + vids := genTestCase1(s, r) + + // Add block 1-1 which acks 1-0, 0-2, 3-0. + b = &types.Block{ + ProposerID: vids[1], + ParentHash: r.lattice[vids[1]].blocks[0].Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + r.lattice[vids[0]].blocks[2].Hash: struct{}{}, + r.lattice[vids[1]].blocks[0].Hash: struct{}{}, + r.lattice[vids[3]].blocks[0].Hash: struct{}{}, + }, + } + r.processBlock(b) + + // Add block 2-1 which acks 0-2, 2-0, 3-0. 
+ b = &types.Block{ + ProposerID: vids[2], + ParentHash: r.lattice[vids[2]].blocks[0].Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + r.lattice[vids[0]].blocks[2].Hash: struct{}{}, + r.lattice[vids[2]].blocks[0].Hash: struct{}{}, + r.lattice[vids[3]].blocks[0].Hash: struct{}{}, + }, + } + r.processBlock(b) + + hashs := []common.Hash{ + r.lattice[vids[0]].blocks[0].Hash, + r.lattice[vids[0]].blocks[1].Hash, + r.lattice[vids[3]].blocks[0].Hash, + } + hashExtracted := map[common.Hash]*types.Block{} + for _, b := range r.extractBlocks() { + hashExtracted[b.Hash] = b + s.Equal(types.BlockStatusOrdering, b.Status) + } + for _, h := range hashs { + _, exist := hashExtracted[h] + s.True(exist) + } +} + +func (s *ReliableBroadcastTest) TestRandomIntensiveAcking() { + r := newReliableBroadcast() + vids := []types.ValidatorID{} + heights := map[types.ValidatorID]uint64{} + extractedBlocks := []*types.Block{} + + // Generate validators and genesis blocks. + for i := 0; i < 4; i++ { + vid := types.ValidatorID{Hash: common.NewRandomHash()} + r.addValidator(vid) + vids = append(vids, vid) + h := common.NewRandomHash() + b := &types.Block{ + Hash: h, + ParentHash: h, + Acks: map[common.Hash]struct{}{}, + Height: 0, + ProposerID: vid, + } + r.processBlock(b) + heights[vid] = 1 + } + + for i := 0; i < 5000; i++ { + vid := vids[rand.Int()%len(vids)] + height := heights[vid] + heights[vid]++ + parentHash := r.lattice[vid].blocks[height-1].Hash + acks := map[common.Hash]struct{}{} + for _, vid2 := range vids { + if b, exist := r.lattice[vid2].blocks[r.lattice[vid].nextAck[vid2]]; exist { + acks[b.Hash] = struct{}{} + } + } + b := &types.Block{ + ProposerID: vid, + Hash: common.NewRandomHash(), + ParentHash: parentHash, + Height: height, + Acks: acks, + } + r.processBlock(b) + extractedBlocks = append(extractedBlocks, r.extractBlocks()...) + } + + extractedBlocks = append(extractedBlocks, r.extractBlocks()...) 
+ // The len of array extractedBlocks should be about 5000. + s.True(len(extractedBlocks) > 4500) + // The len of r.blocks should be small if deleting mechanism works. + s.True(len(r.blocks) < 500) +} + +func (s *ReliableBroadcastTest) TestRandomlyGeneratedBlocks() { + var ( + validatorCount = 19 + blockCount = 50 + repeat = 20 + ) + + // Prepare a randomly generated blocks. + db, err := blockdb.NewMemBackedBlockDB("test-reliable-broadcast-random.blockdb") + s.Require().Nil(err) + defer func() { + // If the test fails, keep the block database for troubleshooting. + if s.T().Failed() { + s.Nil(db.Close()) + } + }() + gen := test.NewBlocksGenerator(nil) + s.Require().Nil(gen.Generate(validatorCount, blockCount, nil, db)) + iter, err := db.GetAll() + s.Require().Nil(err) + // Setup a revealer that would reveal blocks randomly. + revealer, err := test.NewRandomRevealer(iter) + s.Require().Nil(err) + + stronglyAckedHashesAsString := map[string]struct{}{} + for i := 0; i < repeat; i++ { + validators := map[types.ValidatorID]struct{}{} + rb := newReliableBroadcast() + stronglyAckedHashes := common.Hashes{} + revealer.Reset() + + for { + // Reveal next block. + b, err := revealer.Next() + if err != nil { + if err == blockdb.ErrIterationFinished { + err = nil + break + } + } + s.Require().Nil(err) + + // It's a hack to add validator to reliableBroadcast module. + if _, added := validators[b.ProposerID]; !added { + rb.addValidator(b.ProposerID) + validators[b.ProposerID] = struct{}{} + } + // Perform reliable broadcast process. + rb.processBlock(&b) + for _, b := range rb.extractBlocks() { + stronglyAckedHashes = append(stronglyAckedHashes, b.Hash) + } + } + // To make it easier to check, sort hashes of + // strongly acked blocks, and concatenate them into + // a string. 
+ sort.Sort(stronglyAckedHashes) + asString := "" + for _, h := range stronglyAckedHashes { + asString += h.String() + "," + } + stronglyAckedHashesAsString[asString] = struct{}{} + } + // Make sure concatenated hashes of strongly acked blocks are identical. + s.Require().Len(stronglyAckedHashesAsString, 1) + for h := range stronglyAckedHashesAsString { + // Make sure at least some blocks are strongly acked. + s.True(len(h) > 0) + } +} + +func TestReliableBroadcast(t *testing.T) { + suite.Run(t, new(ReliableBroadcastTest)) +} diff --git a/core/sequencer.go b/core/sequencer.go deleted file mode 100644 index 36c1383..0000000 --- a/core/sequencer.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2018 The dexon-consensus-core Authors -// This file is part of the dexon-consensus-core library. -// -// The dexon-consensus-core library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus-core library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus-core library. If not, see -// . - -package core - -import ( - "fmt" - "sort" - - "github.com/dexon-foundation/dexon-consensus-core/common" - "github.com/dexon-foundation/dexon-consensus-core/core/types" -) - -// ErrNotValidDAG would be reported when block subbmitted to sequencer -// didn't form a DAG. -var ErrNotValidDAG = fmt.Errorf("not a valid dag") - -// ackingStatusVector describes the acking status, either globally or just -// for one candidate. 
-// -// When block A acks block B, all blocks proposed from the same proposer -// as block A with higher height would also acks block B. Therefore, -// we just need to record: -// - the minimum height of acking block from that proposer -// - count of acking blocks from that proposer -// to repsent the acking status for block A. -type ackingStatusVector map[types.ValidatorID]*struct{ minHeight, count uint64 } - -// addBlock would update ackingStatusVector, it's caller's duty -// to make sure the input block acutally acking the target block. -func (v ackingStatusVector) addBlock(b *types.Block) (err error) { - rec, exists := v[b.ProposerID] - if !exists { - v[b.ProposerID] = &struct { - minHeight, count uint64 - }{ - minHeight: b.Height, - count: 1, - } - } else { - if b.Height < rec.minHeight { - err = ErrNotValidDAG - return - } - rec.count++ - } - return -} - -// getAckingNodeSet would generate the Acking Node Set. -// Only block height larger than -// -// global minimum height + k -// -// would be taken into consideration, ex. -// -// For some validator X: -// - the global minimum acking height = 1, -// - k = 1 -// then only block height >= 2 would be added to acking node set. -func (v ackingStatusVector) getAckingNodeSet( - global ackingStatusVector, k uint64) map[types.ValidatorID]struct{} { - - ret := make(map[types.ValidatorID]struct{}) - for vID, gRec := range global { - rec, exists := v[vID] - if !exists { - continue - } - - // This line would check if these two ranges would overlap: - // - (global minimum height + k, infinity) - // - (local minimum height, local minimum height + count - 1) - if rec.minHeight+rec.count-1 >= gRec.minHeight+k { - ret[vID] = struct{}{} - } - } - return ret -} - -// getAckingHeightVector would convert 'ackingStatusVector' to -// Acking Height Vector. -// -// Only block height equals to (global minimum block height + k) would be -// taken into consideration. 
-func (v ackingStatusVector) getAckingHeightVector( - global ackingStatusVector, k uint64) map[types.ValidatorID]uint64 { - - ret := make(map[types.ValidatorID]uint64) - for vID, gRec := range global { - rec, exists := v[vID] - - if gRec.count <= k { - continue - } else if !exists { - ret[vID] = infinity - } else if rec.minHeight <= gRec.minHeight+k { - // This check is sufficient to make sure the block height: - // - // gRec.minHeight + k - // - // would be included in this ackingStatusVector. - ret[vID] = gRec.minHeight + k - } else { - ret[vID] = infinity - } - } - return ret -} - -// blockVector stores all blocks grouped by their proposers and -// sorted by their block height. -type blockVector map[types.ValidatorID][]*types.Block - -func (v blockVector) addBlock(b *types.Block) (err error) { - blocksFromProposer := v[b.ProposerID] - if len(blocksFromProposer) > 0 { - lastBlock := blocksFromProposer[len(blocksFromProposer)-1] - if b.Height-lastBlock.Height != 1 { - err = ErrNotValidDAG - return - } - } - v[b.ProposerID] = append(blocksFromProposer, b) - return -} - -// getHeightVector would convert a blockVector to -// ackingStatusVectorAckingStatus. -func (v blockVector) getHeightVector() ackingStatusVector { - ret := ackingStatusVector{} - for vID, vec := range v { - if len(vec) == 0 { - continue - } - ret[vID] = &struct { - minHeight, count uint64 - }{ - minHeight: vec[0].Height, - count: uint64(len(vec)), - } - } - return ret -} - -// Sequencer represent a process unit to handle total ordering -// for blocks. -type sequencer struct { - // pendings stores blocks awaiting to be ordered. - pendings map[common.Hash]*types.Block - - // k represents the k in 'k-level total ordering'. - // In short, only block height equals to (global minimum height + k) - // would be taken into consideration. - k uint64 - - // phi is a const to control how strong the leading preceding block - // should be. - phi uint64 - - // validatorCount is the count of validator set. 
- validatorCount uint64 - - // globalVector group all pending blocks by proposers and - // sort them by block height. This structure is helpful when: - // - // - build global height vector - // - picking candidates next round - globalVector blockVector - - // candidateAckingStatusVectors caches ackingStatusVector of candidates. - candidateAckingStatusVectors map[common.Hash]ackingStatusVector - - // acked cache the 'block A acked by block B' relation by - // keeping a record in acked[A.Hash][B.Hash] - acked map[common.Hash]map[common.Hash]struct{} -} - -func newSequencer(k, phi, validatorCount uint64) *sequencer { - return &sequencer{ - candidateAckingStatusVectors: make(map[common.Hash]ackingStatusVector), - pendings: make(map[common.Hash]*types.Block), - k: k, - phi: phi, - validatorCount: validatorCount, - globalVector: blockVector{}, - acked: make(map[common.Hash]map[common.Hash]struct{}), - } -} - -// buildBlockRelation populates the acked according their acking relationships. -func (s *sequencer) buildBlockRelation(b *types.Block) { - // populateAcked would update all blocks implcitly acked - // by input block recursively. - var populateAcked func(bx, target *types.Block) - populateAcked = func(bx, target *types.Block) { - for ack := range bx.Acks { - acked, exists := s.acked[ack] - if !exists { - acked = make(map[common.Hash]struct{}) - s.acked[ack] = acked - } - - // This means we've walked this block already. - if _, alreadyPopulated := acked[target.Hash]; alreadyPopulated { - continue - } - acked[target.Hash] = struct{}{} - - // See if we need to go forward. - if nextBlock, exists := s.pendings[ack]; !exists { - continue - } else { - populateAcked(nextBlock, target) - } - } - } - populateAcked(b, b) -} - -// clean would remove a block from working set. This behaviour -// would prevent our memory usage growing infinity. 
-func (s *sequencer) clean(h common.Hash) { - delete(s.acked, h) - delete(s.pendings, h) - delete(s.candidateAckingStatusVectors, h) -} - -// updateVectors is a helper function to update all cached vectors. -func (s *sequencer) updateVectors(b *types.Block) (err error) { - // Update global height vector - err = s.globalVector.addBlock(b) - if err != nil { - return - } - - // Update acking status of candidates. - for candidate, vector := range s.candidateAckingStatusVectors { - if _, acked := s.acked[candidate][b.Hash]; !acked { - continue - } - if err = vector.addBlock(b); err != nil { - return - } - } - return -} - -// grade implements the 'grade' potential function described in white paper. -func (s *sequencer) grade( - hvFrom, hvTo map[types.ValidatorID]uint64, - globalAns map[types.ValidatorID]struct{}) int { - - count := uint64(0) - for vID, hFrom := range hvFrom { - hTo, exists := hvTo[vID] - if !exists { - continue - } - - if hFrom != infinity && hTo == infinity { - count++ - } - } - - if count >= s.phi { - return 1 - } else if count < s.phi-s.validatorCount+uint64(len(globalAns)) { - return 0 - } else { - return -1 - } -} - -// buildAckingStatusVectorForNewCandidate is a helper function to -// build ackingStatusVector for new candidate. -func (s *sequencer) buildAckingStatusVectorForNewCandidate( - candidate *types.Block) (hVec ackingStatusVector) { - - blocks := s.globalVector[candidate.ProposerID] - hVec = ackingStatusVector{ - candidate.ProposerID: &struct { - minHeight, count uint64 - }{ - minHeight: candidate.Height, - count: uint64(len(blocks)), - }, - } - - ackedsForCandidate, exists := s.acked[candidate.Hash] - if !exists { - // This candidate is acked by nobody. 
- return - } - - for vID, blocks := range s.globalVector { - if vID == candidate.ProposerID { - continue - } - - for i, b := range blocks { - if _, acked := ackedsForCandidate[b.Hash]; !acked { - continue - } - - // If this block acks this candidate, all newer blocks - // from the same validator also 'indirect' acks it. - hVec[vID] = &struct { - minHeight, count uint64 - }{ - minHeight: b.Height, - count: uint64(len(blocks) - i), - } - break - } - } - return -} - -// isAckOnlyPrecedings is a helper function to check if a block -// only contain acks to delivered blocks. -func (s *sequencer) isAckOnlyPrecedings(b *types.Block) bool { - for ack := range b.Acks { - if _, pending := s.pendings[ack]; pending { - return false - } - } - return true -} - -// output is a helper function to finish the delivery of -// deliverable preceding set. -func (s *sequencer) output(precedings map[common.Hash]struct{}) common.Hashes { - ret := common.Hashes{} - for p := range precedings { - ret = append(ret, p) - - // Remove the first element from corresponding blockVector. - b := s.pendings[p] - s.globalVector[b.ProposerID] = s.globalVector[b.ProposerID][1:] - - // Remove block relations. - s.clean(p) - } - sort.Sort(ret) - - // Find new candidates from tip of globalVector of each validator. - // The complexity here is O(N^2logN). - for _, blocks := range s.globalVector { - if len(blocks) == 0 { - continue - } - - tip := blocks[0] - if _, alreadyCandidate := - s.candidateAckingStatusVectors[tip.Hash]; alreadyCandidate { - continue - } - - if !s.isAckOnlyPrecedings(tip) { - continue - } - - // Build ackingStatusVector for new candidate. 
- s.candidateAckingStatusVectors[tip.Hash] = - s.buildAckingStatusVectorForNewCandidate(tip) - } - return ret -} - -// generateDeliverSet would: -// - generate preceding set -// - check if the preceding set deliverable by checking potential function -func (s *sequencer) generateDeliverSet() ( - delivered map[common.Hash]struct{}, early bool) { - - globalHeightVector := s.globalVector.getHeightVector() - ahvs := map[common.Hash]map[types.ValidatorID]uint64{} - for candidate, v := range s.candidateAckingStatusVectors { - ahvs[candidate] = v.getAckingHeightVector(globalHeightVector, s.k) - } - - globalAns := globalHeightVector.getAckingNodeSet(globalHeightVector, s.k) - precedings := make(map[common.Hash]struct{}) - -CheckNextCandidateLoop: - for candidate := range s.candidateAckingStatusVectors { - for otherCandidate := range s.candidateAckingStatusVectors { - if candidate == otherCandidate { - continue - } - if s.grade(ahvs[otherCandidate], ahvs[candidate], globalAns) != 0 { - continue CheckNextCandidateLoop - } - } - precedings[candidate] = struct{}{} - } - - if len(precedings) == 0 { - return - } - - // internal is a helper function to verify internal stability. - internal := func() bool { - for candidate := range s.candidateAckingStatusVectors { - if _, isPreceding := precedings[candidate]; isPreceding { - continue - } - - beaten := false - for p := range precedings { - if beaten = - s.grade(ahvs[p], ahvs[candidate], globalAns) == 1; beaten { - break - } - } - if !beaten { - return false - } - } - return true - } - - // checkAHV is a helper function to verify external stability. - // It would make sure some preceding block is strong enough - // to lead the whole preceding set. - checkAHV := func() bool { - for p := range precedings { - count := uint64(0) - for _, v := range ahvs[p] { - if v != infinity { - count++ - } - } - - if count > s.phi { - return true - } - } - return false - } - - // checkANS is a helper function to verify external stability. 
- // It would make sure all preceding blocks are strong enough - // to be delivered. - checkANS := func() bool { - for p := range precedings { - validatorAns := s.candidateAckingStatusVectors[p].getAckingNodeSet( - globalHeightVector, s.k) - if uint64(len(validatorAns)) < s.validatorCount-s.phi { - return false - } - } - - return true - } - - // Check internal stability first. - if !internal() { - return - } - - // If all validators propose enough blocks, we should force - // to deliver since the whole picture of the DAG is revealed. - if uint64(len(globalAns)) != s.validatorCount { - // The whole picture is not ready, we need to check if - // exteranl stability is met, and we can deliver earlier. - if checkAHV() && checkANS() { - early = true - } else { - return - } - } - - delivered = precedings - return -} - -// processBlock is the entry point of sequencer. -func (s *sequencer) processBlock(b *types.Block) ( - delivered common.Hashes, early bool, err error) { - - // NOTE: I assume the block 'b' is already safe for total ordering. - // That means, all its acking blocks are during/after - // total ordering stage. - - // Incremental part. - s.pendings[b.Hash] = b - s.buildBlockRelation(b) - if err = s.updateVectors(b); err != nil { - return - } - if s.isAckOnlyPrecedings(b) { - s.candidateAckingStatusVectors[b.Hash] = - s.buildAckingStatusVectorForNewCandidate(b) - } - - // Not-Incremental part (yet). - // - generate ahv for each candidate - // - generate ans for each candidate - // - generate global ans - // - find preceding set - hashes, early := s.generateDeliverSet() - - // output precedings - delivered = s.output(hashes) - return -} diff --git a/core/sequencer_test.go b/core/sequencer_test.go deleted file mode 100644 index ae9cca7..0000000 --- a/core/sequencer_test.go +++ /dev/null @@ -1,996 +0,0 @@ -// Copyright 2018 The dexon-consensus-core Authors -// This file is part of the dexon-consensus-core library. 
-// -// The dexon-consensus-core library is free software: you can redistribute it and/or -// modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus-core library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus-core library. If not, see -// . - -package core - -import ( - "sort" - "strings" - "testing" - - "github.com/dexon-foundation/dexon-consensus-core/blockdb" - "github.com/dexon-foundation/dexon-consensus-core/common" - "github.com/dexon-foundation/dexon-consensus-core/core/test" - "github.com/dexon-foundation/dexon-consensus-core/core/types" - "github.com/stretchr/testify/suite" -) - -type SequencerTestSuite struct { - suite.Suite -} - -func (s *SequencerTestSuite) generateValidatorIDs( - count int) []types.ValidatorID { - - validatorIDs := []types.ValidatorID{} - for i := 0; i < count; i++ { - validatorIDs = append(validatorIDs, - types.ValidatorID{Hash: common.NewRandomHash()}) - } - - return validatorIDs -} - -func (s *SequencerTestSuite) genRootBlock( - vID types.ValidatorID, acks map[common.Hash]struct{}) *types.Block { - - hash := common.NewRandomHash() - return &types.Block{ - ProposerID: vID, - ParentHash: hash, - Hash: hash, - Height: 0, - Acks: acks, - } -} - -func (s *SequencerTestSuite) checkNotDeliver(seq *sequencer, b *types.Block) { - hashes, eqrly, err := seq.processBlock(b) - s.Empty(hashes) - s.False(eqrly) - s.Nil(err) -} - -func (s *SequencerTestSuite) checkNotInWorkingSet( - seq *sequencer, b *types.Block) { - - s.NotContains(seq.pendings, b.Hash) - s.NotContains(seq.acked, 
b.Hash) -} - -func (s *SequencerTestSuite) TestBlockRelation() { - // This test case would verify if 'acking' and 'acked' - // accumulated correctly. - // - // The DAG used below is: - // A <- B <- C - - vID := types.ValidatorID{Hash: common.NewRandomHash()} - - hash := common.NewRandomHash() - blockA := &types.Block{ - ProposerID: vID, - ParentHash: hash, - Hash: hash, - Height: 0, - Acks: map[common.Hash]struct{}{}, - } - blockB := &types.Block{ - ProposerID: vID, - ParentHash: blockA.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - blockA.Hash: struct{}{}, - }, - } - blockC := &types.Block{ - ProposerID: vID, - ParentHash: blockB.Hash, - Hash: common.NewRandomHash(), - Height: 2, - Acks: map[common.Hash]struct{}{ - blockB.Hash: struct{}{}, - }, - } - - seq := newSequencer(1, 3, 5) - s.checkNotDeliver(seq, blockA) - s.checkNotDeliver(seq, blockB) - s.checkNotDeliver(seq, blockC) - - // Check 'acked'. - ackedA := seq.acked[blockA.Hash] - s.Require().NotNil(ackedA) - s.Len(ackedA, 2) - s.Contains(ackedA, blockB.Hash) - s.Contains(ackedA, blockC.Hash) - - ackedB := seq.acked[blockB.Hash] - s.Require().NotNil(ackedB) - s.Len(ackedB, 1) - s.Contains(ackedB, blockC.Hash) - - s.Nil(seq.acked[blockC.Hash]) -} - -func (s *SequencerTestSuite) TestCreateAckingHeightVectorFromHeightVector() { - validators := s.generateValidatorIDs(5) - global := ackingStatusVector{ - validators[0]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 5}, - validators[1]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 5}, - validators[2]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 5}, - validators[3]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 5}, - } - - // For 'not existed' record in local but exist in global, - // should be infinity. 
- ahv := ackingStatusVector{ - validators[0]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 2}, - }.getAckingHeightVector(global, 0) - s.Len(ahv, 4) - s.Equal(ahv[validators[0]], uint64(0)) - s.Equal(ahv[validators[1]], infinity) - s.Equal(ahv[validators[2]], infinity) - s.Equal(ahv[validators[3]], infinity) - - // For local min exceeds global's min+k-1, should be infinity - hv := ackingStatusVector{ - validators[0]: &struct{ minHeight, count uint64 }{ - minHeight: 3, count: 1}, - } - ahv = hv.getAckingHeightVector(global, 2) - s.Equal(ahv[validators[0]], infinity) - ahv = hv.getAckingHeightVector(global, 3) - s.Equal(ahv[validators[0]], uint64(3)) - - ahv = ackingStatusVector{ - validators[0]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 3}, - validators[1]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 3}, - }.getAckingHeightVector(global, 5) - s.Len(ahv, 0) -} - -func (s *SequencerTestSuite) TestCreateAckingNodeSetFromHeightVector() { - validators := s.generateValidatorIDs(5) - global := ackingStatusVector{ - validators[0]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 5}, - validators[1]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 5}, - validators[2]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 5}, - validators[3]: &struct{ minHeight, count uint64 }{ - minHeight: 0, count: 5}, - } - - local := ackingStatusVector{ - validators[0]: &struct{ minHeight, count uint64 }{ - minHeight: 1, count: 2}, - } - s.Len(local.getAckingNodeSet(global, 1), 1) - s.Len(local.getAckingNodeSet(global, 2), 1) - s.Len(local.getAckingNodeSet(global, 3), 0) -} - -func (s *SequencerTestSuite) TestGrade() { - validators := s.generateValidatorIDs(5) - seq := newSequencer(1, 3, 5) // K doesn't matter when calculating preceding. 
- - ans := map[types.ValidatorID]struct{}{ - validators[0]: struct{}{}, - validators[1]: struct{}{}, - validators[2]: struct{}{}, - validators[3]: struct{}{}, - } - - ahv1 := map[types.ValidatorID]uint64{ - validators[0]: 1, - validators[1]: infinity, - validators[2]: infinity, - validators[3]: infinity, - } - ahv2 := map[types.ValidatorID]uint64{ - validators[0]: 1, - validators[1]: 1, - validators[2]: 1, - validators[3]: 1, - } - ahv3 := map[types.ValidatorID]uint64{ - validators[0]: 1, - validators[1]: 1, - validators[2]: infinity, - validators[3]: infinity, - } - s.Equal(seq.grade(ahv2, ahv1, ans), 1) - s.Equal(seq.grade(ahv1, ahv2, ans), 0) - s.Equal(seq.grade(ahv2, ahv3, ans), -1) - s.Equal(seq.grade(ahv3, ahv2, ans), 0) -} - -func (s *SequencerTestSuite) TestCycleDetection() { - // Make sure we don't get hang by cycle from - // block's acks. - validators := s.generateValidatorIDs(5) - - // create blocks with cycles in acking relation. - cycledHash := common.NewRandomHash() - hash := common.NewRandomHash() - b00 := &types.Block{ - ProposerID: validators[0], - ParentHash: hash, - Hash: hash, - Height: 0, - Acks: map[common.Hash]struct{}{ - cycledHash: struct{}{}, - }, - } - b01 := &types.Block{ - ProposerID: validators[0], - ParentHash: b00.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - b00.Hash: struct{}{}, - }, - } - b02 := &types.Block{ - ProposerID: validators[0], - ParentHash: b01.Hash, - Hash: common.NewRandomHash(), - Height: 2, - Acks: map[common.Hash]struct{}{ - b01.Hash: struct{}{}, - }, - } - b03 := &types.Block{ - ProposerID: validators[0], - ParentHash: b02.Hash, - Hash: cycledHash, - Height: 3, - Acks: map[common.Hash]struct{}{ - b02.Hash: struct{}{}, - }, - } - - // Create a block acks self. 
- hash = common.NewRandomHash() - b10 := &types.Block{ - ProposerID: validators[1], - ParentHash: hash, - Hash: hash, - Height: 0, - Acks: map[common.Hash]struct{}{ - hash: struct{}{}, - }, - } - - // Make sure we won't hang when cycle exists. - seq := newSequencer(1, 3, 5) - s.checkNotDeliver(seq, b00) - s.checkNotDeliver(seq, b01) - s.checkNotDeliver(seq, b02) - - // Should not hang in this line. - s.checkNotDeliver(seq, b03) - // Should not hang in this line - s.checkNotDeliver(seq, b10) -} - -func (s *SequencerTestSuite) TestNotValidDAGDetection() { - validators := s.generateValidatorIDs(4) - seq := newSequencer(1, 3, 5) - - hash := common.NewRandomHash() - b00 := &types.Block{ - ProposerID: validators[0], - ParentHash: hash, - Hash: hash, - Height: 0, - Acks: map[common.Hash]struct{}{}, - } - b01 := &types.Block{ - ProposerID: validators[0], - ParentHash: b00.Hash, - Hash: common.NewRandomHash(), - } - - // When submit to block with lower height to sequencer, - // caller should receive an error. - s.checkNotDeliver(seq, b01) - _, _, err := seq.processBlock(b00) - s.Equal(err, ErrNotValidDAG) -} - -func (s *SequencerTestSuite) TestEarlyDeliver() { - // The test scenario: - // - // o o o o o - // : : : : : <- (K - 1) layers - // o o o o o - // \ v / | - // o o - // A B - // Even when B is not received, A should - // be able to be delivered. 
- seq := newSequencer(2, 3, 5) - validators := s.generateValidatorIDs(5) - - genNextBlock := func(b *types.Block) *types.Block { - return &types.Block{ - ProposerID: b.ProposerID, - ParentHash: b.Hash, - Hash: common.NewRandomHash(), - Height: b.Height + 1, - Acks: map[common.Hash]struct{}{ - b.Hash: struct{}{}, - }, - } - } - - b00 := s.genRootBlock(validators[0], map[common.Hash]struct{}{}) - b01 := genNextBlock(b00) - b02 := genNextBlock(b01) - - b10 := s.genRootBlock(validators[1], map[common.Hash]struct{}{ - b00.Hash: struct{}{}, - }) - b11 := genNextBlock(b10) - b12 := genNextBlock(b11) - - b20 := s.genRootBlock(validators[2], map[common.Hash]struct{}{ - b00.Hash: struct{}{}, - }) - b21 := genNextBlock(b20) - b22 := genNextBlock(b21) - - b30 := s.genRootBlock(validators[3], map[common.Hash]struct{}{ - b00.Hash: struct{}{}, - }) - b31 := genNextBlock(b30) - b32 := genNextBlock(b31) - - // It's a valid block sequence to deliver - // to total ordering algorithm: DAG. - s.checkNotDeliver(seq, b00) - s.checkNotDeliver(seq, b01) - s.checkNotDeliver(seq, b02) - - vec := seq.candidateAckingStatusVectors[b00.Hash] - s.Require().NotNil(vec) - s.Len(vec, 1) - s.Equal(vec[validators[0]].minHeight, b00.Height) - s.Equal(vec[validators[0]].count, uint64(3)) - - s.checkNotDeliver(seq, b10) - s.checkNotDeliver(seq, b11) - s.checkNotDeliver(seq, b12) - s.checkNotDeliver(seq, b20) - s.checkNotDeliver(seq, b21) - s.checkNotDeliver(seq, b22) - s.checkNotDeliver(seq, b30) - s.checkNotDeliver(seq, b31) - - // Check the internal state before delivering. - s.Len(seq.candidateAckingStatusVectors, 1) // b00 is the only candidate. 
- - vec = seq.candidateAckingStatusVectors[b00.Hash] - s.Require().NotNil(vec) - s.Len(vec, 4) - s.Equal(vec[validators[0]].minHeight, b00.Height) - s.Equal(vec[validators[0]].count, uint64(3)) - s.Equal(vec[validators[1]].minHeight, b10.Height) - s.Equal(vec[validators[1]].count, uint64(3)) - s.Equal(vec[validators[2]].minHeight, b20.Height) - s.Equal(vec[validators[2]].count, uint64(3)) - s.Equal(vec[validators[3]].minHeight, b30.Height) - s.Equal(vec[validators[3]].count, uint64(2)) - - hashes, early, err := seq.processBlock(b32) - s.Require().Len(hashes, 1) - s.True(early) - s.Nil(err) - s.Equal(hashes[0], b00.Hash) - - // Check the internal state after delivered. - s.Len(seq.candidateAckingStatusVectors, 4) // b01, b10, b20, b30 are candidates. - - // Check b01. - vec = seq.candidateAckingStatusVectors[b01.Hash] - s.Require().NotNil(vec) - s.Len(vec, 1) - s.Equal(vec[validators[0]].minHeight, b01.Height) - s.Equal(vec[validators[0]].count, uint64(2)) - - // Check b10. - vec = seq.candidateAckingStatusVectors[b10.Hash] - s.Require().NotNil(vec) - s.Len(vec, 1) - s.Equal(vec[validators[1]].minHeight, b10.Height) - s.Equal(vec[validators[1]].count, uint64(3)) - - // Check b20. - vec = seq.candidateAckingStatusVectors[b20.Hash] - s.Require().NotNil(vec) - s.Len(vec, 1) - s.Equal(vec[validators[2]].minHeight, b20.Height) - s.Equal(vec[validators[2]].count, uint64(3)) - - // Check b30. - vec = seq.candidateAckingStatusVectors[b30.Hash] - s.Require().NotNil(vec) - s.Len(vec, 1) - s.Equal(vec[validators[3]].minHeight, b30.Height) - s.Equal(vec[validators[3]].count, uint64(3)) - - // Make sure b00 doesn't exist in current working set: - s.checkNotInWorkingSet(seq, b00) -} - -func (s *SequencerTestSuite) TestBasicCaseForK2() { - // It's a handcrafted test case. 
- seq := newSequencer(2, 3, 5) - validators := s.generateValidatorIDs(5) - - b00 := s.genRootBlock(validators[0], map[common.Hash]struct{}{}) - b10 := s.genRootBlock(validators[1], map[common.Hash]struct{}{}) - b20 := s.genRootBlock( - validators[2], map[common.Hash]struct{}{b10.Hash: struct{}{}}) - b30 := s.genRootBlock( - validators[3], map[common.Hash]struct{}{b20.Hash: struct{}{}}) - b40 := s.genRootBlock(validators[4], map[common.Hash]struct{}{}) - b11 := &types.Block{ - ProposerID: validators[1], - ParentHash: b10.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - b10.Hash: struct{}{}, - b00.Hash: struct{}{}, - }, - } - b01 := &types.Block{ - ProposerID: validators[0], - ParentHash: b00.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - b00.Hash: struct{}{}, - b11.Hash: struct{}{}, - }, - } - b21 := &types.Block{ - ProposerID: validators[2], - ParentHash: b20.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - b20.Hash: struct{}{}, - b01.Hash: struct{}{}, - }, - } - b31 := &types.Block{ - ProposerID: validators[3], - ParentHash: b30.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - b30.Hash: struct{}{}, - b21.Hash: struct{}{}, - }, - } - b02 := &types.Block{ - ProposerID: validators[0], - ParentHash: b01.Hash, - Hash: common.NewRandomHash(), - Height: 2, - Acks: map[common.Hash]struct{}{ - b01.Hash: struct{}{}, - b21.Hash: struct{}{}, - }, - } - b12 := &types.Block{ - ProposerID: validators[1], - ParentHash: b11.Hash, - Hash: common.NewRandomHash(), - Height: 2, - Acks: map[common.Hash]struct{}{ - b11.Hash: struct{}{}, - b21.Hash: struct{}{}, - }, - } - b32 := &types.Block{ - ProposerID: validators[3], - ParentHash: b31.Hash, - Hash: common.NewRandomHash(), - Height: 2, - Acks: map[common.Hash]struct{}{ - b31.Hash: struct{}{}, - }, - } - b22 := &types.Block{ - ProposerID: validators[2], - ParentHash: b21.Hash, - Hash: 
common.NewRandomHash(), - Height: 2, - Acks: map[common.Hash]struct{}{ - b21.Hash: struct{}{}, - b32.Hash: struct{}{}, - }, - } - b23 := &types.Block{ - ProposerID: validators[2], - ParentHash: b22.Hash, - Hash: common.NewRandomHash(), - Height: 3, - Acks: map[common.Hash]struct{}{ - b22.Hash: struct{}{}, - }, - } - b03 := &types.Block{ - ProposerID: validators[0], - ParentHash: b02.Hash, - Hash: common.NewRandomHash(), - Height: 3, - Acks: map[common.Hash]struct{}{ - b02.Hash: struct{}{}, - b22.Hash: struct{}{}, - }, - } - b13 := &types.Block{ - ProposerID: validators[1], - ParentHash: b12.Hash, - Hash: common.NewRandomHash(), - Height: 3, - Acks: map[common.Hash]struct{}{ - b12.Hash: struct{}{}, - b22.Hash: struct{}{}, - }, - } - b14 := &types.Block{ - ProposerID: validators[1], - ParentHash: b13.Hash, - Hash: common.NewRandomHash(), - Height: 4, - Acks: map[common.Hash]struct{}{ - b13.Hash: struct{}{}, - }, - } - b41 := &types.Block{ - ProposerID: validators[4], - ParentHash: b40.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - b40.Hash: struct{}{}, - }, - } - b42 := &types.Block{ - ProposerID: validators[4], - ParentHash: b41.Hash, - Hash: common.NewRandomHash(), - Height: 2, - Acks: map[common.Hash]struct{}{ - b41.Hash: struct{}{}, - }, - } - - s.checkNotDeliver(seq, b00) - s.checkNotDeliver(seq, b10) - s.checkNotDeliver(seq, b11) - s.checkNotDeliver(seq, b01) - s.checkNotDeliver(seq, b20) - s.checkNotDeliver(seq, b30) - s.checkNotDeliver(seq, b21) - s.checkNotDeliver(seq, b31) - s.checkNotDeliver(seq, b32) - s.checkNotDeliver(seq, b22) - s.checkNotDeliver(seq, b12) - - // Make sure 'acked' for current precedings is correct. 
- acked := seq.acked[b00.Hash] - s.Require().NotNil(acked) - s.Len(acked, 7) - s.Contains(acked, b01.Hash) - s.Contains(acked, b11.Hash) - s.Contains(acked, b12.Hash) - s.Contains(acked, b21.Hash) - s.Contains(acked, b22.Hash) - s.Contains(acked, b31.Hash) - s.Contains(acked, b32.Hash) - - acked = seq.acked[b10.Hash] - s.Require().NotNil(acked) - s.Len(acked, 9) - s.Contains(acked, b01.Hash) - s.Contains(acked, b11.Hash) - s.Contains(acked, b12.Hash) - s.Contains(acked, b20.Hash) - s.Contains(acked, b21.Hash) - s.Contains(acked, b22.Hash) - s.Contains(acked, b30.Hash) - s.Contains(acked, b31.Hash) - s.Contains(acked, b32.Hash) - - // Make sure there are 2 candidates. - s.Require().Len(seq.candidateAckingStatusVectors, 2) - - // Check b00's height vector. - vec := seq.candidateAckingStatusVectors[b00.Hash] - s.Require().NotNil(vec) - s.NotContains(vec, validators[4]) - s.Equal(vec[validators[0]].minHeight, b00.Height) - s.Equal(vec[validators[0]].count, uint64(2)) - s.Equal(vec[validators[1]].minHeight, b11.Height) - s.Equal(vec[validators[1]].count, uint64(2)) - s.Equal(vec[validators[2]].minHeight, b21.Height) - s.Equal(vec[validators[2]].count, uint64(2)) - s.Equal(vec[validators[3]].minHeight, b31.Height) - s.Equal(vec[validators[3]].count, uint64(2)) - - // Check b10's height vector. - vec = seq.candidateAckingStatusVectors[b10.Hash] - s.Require().NotNil(vec) - s.NotContains(vec, validators[4]) - s.Equal(vec[validators[0]].minHeight, b01.Height) - s.Equal(vec[validators[0]].count, uint64(1)) - s.Equal(vec[validators[1]].minHeight, b10.Height) - s.Equal(vec[validators[1]].count, uint64(3)) - s.Equal(vec[validators[2]].minHeight, b20.Height) - s.Equal(vec[validators[2]].count, uint64(3)) - s.Equal(vec[validators[3]].minHeight, b30.Height) - s.Equal(vec[validators[3]].count, uint64(3)) - - // Check the first deliver. 
- hashes, early, err := seq.processBlock(b02) - s.True(early) - s.Nil(err) - expected := common.Hashes{b00.Hash, b10.Hash} - sort.Sort(expected) - s.Equal(hashes, expected) - - // Make sure b00, b10 are removed from current working set. - s.checkNotInWorkingSet(seq, b00) - s.checkNotInWorkingSet(seq, b10) - - // Check if candidates of next round are picked correctly. - s.Len(seq.candidateAckingStatusVectors, 2) - - // Check b01's height vector. - vec = seq.candidateAckingStatusVectors[b11.Hash] - s.Require().NotNil(vec) - s.NotContains(vec, validators[4]) - s.Equal(vec[validators[0]].minHeight, b01.Height) - s.Equal(vec[validators[0]].count, uint64(2)) - s.Equal(vec[validators[1]].minHeight, b11.Height) - s.Equal(vec[validators[1]].count, uint64(2)) - s.Equal(vec[validators[2]].minHeight, b21.Height) - s.Equal(vec[validators[2]].count, uint64(2)) - s.Equal(vec[validators[3]].minHeight, b11.Height) - s.Equal(vec[validators[3]].count, uint64(2)) - - // Check b20's height vector. - vec = seq.candidateAckingStatusVectors[b20.Hash] - s.Require().NotNil(vec) - s.NotContains(vec, validators[4]) - s.Equal(vec[validators[0]].minHeight, b02.Height) - s.Equal(vec[validators[0]].count, uint64(1)) - s.Equal(vec[validators[1]].minHeight, b12.Height) - s.Equal(vec[validators[1]].count, uint64(1)) - s.Equal(vec[validators[2]].minHeight, b20.Height) - s.Equal(vec[validators[2]].count, uint64(3)) - s.Equal(vec[validators[3]].minHeight, b30.Height) - s.Equal(vec[validators[3]].count, uint64(3)) - - s.checkNotDeliver(seq, b13) - - // Check the second deliver. - hashes, early, err = seq.processBlock(b03) - s.True(early) - s.Nil(err) - expected = common.Hashes{b11.Hash, b20.Hash} - sort.Sort(expected) - s.Equal(hashes, expected) - - // Make sure b11, b20 are removed from current working set. - s.checkNotInWorkingSet(seq, b11) - s.checkNotInWorkingSet(seq, b20) - - // Add b40, b41, b42 to pending set. 
- s.checkNotDeliver(seq, b40) - s.checkNotDeliver(seq, b41) - s.checkNotDeliver(seq, b42) - s.checkNotDeliver(seq, b14) - - // Make sure b01, b30, b40 are candidate in next round. - s.Len(seq.candidateAckingStatusVectors, 3) - vec = seq.candidateAckingStatusVectors[b01.Hash] - s.Require().NotNil(vec) - s.NotContains(vec, validators[4]) - s.Equal(vec[validators[0]].minHeight, b01.Height) - s.Equal(vec[validators[0]].count, uint64(3)) - s.Equal(vec[validators[1]].minHeight, b12.Height) - s.Equal(vec[validators[1]].count, uint64(3)) - s.Equal(vec[validators[2]].minHeight, b21.Height) - s.Equal(vec[validators[2]].count, uint64(2)) - s.Equal(vec[validators[3]].minHeight, b31.Height) - s.Equal(vec[validators[3]].count, uint64(2)) - - vec = seq.candidateAckingStatusVectors[b30.Hash] - s.Require().NotNil(vec) - s.NotContains(vec, validators[4]) - s.Equal(vec[validators[0]].minHeight, b03.Height) - s.Equal(vec[validators[0]].count, uint64(1)) - s.Equal(vec[validators[1]].minHeight, b13.Height) - s.Equal(vec[validators[1]].count, uint64(2)) - s.Equal(vec[validators[2]].minHeight, b22.Height) - s.Equal(vec[validators[2]].count, uint64(1)) - s.Equal(vec[validators[3]].minHeight, b30.Height) - s.Equal(vec[validators[3]].count, uint64(3)) - - vec = seq.candidateAckingStatusVectors[b40.Hash] - s.Require().NotNil(vec) - s.NotContains(vec, validators[0]) - s.NotContains(vec, validators[1]) - s.NotContains(vec, validators[2]) - s.NotContains(vec, validators[3]) - s.Equal(vec[validators[4]].minHeight, b40.Height) - s.Equal(vec[validators[4]].count, uint64(3)) - - // Make 'Acking Node Set' contains blocks from all validators, - // this should trigger not-early deliver. 
- hashes, early, err = seq.processBlock(b23) - s.False(early) - s.Nil(err) - expected = common.Hashes{b01.Hash, b30.Hash} - sort.Sort(expected) - s.Equal(expected, hashes) - - // Make sure b01, b30 not in working set - s.checkNotInWorkingSet(seq, b01) - s.checkNotInWorkingSet(seq, b30) - - // Make sure b21, b40 are candidates of next round. - s.Contains(seq.candidateAckingStatusVectors, b21.Hash) - s.Contains(seq.candidateAckingStatusVectors, b40.Hash) -} - -func (s *SequencerTestSuite) TestBasicCaseForK0() { - // This is a relatively simple test for K=0. - // - // 0 1 2 3 4 - // ------------------- - // . . . . . - // . . . . . - // o o o <- o <- o Height: 1 - // | \ | \ | | - // v v v v - // o o o <- o Height: 0 - seq := newSequencer(0, 3, 5) - validators := s.generateValidatorIDs(5) - - b00 := s.genRootBlock(validators[0], map[common.Hash]struct{}{}) - b10 := s.genRootBlock(validators[1], map[common.Hash]struct{}{}) - b20 := s.genRootBlock(validators[2], map[common.Hash]struct{}{}) - b30 := s.genRootBlock(validators[3], map[common.Hash]struct{}{ - b20.Hash: struct{}{}, - }) - b01 := &types.Block{ - ProposerID: validators[0], - ParentHash: b00.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - b00.Hash: struct{}{}, - b10.Hash: struct{}{}, - }, - } - b11 := &types.Block{ - ProposerID: validators[1], - ParentHash: b10.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - b10.Hash: struct{}{}, - b20.Hash: struct{}{}, - }, - } - b21 := &types.Block{ - ProposerID: validators[2], - ParentHash: b20.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - b20.Hash: struct{}{}, - }, - } - b31 := &types.Block{ - ProposerID: validators[3], - ParentHash: b30.Hash, - Hash: common.NewRandomHash(), - Height: 1, - Acks: map[common.Hash]struct{}{ - b21.Hash: struct{}{}, - b30.Hash: struct{}{}, - }, - } - b40 := s.genRootBlock(validators[4], map[common.Hash]struct{}{ - b31.Hash: 
struct{}{}, - }) - - s.checkNotDeliver(seq, b00) - s.checkNotDeliver(seq, b10) - s.checkNotDeliver(seq, b20) - s.checkNotDeliver(seq, b30) - s.checkNotDeliver(seq, b01) - s.checkNotDeliver(seq, b11) - s.checkNotDeliver(seq, b21) - s.checkNotDeliver(seq, b31) - - // Check status before delivering. - vec := seq.candidateAckingStatusVectors[b00.Hash] - s.Require().NotNil(vec) - s.Len(vec, 1) - s.Equal(vec[validators[0]].minHeight, b00.Height) - s.Equal(vec[validators[0]].count, uint64(2)) - - vec = seq.candidateAckingStatusVectors[b10.Hash] - s.Require().NotNil(vec) - s.Len(vec, 2) - s.Equal(vec[validators[0]].minHeight, b01.Height) - s.Equal(vec[validators[0]].count, uint64(1)) - s.Equal(vec[validators[1]].minHeight, b10.Height) - s.Equal(vec[validators[1]].count, uint64(2)) - - vec = seq.candidateAckingStatusVectors[b20.Hash] - s.Require().NotNil(vec) - s.Len(vec, 3) - s.Equal(vec[validators[1]].minHeight, b11.Height) - s.Equal(vec[validators[1]].count, uint64(1)) - s.Equal(vec[validators[2]].minHeight, b20.Height) - s.Equal(vec[validators[2]].count, uint64(2)) - s.Equal(vec[validators[3]].minHeight, b30.Height) - s.Equal(vec[validators[3]].count, uint64(2)) - - // This new block should trigger non-early deliver. - hashes, early, err := seq.processBlock(b40) - s.False(early) - s.Nil(err) - expected := common.Hashes{b20.Hash} - sort.Sort(expected) - s.Equal(expected, hashes) - - // Make sure b20 is no long existing in working set. - s.checkNotInWorkingSet(seq, b20) - - // Make sure b10, b30 are candidates for next round. - s.Contains(seq.candidateAckingStatusVectors, b10.Hash) - s.Contains(seq.candidateAckingStatusVectors, b30.Hash) -} - -func (s *SequencerTestSuite) baseTestRandomlyGeneratedBlocks( - seqConstructor func() *sequencer, - revealer test.Revealer, - repeat int) { - - // TODO (mission): make this part run concurrently. 
- revealingSequence := map[string]struct{}{} - orderingSequence := map[string]struct{}{} - for i := 0; i < repeat; i++ { - revealed := "" - ordered := "" - revealer.Reset() - seq := seqConstructor() - for { - // Reveal next block. - b, err := revealer.Next() - if err != nil { - if err == blockdb.ErrIterationFinished { - err = nil - break - } - } - s.Require().Nil(err) - revealed += b.Hash.String() + "," - - // Perform total ordering. - hashes, _, err := seq.processBlock(&b) - s.Require().Nil(err) - for _, h := range hashes { - ordered += h.String() + "," - } - } - revealingSequence[revealed] = struct{}{} - orderingSequence[ordered] = struct{}{} - } - - // Make sure we test at least two different - // revealing sequence. - s.True(len(revealingSequence) > 1) - // Make sure all ordering are equal or prefixed - // to another one. - for orderFrom := range orderingSequence { - for orderTo := range orderingSequence { - if orderFrom == orderTo { - continue - } - ok := strings.HasPrefix(orderFrom, orderTo) || - strings.HasPrefix(orderTo, orderFrom) - s.True(ok) - } - } -} - -func (s *SequencerTestSuite) TestRandomlyGeneratedBlocks() { - var ( - validatorCount = 19 - blockCount = 50 - phi uint64 = 10 - repeat = 10 - ) - - // Prepare a randomly genearated blocks. - db, err := blockdb.NewMemBackedBlockDB("test-sequencer-random.blockdb") - s.Require().Nil(err) - defer func() { - // If the test fails, keep the block database for troubleshooting. - if s.T().Failed() { - s.Nil(db.Close()) - } - }() - - gen := test.NewBlocksGenerator(nil) - s.Require().Nil(gen.Generate(validatorCount, blockCount, nil, db)) - iter, err := db.GetAll() - s.Require().Nil(err) - // Setup a revealer that would reveal blocks forming - // valid DAGs. - revealer, err := test.NewRandomDAGRevealer(iter) - s.Require().Nil(err) - - // Test for K=0. 
- constructor := func() *sequencer { - return newSequencer(0, phi, uint64(validatorCount)) - } - s.baseTestRandomlyGeneratedBlocks(constructor, revealer, repeat) - // Test for K=1, - constructor = func() *sequencer { - return newSequencer(1, phi, uint64(validatorCount)) - } - s.baseTestRandomlyGeneratedBlocks(constructor, revealer, repeat) - // Test for K=2, - constructor = func() *sequencer { - return newSequencer(2, phi, uint64(validatorCount)) - } - s.baseTestRandomlyGeneratedBlocks(constructor, revealer, repeat) - // Test for K=3, - constructor = func() *sequencer { - return newSequencer(2, phi, uint64(validatorCount)) - } - s.baseTestRandomlyGeneratedBlocks(constructor, revealer, repeat) -} - -func TestSequencer(t *testing.T) { - suite.Run(t, new(SequencerTestSuite)) -} diff --git a/core/timestamp.go b/core/timestamp.go deleted file mode 100644 index 896d137..0000000 --- a/core/timestamp.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2018 The dexon-consensus-core Authors -// This file is part of the dexon-consensus-core library. -// -// The dexon-consensus-core library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus-core library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus-core library. If not, see -// . 
- -package core - -import ( - "errors" - "sort" - "time" - - "github.com/dexon-foundation/dexon-consensus-core/common" - "github.com/dexon-foundation/dexon-consensus-core/core/types" -) - -// Timestamp is for Concensus Timestamp Algorithm. -type Timestamp struct { - lastMainChainBlock *types.Block - blocksNotInMainChain []*types.Block -} - -var ( - // ErrInvalidMainChain would be reported if the invalid result from - // main chain selection algorithm is detected. - ErrInvalidMainChain = errors.New("invalid main chain") - // ErrEmptyTimestamps would be reported if Block.timestamps is empty. - ErrEmptyTimestamps = errors.New("timestamp vector should not be empty") -) - -// NewTimestamp create Timestamp object. -func NewTimestamp() *Timestamp { - return &Timestamp{} -} - -// ProcessBlocks is the entry function. -func (ts *Timestamp) ProcessBlocks(blocks []*types.Block) ( - blocksWithTimestamp []*types.Block, mainChain []*types.Block, err error) { - if len(blocks) == 0 { - // TODO (jimmy-dexon): Remove this panic before release. - panic("Unexpected empty block list.") - } - outputFirstBlock := true - blocks = append(ts.blocksNotInMainChain, blocks...) - if ts.lastMainChainBlock != nil { - // TODO (jimmy-dexon): The performance here can be optimized. - blocks = append([]*types.Block{ts.lastMainChainBlock}, blocks...) - outputFirstBlock = false - } - mainChain, nonMainChain := ts.selectMainChain(blocks) - ts.blocksNotInMainChain = nonMainChain - ts.lastMainChainBlock = mainChain[len(mainChain)-1] - blocksWithTimestamp = blocks[:len(blocks)-len(nonMainChain)] - leftMainChainIdx := 0 - rightMainChainIdx := 0 - idxMainChain := 0 - for idx, block := range blocksWithTimestamp { - if idxMainChain >= len(mainChain) { - err = ErrInvalidMainChain - return - } else if block.Hash == mainChain[idxMainChain].Hash { - rightMainChainIdx = idx - blocksWithTimestamp[idx].ConsensusTime, err = ts.getMedianTime(block) - if err != nil { - return - } - // Process Non-MainChain blocks. 
- if rightMainChainIdx > leftMainChainIdx { - for idx, timestamp := range interpoTime( - blocksWithTimestamp[leftMainChainIdx].ConsensusTime, - blocksWithTimestamp[rightMainChainIdx].ConsensusTime, - rightMainChainIdx-leftMainChainIdx-1) { - blocksWithTimestamp[leftMainChainIdx+idx+1].ConsensusTime = timestamp - } - } - leftMainChainIdx = idx - idxMainChain++ - } - } - if !outputFirstBlock { - blocksWithTimestamp = blocksWithTimestamp[1:] - } - return -} - -func (ts *Timestamp) selectMainChain(blocks []*types.Block) ( - mainChain []*types.Block, nonMainChain []*types.Block) { - for _, block := range blocks { - if len(mainChain) != 0 { - if _, exists := block.Acks[mainChain[len(mainChain)-1].Hash]; !exists { - nonMainChain = append(nonMainChain, block) - continue - } - } - nonMainChain = []*types.Block{} - mainChain = append(mainChain, block) - } - return -} - -func (ts *Timestamp) getMedianTime(block *types.Block) ( - timestamp time.Time, err error) { - timestamps := []time.Time{} - for _, timestamp := range block.Timestamps { - timestamps = append(timestamps, timestamp) - } - if len(timestamps) == 0 { - err = ErrEmptyTimestamps - return - } - sort.Sort(common.ByTime(timestamps)) - if len(timestamps)%2 == 0 { - t1 := timestamps[len(timestamps)/2-1] - t2 := timestamps[len(timestamps)/2] - timestamp = interpoTime(t1, t2, 1)[0] - } else { - timestamp = timestamps[len(timestamps)/2] - } - return -} - -func interpoTime(t1 time.Time, t2 time.Time, sep int) []time.Time { - if sep == 0 { - return []time.Time{} - } - if t1.After(t2) { - return interpoTime(t2, t1, sep) - } - timestamps := make([]time.Time, sep) - duration := t2.Sub(t1) - period := time.Duration( - (duration.Nanoseconds() / int64(sep+1))) * time.Nanosecond - prevTime := t1 - for idx := range timestamps { - prevTime = prevTime.Add(period) - timestamps[idx] = prevTime - } - return timestamps -} diff --git a/core/timestamp_test.go b/core/timestamp_test.go deleted file mode 100644 index d47c776..0000000 --- 
a/core/timestamp_test.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2018 The dexon-consensus-core Authors -// This file is part of the dexon-consensus-core library. -// -// The dexon-consensus-core library is free software: you can redistribute it -// and/or modify it under the terms of the GNU Lesser General Public License as -// published by the Free Software Foundation, either version 3 of the License, -// or (at your option) any later version. -// -// The dexon-consensus-core library is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -// General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the dexon-consensus-core library. If not, see -// . - -package core - -import ( - "math" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/suite" - - "github.com/dexon-foundation/dexon-consensus-core/common" - "github.com/dexon-foundation/dexon-consensus-core/core/types" -) - -type TimestampTest struct { - suite.Suite -} - -func generateBlocksWithAcks(blockNum, maxAcks int) []*types.Block { - chain := []*types.Block{ - &types.Block{ - Hash: common.NewRandomHash(), - Acks: make(map[common.Hash]struct{}), - }, - } - for i := 1; i < blockNum; i++ { - acks := make(map[common.Hash]struct{}) - ackNum := rand.Intn(maxAcks) + 1 - for j := 0; j < ackNum; j++ { - ack := rand.Intn(len(chain)) - acks[chain[ack].Hash] = struct{}{} - } - block := &types.Block{ - Hash: common.NewRandomHash(), - Acks: acks, - } - chain = append(chain, block) - } - return chain -} - -func fillBlocksTimestamps(blocks []*types.Block, validatorNum int, - step, sigma time.Duration) { - curTime := time.Now().UTC() - vIDs := make([]types.ValidatorID, validatorNum) - for i := 0; i < validatorNum; i++ { - vIDs[i] = types.ValidatorID{Hash: common.NewRandomHash()} - } - for 
_, block := range blocks { - block.Timestamps = make(map[types.ValidatorID]time.Time) - for _, vID := range vIDs { - diffSeconds := rand.NormFloat64() * sigma.Seconds() - diffSeconds = math.Min(diffSeconds, step.Seconds()/2) - diffSeconds = math.Max(diffSeconds, -step.Seconds()/2) - diffDuration := time.Duration(diffSeconds*1000) * time.Millisecond - block.Timestamps[vID] = curTime.Add(diffDuration) - } - curTime = curTime.Add(step) - } -} - -func extractTimestamps(blocks []*types.Block) []time.Time { - timestamps := make([]time.Time, len(blocks)) - for idx, block := range blocks { - timestamps[idx] = block.ConsensusTime - } - return timestamps -} - -func (s *TimestampTest) TestMainChainSelection() { - ts := NewTimestamp() - ts2 := NewTimestamp() - blockNums := []int{50, 100, 30} - maxAcks := 5 - for _, blockNum := range blockNums { - chain := generateBlocksWithAcks(blockNum, maxAcks) - mainChain, _ := ts.selectMainChain(chain) - // Verify the selected main chain. - for i := 1; i < len(mainChain); i++ { - _, exists := mainChain[i].Acks[mainChain[i-1].Hash] - s.True(exists) - } - // Verify if selectMainChain is stable. 
- mainChain2, _ := ts2.selectMainChain(chain) - s.Equal(mainChain, mainChain2) - } -} - -func (s *TimestampTest) TestTimestampPartition() { - blockNums := []int{50, 100, 30} - validatorNum := 19 - sigma := 100 * time.Millisecond - maxAcks := 5 - totalMainChain := make([]*types.Block, 1) - totalChain := make([]*types.Block, 0) - totalTimestamps := make([]time.Time, 0) - ts := NewTimestamp() - var lastMainChainBlock *types.Block - for _, blockNum := range blockNums { - chain := generateBlocksWithAcks(blockNum, maxAcks) - fillBlocksTimestamps(chain, validatorNum, time.Second, sigma) - blocksWithTimestamps, mainChain, err := ts.ProcessBlocks(chain) - s.Require().Nil(err) - timestamps := extractTimestamps(blocksWithTimestamps) - if lastMainChainBlock != nil { - s.Require().Equal(mainChain[0], lastMainChainBlock) - } - s.Require().Equal(mainChain[len(mainChain)-1], ts.lastMainChainBlock) - lastMainChainBlock = ts.lastMainChainBlock - totalMainChain = - append(totalMainChain[:len(totalMainChain)-1], mainChain...) - totalChain = append(totalChain, chain...) - totalTimestamps = append(totalTimestamps, timestamps...) 
- } - ts2 := NewTimestamp() - blocksWithTimestamps2, mainChain2, err := ts2.ProcessBlocks(totalChain) - s.Require().Nil(err) - timestamps2 := extractTimestamps(blocksWithTimestamps2) - s.Equal(totalMainChain, mainChain2) - s.Equal(totalTimestamps, timestamps2) -} - -func timeDiffWithinTolerance(t1, t2 time.Time, tolerance time.Duration) bool { - if t1.After(t2) { - return timeDiffWithinTolerance(t2, t1, tolerance) - } - return t1.Add(tolerance).After(t2) -} - -func (s *TimestampTest) TestTimestampIncrease() { - validatorNum := 19 - sigma := 100 * time.Millisecond - ts := NewTimestamp() - chain := generateBlocksWithAcks(1000, 5) - fillBlocksTimestamps(chain, validatorNum, time.Second, sigma) - blocksWithTimestamps, _, err := ts.ProcessBlocks(chain) - s.Require().Nil(err) - timestamps := extractTimestamps(blocksWithTimestamps) - for i := 1; i < len(timestamps); i++ { - s.True(timestamps[i].After(timestamps[i-1])) - } - // Test if the ProcessBlocks is stable. - ts2 := NewTimestamp() - blocksWithTimestamps2, _, err := ts2.ProcessBlocks(chain) - s.Require().Nil(err) - timestamps2 := extractTimestamps(blocksWithTimestamps2) - s.Equal(timestamps, timestamps2) -} - -func (s *TimestampTest) TestByzantineBiasTime() { - // Test that Byzantine node cannot bias the timestamps. - validatorNum := 19 - sigma := 100 * time.Millisecond - tolerance := 4 * sigma - ts := NewTimestamp() - chain := generateBlocksWithAcks(1000, 5) - fillBlocksTimestamps(chain, validatorNum, time.Second, sigma) - blocksWithTimestamps, _, err := ts.ProcessBlocks(chain) - s.Require().Nil(err) - timestamps := extractTimestamps(blocksWithTimestamps) - byzantine := validatorNum / 3 - validators := make([]types.ValidatorID, 0, validatorNum) - for vID := range chain[0].Timestamps { - validators = append(validators, vID) - } - // The number of Byzantine node is at most N/3. - for i := 0; i < byzantine; i++ { - // Pick one validator to be Byzantine node. 
- // It is allowed to have the vID be duplicated, - // because the number of Byzantine node is between 1 and N/3. - vID := validators[rand.Intn(validatorNum)] - for _, block := range chain { - block.Timestamps[vID] = time.Time{} - } - } - tsByzantine := NewTimestamp() - blocksWithTimestampsB, _, err := tsByzantine.ProcessBlocks(chain) - s.Require().Nil(err) - timestampsWithByzantine := extractTimestamps(blocksWithTimestampsB) - for idx, timestamp := range timestamps { - timestampWithByzantine := timestampsWithByzantine[idx] - s.True(timeDiffWithinTolerance( - timestamp, timestampWithByzantine, tolerance)) - } -} - -func TestTimestamp(t *testing.T) { - suite.Run(t, new(TimestampTest)) -} diff --git a/core/total-ordering.go b/core/total-ordering.go new file mode 100644 index 0000000..63d3416 --- /dev/null +++ b/core/total-ordering.go @@ -0,0 +1,522 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// . + +package core + +import ( + "fmt" + "sort" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/types" +) + +// ErrNotValidDAG would be reported when block subbmitted to totalOrdering +// didn't form a DAG. 
+var ErrNotValidDAG = fmt.Errorf("not a valid dag") + +// ackingStatusVector describes the acking status, either globally or just +// for one candidate. +// +// When block A acks block B, all blocks proposed from the same proposer +// as block A with higher height would also acks block B. Therefore, +// we just need to record: +// - the minimum height of acking block from that proposer +// - count of acking blocks from that proposer +// to repsent the acking status for block A. +type ackingStatusVector map[types.ValidatorID]*struct{ minHeight, count uint64 } + +// addBlock would update ackingStatusVector, it's caller's duty +// to make sure the input block acutally acking the target block. +func (v ackingStatusVector) addBlock(b *types.Block) (err error) { + rec, exists := v[b.ProposerID] + if !exists { + v[b.ProposerID] = &struct { + minHeight, count uint64 + }{ + minHeight: b.Height, + count: 1, + } + } else { + if b.Height < rec.minHeight { + err = ErrNotValidDAG + return + } + rec.count++ + } + return +} + +// getAckingNodeSet would generate the Acking Node Set. +// Only block height larger than +// +// global minimum height + k +// +// would be taken into consideration, ex. +// +// For some validator X: +// - the global minimum acking height = 1, +// - k = 1 +// then only block height >= 2 would be added to acking node set. +func (v ackingStatusVector) getAckingNodeSet( + global ackingStatusVector, k uint64) map[types.ValidatorID]struct{} { + + ret := make(map[types.ValidatorID]struct{}) + for vID, gRec := range global { + rec, exists := v[vID] + if !exists { + continue + } + + // This line would check if these two ranges would overlap: + // - (global minimum height + k, infinity) + // - (local minimum height, local minimum height + count - 1) + if rec.minHeight+rec.count-1 >= gRec.minHeight+k { + ret[vID] = struct{}{} + } + } + return ret +} + +// getAckingHeightVector would convert 'ackingStatusVector' to +// Acking Height Vector. 
+// +// Only block height equals to (global minimum block height + k) would be +// taken into consideration. +func (v ackingStatusVector) getAckingHeightVector( + global ackingStatusVector, k uint64) map[types.ValidatorID]uint64 { + + ret := make(map[types.ValidatorID]uint64) + for vID, gRec := range global { + rec, exists := v[vID] + + if gRec.count <= k { + continue + } else if !exists { + ret[vID] = infinity + } else if rec.minHeight <= gRec.minHeight+k { + // This check is sufficient to make sure the block height: + // + // gRec.minHeight + k + // + // would be included in this ackingStatusVector. + ret[vID] = gRec.minHeight + k + } else { + ret[vID] = infinity + } + } + return ret +} + +// blockVector stores all blocks grouped by their proposers and +// sorted by their block height. +type blockVector map[types.ValidatorID][]*types.Block + +func (v blockVector) addBlock(b *types.Block) (err error) { + blocksFromProposer := v[b.ProposerID] + if len(blocksFromProposer) > 0 { + lastBlock := blocksFromProposer[len(blocksFromProposer)-1] + if b.Height-lastBlock.Height != 1 { + err = ErrNotValidDAG + return + } + } + v[b.ProposerID] = append(blocksFromProposer, b) + return +} + +// getHeightVector would convert a blockVector to +// ackingStatusVectorAckingStatus. +func (v blockVector) getHeightVector() ackingStatusVector { + ret := ackingStatusVector{} + for vID, vec := range v { + if len(vec) == 0 { + continue + } + ret[vID] = &struct { + minHeight, count uint64 + }{ + minHeight: vec[0].Height, + count: uint64(len(vec)), + } + } + return ret +} + +// totalOrdering represent a process unit to handle total ordering +// for blocks. +type totalOrdering struct { + // pendings stores blocks awaiting to be ordered. + pendings map[common.Hash]*types.Block + + // k represents the k in 'k-level total ordering'. + // In short, only block height equals to (global minimum height + k) + // would be taken into consideration. 
+ k uint64 + + // phi is a const to control how strong the leading preceding block + // should be. + phi uint64 + + // validatorCount is the count of validator set. + validatorCount uint64 + + // globalVector group all pending blocks by proposers and + // sort them by block height. This structure is helpful when: + // + // - build global height vector + // - picking candidates next round + globalVector blockVector + + // candidateAckingStatusVectors caches ackingStatusVector of candidates. + candidateAckingStatusVectors map[common.Hash]ackingStatusVector + + // acked cache the 'block A acked by block B' relation by + // keeping a record in acked[A.Hash][B.Hash] + acked map[common.Hash]map[common.Hash]struct{} +} + +func newTotalOrdering(k, phi, validatorCount uint64) *totalOrdering { + return &totalOrdering{ + candidateAckingStatusVectors: make(map[common.Hash]ackingStatusVector), + pendings: make(map[common.Hash]*types.Block), + k: k, + phi: phi, + validatorCount: validatorCount, + globalVector: blockVector{}, + acked: make(map[common.Hash]map[common.Hash]struct{}), + } +} + +// buildBlockRelation populates the acked according their acking relationships. +func (to *totalOrdering) buildBlockRelation(b *types.Block) { + // populateAcked would update all blocks implcitly acked + // by input block recursively. + var populateAcked func(bx, target *types.Block) + populateAcked = func(bx, target *types.Block) { + for ack := range bx.Acks { + acked, exists := to.acked[ack] + if !exists { + acked = make(map[common.Hash]struct{}) + to.acked[ack] = acked + } + + // This means we've walked this block already. + if _, alreadyPopulated := acked[target.Hash]; alreadyPopulated { + continue + } + acked[target.Hash] = struct{}{} + + // See if we need to go forward. + if nextBlock, exists := to.pendings[ack]; !exists { + continue + } else { + populateAcked(nextBlock, target) + } + } + } + populateAcked(b, b) +} + +// clean would remove a block from working set. 
This behaviour +// would prevent our memory usage growing infinity. +func (to *totalOrdering) clean(h common.Hash) { + delete(to.acked, h) + delete(to.pendings, h) + delete(to.candidateAckingStatusVectors, h) +} + +// updateVectors is a helper function to update all cached vectors. +func (to *totalOrdering) updateVectors(b *types.Block) (err error) { + // Update global height vector + err = to.globalVector.addBlock(b) + if err != nil { + return + } + + // Update acking status of candidates. + for candidate, vector := range to.candidateAckingStatusVectors { + if _, acked := to.acked[candidate][b.Hash]; !acked { + continue + } + if err = vector.addBlock(b); err != nil { + return + } + } + return +} + +// grade implements the 'grade' potential function described in white paper. +func (to *totalOrdering) grade( + hvFrom, hvTo map[types.ValidatorID]uint64, + globalAns map[types.ValidatorID]struct{}) int { + + count := uint64(0) + for vID, hFrom := range hvFrom { + hTo, exists := hvTo[vID] + if !exists { + continue + } + + if hFrom != infinity && hTo == infinity { + count++ + } + } + + if count >= to.phi { + return 1 + } else if count < to.phi-to.validatorCount+uint64(len(globalAns)) { + return 0 + } else { + return -1 + } +} + +// buildAckingStatusVectorForNewCandidate is a helper function to +// build ackingStatusVector for new candidate. +func (to *totalOrdering) buildAckingStatusVectorForNewCandidate( + candidate *types.Block) (hVec ackingStatusVector) { + + blocks := to.globalVector[candidate.ProposerID] + hVec = ackingStatusVector{ + candidate.ProposerID: &struct { + minHeight, count uint64 + }{ + minHeight: candidate.Height, + count: uint64(len(blocks)), + }, + } + + ackedsForCandidate, exists := to.acked[candidate.Hash] + if !exists { + // This candidate is acked by nobody. 
+ return + } + + for vID, blocks := range to.globalVector { + if vID == candidate.ProposerID { + continue + } + + for i, b := range blocks { + if _, acked := ackedsForCandidate[b.Hash]; !acked { + continue + } + + // If this block acks this candidate, all newer blocks + // from the same validator also 'indirect' acks it. + hVec[vID] = &struct { + minHeight, count uint64 + }{ + minHeight: b.Height, + count: uint64(len(blocks) - i), + } + break + } + } + return +} + +// isAckOnlyPrecedings is a helper function to check if a block +// only contain acks to delivered blocks. +func (to *totalOrdering) isAckOnlyPrecedings(b *types.Block) bool { + for ack := range b.Acks { + if _, pending := to.pendings[ack]; pending { + return false + } + } + return true +} + +// output is a helper function to finish the delivery of +// deliverable preceding set. +func (to *totalOrdering) output(precedings map[common.Hash]struct{}) common.Hashes { + ret := common.Hashes{} + for p := range precedings { + ret = append(ret, p) + + // Remove the first element from corresponding blockVector. + b := to.pendings[p] + to.globalVector[b.ProposerID] = to.globalVector[b.ProposerID][1:] + + // Remove block relations. + to.clean(p) + } + sort.Sort(ret) + + // Find new candidates from tip of globalVector of each validator. + // The complexity here is O(N^2logN). + for _, blocks := range to.globalVector { + if len(blocks) == 0 { + continue + } + + tip := blocks[0] + if _, alreadyCandidate := + to.candidateAckingStatusVectors[tip.Hash]; alreadyCandidate { + continue + } + + if !to.isAckOnlyPrecedings(tip) { + continue + } + + // Build ackingStatusVector for new candidate. 
+ to.candidateAckingStatusVectors[tip.Hash] = + to.buildAckingStatusVectorForNewCandidate(tip) + } + return ret +} + +// generateDeliverSet would: +// - generate preceding set +// - check if the preceding set deliverable by checking potential function +func (to *totalOrdering) generateDeliverSet() ( + delivered map[common.Hash]struct{}, early bool) { + + globalHeightVector := to.globalVector.getHeightVector() + ahvs := map[common.Hash]map[types.ValidatorID]uint64{} + for candidate, v := range to.candidateAckingStatusVectors { + ahvs[candidate] = v.getAckingHeightVector(globalHeightVector, to.k) + } + + globalAns := globalHeightVector.getAckingNodeSet(globalHeightVector, to.k) + precedings := make(map[common.Hash]struct{}) + +CheckNextCandidateLoop: + for candidate := range to.candidateAckingStatusVectors { + for otherCandidate := range to.candidateAckingStatusVectors { + if candidate == otherCandidate { + continue + } + if to.grade(ahvs[otherCandidate], ahvs[candidate], globalAns) != 0 { + continue CheckNextCandidateLoop + } + } + precedings[candidate] = struct{}{} + } + + if len(precedings) == 0 { + return + } + + // internal is a helper function to verify internal stability. + internal := func() bool { + for candidate := range to.candidateAckingStatusVectors { + if _, isPreceding := precedings[candidate]; isPreceding { + continue + } + + beaten := false + for p := range precedings { + if beaten = + to.grade(ahvs[p], ahvs[candidate], globalAns) == 1; beaten { + break + } + } + if !beaten { + return false + } + } + return true + } + + // checkAHV is a helper function to verify external stability. + // It would make sure some preceding block is strong enough + // to lead the whole preceding set. 
+ checkAHV := func() bool { + for p := range precedings { + count := uint64(0) + for _, v := range ahvs[p] { + if v != infinity { + count++ + } + } + + if count > to.phi { + return true + } + } + return false + } + + // checkANS is a helper function to verify external stability. + // It would make sure all preceding blocks are strong enough + // to be delivered. + checkANS := func() bool { + for p := range precedings { + validatorAns := to.candidateAckingStatusVectors[p].getAckingNodeSet( + globalHeightVector, to.k) + if uint64(len(validatorAns)) < to.validatorCount-to.phi { + return false + } + } + + return true + } + + // Check internal stability first. + if !internal() { + return + } + + // If all validators propose enough blocks, we should force + // to deliver since the whole picture of the DAG is revealed. + if uint64(len(globalAns)) != to.validatorCount { + // The whole picture is not ready, we need to check if + // exteranl stability is met, and we can deliver earlier. + if checkAHV() && checkANS() { + early = true + } else { + return + } + } + + delivered = precedings + return +} + +// processBlock is the entry point of totalOrdering. +func (to *totalOrdering) processBlock(b *types.Block) ( + delivered common.Hashes, early bool, err error) { + + // NOTE: I assume the block 'b' is already safe for total ordering. + // That means, all its acking blocks are during/after + // total ordering stage. + + // Incremental part. + to.pendings[b.Hash] = b + to.buildBlockRelation(b) + if err = to.updateVectors(b); err != nil { + return + } + if to.isAckOnlyPrecedings(b) { + to.candidateAckingStatusVectors[b.Hash] = + to.buildAckingStatusVectorForNewCandidate(b) + } + + // Not-Incremental part (yet). 
+ // - generate ahv for each candidate + // - generate ans for each candidate + // - generate global ans + // - find preceding set + hashes, early := to.generateDeliverSet() + + // output precedings + delivered = to.output(hashes) + return +} diff --git a/core/total-ordering_test.go b/core/total-ordering_test.go new file mode 100644 index 0000000..11e253a --- /dev/null +++ b/core/total-ordering_test.go @@ -0,0 +1,996 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// . 
+ +package core + +import ( + "sort" + "strings" + "testing" + + "github.com/dexon-foundation/dexon-consensus-core/blockdb" + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/test" + "github.com/dexon-foundation/dexon-consensus-core/core/types" + "github.com/stretchr/testify/suite" +) + +type TotalOrderingTestSuite struct { + suite.Suite +} + +func (s *TotalOrderingTestSuite) generateValidatorIDs( + count int) []types.ValidatorID { + + validatorIDs := []types.ValidatorID{} + for i := 0; i < count; i++ { + validatorIDs = append(validatorIDs, + types.ValidatorID{Hash: common.NewRandomHash()}) + } + + return validatorIDs +} + +func (s *TotalOrderingTestSuite) genRootBlock( + vID types.ValidatorID, acks map[common.Hash]struct{}) *types.Block { + + hash := common.NewRandomHash() + return &types.Block{ + ProposerID: vID, + ParentHash: hash, + Hash: hash, + Height: 0, + Acks: acks, + } +} + +func (s *TotalOrderingTestSuite) checkNotDeliver(to *totalOrdering, b *types.Block) { + hashes, eqrly, err := to.processBlock(b) + s.Empty(hashes) + s.False(eqrly) + s.Nil(err) +} + +func (s *TotalOrderingTestSuite) checkNotInWorkingSet( + to *totalOrdering, b *types.Block) { + + s.NotContains(to.pendings, b.Hash) + s.NotContains(to.acked, b.Hash) +} + +func (s *TotalOrderingTestSuite) TestBlockRelation() { + // This test case would verify if 'acking' and 'acked' + // accumulated correctly. 
+ // + // The DAG used below is: + // A <- B <- C + + vID := types.ValidatorID{Hash: common.NewRandomHash()} + + hash := common.NewRandomHash() + blockA := &types.Block{ + ProposerID: vID, + ParentHash: hash, + Hash: hash, + Height: 0, + Acks: map[common.Hash]struct{}{}, + } + blockB := &types.Block{ + ProposerID: vID, + ParentHash: blockA.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + blockA.Hash: struct{}{}, + }, + } + blockC := &types.Block{ + ProposerID: vID, + ParentHash: blockB.Hash, + Hash: common.NewRandomHash(), + Height: 2, + Acks: map[common.Hash]struct{}{ + blockB.Hash: struct{}{}, + }, + } + + to := newTotalOrdering(1, 3, 5) + s.checkNotDeliver(to, blockA) + s.checkNotDeliver(to, blockB) + s.checkNotDeliver(to, blockC) + + // Check 'acked'. + ackedA := to.acked[blockA.Hash] + s.Require().NotNil(ackedA) + s.Len(ackedA, 2) + s.Contains(ackedA, blockB.Hash) + s.Contains(ackedA, blockC.Hash) + + ackedB := to.acked[blockB.Hash] + s.Require().NotNil(ackedB) + s.Len(ackedB, 1) + s.Contains(ackedB, blockC.Hash) + + s.Nil(to.acked[blockC.Hash]) +} + +func (s *TotalOrderingTestSuite) TestCreateAckingHeightVectorFromHeightVector() { + validators := s.generateValidatorIDs(5) + global := ackingStatusVector{ + validators[0]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 5}, + validators[1]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 5}, + validators[2]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 5}, + validators[3]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 5}, + } + + // For 'not existed' record in local but exist in global, + // should be infinity. 
+ ahv := ackingStatusVector{ + validators[0]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 2}, + }.getAckingHeightVector(global, 0) + s.Len(ahv, 4) + s.Equal(ahv[validators[0]], uint64(0)) + s.Equal(ahv[validators[1]], infinity) + s.Equal(ahv[validators[2]], infinity) + s.Equal(ahv[validators[3]], infinity) + + // For local min exceeds global's min+k-1, should be infinity + hv := ackingStatusVector{ + validators[0]: &struct{ minHeight, count uint64 }{ + minHeight: 3, count: 1}, + } + ahv = hv.getAckingHeightVector(global, 2) + s.Equal(ahv[validators[0]], infinity) + ahv = hv.getAckingHeightVector(global, 3) + s.Equal(ahv[validators[0]], uint64(3)) + + ahv = ackingStatusVector{ + validators[0]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 3}, + validators[1]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 3}, + }.getAckingHeightVector(global, 5) + s.Len(ahv, 0) +} + +func (s *TotalOrderingTestSuite) TestCreateAckingNodeSetFromHeightVector() { + validators := s.generateValidatorIDs(5) + global := ackingStatusVector{ + validators[0]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 5}, + validators[1]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 5}, + validators[2]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 5}, + validators[3]: &struct{ minHeight, count uint64 }{ + minHeight: 0, count: 5}, + } + + local := ackingStatusVector{ + validators[0]: &struct{ minHeight, count uint64 }{ + minHeight: 1, count: 2}, + } + s.Len(local.getAckingNodeSet(global, 1), 1) + s.Len(local.getAckingNodeSet(global, 2), 1) + s.Len(local.getAckingNodeSet(global, 3), 0) +} + +func (s *TotalOrderingTestSuite) TestGrade() { + validators := s.generateValidatorIDs(5) + to := newTotalOrdering(1, 3, 5) // K doesn't matter when calculating preceding. 
+ + ans := map[types.ValidatorID]struct{}{ + validators[0]: struct{}{}, + validators[1]: struct{}{}, + validators[2]: struct{}{}, + validators[3]: struct{}{}, + } + + ahv1 := map[types.ValidatorID]uint64{ + validators[0]: 1, + validators[1]: infinity, + validators[2]: infinity, + validators[3]: infinity, + } + ahv2 := map[types.ValidatorID]uint64{ + validators[0]: 1, + validators[1]: 1, + validators[2]: 1, + validators[3]: 1, + } + ahv3 := map[types.ValidatorID]uint64{ + validators[0]: 1, + validators[1]: 1, + validators[2]: infinity, + validators[3]: infinity, + } + s.Equal(to.grade(ahv2, ahv1, ans), 1) + s.Equal(to.grade(ahv1, ahv2, ans), 0) + s.Equal(to.grade(ahv2, ahv3, ans), -1) + s.Equal(to.grade(ahv3, ahv2, ans), 0) +} + +func (s *TotalOrderingTestSuite) TestCycleDetection() { + // Make sure we don't get hang by cycle from + // block's acks. + validators := s.generateValidatorIDs(5) + + // create blocks with cycles in acking relation. + cycledHash := common.NewRandomHash() + hash := common.NewRandomHash() + b00 := &types.Block{ + ProposerID: validators[0], + ParentHash: hash, + Hash: hash, + Height: 0, + Acks: map[common.Hash]struct{}{ + cycledHash: struct{}{}, + }, + } + b01 := &types.Block{ + ProposerID: validators[0], + ParentHash: b00.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + b00.Hash: struct{}{}, + }, + } + b02 := &types.Block{ + ProposerID: validators[0], + ParentHash: b01.Hash, + Hash: common.NewRandomHash(), + Height: 2, + Acks: map[common.Hash]struct{}{ + b01.Hash: struct{}{}, + }, + } + b03 := &types.Block{ + ProposerID: validators[0], + ParentHash: b02.Hash, + Hash: cycledHash, + Height: 3, + Acks: map[common.Hash]struct{}{ + b02.Hash: struct{}{}, + }, + } + + // Create a block acks self. 
+ hash = common.NewRandomHash() + b10 := &types.Block{ + ProposerID: validators[1], + ParentHash: hash, + Hash: hash, + Height: 0, + Acks: map[common.Hash]struct{}{ + hash: struct{}{}, + }, + } + + // Make sure we won't hang when cycle exists. + to := newTotalOrdering(1, 3, 5) + s.checkNotDeliver(to, b00) + s.checkNotDeliver(to, b01) + s.checkNotDeliver(to, b02) + + // Should not hang in this line. + s.checkNotDeliver(to, b03) + // Should not hang in this line + s.checkNotDeliver(to, b10) +} + +func (s *TotalOrderingTestSuite) TestNotValidDAGDetection() { + validators := s.generateValidatorIDs(4) + to := newTotalOrdering(1, 3, 5) + + hash := common.NewRandomHash() + b00 := &types.Block{ + ProposerID: validators[0], + ParentHash: hash, + Hash: hash, + Height: 0, + Acks: map[common.Hash]struct{}{}, + } + b01 := &types.Block{ + ProposerID: validators[0], + ParentHash: b00.Hash, + Hash: common.NewRandomHash(), + } + + // When submit to block with lower height to totalOrdering, + // caller should receive an error. + s.checkNotDeliver(to, b01) + _, _, err := to.processBlock(b00) + s.Equal(err, ErrNotValidDAG) +} + +func (s *TotalOrderingTestSuite) TestEarlyDeliver() { + // The test scenario: + // + // o o o o o + // : : : : : <- (K - 1) layers + // o o o o o + // \ v / | + // o o + // A B + // Even when B is not received, A should + // be able to be delivered. 
+ to := newTotalOrdering(2, 3, 5) + validators := s.generateValidatorIDs(5) + + genNextBlock := func(b *types.Block) *types.Block { + return &types.Block{ + ProposerID: b.ProposerID, + ParentHash: b.Hash, + Hash: common.NewRandomHash(), + Height: b.Height + 1, + Acks: map[common.Hash]struct{}{ + b.Hash: struct{}{}, + }, + } + } + + b00 := s.genRootBlock(validators[0], map[common.Hash]struct{}{}) + b01 := genNextBlock(b00) + b02 := genNextBlock(b01) + + b10 := s.genRootBlock(validators[1], map[common.Hash]struct{}{ + b00.Hash: struct{}{}, + }) + b11 := genNextBlock(b10) + b12 := genNextBlock(b11) + + b20 := s.genRootBlock(validators[2], map[common.Hash]struct{}{ + b00.Hash: struct{}{}, + }) + b21 := genNextBlock(b20) + b22 := genNextBlock(b21) + + b30 := s.genRootBlock(validators[3], map[common.Hash]struct{}{ + b00.Hash: struct{}{}, + }) + b31 := genNextBlock(b30) + b32 := genNextBlock(b31) + + // It's a valid block sequence to deliver + // to total ordering algorithm: DAG. + s.checkNotDeliver(to, b00) + s.checkNotDeliver(to, b01) + s.checkNotDeliver(to, b02) + + vec := to.candidateAckingStatusVectors[b00.Hash] + s.Require().NotNil(vec) + s.Len(vec, 1) + s.Equal(vec[validators[0]].minHeight, b00.Height) + s.Equal(vec[validators[0]].count, uint64(3)) + + s.checkNotDeliver(to, b10) + s.checkNotDeliver(to, b11) + s.checkNotDeliver(to, b12) + s.checkNotDeliver(to, b20) + s.checkNotDeliver(to, b21) + s.checkNotDeliver(to, b22) + s.checkNotDeliver(to, b30) + s.checkNotDeliver(to, b31) + + // Check the internal state before delivering. + s.Len(to.candidateAckingStatusVectors, 1) // b00 is the only candidate. 
+ + vec = to.candidateAckingStatusVectors[b00.Hash] + s.Require().NotNil(vec) + s.Len(vec, 4) + s.Equal(vec[validators[0]].minHeight, b00.Height) + s.Equal(vec[validators[0]].count, uint64(3)) + s.Equal(vec[validators[1]].minHeight, b10.Height) + s.Equal(vec[validators[1]].count, uint64(3)) + s.Equal(vec[validators[2]].minHeight, b20.Height) + s.Equal(vec[validators[2]].count, uint64(3)) + s.Equal(vec[validators[3]].minHeight, b30.Height) + s.Equal(vec[validators[3]].count, uint64(2)) + + hashes, early, err := to.processBlock(b32) + s.Require().Len(hashes, 1) + s.True(early) + s.Nil(err) + s.Equal(hashes[0], b00.Hash) + + // Check the internal state after delivered. + s.Len(to.candidateAckingStatusVectors, 4) // b01, b10, b20, b30 are candidates. + + // Check b01. + vec = to.candidateAckingStatusVectors[b01.Hash] + s.Require().NotNil(vec) + s.Len(vec, 1) + s.Equal(vec[validators[0]].minHeight, b01.Height) + s.Equal(vec[validators[0]].count, uint64(2)) + + // Check b10. + vec = to.candidateAckingStatusVectors[b10.Hash] + s.Require().NotNil(vec) + s.Len(vec, 1) + s.Equal(vec[validators[1]].minHeight, b10.Height) + s.Equal(vec[validators[1]].count, uint64(3)) + + // Check b20. + vec = to.candidateAckingStatusVectors[b20.Hash] + s.Require().NotNil(vec) + s.Len(vec, 1) + s.Equal(vec[validators[2]].minHeight, b20.Height) + s.Equal(vec[validators[2]].count, uint64(3)) + + // Check b30. + vec = to.candidateAckingStatusVectors[b30.Hash] + s.Require().NotNil(vec) + s.Len(vec, 1) + s.Equal(vec[validators[3]].minHeight, b30.Height) + s.Equal(vec[validators[3]].count, uint64(3)) + + // Make sure b00 doesn't exist in current working set: + s.checkNotInWorkingSet(to, b00) +} + +func (s *TotalOrderingTestSuite) TestBasicCaseForK2() { + // It's a handcrafted test case. 
+ to := newTotalOrdering(2, 3, 5) + validators := s.generateValidatorIDs(5) + + b00 := s.genRootBlock(validators[0], map[common.Hash]struct{}{}) + b10 := s.genRootBlock(validators[1], map[common.Hash]struct{}{}) + b20 := s.genRootBlock( + validators[2], map[common.Hash]struct{}{b10.Hash: struct{}{}}) + b30 := s.genRootBlock( + validators[3], map[common.Hash]struct{}{b20.Hash: struct{}{}}) + b40 := s.genRootBlock(validators[4], map[common.Hash]struct{}{}) + b11 := &types.Block{ + ProposerID: validators[1], + ParentHash: b10.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + b10.Hash: struct{}{}, + b00.Hash: struct{}{}, + }, + } + b01 := &types.Block{ + ProposerID: validators[0], + ParentHash: b00.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + b00.Hash: struct{}{}, + b11.Hash: struct{}{}, + }, + } + b21 := &types.Block{ + ProposerID: validators[2], + ParentHash: b20.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + b20.Hash: struct{}{}, + b01.Hash: struct{}{}, + }, + } + b31 := &types.Block{ + ProposerID: validators[3], + ParentHash: b30.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + b30.Hash: struct{}{}, + b21.Hash: struct{}{}, + }, + } + b02 := &types.Block{ + ProposerID: validators[0], + ParentHash: b01.Hash, + Hash: common.NewRandomHash(), + Height: 2, + Acks: map[common.Hash]struct{}{ + b01.Hash: struct{}{}, + b21.Hash: struct{}{}, + }, + } + b12 := &types.Block{ + ProposerID: validators[1], + ParentHash: b11.Hash, + Hash: common.NewRandomHash(), + Height: 2, + Acks: map[common.Hash]struct{}{ + b11.Hash: struct{}{}, + b21.Hash: struct{}{}, + }, + } + b32 := &types.Block{ + ProposerID: validators[3], + ParentHash: b31.Hash, + Hash: common.NewRandomHash(), + Height: 2, + Acks: map[common.Hash]struct{}{ + b31.Hash: struct{}{}, + }, + } + b22 := &types.Block{ + ProposerID: validators[2], + ParentHash: b21.Hash, + 
Hash: common.NewRandomHash(), + Height: 2, + Acks: map[common.Hash]struct{}{ + b21.Hash: struct{}{}, + b32.Hash: struct{}{}, + }, + } + b23 := &types.Block{ + ProposerID: validators[2], + ParentHash: b22.Hash, + Hash: common.NewRandomHash(), + Height: 3, + Acks: map[common.Hash]struct{}{ + b22.Hash: struct{}{}, + }, + } + b03 := &types.Block{ + ProposerID: validators[0], + ParentHash: b02.Hash, + Hash: common.NewRandomHash(), + Height: 3, + Acks: map[common.Hash]struct{}{ + b02.Hash: struct{}{}, + b22.Hash: struct{}{}, + }, + } + b13 := &types.Block{ + ProposerID: validators[1], + ParentHash: b12.Hash, + Hash: common.NewRandomHash(), + Height: 3, + Acks: map[common.Hash]struct{}{ + b12.Hash: struct{}{}, + b22.Hash: struct{}{}, + }, + } + b14 := &types.Block{ + ProposerID: validators[1], + ParentHash: b13.Hash, + Hash: common.NewRandomHash(), + Height: 4, + Acks: map[common.Hash]struct{}{ + b13.Hash: struct{}{}, + }, + } + b41 := &types.Block{ + ProposerID: validators[4], + ParentHash: b40.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + b40.Hash: struct{}{}, + }, + } + b42 := &types.Block{ + ProposerID: validators[4], + ParentHash: b41.Hash, + Hash: common.NewRandomHash(), + Height: 2, + Acks: map[common.Hash]struct{}{ + b41.Hash: struct{}{}, + }, + } + + s.checkNotDeliver(to, b00) + s.checkNotDeliver(to, b10) + s.checkNotDeliver(to, b11) + s.checkNotDeliver(to, b01) + s.checkNotDeliver(to, b20) + s.checkNotDeliver(to, b30) + s.checkNotDeliver(to, b21) + s.checkNotDeliver(to, b31) + s.checkNotDeliver(to, b32) + s.checkNotDeliver(to, b22) + s.checkNotDeliver(to, b12) + + // Make sure 'acked' for current precedings is correct. 
+ acked := to.acked[b00.Hash] + s.Require().NotNil(acked) + s.Len(acked, 7) + s.Contains(acked, b01.Hash) + s.Contains(acked, b11.Hash) + s.Contains(acked, b12.Hash) + s.Contains(acked, b21.Hash) + s.Contains(acked, b22.Hash) + s.Contains(acked, b31.Hash) + s.Contains(acked, b32.Hash) + + acked = to.acked[b10.Hash] + s.Require().NotNil(acked) + s.Len(acked, 9) + s.Contains(acked, b01.Hash) + s.Contains(acked, b11.Hash) + s.Contains(acked, b12.Hash) + s.Contains(acked, b20.Hash) + s.Contains(acked, b21.Hash) + s.Contains(acked, b22.Hash) + s.Contains(acked, b30.Hash) + s.Contains(acked, b31.Hash) + s.Contains(acked, b32.Hash) + + // Make sure there are 2 candidates. + s.Require().Len(to.candidateAckingStatusVectors, 2) + + // Check b00's height vector. + vec := to.candidateAckingStatusVectors[b00.Hash] + s.Require().NotNil(vec) + s.NotContains(vec, validators[4]) + s.Equal(vec[validators[0]].minHeight, b00.Height) + s.Equal(vec[validators[0]].count, uint64(2)) + s.Equal(vec[validators[1]].minHeight, b11.Height) + s.Equal(vec[validators[1]].count, uint64(2)) + s.Equal(vec[validators[2]].minHeight, b21.Height) + s.Equal(vec[validators[2]].count, uint64(2)) + s.Equal(vec[validators[3]].minHeight, b31.Height) + s.Equal(vec[validators[3]].count, uint64(2)) + + // Check b10's height vector. + vec = to.candidateAckingStatusVectors[b10.Hash] + s.Require().NotNil(vec) + s.NotContains(vec, validators[4]) + s.Equal(vec[validators[0]].minHeight, b01.Height) + s.Equal(vec[validators[0]].count, uint64(1)) + s.Equal(vec[validators[1]].minHeight, b10.Height) + s.Equal(vec[validators[1]].count, uint64(3)) + s.Equal(vec[validators[2]].minHeight, b20.Height) + s.Equal(vec[validators[2]].count, uint64(3)) + s.Equal(vec[validators[3]].minHeight, b30.Height) + s.Equal(vec[validators[3]].count, uint64(3)) + + // Check the first deliver. 
+ hashes, early, err := to.processBlock(b02) + s.True(early) + s.Nil(err) + expected := common.Hashes{b00.Hash, b10.Hash} + sort.Sort(expected) + s.Equal(hashes, expected) + + // Make sure b00, b10 are removed from current working set. + s.checkNotInWorkingSet(to, b00) + s.checkNotInWorkingSet(to, b10) + + // Check if candidates of next round are picked correctly. + s.Len(to.candidateAckingStatusVectors, 2) + + // Check b01's height vector. + vec = to.candidateAckingStatusVectors[b11.Hash] + s.Require().NotNil(vec) + s.NotContains(vec, validators[4]) + s.Equal(vec[validators[0]].minHeight, b01.Height) + s.Equal(vec[validators[0]].count, uint64(2)) + s.Equal(vec[validators[1]].minHeight, b11.Height) + s.Equal(vec[validators[1]].count, uint64(2)) + s.Equal(vec[validators[2]].minHeight, b21.Height) + s.Equal(vec[validators[2]].count, uint64(2)) + s.Equal(vec[validators[3]].minHeight, b11.Height) + s.Equal(vec[validators[3]].count, uint64(2)) + + // Check b20's height vector. + vec = to.candidateAckingStatusVectors[b20.Hash] + s.Require().NotNil(vec) + s.NotContains(vec, validators[4]) + s.Equal(vec[validators[0]].minHeight, b02.Height) + s.Equal(vec[validators[0]].count, uint64(1)) + s.Equal(vec[validators[1]].minHeight, b12.Height) + s.Equal(vec[validators[1]].count, uint64(1)) + s.Equal(vec[validators[2]].minHeight, b20.Height) + s.Equal(vec[validators[2]].count, uint64(3)) + s.Equal(vec[validators[3]].minHeight, b30.Height) + s.Equal(vec[validators[3]].count, uint64(3)) + + s.checkNotDeliver(to, b13) + + // Check the second deliver. + hashes, early, err = to.processBlock(b03) + s.True(early) + s.Nil(err) + expected = common.Hashes{b11.Hash, b20.Hash} + sort.Sort(expected) + s.Equal(hashes, expected) + + // Make sure b11, b20 are removed from current working set. + s.checkNotInWorkingSet(to, b11) + s.checkNotInWorkingSet(to, b20) + + // Add b40, b41, b42 to pending set. 
+ s.checkNotDeliver(to, b40) + s.checkNotDeliver(to, b41) + s.checkNotDeliver(to, b42) + s.checkNotDeliver(to, b14) + + // Make sure b01, b30, b40 are candidate in next round. + s.Len(to.candidateAckingStatusVectors, 3) + vec = to.candidateAckingStatusVectors[b01.Hash] + s.Require().NotNil(vec) + s.NotContains(vec, validators[4]) + s.Equal(vec[validators[0]].minHeight, b01.Height) + s.Equal(vec[validators[0]].count, uint64(3)) + s.Equal(vec[validators[1]].minHeight, b12.Height) + s.Equal(vec[validators[1]].count, uint64(3)) + s.Equal(vec[validators[2]].minHeight, b21.Height) + s.Equal(vec[validators[2]].count, uint64(2)) + s.Equal(vec[validators[3]].minHeight, b31.Height) + s.Equal(vec[validators[3]].count, uint64(2)) + + vec = to.candidateAckingStatusVectors[b30.Hash] + s.Require().NotNil(vec) + s.NotContains(vec, validators[4]) + s.Equal(vec[validators[0]].minHeight, b03.Height) + s.Equal(vec[validators[0]].count, uint64(1)) + s.Equal(vec[validators[1]].minHeight, b13.Height) + s.Equal(vec[validators[1]].count, uint64(2)) + s.Equal(vec[validators[2]].minHeight, b22.Height) + s.Equal(vec[validators[2]].count, uint64(1)) + s.Equal(vec[validators[3]].minHeight, b30.Height) + s.Equal(vec[validators[3]].count, uint64(3)) + + vec = to.candidateAckingStatusVectors[b40.Hash] + s.Require().NotNil(vec) + s.NotContains(vec, validators[0]) + s.NotContains(vec, validators[1]) + s.NotContains(vec, validators[2]) + s.NotContains(vec, validators[3]) + s.Equal(vec[validators[4]].minHeight, b40.Height) + s.Equal(vec[validators[4]].count, uint64(3)) + + // Make 'Acking Node Set' contains blocks from all validators, + // this should trigger not-early deliver. 
+ hashes, early, err = to.processBlock(b23) + s.False(early) + s.Nil(err) + expected = common.Hashes{b01.Hash, b30.Hash} + sort.Sort(expected) + s.Equal(expected, hashes) + + // Make sure b01, b30 not in working set + s.checkNotInWorkingSet(to, b01) + s.checkNotInWorkingSet(to, b30) + + // Make sure b21, b40 are candidates of next round. + s.Contains(to.candidateAckingStatusVectors, b21.Hash) + s.Contains(to.candidateAckingStatusVectors, b40.Hash) +} + +func (s *TotalOrderingTestSuite) TestBasicCaseForK0() { + // This is a relatively simple test for K=0. + // + // 0 1 2 3 4 + // ------------------- + // . . . . . + // . . . . . + // o o o <- o <- o Height: 1 + // | \ | \ | | + // v v v v + // o o o <- o Height: 0 + to := newTotalOrdering(0, 3, 5) + validators := s.generateValidatorIDs(5) + + b00 := s.genRootBlock(validators[0], map[common.Hash]struct{}{}) + b10 := s.genRootBlock(validators[1], map[common.Hash]struct{}{}) + b20 := s.genRootBlock(validators[2], map[common.Hash]struct{}{}) + b30 := s.genRootBlock(validators[3], map[common.Hash]struct{}{ + b20.Hash: struct{}{}, + }) + b01 := &types.Block{ + ProposerID: validators[0], + ParentHash: b00.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + b00.Hash: struct{}{}, + b10.Hash: struct{}{}, + }, + } + b11 := &types.Block{ + ProposerID: validators[1], + ParentHash: b10.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + b10.Hash: struct{}{}, + b20.Hash: struct{}{}, + }, + } + b21 := &types.Block{ + ProposerID: validators[2], + ParentHash: b20.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + b20.Hash: struct{}{}, + }, + } + b31 := &types.Block{ + ProposerID: validators[3], + ParentHash: b30.Hash, + Hash: common.NewRandomHash(), + Height: 1, + Acks: map[common.Hash]struct{}{ + b21.Hash: struct{}{}, + b30.Hash: struct{}{}, + }, + } + b40 := s.genRootBlock(validators[4], map[common.Hash]struct{}{ + b31.Hash: 
struct{}{}, + }) + + s.checkNotDeliver(to, b00) + s.checkNotDeliver(to, b10) + s.checkNotDeliver(to, b20) + s.checkNotDeliver(to, b30) + s.checkNotDeliver(to, b01) + s.checkNotDeliver(to, b11) + s.checkNotDeliver(to, b21) + s.checkNotDeliver(to, b31) + + // Check status before delivering. + vec := to.candidateAckingStatusVectors[b00.Hash] + s.Require().NotNil(vec) + s.Len(vec, 1) + s.Equal(vec[validators[0]].minHeight, b00.Height) + s.Equal(vec[validators[0]].count, uint64(2)) + + vec = to.candidateAckingStatusVectors[b10.Hash] + s.Require().NotNil(vec) + s.Len(vec, 2) + s.Equal(vec[validators[0]].minHeight, b01.Height) + s.Equal(vec[validators[0]].count, uint64(1)) + s.Equal(vec[validators[1]].minHeight, b10.Height) + s.Equal(vec[validators[1]].count, uint64(2)) + + vec = to.candidateAckingStatusVectors[b20.Hash] + s.Require().NotNil(vec) + s.Len(vec, 3) + s.Equal(vec[validators[1]].minHeight, b11.Height) + s.Equal(vec[validators[1]].count, uint64(1)) + s.Equal(vec[validators[2]].minHeight, b20.Height) + s.Equal(vec[validators[2]].count, uint64(2)) + s.Equal(vec[validators[3]].minHeight, b30.Height) + s.Equal(vec[validators[3]].count, uint64(2)) + + // This new block should trigger non-early deliver. + hashes, early, err := to.processBlock(b40) + s.False(early) + s.Nil(err) + expected := common.Hashes{b20.Hash} + sort.Sort(expected) + s.Equal(expected, hashes) + + // Make sure b20 is no long existing in working set. + s.checkNotInWorkingSet(to, b20) + + // Make sure b10, b30 are candidates for next round. + s.Contains(to.candidateAckingStatusVectors, b10.Hash) + s.Contains(to.candidateAckingStatusVectors, b30.Hash) +} + +func (s *TotalOrderingTestSuite) baseTestRandomlyGeneratedBlocks( + totalOrderingConstructor func() *totalOrdering, + revealer test.Revealer, + repeat int) { + + // TODO (mission): make this part run concurrently. 
+ revealingSequence := map[string]struct{}{} + orderingSequence := map[string]struct{}{} + for i := 0; i < repeat; i++ { + revealed := "" + ordered := "" + revealer.Reset() + to := totalOrderingConstructor() + for { + // Reveal next block. + b, err := revealer.Next() + if err != nil { + if err == blockdb.ErrIterationFinished { + err = nil + break + } + } + s.Require().Nil(err) + revealed += b.Hash.String() + "," + + // Perform total ordering. + hashes, _, err := to.processBlock(&b) + s.Require().Nil(err) + for _, h := range hashes { + ordered += h.String() + "," + } + } + revealingSequence[revealed] = struct{}{} + orderingSequence[ordered] = struct{}{} + } + + // Make sure we test at least two different + // revealing sequence. + s.True(len(revealingSequence) > 1) + // Make sure all ordering are equal or prefixed + // to another one. + for orderFrom := range orderingSequence { + for orderTo := range orderingSequence { + if orderFrom == orderTo { + continue + } + ok := strings.HasPrefix(orderFrom, orderTo) || + strings.HasPrefix(orderTo, orderFrom) + s.True(ok) + } + } +} + +func (s *TotalOrderingTestSuite) TestRandomlyGeneratedBlocks() { + var ( + validatorCount = 19 + blockCount = 50 + phi uint64 = 10 + repeat = 10 + ) + + // Prepare a randomly genearated blocks. + db, err := blockdb.NewMemBackedBlockDB("test-total-ordering-random.blockdb") + s.Require().Nil(err) + defer func() { + // If the test fails, keep the block database for troubleshooting. + if s.T().Failed() { + s.Nil(db.Close()) + } + }() + + gen := test.NewBlocksGenerator(nil) + s.Require().Nil(gen.Generate(validatorCount, blockCount, nil, db)) + iter, err := db.GetAll() + s.Require().Nil(err) + // Setup a revealer that would reveal blocks forming + // valid DAGs. + revealer, err := test.NewRandomDAGRevealer(iter) + s.Require().Nil(err) + + // Test for K=0. 
+	constructor := func() *totalOrdering {
+		return newTotalOrdering(0, phi, uint64(validatorCount))
+	}
+	s.baseTestRandomlyGeneratedBlocks(constructor, revealer, repeat)
+	// Test for K=1,
+	constructor = func() *totalOrdering {
+		return newTotalOrdering(1, phi, uint64(validatorCount))
+	}
+	s.baseTestRandomlyGeneratedBlocks(constructor, revealer, repeat)
+	// Test for K=2,
+	constructor = func() *totalOrdering {
+		return newTotalOrdering(2, phi, uint64(validatorCount))
+	}
+	s.baseTestRandomlyGeneratedBlocks(constructor, revealer, repeat)
+	// Test for K=3,
+	constructor = func() *totalOrdering {
+		return newTotalOrdering(3, phi, uint64(validatorCount))
+	}
+	s.baseTestRandomlyGeneratedBlocks(constructor, revealer, repeat)
+}
+
+func TestTotalOrdering(t *testing.T) {
+	suite.Run(t, new(TotalOrderingTestSuite))
+}
-- 
cgit v1.2.3