author    Mission Liao <mission.liao@dexon.org>    2018-08-08 19:32:20 +0800
committer Wei-Ning Huang <aitjcize@gmail.com>      2018-08-08 19:32:20 +0800
commit    295c7b5efbc36f59e3ae8d10bc3abc3a5d17e785 (patch)
tree      bdb93aaa638ec890596a18490505a45204dc2cc7 /core
parent    a418ea95c0f5afb50cbb78aedecc68373353d06e (diff)
core: Add Consensus to replace core.Blocklattice (#35)
* Make Sequencer return a slice of blocks.
* Fix naming issue: the function 'getHeightVecto' would return ackingStatusVector.
* Fix comment error.
* Add methods to collect info when proposing blocks.
* Add test.App.
* Add test.Gov.
* Move this type to core.types to avoid a cyclic import.
* Add core.Consensus.
* Move getMedianTime, interpoTime to util: these functions do not depend on members of core.consensusTimestamp and are required when testing core.Consensus.
* Make sure types.Block.Clone copies critical fields.
* Remove core.blocklattice.
* Define 'infinity' in core/total-ordering; this definition originally lived in core/blocklattice.
* Fix a bug when processing the same block twice.
* Integrate simulation with core.Consensus, which replaces core.Blocklattice.
* Fix the comment to use singular form.
* Move lock mechanism to sub modules.
* phi should be 2*fmax+1.
* Fixup: should abort when the validator is added.
* Fix for new block fields.
* Fix the bug that made the total ordering sequence wrong.
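One bullet above fixes the total ordering threshold; as a minimal sketch of the arithmetic behind it (n = 4 matches the validator count used in this commit's tests, and the variable names here are illustrative, not from the diff):

package main

import "fmt"

func main() {
	// With n validators and at most fmax = (n-1)/3 of them Byzantine,
	// this commit sets the total ordering threshold phi to 2*fmax+1
	// (compare NewConsensus and the deleted AddValidator below).
	n := 4
	fmax := (n - 1) / 3
	phi := 2*fmax + 1
	fmt.Println(fmax, phi) // prints: 1 3
}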
Diffstat (limited to 'core')
-rw-r--r--core/application.go6
-rw-r--r--core/blocklattice.go566
-rw-r--r--core/blocklattice_test.go531
-rw-r--r--core/consensus-timestamp.go51
-rw-r--r--core/consensus.go130
-rw-r--r--core/consensus_test.go305
-rw-r--r--core/governance.go17
-rw-r--r--core/reliable-broadcast.go83
-rw-r--r--core/reliable-broadcast_test.go98
-rw-r--r--core/test/app.go72
-rw-r--r--core/test/gov.go63
-rw-r--r--core/total-ordering.go29
-rw-r--r--core/total-ordering_test.go41
-rw-r--r--core/types/block.go10
-rw-r--r--core/types/membership-event.go16
-rw-r--r--core/utils.go51
16 files changed, 868 insertions, 1201 deletions
diff --git a/core/application.go b/core/application.go
index edeb686..763954d 100644
--- a/core/application.go
+++ b/core/application.go
@@ -27,8 +27,10 @@ import (
// Application describes the application interface that interacts with DEXON
// consensus core.
type Application interface {
- // TotalOrderingDeliver is called when the total ordering algorithm deliver
- // a set of block.
+ // StronglyAcked is called when a block is strongly acked.
+ StronglyAcked(blockHash common.Hash)
+
+ // TotalOrderingDeliver is called when the total ordering algorithm deliver
+ // a set of block.
TotalOrderingDeliver(blocks []*types.Block, early bool)
// DeliverBlock is called when a block is add to the compaction chain.
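For orientation, the interface after this hunk can be satisfied by a no-op stub. A minimal compile-only sketch, assuming the three methods visible in this diff are the full method set (nopApp is illustrative, not part of the commit):

package main

import (
	"time"

	"github.com/dexon-foundation/dexon-consensus-core/common"
	"github.com/dexon-foundation/dexon-consensus-core/core/types"
)

// nopApp is a do-nothing Application implementation.
type nopApp struct{}

// StronglyAcked fires when a block becomes strongly acked.
func (nopApp) StronglyAcked(blockHash common.Hash) {}

// TotalOrderingDeliver receives each batch emitted by total ordering.
func (nopApp) TotalOrderingDeliver(blocks []*types.Block, early bool) {}

// DeliverBlock receives a block hash with its consensus timestamp.
func (nopApp) DeliverBlock(blockHash common.Hash, timestamp time.Time) {}

func main() {}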
diff --git a/core/blocklattice.go b/core/blocklattice.go
deleted file mode 100644
index 0f5fc11..0000000
--- a/core/blocklattice.go
+++ /dev/null
@@ -1,566 +0,0 @@
-// Copyright 2018 The dexon-consensus-core Authors
-// This file is part of the dexon-consensus-core library.
-//
-// The dexon-consensus-core library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus-core library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus-core library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "fmt"
- "math"
- "sort"
- "sync"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus-core/blockdb"
- "github.com/dexon-foundation/dexon-consensus-core/common"
- "github.com/dexon-foundation/dexon-consensus-core/core/types"
-)
-
-const (
- epsilon = 1 * time.Microsecond
- tdelay = 500 * time.Millisecond
-)
-
-const (
- infinity uint64 = math.MaxUint64
-)
-
-// BlockLattice represents the local view of a single validator.
-//
-// blockDB stores blocks that are final. blocks stores blocks that are in ToTo
-// Status.
-type BlockLattice struct {
- owner types.ValidatorID
- ValidatorSet map[types.ValidatorID]struct{}
- blocks map[common.Hash]*types.Block
-
- fmax int
- phi int
- lastSeenTimestamps map[types.ValidatorID]time.Time
-
- blockDB blockdb.BlockDatabase
- app Application
- mutex sync.Mutex
-
- // Reliable Broadcast.
- waitingSet map[common.Hash]*types.Block
- stronglyAckedSet map[common.Hash]*types.Block
- ackCandidateSet map[types.ValidatorID]*types.Block
- restricted map[types.ValidatorID]struct{}
-
- // Total Ordering.
- pendingSet map[common.Hash]*types.Block
- candidateSet map[common.Hash]*types.Block
- ABS map[common.Hash]map[types.ValidatorID]uint64
- AHV map[common.Hash]map[types.ValidatorID]uint64
-
- // Timestamp.
- timestampEngine consensusTimestamp
-}
-
-// NewBlockLattice returns a new empty BlockLattice instance.
-func NewBlockLattice(
- db blockdb.BlockDatabase,
- app Application) *BlockLattice {
- return &BlockLattice{
- ValidatorSet: make(map[types.ValidatorID]struct{}),
- blocks: make(map[common.Hash]*types.Block),
- lastSeenTimestamps: make(map[types.ValidatorID]time.Time),
- blockDB: db,
- app: app,
- waitingSet: make(map[common.Hash]*types.Block),
- stronglyAckedSet: make(map[common.Hash]*types.Block),
- ackCandidateSet: make(map[types.ValidatorID]*types.Block),
- restricted: make(map[types.ValidatorID]struct{}),
- pendingSet: make(map[common.Hash]*types.Block),
- candidateSet: make(map[common.Hash]*types.Block),
- ABS: make(map[common.Hash]map[types.ValidatorID]uint64),
- AHV: make(map[common.Hash]map[types.ValidatorID]uint64),
- timestampEngine: *newConsensusTimestamp(),
- }
-}
-
-// AddValidator adds a validator into the lattice.
-func (l *BlockLattice) AddValidator(
- id types.ValidatorID, genesis *types.Block) {
-
- l.ValidatorSet[id] = struct{}{}
- l.fmax = (len(l.ValidatorSet) - 1) / 3
- l.phi = 2*l.fmax + 1
-
- // TODO: We should not make genesis blocks 'final' directly.
- genesis.Status = types.BlockStatusFinal
- l.blockDB.Put(*genesis)
-}
-
-// SetOwner sets the blocklattice's owner, which is the localview of whom.
-func (l *BlockLattice) SetOwner(id types.ValidatorID) {
- if _, exists := l.ValidatorSet[id]; !exists {
- panic("SetOnwer: owner is not a valid validator")
- }
- l.owner = id
-}
-
-// getBlock returns a block no matter where it is located at (either local
-// blocks cache or blockDB).
-func (l *BlockLattice) getBlock(hash common.Hash) *types.Block {
- if b, exists := l.blocks[hash]; exists {
- return b
- }
- if b, err := l.blockDB.Get(hash); err == nil {
- return &b
- }
- return nil
-}
-
-// processAcks updates the ack count of the blocks that is acked by *b*.
-func (l *BlockLattice) processAcks(b *types.Block) {
- // Always acks it's own parent.
- b.Acks[b.ParentHash] = struct{}{}
-
- for ackBlockHash := range b.Acks {
- ackedBlock, ok := l.blocks[ackBlockHash]
- if !ok {
- // Acks a finalized block, don't need to increase it's count.
- if l.blockDB.Has(ackBlockHash) {
- continue
- }
- panic(fmt.Sprintf("failed to get block: %v", ackBlockHash))
- }
-
- // Populate Ackeds.
- if ackedBlock.Ackeds == nil {
- ackedBlock.Ackeds = make(map[common.Hash]struct{})
- }
- ackedBlock.Ackeds[b.Hash] = struct{}{}
-
- bp := ackedBlock
- for bp != nil && bp.Status < types.BlockStatusAcked {
- if bp.Ackeds == nil {
- bp.Ackeds = make(map[common.Hash]struct{})
- }
- if _, exists := bp.Ackeds[b.Hash]; !exists {
- bp.Ackeds[b.Hash] = struct{}{}
- }
-
- // Calculate acked by nodes.
- ackedByNodes := make(map[types.ValidatorID]struct{})
- for hash := range bp.Ackeds {
- bp := l.getBlock(hash)
- ackedByNodes[bp.ProposerID] = struct{}{}
- }
-
- if len(ackedByNodes) > 2*l.fmax {
- bp.Status = types.BlockStatusAcked
- l.stronglyAckedSet[bp.Hash] = bp
- }
- bp = l.getBlock(bp.ParentHash)
- }
-
- var populateAckBy func(bx, target *types.Block)
- populateAckBy = func(bx, target *types.Block) {
- for ab := range bx.Acks {
- abb := l.getBlock(ab)
- if abb.Status < types.BlockStatusFinal {
- if abb.Ackeds == nil {
- abb.Ackeds = make(map[common.Hash]struct{})
- }
- abb.Ackeds[target.Hash] = struct{}{}
- populateAckBy(abb, target)
- }
- }
- }
- populateAckBy(ackedBlock, b)
- }
-}
-
-// updateTimestamps updates the last seen timestamp of the lattice local view.
-func (l *BlockLattice) updateTimestamps(b *types.Block) {
- q := b.ProposerID
- l.lastSeenTimestamps[q] = b.Timestamps[q].Add(epsilon)
- for vid := range l.ValidatorSet {
- if b.Timestamps[vid].After(l.lastSeenTimestamps[vid]) {
- l.lastSeenTimestamps[vid] = b.Timestamps[vid]
- }
- }
-}
-
-func (l *BlockLattice) recievedAndNotInWaitingSet(hash common.Hash) bool {
- if _, exists := l.blocks[hash]; !exists {
- if !l.blockDB.Has(hash) {
- return false
- }
- }
- return true
-}
-
-func (l *BlockLattice) isValidAckCandidate(b *types.Block) bool {
- // Block proposer is not restricted.
- if _, isRestricted := l.restricted[b.ProposerID]; isRestricted {
- return false
- }
-
- hasHistoryBeenRecieved := func(hash common.Hash) bool {
- bx := l.getBlock(hash)
- if bx == nil {
- return false
- }
-
- for {
- bx = l.getBlock(bx.ParentHash)
- if bx == nil {
- return false
- }
- if bx.Status == types.BlockStatusFinal {
- return true
- }
- }
- }
-
- // Previous block is recieved.
- if !hasHistoryBeenRecieved(b.ParentHash) {
- return false
- }
-
- // All acked blocks are recieved.
- for ackedBlockHash := range b.Acks {
- if !hasHistoryBeenRecieved(ackedBlockHash) {
- return false
- }
- }
-
- return true
-}
-
-// ProcessBlock implements the recieving part of DEXON reliable broadcast.
-func (l *BlockLattice) ProcessBlock(b *types.Block, runTotal ...bool) {
- l.mutex.Lock()
- defer l.mutex.Unlock()
-
- if b.Hash == b.ParentHash {
- if _, exists := l.ValidatorSet[b.ProposerID]; !exists {
- l.AddValidator(b.ProposerID, b)
- }
- }
-
- if l.getBlock(b.Hash) != nil {
- return
- }
-
- // TODO(w): drop if it does not pass sanity check.
-
- // Store into local blocks cache.
- l.blocks[b.Hash] = b
-
- if l.isValidAckCandidate(b) {
- l.ackCandidateSet[b.ProposerID] = b
- l.processAcks(b)
- } else {
- l.waitingSet[b.Hash] = b
- }
-
- // Scan the rest of waiting set for valid candidate.
- for bpHash, bp := range l.waitingSet {
- if l.isValidAckCandidate(bp) {
- l.ackCandidateSet[bp.ProposerID] = bp
- l.processAcks(bp)
- delete(l.waitingSet, bpHash)
- }
- }
-
-IterateStronglyAckedSet:
- for bpHash, bp := range l.stronglyAckedSet {
- for ackBlockHash := range bp.Acks {
- bx := l.getBlock(ackBlockHash)
- if bx == nil || bx.Status < types.BlockStatusAcked {
- break IterateStronglyAckedSet
- }
- }
- bp.Status = types.BlockStatusOrdering
- l.pendingSet[bp.Hash] = bp
- delete(l.stronglyAckedSet, bpHash)
-
- if len(runTotal) > 0 && runTotal[0] {
- l.totalOrdering(bp)
- }
- }
-}
-
-// PrepareBlock prepare a block for broadcast.
-func (l *BlockLattice) PrepareBlock(b *types.Block) {
- l.mutex.Lock()
- defer l.mutex.Unlock()
-
- b.Acks = make(map[common.Hash]struct{})
- for _, bp := range l.ackCandidateSet {
- b.Acks[bp.Hash] = struct{}{}
- l.updateTimestamps(b)
- }
- l.lastSeenTimestamps[l.owner] = time.Now().UTC()
-
- b.Timestamps = make(map[types.ValidatorID]time.Time)
- for vID, ts := range l.lastSeenTimestamps {
- b.Timestamps[vID] = ts
- }
-
- //l.ProcessBlock(b)
- l.ackCandidateSet = make(map[types.ValidatorID]*types.Block)
-}
-
-// detectNack implements the NACK detection.
-func (l *BlockLattice) detectNack() {
-
-}
-
-func (l *BlockLattice) abs() map[types.ValidatorID]struct{} {
- abs := make(map[types.ValidatorID]struct{})
- for blockHash := range l.candidateSet {
- for x := range l.ABS[blockHash] {
- abs[x] = struct{}{}
- }
- }
- return abs
-}
-
-func (l *BlockLattice) calculateABSofBlock(b *types.Block) {
- // Calculate ABS of a block.
- l.ABS[b.Hash] = make(map[types.ValidatorID]uint64)
-
- var calculateABSRecursive func(target *types.Block)
-
- calculateABSRecursive = func(target *types.Block) {
- for hash := range target.Ackeds {
- ackedByBlock := l.getBlock(hash)
- if ackedByBlock.Status != types.BlockStatusOrdering {
- continue
- }
- v, exists := l.ABS[b.Hash][ackedByBlock.ProposerID]
- if !exists || ackedByBlock.Height < v {
- l.ABS[b.Hash][ackedByBlock.ProposerID] = ackedByBlock.Height
- }
- calculateABSRecursive(ackedByBlock)
- }
- }
-
- // ABS always include the block's proposer
- l.ABS[b.Hash][b.ProposerID] = b.Height
-
- calculateABSRecursive(b)
-}
-
-func (l *BlockLattice) calculateAHVofBlock(
- b *types.Block, globalMins map[types.ValidatorID]uint64) {
-
- // Calculate ABS of a block.
- l.AHV[b.Hash] = make(map[types.ValidatorID]uint64)
-
- for v := range l.ValidatorSet {
- gv, gExists := globalMins[v]
- lv, lExists := l.ABS[b.Hash][v]
-
- if !gExists {
- // Do nothing.
- } else if !lExists || lv > gv {
- l.AHV[b.Hash][v] = infinity
- } else {
- l.AHV[b.Hash][v] = gv
- }
- }
-}
-
-func (l *BlockLattice) updateABSAHV() {
- globalMins := make(map[types.ValidatorID]uint64)
-
- for _, block := range l.pendingSet {
- v, exists := globalMins[block.ProposerID]
- if !exists || block.Height < v {
- globalMins[block.ProposerID] = block.Height
- }
- }
-
- for _, block := range l.candidateSet {
- l.calculateABSofBlock(block)
- l.calculateAHVofBlock(block, globalMins)
- }
-}
-
-// totalOrdering implements the DEXON total ordering algorithm.
-func (l *BlockLattice) totalOrdering(b *types.Block) {
- acksOnlyFinal := true
- for ackedBlockHash := range b.Acks {
- bp := l.getBlock(ackedBlockHash)
- if bp.Status != types.BlockStatusFinal {
- acksOnlyFinal = false
- break
- }
- }
-
- if acksOnlyFinal {
- l.candidateSet[b.Hash] = b
- }
-
- // Update ABS and AHV.
- l.updateABSAHV()
- abs := l.abs()
-
- // Calculate preceding set.
- precedingSet := make(map[common.Hash]*types.Block)
-
- // Grade(b', b) = 0 for all b' in candidate set.
- for targetHash, targetBlock := range l.candidateSet {
- winAll := true
- for otherHash := range l.candidateSet {
- if targetHash.Equal(otherHash) {
- continue
- }
-
- lose := 0
- for vID, targetAHV := range l.AHV[targetHash] {
- if otherAHV, exists := l.AHV[otherHash][vID]; exists {
- if otherAHV < targetAHV {
- lose++
- }
- } else if otherAHV != infinity {
- lose++
- }
- }
-
- if lose >= l.phi {
- winAll = false
- break
- } else if lose < l.phi-len(l.ValidatorSet)+len(abs) {
- // Do nothing.
- } else {
- winAll = false
- break
- }
- }
-
- if winAll {
- precedingSet[targetHash] = targetBlock
- }
- }
-
- // Internal stability.
- winned := false
- for hash := range l.candidateSet {
- if _, exists := precedingSet[hash]; exists {
- continue
- }
-
- // Grade(b, b') = 1
- for precedingHash := range precedingSet {
- win := 0
- for vID, precedingAHV := range l.AHV[precedingHash] {
- if candidateAHV, exists := l.AHV[hash][vID]; exists {
- if precedingAHV < candidateAHV {
- win++
- }
- } else if precedingAHV != infinity {
- win++
- }
- }
- if win > l.phi {
- winned = true
- break
- }
- }
- if !winned {
- return
- }
- }
-
- earlyDelivery := false
-
- // Does not satisfy External stability a.
- if len(abs) < len(l.ValidatorSet) {
- earlyDelivery = true
-
- // External stability b.
- extBSatisfied := false
- for precedingHash := range precedingSet {
- count := 0
- for _, ahv := range l.AHV[precedingHash] {
- if ahv != infinity {
- count++
- }
- }
- if count > l.phi {
- extBSatisfied = true
- break
- }
- }
- if !extBSatisfied {
- return
- }
- for precedingHash := range precedingSet {
- if len(l.ABS[precedingHash]) < len(l.ValidatorSet)-l.phi {
- extBSatisfied = false
- }
- }
- if !extBSatisfied {
- return
- }
- }
-
- var output []*types.Block
- for hash, x := range precedingSet {
- output = append(output, x)
- x.Status = types.BlockStatusFinal
-
- // Remove from pending set and candidate set.
- delete(l.pendingSet, hash)
- delete(l.candidateSet, hash)
-
- // Delete ABS and AHV
- delete(l.ABS, hash)
- delete(l.AHV, hash)
-
- // Store output blocks into blockDB.
- l.blockDB.Put(*x)
- delete(l.blocks, hash)
- }
- sort.Sort(types.ByHash(output))
-
- if len(output) > 0 {
- l.app.TotalOrderingDeliver(output, earlyDelivery)
- blocksReady, _, err := l.timestampEngine.processBlocks(output)
- if err != nil && err != ErrEmptyTimestamps {
- panic(err)
- }
- for _, block := range blocksReady {
- l.app.DeliverBlock(block.Hash, block.ConsensusInfo.Timestamp)
- }
- }
-
- // Rescan pending blocks to add into candidate set.
- for hash, block := range l.pendingSet {
- if _, exists := l.candidateSet[hash]; exists {
- continue
- }
- acksOnlyFinal := true
- for ackedBlockHash := range block.Acks {
- bp := l.getBlock(ackedBlockHash)
- if bp.Status != types.BlockStatusFinal {
- acksOnlyFinal = false
- break
- }
- }
- if acksOnlyFinal {
- l.candidateSet[hash] = block
- }
- }
-}
diff --git a/core/blocklattice_test.go b/core/blocklattice_test.go
deleted file mode 100644
index 835c35e..0000000
--- a/core/blocklattice_test.go
+++ /dev/null
@@ -1,531 +0,0 @@
-// Copyright 2018 The dexon-consensus-core Authors
-// This file is part of the dexon-consensus-core library.
-//
-// The dexon-consensus-core library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus-core library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus-core library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "sort"
- "testing"
- "time"
-
- "github.com/stretchr/testify/suite"
-
- "github.com/dexon-foundation/dexon-consensus-core/blockdb"
- "github.com/dexon-foundation/dexon-consensus-core/common"
- "github.com/dexon-foundation/dexon-consensus-core/core/types"
-)
-
-var lattice *BlockLattice
-var validators []types.ValidatorID
-var genesises []*types.Block
-
-var b01, b11, b21, b31,
- b02, b12, b22, b32,
- b03, b13, b23, b33 *types.Block
-
-// TestApp.
-type TestApp struct {
- Outputs []*types.Block
- Early bool
-}
-
-func (a *TestApp) TotalOrderingDeliver(blocks []*types.Block, early bool) {
- a.Outputs = append(a.Outputs, blocks...)
- a.Early = early
-}
-
-func (a *TestApp) DeliverBlock(blockHashes common.Hash, timestamp time.Time) {
-}
-
-func (a *TestApp) Clear() {
- a.Outputs = nil
- a.Early = false
-}
-
-type BlockLatticeTest struct {
- suite.Suite
-
- app *TestApp
-}
-
-func (s *BlockLatticeTest) SetupTest() {
- Debugf("--------------------------------------------" +
- "-------------------------\n")
-
- s.app = &TestApp{}
-
- db, err := blockdb.NewMemBackedBlockDB()
- s.Require().Nil(err)
- lattice = NewBlockLattice(db, s.app)
-
- for i := 0; i < 4; i++ {
- validators = append(validators,
- types.ValidatorID{Hash: common.NewRandomHash()})
- Debugf("V%d: %s\n", i, validators[i])
- }
- Debugf("\n")
- for i := 0; i < 4; i++ {
- hash := common.NewRandomHash()
- genesises = append(genesises, &types.Block{
- ProposerID: validators[i],
- ParentHash: hash,
- Hash: hash,
- Height: 0,
- Acks: map[common.Hash]struct{}{},
- })
-
- Debugf("G%d: %s\n", i, hash)
- lattice.AddValidator(validators[i], genesises[i])
- }
-
- // Make lattice validator[0]'s local view.
- lattice.SetOwner(validators[0])
-
- b01 = &types.Block{
- ProposerID: validators[0],
- ParentHash: genesises[0].Hash,
- Hash: common.NewRandomHash(),
- Height: 1,
- Acks: map[common.Hash]struct{}{
- genesises[1].Hash: struct{}{},
- genesises[2].Hash: struct{}{},
- genesises[3].Hash: struct{}{},
- },
- }
-
- b11 = &types.Block{
- ProposerID: validators[1],
- ParentHash: genesises[1].Hash,
- Hash: common.NewRandomHash(),
- Height: 1,
- Acks: map[common.Hash]struct{}{
- b01.Hash: struct{}{},
- genesises[2].Hash: struct{}{},
- genesises[3].Hash: struct{}{},
- },
- }
- b21 = &types.Block{
- ProposerID: validators[2],
- ParentHash: genesises[2].Hash,
- Hash: common.NewRandomHash(),
- Height: 1,
- Acks: map[common.Hash]struct{}{
- b01.Hash: struct{}{},
- genesises[1].Hash: struct{}{},
- genesises[3].Hash: struct{}{},
- },
- }
- b31 = &types.Block{
- ProposerID: validators[3],
- ParentHash: genesises[3].Hash,
- Hash: common.NewRandomHash(),
- Height: 1,
- Acks: map[common.Hash]struct{}{
- b01.Hash: struct{}{},
- b11.Hash: struct{}{},
- genesises[2].Hash: struct{}{},
- },
- }
-
- b02 = &types.Block{
- ProposerID: validators[0],
- ParentHash: b01.Hash,
- Hash: common.NewRandomHash(),
- Height: 2,
- Acks: map[common.Hash]struct{}{
- b11.Hash: struct{}{},
- b21.Hash: struct{}{},
- b31.Hash: struct{}{},
- },
- }
- b12 = &types.Block{
- ProposerID: validators[1],
- ParentHash: b11.Hash,
- Hash: common.NewRandomHash(),
- Height: 2,
- Acks: map[common.Hash]struct{}{
- b21.Hash: struct{}{},
- b31.Hash: struct{}{},
- },
- }
- b22 = &types.Block{
- ProposerID: validators[2],
- ParentHash: b21.Hash,
- Hash: common.NewRandomHash(),
- Height: 2,
- Acks: map[common.Hash]struct{}{
- b02.Hash: struct{}{},
- b12.Hash: struct{}{},
- b31.Hash: struct{}{},
- },
- }
- b32 = &types.Block{
- ProposerID: validators[3],
- ParentHash: b31.Hash,
- Hash: common.NewRandomHash(),
- Height: 2,
- Acks: map[common.Hash]struct{}{
- b02.Hash: struct{}{},
- b12.Hash: struct{}{},
- b21.Hash: struct{}{},
- },
- }
-
- b03 = &types.Block{
- ProposerID: validators[0],
- ParentHash: b02.Hash,
- Hash: common.NewRandomHash(),
- Height: 3,
- Acks: map[common.Hash]struct{}{
- b12.Hash: struct{}{},
- b32.Hash: struct{}{},
- },
- }
- b13 = &types.Block{
- ProposerID: validators[1],
- ParentHash: b12.Hash,
- Hash: common.NewRandomHash(),
- Height: 3,
- Acks: map[common.Hash]struct{}{
- b02.Hash: struct{}{},
- b22.Hash: struct{}{},
- b32.Hash: struct{}{},
- },
- }
- b23 = &types.Block{
- ProposerID: validators[2],
- ParentHash: b22.Hash,
- Hash: common.NewRandomHash(),
- Height: 3,
- Acks: map[common.Hash]struct{}{
- b02.Hash: struct{}{},
- b12.Hash: struct{}{},
- b32.Hash: struct{}{},
- },
- }
- b33 = &types.Block{
- ProposerID: validators[3],
- ParentHash: b32.Hash,
- Hash: common.NewRandomHash(),
- Height: 3,
- Acks: map[common.Hash]struct{}{
- b02.Hash: struct{}{},
- b12.Hash: struct{}{},
- b22.Hash: struct{}{},
- },
- }
- Debugf("\n")
- Debugf("B01: %s\n", b01.Hash)
- Debugf("B11: %s\n", b11.Hash)
- Debugf("B21: %s\n", b21.Hash)
- Debugf("B31: %s\n", b31.Hash)
- Debugf("\n")
- Debugf("B02: %s\n", b02.Hash)
- Debugf("B12: %s\n", b12.Hash)
- Debugf("B22: %s\n", b22.Hash)
- Debugf("B32: %s\n", b32.Hash)
- Debugf("\n")
- Debugf("B03: %s\n", b03.Hash)
- Debugf("B13: %s\n", b13.Hash)
- Debugf("B23: %s\n", b23.Hash)
- Debugf("B33: %s\n", b33.Hash)
- Debugf("\n")
-}
-
-func (s *BlockLatticeTest) TestAckAndStatusTransition() {
- // Recieve Order:
- // B01 -> B12 -> B11 -> B21 -> B31 -> B02 -> B32 -> B22 -> B13 -> B33
- // -> B03 -> B23
-
- // B01
- lattice.ProcessBlock(b01)
-
- // Set status check.
- s.Require().Equal(0, len(lattice.waitingSet))
- s.Require().Equal(1, len(lattice.ackCandidateSet))
- s.Require().Equal(0, len(lattice.stronglyAckedSet))
- s.Require().Equal(0, len(lattice.pendingSet))
-
- s.Require().Equal(0, len(b01.Ackeds))
-
- // B12
- lattice.ProcessBlock(b12)
-
- // Set status check.
- s.Require().Equal(1, len(lattice.waitingSet))
- s.Require().Equal(1, len(lattice.ackCandidateSet))
- s.Require().Equal(0, len(lattice.stronglyAckedSet))
- s.Require().Equal(0, len(lattice.pendingSet))
-
- s.Require().NotNil(lattice.waitingSet[b12.Hash])
-
- // B11
- lattice.ProcessBlock(b11)
-
- // b01 is acked.
- s.Require().Equal(1, len(b01.Ackeds))
- s.Require().NotNil(b01.Ackeds[b11.Hash])
- // b11 indirect acks.
- s.Require().Equal(0, len(b11.Ackeds))
-
- // Set status check.
- s.Require().Equal(1, len(lattice.waitingSet))
- s.Require().Equal(2, len(lattice.ackCandidateSet))
- s.Require().Equal(0, len(lattice.stronglyAckedSet))
- s.Require().Equal(0, len(lattice.pendingSet))
-
- // B21
- lattice.ProcessBlock(b21)
-
- // Set status check.
- s.Require().Equal(1, len(lattice.waitingSet))
- s.Require().Equal(3, len(lattice.ackCandidateSet))
- s.Require().Equal(0, len(lattice.stronglyAckedSet))
- s.Require().Equal(0, len(lattice.pendingSet))
-
- // b01 is acked.
- s.Require().Equal(2, len(b01.Ackeds))
- s.Require().NotNil(b01.Ackeds[b21.Hash])
- // b21 indirect acks.
- s.Require().Equal(0, len(b21.Ackeds))
-
- // B31
- lattice.ProcessBlock(b31)
-
- // Set status check.
- s.Require().Equal(0, len(lattice.waitingSet))
- s.Require().Equal(4, len(lattice.ackCandidateSet))
- s.Require().Equal(0, len(lattice.stronglyAckedSet))
- s.Require().Equal(1, len(lattice.pendingSet))
-
- s.Require().NotNil(lattice.pendingSet[b01.Hash])
- s.Require().Equal(types.BlockStatusOrdering, b01.Status)
-
- // b01 is acked.
- s.Require().Equal(4, len(b01.Ackeds))
- s.Require().NotNil(b01.Ackeds[b31.Hash])
-
- // b11 is acked.
- s.Require().Equal(2, len(b11.Ackeds))
- s.Require().NotNil(b11.Ackeds[b31.Hash])
- s.Require().NotNil(b11.Ackeds[b12.Hash])
-
- // b31 indirect acks.
- s.Require().Equal(1, len(b31.Ackeds))
- s.Require().NotNil(b31.Ackeds[b12.Hash])
-
- // b21 & b31 is acked by b12 (which is previously in waiting set).
- s.Require().Equal(1, len(b21.Ackeds))
- s.Require().NotNil(b21.Ackeds[b12.Hash])
- s.Require().Equal(1, len(b31.Ackeds))
- s.Require().NotNil(b31.Ackeds[b12.Hash])
-
- // B02
- lattice.ProcessBlock(b02)
-
- // Set status check.
- s.Require().Equal(0, len(lattice.waitingSet))
- s.Require().Equal(4, len(lattice.ackCandidateSet))
- s.Require().Equal(0, len(lattice.stronglyAckedSet))
- s.Require().Equal(2, len(lattice.pendingSet))
-
- s.Require().NotNil(lattice.pendingSet[b01.Hash])
- s.Require().NotNil(lattice.pendingSet[b11.Hash])
-
- // b11 is acked.
- s.Require().Equal(3, len(b11.Ackeds))
- s.Require().NotNil(b11.Ackeds[b02.Hash])
- // b21 is acked.
- s.Require().Equal(2, len(b21.Ackeds))
- s.Require().NotNil(b21.Ackeds[b02.Hash])
- s.Require().NotNil(b21.Ackeds[b12.Hash])
- // b31 is acked.
- s.Require().Equal(2, len(b31.Ackeds))
- s.Require().NotNil(b31.Ackeds[b02.Hash])
- s.Require().NotNil(b31.Ackeds[b12.Hash])
-
- // B32
- lattice.ProcessBlock(b32)
-
- // Set status check.
- s.Require().Equal(0, len(lattice.waitingSet))
- s.Require().Equal(4, len(lattice.ackCandidateSet))
- s.Require().Equal(0, len(lattice.stronglyAckedSet))
- s.Require().Equal(4, len(lattice.pendingSet))
-
- s.Require().NotNil(lattice.pendingSet[b01.Hash])
- s.Require().NotNil(lattice.pendingSet[b11.Hash])
- s.Require().NotNil(lattice.pendingSet[b21.Hash])
- s.Require().NotNil(lattice.pendingSet[b31.Hash])
-
- // b02 is acked.
- s.Require().Equal(1, len(b02.Ackeds))
- s.Require().NotNil(b02.Ackeds[b32.Hash])
- // b12 is acked.
- s.Require().Equal(1, len(b12.Ackeds))
- s.Require().NotNil(b12.Ackeds[b32.Hash])
-
- // B22
- lattice.ProcessBlock(b22)
-
- // Set status check.
- s.Require().Equal(0, len(lattice.waitingSet))
- s.Require().Equal(4, len(lattice.ackCandidateSet))
- s.Require().Equal(0, len(lattice.stronglyAckedSet))
- s.Require().Equal(4, len(lattice.pendingSet))
-
- s.Require().NotNil(lattice.pendingSet[b01.Hash])
- s.Require().NotNil(lattice.pendingSet[b11.Hash])
- s.Require().NotNil(lattice.pendingSet[b21.Hash])
- s.Require().NotNil(lattice.pendingSet[b31.Hash])
- s.Require().Equal(types.BlockStatusOrdering, b01.Status)
- s.Require().Equal(types.BlockStatusOrdering, b11.Status)
- s.Require().Equal(types.BlockStatusOrdering, b21.Status)
- s.Require().Equal(types.BlockStatusOrdering, b31.Status)
-
- // b02 is acked.
- s.Require().Equal(2, len(b02.Ackeds))
- s.Require().NotNil(b02.Ackeds[b22.Hash])
- // b12 is acked.
- s.Require().Equal(2, len(b12.Ackeds))
- s.Require().NotNil(b12.Ackeds[b22.Hash])
- // b31 is acked.
- s.Require().Equal(4, len(b31.Ackeds))
- s.Require().NotNil(b31.Ackeds[b22.Hash])
-
- // B13, B33, B03, B23
- lattice.ProcessBlock(b13)
- lattice.ProcessBlock(b33)
- lattice.ProcessBlock(b03)
- lattice.ProcessBlock(b23)
-
- s.Require().Equal(8, len(lattice.pendingSet))
-}
-
-func (s *BlockLatticeTest) TesttotalOrdering() {
- // Recieve Order:
- // B01 -> B12 -> B11 -> B21 -> B31 -> B02 -> B32 -> B22 -> B13 -> B33
- // -> B03 -> B23
-
- lattice.ProcessBlock(b01, true)
- lattice.ProcessBlock(b12, true)
- lattice.ProcessBlock(b11, true)
- lattice.ProcessBlock(b21, true)
-
- // B01 in pendingSet after b31 is recieved.
- lattice.ProcessBlock(b31, true)
- s.Require().NotNil(lattice.pendingSet[b01.Hash])
-
- s.Require().Equal(0, len(s.app.Outputs))
- s.Require().Equal(1, len(lattice.candidateSet))
- s.Require().Equal(1, len(lattice.pendingSet))
- s.Require().NotNil(lattice.candidateSet[b01.Hash])
-
- // ABS & AHV
- s.Require().Equal(1, len(lattice.AHV[b01.Hash]))
-
- lattice.ProcessBlock(b02, true)
- lattice.ProcessBlock(b32, true)
-
- // B21 in pendingSet after b32 is recieved.
- s.Require().Equal(3, len(lattice.pendingSet))
- s.Require().NotNil(lattice.pendingSet[b11.Hash])
- s.Require().NotNil(lattice.pendingSet[b21.Hash])
- s.Require().NotNil(lattice.pendingSet[b31.Hash])
-
- s.Require().Equal(3, len(lattice.pendingSet))
- s.Require().Equal(1, len(s.app.Outputs))
- s.Require().Equal(b01.Hash, s.app.Outputs[0].Hash)
-
- // ABS & AHV
- lattice.updateABSAHV()
-
- s.Require().Equal(2, len(lattice.ABS[b11.Hash]))
- s.Require().Equal(uint64(1), lattice.ABS[b11.Hash][validators[1]])
- s.Require().Equal(uint64(1), lattice.ABS[b11.Hash][validators[3]])
- s.Require().Equal(1, len(lattice.ABS[b21.Hash]))
- s.Require().Equal(uint64(1), lattice.ABS[b21.Hash][validators[2]])
-
- s.Require().Equal(uint64(1), lattice.AHV[b11.Hash][validators[1]])
- s.Require().Equal(infinity, lattice.AHV[b11.Hash][validators[2]])
- s.Require().Equal(uint64(1), lattice.AHV[b11.Hash][validators[3]])
-
- s.Require().Equal(infinity, lattice.AHV[b21.Hash][validators[1]])
- s.Require().Equal(uint64(1), lattice.AHV[b21.Hash][validators[2]])
- s.Require().Equal(infinity, lattice.AHV[b21.Hash][validators[3]])
-
- // B22
- s.app.Clear()
- lattice.ProcessBlock(b22, true)
- s.Require().Equal(0, len(s.app.Outputs))
- s.Require().Equal(3, len(lattice.pendingSet))
- s.Require().Equal(2, len(lattice.candidateSet))
-
- // ABS & AHV
- lattice.updateABSAHV()
- s.Require().Equal(3, len(lattice.abs()))
-
- // B13
- s.app.Clear()
- lattice.ProcessBlock(b13, true)
- s.Require().Equal(2, len(s.app.Outputs))
- expected := common.Hashes{b21.Hash, b11.Hash}
- sort.Sort(expected)
- got := common.Hashes{s.app.Outputs[0].Hash, s.app.Outputs[1].Hash}
- sort.Sort(got)
- s.Require().Equal(expected, got)
- s.Require().Equal(false, s.app.Early)
-
- s.Require().Equal(1, len(lattice.candidateSet))
- s.Require().NotNil(lattice.candidateSet[b31.Hash])
-
- // ABS & AHV
- lattice.updateABSAHV()
- s.Require().Equal(3, len(lattice.abs()))
-
- // B33
- s.app.Clear()
- lattice.ProcessBlock(b33, true)
- s.Require().Equal(0, len(s.app.Outputs))
- s.Require().Equal(1, len(lattice.candidateSet))
- s.Require().Equal(3, len(lattice.pendingSet))
- s.Require().NotNil(lattice.candidateSet[b31.Hash])
-
- // ABS & AHV
- lattice.updateABSAHV()
- s.Require().Equal(3, len(lattice.abs()))
-
- // B03
- s.app.Clear()
- lattice.ProcessBlock(b03, true)
- s.Require().Equal(0, len(s.app.Outputs))
-
- // B23
- s.app.Clear()
- lattice.ProcessBlock(b23, true)
- s.Require().Equal(1, len(s.app.Outputs))
- s.Require().Equal(b31.Hash, s.app.Outputs[0].Hash)
-
- s.Require().Equal(0, len(lattice.waitingSet))
- s.Require().Equal(0, len(lattice.stronglyAckedSet))
- s.Require().Equal(4, len(lattice.pendingSet))
- s.Require().Equal(2, len(lattice.candidateSet))
-}
-
-func TestBlockLattice(t *testing.T) {
- suite.Run(t, new(BlockLatticeTest))
-}
diff --git a/core/consensus-timestamp.go b/core/consensus-timestamp.go
index ab904fb..7694f60 100644
--- a/core/consensus-timestamp.go
+++ b/core/consensus-timestamp.go
@@ -19,14 +19,11 @@ package core
import (
"errors"
- "sort"
- "time"
- "github.com/dexon-foundation/dexon-consensus-core/common"
"github.com/dexon-foundation/dexon-consensus-core/core/types"
)
-// Timestamp is for Concensus Timestamp Algorithm.
+// consensusTimestamp is for Concensus Timestamp Algorithm.
type consensusTimestamp struct {
lastMainChainBlock *types.Block
blocksNotInMainChain []*types.Block
@@ -36,11 +33,9 @@ var (
// ErrInvalidMainChain would be reported if the invalid result from
// main chain selection algorithm is detected.
ErrInvalidMainChain = errors.New("invalid main chain")
- // ErrEmptyTimestamps would be reported if Block.timestamps is empty.
- ErrEmptyTimestamps = errors.New("timestamp vector should not be empty")
)
-// NewTimestamp create Timestamp object.
+// newConsensusTimestamp create timestamper object.
func newConsensusTimestamp() *consensusTimestamp {
return &consensusTimestamp{}
}
@@ -73,7 +68,7 @@ func (ct *consensusTimestamp) processBlocks(blocks []*types.Block) (
} else if block.Hash == mainChain[idxMainChain].Hash {
rightMainChainIdx = idx
blocksWithTimestamp[idx].ConsensusInfo.Timestamp, err =
- ct.getMedianTime(block)
+ getMedianTime(block)
if err != nil {
return
}
@@ -111,43 +106,3 @@ func (ct *consensusTimestamp) selectMainChain(blocks []*types.Block) (
}
return
}
-
-func (ct *consensusTimestamp) getMedianTime(block *types.Block) (
- timestamp time.Time, err error) {
- timestamps := []time.Time{}
- for _, timestamp := range block.Timestamps {
- timestamps = append(timestamps, timestamp)
- }
- if len(timestamps) == 0 {
- err = ErrEmptyTimestamps
- return
- }
- sort.Sort(common.ByTime(timestamps))
- if len(timestamps)%2 == 0 {
- t1 := timestamps[len(timestamps)/2-1]
- t2 := timestamps[len(timestamps)/2]
- timestamp = interpoTime(t1, t2, 1)[0]
- } else {
- timestamp = timestamps[len(timestamps)/2]
- }
- return
-}
-
-func interpoTime(t1 time.Time, t2 time.Time, sep int) []time.Time {
- if sep == 0 {
- return []time.Time{}
- }
- if t1.After(t2) {
- return interpoTime(t2, t1, sep)
- }
- timestamps := make([]time.Time, sep)
- duration := t2.Sub(t1)
- period := time.Duration(
- (duration.Nanoseconds() / int64(sep+1))) * time.Nanosecond
- prevTime := t1
- for idx := range timestamps {
- prevTime = prevTime.Add(period)
- timestamps[idx] = prevTime
- }
- return timestamps
-}
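getMedianTime and interpoTime now live in core/utils.go; a self-contained sketch of the interpolation behaviour, with the function body mirroring the deleted implementation above (the demo wrapper is illustrative):

package main

import (
	"fmt"
	"time"
)

// interpoTime returns sep evenly spaced timestamps strictly between
// t1 and t2, mirroring the function moved to core/utils.go.
func interpoTime(t1, t2 time.Time, sep int) []time.Time {
	if sep == 0 {
		return []time.Time{}
	}
	if t1.After(t2) {
		return interpoTime(t2, t1, sep)
	}
	timestamps := make([]time.Time, sep)
	period := time.Duration(t2.Sub(t1).Nanoseconds()/int64(sep+1)) * time.Nanosecond
	prev := t1
	for i := range timestamps {
		prev = prev.Add(period)
		timestamps[i] = prev
	}
	return timestamps
}

func main() {
	t1 := time.Unix(0, 0).UTC()
	t2 := t1.Add(3 * time.Second)
	// A 3s gap with sep=2 splits into three 1s periods: +1s and +2s.
	for _, t := range interpoTime(t1, t2, 2) {
		fmt.Println(t)
	}
}

getMedianTime builds on this: for an even number of block timestamps it returns interpoTime(t1, t2, 1)[0], the midpoint between the two middle values.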
diff --git a/core/consensus.go b/core/consensus.go
new file mode 100644
index 0000000..6a97e9e
--- /dev/null
+++ b/core/consensus.go
@@ -0,0 +1,130 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "sync"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus-core/blockdb"
+ "github.com/dexon-foundation/dexon-consensus-core/core/types"
+)
+
+// Consensus implements DEXON Consensus algorithm.
+type Consensus struct {
+ app Application
+ gov Governance
+ rbModule *reliableBroadcast
+ toModule *totalOrdering
+ ctModule *consensusTimestamp
+ db blockdb.BlockDatabase
+ lock sync.RWMutex
+}
+
+// NewConsensus construct an Consensus instance.
+func NewConsensus(
+ app Application,
+ gov Governance,
+ db blockdb.BlockDatabase) *Consensus {
+
+ validatorSet := gov.GetValidatorSet()
+
+ // Setup acking by information returned from Governace.
+ rb := newReliableBroadcast()
+ for vID := range validatorSet {
+ rb.addValidator(vID)
+ }
+
+ // Setup sequencer by information returned from Governace.
+ // TODO(mission): the value of 'K' should be in governace.
+ // TODO(mission): the ratio of 'phi' should be in governance.
+ to := newTotalOrdering(
+ 0,
+ uint64(2*(len(validatorSet)-1)/3+1),
+ uint64(len(validatorSet)))
+
+ return &Consensus{
+ rbModule: rb,
+ toModule: to,
+ ctModule: newConsensusTimestamp(),
+ app: app,
+ gov: gov,
+ db: db,
+ }
+}
+
+// ProcessBlock is the entry point to submit one block to a Consensus instance.
+func (con *Consensus) ProcessBlock(b *types.Block) (err error) {
+ var (
+ deliveredBlocks []*types.Block
+ earlyDelivered bool
+ )
+ // To avoid application layer modify the content of block during
+ // processing, we should always operate based on the cloned one.
+ b = b.Clone()
+
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ // Perform reliable broadcast checking.
+ if err = con.rbModule.processBlock(b); err != nil {
+ return err
+ }
+ for _, b := range con.rbModule.extractBlocks() {
+ // Notify application layer that some block is strongly acked.
+ con.app.StronglyAcked(b.Hash)
+ // Perform total ordering.
+ deliveredBlocks, earlyDelivered, err = con.toModule.processBlock(b)
+ if err != nil {
+ return
+ }
+ if len(deliveredBlocks) == 0 {
+ continue
+ }
+ for _, b := range deliveredBlocks {
+ if err = con.db.Put(*b); err != nil {
+ return
+ }
+ }
+ // TODO(mission): handle membership events here.
+ // TODO(mission): return block hash instead of whole block here.
+ con.app.TotalOrderingDeliver(deliveredBlocks, earlyDelivered)
+ // Perform timestamp generation.
+ deliveredBlocks, _, err = con.ctModule.processBlocks(
+ deliveredBlocks)
+ if err != nil {
+ return
+ }
+ for _, b := range deliveredBlocks {
+ if err = con.db.Update(*b); err != nil {
+ return
+ }
+ con.app.DeliverBlock(b.Hash, b.ConsensusInfo.Timestamp)
+ }
+ }
+ return
+}
+
+// PrepareBlock would setup header fields of block based on its ProposerID.
+func (con *Consensus) PrepareBlock(b *types.Block) (err error) {
+ con.lock.RLock()
+ defer con.lock.RUnlock()
+
+ con.rbModule.prepareBlock(b)
+ b.Timestamps[b.ProposerID] = time.Now().UTC()
+ return
+}
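The tests below exercise this pipeline end to end; as a compact orientation, a hedged wiring sketch using the test.App and test.Gov helpers added by this commit (it elides networking and assumes genesis blocks would be processed first, as the tests do):

package main

import (
	"github.com/dexon-foundation/dexon-consensus-core/blockdb"
	"github.com/dexon-foundation/dexon-consensus-core/common"
	"github.com/dexon-foundation/dexon-consensus-core/core"
	"github.com/dexon-foundation/dexon-consensus-core/core/test"
	"github.com/dexon-foundation/dexon-consensus-core/core/types"
)

func main() {
	app := test.NewApp()
	gov := test.NewGov(4, 1000) // arguments mirror the tests in this commit
	db, err := blockdb.NewMemBackedBlockDB()
	if err != nil {
		panic(err)
	}
	con := core.NewConsensus(app, gov, db)

	// Pick any validator from governance as the proposer.
	var proposer types.ValidatorID
	for vID := range gov.GetValidatorSet() {
		proposer = vID
		break
	}
	// In a real run, genesis blocks are proposed and processed first
	// (see prepareGenesisBlock in consensus_test.go below).
	b := &types.Block{ProposerID: proposer, Hash: common.NewRandomHash()}
	// PrepareBlock fills Acks, Timestamps, ParentHash and Height.
	if err := con.PrepareBlock(b); err != nil {
		panic(err)
	}
	// ProcessBlock runs reliable broadcast, total ordering and
	// timestamping, driving the three Application callbacks on app.
	if err := con.ProcessBlock(b); err != nil {
		panic(err)
	}
}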
diff --git a/core/consensus_test.go b/core/consensus_test.go
new file mode 100644
index 0000000..7ce06e9
--- /dev/null
+++ b/core/consensus_test.go
@@ -0,0 +1,305 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus-core/blockdb"
+ "github.com/dexon-foundation/dexon-consensus-core/common"
+ "github.com/dexon-foundation/dexon-consensus-core/core/test"
+ "github.com/dexon-foundation/dexon-consensus-core/core/types"
+ "github.com/stretchr/testify/suite"
+)
+
+type ConsensusTestSuite struct {
+ suite.Suite
+}
+
+func (s *ConsensusTestSuite) prepareGenesisBlock(
+ proposerID types.ValidatorID,
+ gov Governance) *types.Block {
+
+ hash := common.NewRandomHash()
+ block := &types.Block{
+ ProposerID: proposerID,
+ ParentHash: hash,
+ Hash: hash,
+ Height: 0,
+ Acks: make(map[common.Hash]struct{}),
+ Timestamps: make(map[types.ValidatorID]time.Time),
+ }
+ for vID := range gov.GetValidatorSet() {
+ block.Timestamps[vID] = time.Time{}
+ }
+ block.Timestamps[proposerID] = time.Now().UTC()
+ return block
+}
+
+func (s *ConsensusTestSuite) prepareConsensus(gov *test.Gov) (
+ *test.App, *Consensus) {
+
+ app := test.NewApp()
+ db, err := blockdb.NewMemBackedBlockDB()
+ s.Require().Nil(err)
+ con := NewConsensus(app, gov, db)
+ return app, con
+}
+
+func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
+ // This test scenario:
+ // o o o o <- this layer makes older blocks strongly acked.
+ // |x|x|x| <- lots of acks.
+ // o | o o <- this layer would be sent to total ordering.
+ // |\|/|-|
+ // | o | | <- the only block which is acked by all other blocks
+ // |/|\|\| at the same height.
+ // o o o o <- genesis blocks
+ // 0 1 2 3 <- index of validator ID
+ //
+ // This test case only works for Total Ordering with K=0.
+ var (
+ minInterval = 50 * time.Millisecond
+ gov = test.NewGov(4, 1000)
+ req = s.Require()
+ validators []types.ValidatorID
+ )
+
+ for vID := range gov.GetValidatorSet() {
+ validators = append(validators, vID)
+ }
+
+ // Setup core.Consensus and test.App.
+ objs := map[types.ValidatorID]*struct {
+ app *test.App
+ con *Consensus
+ }{}
+ for _, vID := range validators {
+ app, con := s.prepareConsensus(gov)
+ objs[vID] = &struct {
+ app *test.App
+ con *Consensus
+ }{app, con}
+ }
+ // It's a helper function to emit one block
+ // to all core.Consensus objects.
+ broadcast := func(b *types.Block) {
+ for _, obj := range objs {
+ req.Nil(obj.con.ProcessBlock(b))
+ }
+ }
+ // Genesis blocks
+ b00 := s.prepareGenesisBlock(validators[0], gov)
+ time.Sleep(minInterval)
+ b10 := s.prepareGenesisBlock(validators[1], gov)
+ time.Sleep(minInterval)
+ b20 := s.prepareGenesisBlock(validators[2], gov)
+ time.Sleep(minInterval)
+ b30 := s.prepareGenesisBlock(validators[3], gov)
+ broadcast(b00)
+ broadcast(b10)
+ broadcast(b20)
+ broadcast(b30)
+ // Setup b11.
+ time.Sleep(minInterval)
+ b11 := &types.Block{
+ ProposerID: validators[1],
+ Hash: common.NewRandomHash(),
+ }
+ req.Nil(objs[validators[1]].con.PrepareBlock(b11))
+ req.Len(b11.Acks, 4)
+ req.Contains(b11.Acks, b00.Hash)
+ req.Contains(b11.Acks, b10.Hash)
+ req.Contains(b11.Acks, b20.Hash)
+ req.Contains(b11.Acks, b30.Hash)
+ broadcast(b11)
+ // Setup b01.
+ time.Sleep(minInterval)
+ b01 := &types.Block{
+ ProposerID: validators[0],
+ Hash: common.NewRandomHash(),
+ }
+ req.Nil(objs[validators[0]].con.PrepareBlock(b01))
+ req.Len(b01.Acks, 4)
+ req.Contains(b01.Acks, b11.Hash)
+ // Setup b21.
+ time.Sleep(minInterval)
+ b21 := &types.Block{
+ ProposerID: validators[2],
+ Hash: common.NewRandomHash(),
+ }
+ req.Nil(objs[validators[2]].con.PrepareBlock(b21))
+ req.Len(b21.Acks, 4)
+ req.Contains(b21.Acks, b11.Hash)
+ // Setup b31.
+ time.Sleep(minInterval)
+ b31 := &types.Block{
+ ProposerID: validators[3],
+ Hash: common.NewRandomHash(),
+ }
+ req.Nil(objs[validators[3]].con.PrepareBlock(b31))
+ req.Len(b31.Acks, 4)
+ req.Contains(b31.Acks, b11.Hash)
+ // Broadcast other height=1 blocks.
+ broadcast(b01)
+ broadcast(b21)
+ broadcast(b31)
+ // Setup height=2 blocks.
+ // Setup b02.
+ time.Sleep(minInterval)
+ b02 := &types.Block{
+ ProposerID: validators[0],
+ Hash: common.NewRandomHash(),
+ }
+ req.Nil(objs[validators[0]].con.PrepareBlock(b02))
+ req.Len(b02.Acks, 3)
+ req.Contains(b02.Acks, b01.Hash)
+ req.Contains(b02.Acks, b21.Hash)
+ req.Contains(b02.Acks, b31.Hash)
+ // Setup b12.
+ time.Sleep(minInterval)
+ b12 := &types.Block{
+ ProposerID: validators[1],
+ Hash: common.NewRandomHash(),
+ }
+ req.Nil(objs[validators[1]].con.PrepareBlock(b12))
+ req.Len(b12.Acks, 4)
+ req.Contains(b12.Acks, b01.Hash)
+ req.Contains(b12.Acks, b11.Hash)
+ req.Contains(b12.Acks, b21.Hash)
+ req.Contains(b12.Acks, b31.Hash)
+ // Setup b22.
+ time.Sleep(minInterval)
+ b22 := &types.Block{
+ ProposerID: validators[2],
+ Hash: common.NewRandomHash(),
+ }
+ req.Nil(objs[validators[2]].con.PrepareBlock(b22))
+ req.Len(b22.Acks, 3)
+ req.Contains(b22.Acks, b01.Hash)
+ req.Contains(b22.Acks, b21.Hash)
+ req.Contains(b22.Acks, b31.Hash)
+ // Setup b32.
+ time.Sleep(minInterval)
+ b32 := &types.Block{
+ ProposerID: validators[3],
+ Hash: common.NewRandomHash(),
+ }
+ req.Nil(objs[validators[3]].con.PrepareBlock(b32))
+ req.Len(b32.Acks, 3)
+ req.Contains(b32.Acks, b01.Hash)
+ req.Contains(b32.Acks, b21.Hash)
+ req.Contains(b32.Acks, b31.Hash)
+ // Broadcast blocks at height=2.
+ broadcast(b02)
+ broadcast(b12)
+ broadcast(b22)
+ broadcast(b32)
+
+ // Verify the cached status of each app.
+ verify := func(app *test.App) {
+ // Check blocks that are strongly acked.
+ req.Contains(app.Acked, b00.Hash)
+ req.Contains(app.Acked, b10.Hash)
+ req.Contains(app.Acked, b20.Hash)
+ req.Contains(app.Acked, b30.Hash)
+ req.Contains(app.Acked, b01.Hash)
+ req.Contains(app.Acked, b11.Hash)
+ req.Contains(app.Acked, b21.Hash)
+ req.Contains(app.Acked, b31.Hash)
+ // Genesis blocks are delivered by total ordering as a set.
+ delivered0 := common.Hashes{b00.Hash, b10.Hash, b20.Hash, b30.Hash}
+ sort.Sort(delivered0)
+ req.Len(app.TotalOrdered, 2)
+ req.Equal(app.TotalOrdered[0].BlockHashes, delivered0)
+ req.False(app.TotalOrdered[0].Early)
+ // b11 is the sencond set delivered by total ordering.
+ delivered1 := common.Hashes{b11.Hash}
+ sort.Sort(delivered1)
+ req.Equal(app.TotalOrdered[1].BlockHashes, delivered1)
+ req.False(app.TotalOrdered[1].Early)
+ // Check generated timestamps.
+ req.Contains(app.Delivered, b00.Hash)
+ req.Contains(app.Delivered, b10.Hash)
+ req.Contains(app.Delivered, b20.Hash)
+ req.Contains(app.Delivered, b30.Hash)
+ req.Contains(app.Delivered, b11.Hash)
+ // Check timestamps, there is no direct way to know which block is
+ // selected as main chain, we can only detect it by making sure
+ // its ConsensusTimestamp is not interpolated.
+ t, err := getMedianTime(b11)
+ req.Nil(err)
+ req.Equal(t, app.Delivered[b11.Hash])
+ }
+ for _, obj := range objs {
+ verify(obj.app)
+ }
+}
+
+func (s *ConsensusTestSuite) TestPrepareBlock() {
+ // This test case would test these steps:
+ // - Add all genesis blocks into lattice.
+ // - Make sure Consensus.PrepareBlock would attempt to ack
+ // all genesis blocks.
+ // - Add the prepared block into lattice.
+ // - Make sure Consensus.PrepareBlock would only attempt to
+ // ack the prepared block.
+ var (
+ gov = test.NewGov(4, 1000)
+ req = s.Require()
+ validators []types.ValidatorID
+ )
+ for vID := range gov.GetValidatorSet() {
+ validators = append(validators, vID)
+ }
+ _, con := s.prepareConsensus(gov)
+ b00 := s.prepareGenesisBlock(validators[0], gov)
+ b10 := s.prepareGenesisBlock(validators[1], gov)
+ b20 := s.prepareGenesisBlock(validators[2], gov)
+ b30 := s.prepareGenesisBlock(validators[3], gov)
+ req.Nil(con.ProcessBlock(b00))
+ req.Nil(con.ProcessBlock(b10))
+ req.Nil(con.ProcessBlock(b20))
+ req.Nil(con.ProcessBlock(b30))
+ b11 := &types.Block{
+ ProposerID: validators[1],
+ Hash: common.NewRandomHash(),
+ }
+ // Sleep to make sure 'now' is slower than b10's timestamp.
+ time.Sleep(100 * time.Millisecond)
+ req.Nil(con.PrepareBlock(b11))
+ // Make sure we would assign 'now' to the timestamp belongs to
+ // the proposer.
+ req.True(
+ b11.Timestamps[validators[1]].Sub(
+ b10.Timestamps[validators[1]]) > 100*time.Millisecond)
+ req.Nil(con.ProcessBlock(b11))
+ b12 := &types.Block{
+ ProposerID: validators[1],
+ Hash: common.NewRandomHash(),
+ }
+ req.Nil(con.PrepareBlock(b12))
+ req.Len(b12.Acks, 1)
+ req.Contains(b12.Acks, b11.Hash)
+}
+
+func TestConsensus(t *testing.T) {
+ suite.Run(t, new(ConsensusTestSuite))
+}
diff --git a/core/governance.go b/core/governance.go
index bd6354d..a7069a5 100644
--- a/core/governance.go
+++ b/core/governance.go
@@ -23,21 +23,6 @@ import (
"github.com/dexon-foundation/dexon-consensus-core/core/types"
)
-// MembershipActionType specifies the action of a membership event.
-type MembershipActionType int
-
-// Event enums.
-const (
- MembershipActionAdd MembershipActionType = 0
- MembershipActionDelete MembershipActionType = 1
-)
-
-// MembershipEvent specifies the event of membership changes.
-type MembershipEvent struct {
- Epoch int
- Action MembershipActionType
-}
-
// Governance interface specifies interface to control the governance contract.
// Note that there are a lot more methods in the governance contract, that this
// interface only define those that are required to run the consensus algorithm.
@@ -49,5 +34,5 @@ type Governance interface {
GetBlockProposingInterval() int
// Get membership events after a certain epoch.
- GetMembershipEvents(epoch int) []MembershipEvent
+ GetMembershipEvents(epoch int) []types.MembershipEvent
}
diff --git a/core/reliable-broadcast.go b/core/reliable-broadcast.go
index dd57241..7db8212 100644
--- a/core/reliable-broadcast.go
+++ b/core/reliable-broadcast.go
@@ -19,6 +19,7 @@ package core
import (
"fmt"
+ "time"
"github.com/dexon-foundation/dexon-consensus-core/common"
"github.com/dexon-foundation/dexon-consensus-core/core/types"
@@ -66,6 +67,7 @@ var (
ErrNotAckParent = fmt.Errorf("not ack parent")
ErrDoubleAck = fmt.Errorf("double ack")
ErrInvalidBlockHeight = fmt.Errorf("invalid block height")
+ ErrAlreadyInLattice = fmt.Errorf("block already in lattice")
)
// newReliableBroadcast creates a new reliableBroadcast struct.
@@ -89,6 +91,7 @@ func (rb *reliableBroadcast) sanityCheck(b *types.Block) error {
if b.Hash != bInLattice.Hash {
return ErrForkBlock
}
+ return ErrAlreadyInLattice
}
// Check non-genesis blocks if it acks its parent.
@@ -136,9 +139,9 @@ func (rb *reliableBroadcast) areAllAcksInLattice(b *types.Block) bool {
// processBlock processes block, it does sanity check, inserts block into
// lattice, handles strong acking and deletes blocks which will not be used.
-func (rb *reliableBroadcast) processBlock(block *types.Block) {
+func (rb *reliableBroadcast) processBlock(block *types.Block) (err error) {
// If a block does not pass sanity check, discard this block.
- if err := rb.sanityCheck(block); err != nil {
+ if err = rb.sanityCheck(block); err != nil {
return
}
rb.blocks[block.Hash] = block
@@ -166,10 +169,11 @@ func (rb *reliableBroadcast) processBlock(block *types.Block) {
// B C Block B and C both ack A and are valid. B, C received first
// \ / (added in receivedBlocks), and A comes, if sanity check is
// A not being executed here, B and C will both be added in lattice
- if err := rb.sanityCheck(b); err != nil {
+ if err = rb.sanityCheck(b); err != nil {
delete(rb.blocks, b.Hash)
delete(rb.receivedBlocks, b.Hash)
continue
+ // TODO(mission): how to return for multiple errors?
}
rb.lattice[b.ProposerID].blocks[b.Height] = b
delete(rb.receivedBlocks, b.Hash)
@@ -246,6 +250,7 @@ func (rb *reliableBroadcast) processBlock(block *types.Block) {
min--
}
}
+ return
}
// extractBlocks returns all blocks that can be inserted into total ordering's
@@ -290,6 +295,78 @@ func (rb *reliableBroadcast) extractBlocks() []*types.Block {
return ret
}
+// prepareBlock helps to setup fields of block based on its ProposerID,
+// including:
+// - Set 'Acks' and 'Timestamps' for the highest block of each validator not
+// acked by this proposer before.
+// - Set 'ParentHash' and 'Height' from parent block, if we can't find a
+// parent, these fields would be setup like a genesis block.
+func (rb *reliableBroadcast) prepareBlock(block *types.Block) {
+ // Reset fields to make sure we got these information from parent block.
+ block.Height = 0
+ // TODO(mission): make all genesis block would contain zero ParentHash.
+ block.ParentHash = common.Hash{}
+ // The helper function to accumulate timestamps.
+ accumulateTimestamps := func(
+ times map[types.ValidatorID]time.Time, b *types.Block) {
+
+ // Update timestamps with the block's proposer time.
+ // TODO (mission): make epslon configurable.
+ times[b.ProposerID] = b.Timestamps[b.ProposerID].Add(
+ 1 * time.Millisecond)
+
+ // Update timestamps from the block if it's later than
+ // current cached ones.
+ for vID, t := range b.Timestamps {
+ cachedTime, exists := times[vID]
+ if !exists {
+ // This means the block contains timestamps from
+ // removed validators.
+ continue
+ }
+ if cachedTime.After(t) {
+ continue
+ }
+ times[vID] = t
+ }
+ return
+ }
+ // Initial timestamps with current validator set.
+ times := make(map[types.ValidatorID]time.Time)
+ for vID := range rb.lattice {
+ times[vID] = time.Time{}
+ }
+ acks := make(map[common.Hash]struct{})
+ for vID := range rb.lattice {
+ // find height of the latest block for that validator.
+ var (
+ curBlock *types.Block
+ nextHeight = rb.lattice[block.ProposerID].nextAck[vID]
+ )
+
+ for {
+ tmpBlock, exists := rb.lattice[vID].blocks[nextHeight]
+ if !exists {
+ break
+ }
+ curBlock = tmpBlock
+ nextHeight++
+ }
+ if curBlock == nil {
+ continue
+ }
+ acks[curBlock.Hash] = struct{}{}
+ accumulateTimestamps(times, curBlock)
+ if vID == block.ProposerID {
+ block.ParentHash = curBlock.Hash
+ block.Height = curBlock.Height + 1
+ }
+ }
+ block.Timestamps = times
+ block.Acks = acks
+ return
+}
+
// addValidator adds validator in the validator set.
func (rb *reliableBroadcast) addValidator(h types.ValidatorID) {
rb.lattice[h] = &ackingValidatorStatus{
diff --git a/core/reliable-broadcast_test.go b/core/reliable-broadcast_test.go
index 7c15fe5..bd77ea3 100644
--- a/core/reliable-broadcast_test.go
+++ b/core/reliable-broadcast_test.go
@@ -15,12 +15,15 @@
// along with the dexon-consensus-core library. If not, see
// <http://www.gnu.org/licenses/>.
+// TODO(mission): we should check the return value from processBlock.
+
package core
import (
"math/rand"
"sort"
"testing"
+ "time"
"github.com/stretchr/testify/suite"
@@ -42,6 +45,26 @@ func (s *ReliableBroadcastTest) SetupTest() {
}
+func (s *ReliableBroadcastTest) prepareGenesisBlock(
+ proposerID types.ValidatorID,
+ validatorIDs []types.ValidatorID) (b *types.Block) {
+
+ hash := common.NewRandomHash()
+ b = &types.Block{
+ ProposerID: proposerID,
+ ParentHash: hash,
+ Hash: hash,
+ Height: 0,
+ Acks: make(map[common.Hash]struct{}),
+ Timestamps: make(map[types.ValidatorID]time.Time),
+ }
+ for _, vID := range validatorIDs {
+ b.Timestamps[vID] = time.Time{}
+ }
+ b.Timestamps[proposerID] = time.Now().UTC()
+ return
+}
+
// genTestCase1 generates test case 1,
// 3
// |
@@ -495,6 +518,81 @@ func (s *ReliableBroadcastTest) TestRandomlyGeneratedBlocks() {
}
}
+func (s *ReliableBroadcastTest) TestPrepareBlock() {
+ var (
+ req = s.Require()
+ rb = newReliableBroadcast()
+ minInterval = 50 * time.Millisecond
+ validators []types.ValidatorID
+ )
+ // Prepare validator IDs.
+ for i := 0; i < 4; i++ {
+ vID := types.ValidatorID{Hash: common.NewRandomHash()}
+ validators = append(validators, vID)
+ rb.addValidator(vID)
+ }
+ // Setup genesis blocks.
+ b00 := s.prepareGenesisBlock(validators[0], validators)
+ time.Sleep(minInterval)
+ b10 := s.prepareGenesisBlock(validators[1], validators)
+ time.Sleep(minInterval)
+ b20 := s.prepareGenesisBlock(validators[2], validators)
+ time.Sleep(minInterval)
+ b30 := s.prepareGenesisBlock(validators[3], validators)
+ // Submit these blocks to reliableBroadcast instance.
+ rb.processBlock(b00)
+ rb.processBlock(b10)
+ rb.processBlock(b20)
+ rb.processBlock(b30)
+ // We should be able to collect all 4 genesis blocks by calling
+ // prepareBlock.
+ b11 := &types.Block{
+ ProposerID: validators[1],
+ Hash: common.NewRandomHash(),
+ }
+ rb.prepareBlock(b11)
+ req.Contains(b11.Acks, b00.Hash)
+ req.Contains(b11.Acks, b10.Hash)
+ req.Contains(b11.Acks, b20.Hash)
+ req.Contains(b11.Acks, b30.Hash)
+ req.Equal(b11.Timestamps[validators[0]],
+ b00.Timestamps[b00.ProposerID].Add(time.Millisecond))
+ req.Equal(b11.Timestamps[validators[1]],
+ b10.Timestamps[b10.ProposerID].Add(time.Millisecond))
+ req.Equal(b11.Timestamps[validators[2]],
+ b20.Timestamps[b20.ProposerID].Add(time.Millisecond))
+ req.Equal(b11.Timestamps[validators[3]],
+ b30.Timestamps[b30.ProposerID].Add(time.Millisecond))
+ req.Equal(b11.ParentHash, b10.Hash)
+ req.Equal(b11.Height, uint64(1))
+ rb.processBlock(b11)
+ // Propose/Process a block based on collected info.
+ b12 := &types.Block{
+ ProposerID: validators[1],
+ Hash: common.NewRandomHash(),
+ }
+ rb.prepareBlock(b12)
+ // This time we only need to ack b11.
+ req.Len(b12.Acks, 1)
+ req.Contains(b12.Acks, b11.Hash)
+ req.Equal(b12.ParentHash, b11.Hash)
+ req.Equal(b12.Height, uint64(2))
+	// When calling with another validator ID, we should be able to
+ // get 4 blocks to ack.
+ b01 := &types.Block{
+ ProposerID: validators[0],
+ Hash: common.NewRandomHash(),
+ }
+ rb.prepareBlock(b01)
+ req.Len(b01.Acks, 4)
+ req.Contains(b01.Acks, b00.Hash)
+ req.Contains(b01.Acks, b11.Hash)
+ req.Contains(b01.Acks, b20.Hash)
+ req.Contains(b01.Acks, b30.Hash)
+ req.Equal(b01.ParentHash, b00.Hash)
+ req.Equal(b01.Height, uint64(1))
+}
+
func TestReliableBroadcast(t *testing.T) {
suite.Run(t, new(ReliableBroadcastTest))
}
diff --git a/core/test/app.go b/core/test/app.go
new file mode 100644
index 0000000..f596afb
--- /dev/null
+++ b/core/test/app.go
@@ -0,0 +1,72 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus-core/common"
+ "github.com/dexon-foundation/dexon-consensus-core/core/types"
+)
+
+// App implements the Application interface for testing purposes.
+type App struct {
+ Acked map[common.Hash]struct{}
+ TotalOrdered []*struct {
+ BlockHashes common.Hashes
+ Early bool
+ }
+ Delivered map[common.Hash]time.Time
+}
+
+// NewApp constructs an App instance.
+func NewApp() *App {
+ return &App{
+ Acked: make(map[common.Hash]struct{}),
+ TotalOrdered: []*struct {
+ BlockHashes common.Hashes
+ Early bool
+ }{},
+ Delivered: make(map[common.Hash]time.Time),
+ }
+}
+
+// StronglyAcked implements Application interface.
+func (app *App) StronglyAcked(blockHash common.Hash) {
+ app.Acked[blockHash] = struct{}{}
+}
+
+// TotalOrderingDeliver implements Application interface.
+func (app *App) TotalOrderingDeliver(blocks []*types.Block, early bool) {
+ var hashes common.Hashes
+ for _, b := range blocks {
+ hashes = append(hashes, b.Hash)
+ }
+ app.TotalOrdered = append(app.TotalOrdered, &struct {
+ BlockHashes common.Hashes
+ Early bool
+ }{
+ BlockHashes: hashes,
+ Early: early,
+ })
+}
+
+// DeliverBlock implements Application interface.
+func (app *App) DeliverBlock(blockHash common.Hash, timestamp time.Time) {
+ app.Delivered[blockHash] = timestamp
+}
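
A minimal usage sketch for this test double (a hypothetical test, not part
of this patch): drive App directly and inspect what it recorded; in real
tests core.Consensus would be the caller.

package test_test

import (
	"testing"
	"time"

	"github.com/dexon-foundation/dexon-consensus-core/common"
	"github.com/dexon-foundation/dexon-consensus-core/core/test"
	"github.com/dexon-foundation/dexon-consensus-core/core/types"
)

func TestAppRecordsCallbacks(t *testing.T) {
	app := test.NewApp()
	b := &types.Block{Hash: common.NewRandomHash()}
	// Feed each Application callback once and verify it was recorded.
	app.StronglyAcked(b.Hash)
	app.TotalOrderingDeliver([]*types.Block{b}, true)
	app.DeliverBlock(b.Hash, time.Now().UTC())
	if _, acked := app.Acked[b.Hash]; !acked {
		t.Fatal("block should be recorded as strongly acked")
	}
	if len(app.TotalOrdered) != 1 || !app.TotalOrdered[0].Early {
		t.Fatal("expected exactly one early total-ordering deliver")
	}
}
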
diff --git a/core/test/gov.go b/core/test/gov.go
new file mode 100644
index 0000000..7a5bdfc
--- /dev/null
+++ b/core/test/gov.go
@@ -0,0 +1,63 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "github.com/dexon-foundation/dexon-consensus-core/common"
+ "github.com/dexon-foundation/dexon-consensus-core/core/types"
+ "github.com/shopspring/decimal"
+)
+
+// Gov is an implementation of the Governance interface for testing purposes.
+type Gov struct {
+ BlockProposingInterval int
+ Validators map[types.ValidatorID]decimal.Decimal
+}
+
+// NewGov constructs a Gov instance.
+func NewGov(
+ validatorCount, proposingInterval int) (gov *Gov) {
+
+ gov = &Gov{
+ BlockProposingInterval: proposingInterval,
+ Validators: make(map[types.ValidatorID]decimal.Decimal),
+ }
+ for i := 0; i < validatorCount; i++ {
+ gov.Validators[types.ValidatorID{Hash: common.NewRandomHash()}] =
+ decimal.NewFromFloat(0)
+ }
+ return
+}
+
+// GetValidatorSet implements the Governance interface to return the current
+// validator set.
+func (gov *Gov) GetValidatorSet() map[types.ValidatorID]decimal.Decimal {
+ return gov.Validators
+}
+
+// GetBlockProposingInterval implements the Governance interface to return the
+// maximum allowed block proposing interval in milliseconds.
+func (gov *Gov) GetBlockProposingInterval() int {
+ return gov.BlockProposingInterval
+}
+
+// GetMembershipEvents implements the Governance interface to return membership
+// change events.
+func (gov *Gov) GetMembershipEvents(epoch int) []types.MembershipEvent {
+ return nil
+}
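
A quick sketch of how a simulation might configure this stub, with
hypothetical values (4 equally-weighted validators, 100 ms proposing
interval):

// Hypothetical configuration of the Gov stub.
gov := test.NewGov(4, 100)
validators := gov.GetValidatorSet()         // 4 random IDs, zero stake each
interval := gov.GetBlockProposingInterval() // 100 (milliseconds)
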
diff --git a/core/total-ordering.go b/core/total-ordering.go
index 63d3416..9f85813 100644
--- a/core/total-ordering.go
+++ b/core/total-ordering.go
@@ -19,12 +19,17 @@ package core
import (
"fmt"
+ "math"
"sort"
"github.com/dexon-foundation/dexon-consensus-core/common"
"github.com/dexon-foundation/dexon-consensus-core/core/types"
)
+const (
+ infinity uint64 = math.MaxUint64
+)
+
// ErrNotValidDAG would be reported when a block submitted to totalOrdering
// doesn't form a DAG.
var ErrNotValidDAG = fmt.Errorf("not a valid dag")
@@ -139,9 +144,9 @@ func (v blockVector) addBlock(b *types.Block) (err error) {
return
}
-// getHeightVector would convert a blockVector to
+// getAckingStatusVector would convert a blockVector to
// ackingStatusVector.
-func (v blockVector) getHeightVector() ackingStatusVector {
+func (v blockVector) getAckingStatusVector() ackingStatusVector {
ret := ackingStatusVector{}
for vID, vec := range v {
if len(vec) == 0 {
@@ -344,19 +349,17 @@ func (to *totalOrdering) isAckOnlyPrecedings(b *types.Block) bool {
// output is a helper function to finish the delivery of
// the deliverable preceding set.
-func (to *totalOrdering) output(precedings map[common.Hash]struct{}) common.Hashes {
- ret := common.Hashes{}
+func (to *totalOrdering) output(precedings map[common.Hash]struct{}) (ret []*types.Block) {
for p := range precedings {
- ret = append(ret, p)
-
// Remove the first element from corresponding blockVector.
b := to.pendings[p]
to.globalVector[b.ProposerID] = to.globalVector[b.ProposerID][1:]
+ ret = append(ret, b)
// Remove block relations.
to.clean(p)
}
- sort.Sort(ret)
+ sort.Sort(types.ByHash(ret))
// Find new candidates from tip of globalVector of each validator.
// The complexity here is O(N^2logN).
@@ -388,13 +391,14 @@ func (to *totalOrdering) output(precedings map[common.Hash]struct{}) common.Hash
func (to *totalOrdering) generateDeliverSet() (
delivered map[common.Hash]struct{}, early bool) {
- globalHeightVector := to.globalVector.getHeightVector()
+ globalAckingStatusVector := to.globalVector.getAckingStatusVector()
ahvs := map[common.Hash]map[types.ValidatorID]uint64{}
for candidate, v := range to.candidateAckingStatusVectors {
- ahvs[candidate] = v.getAckingHeightVector(globalHeightVector, to.k)
+ ahvs[candidate] = v.getAckingHeightVector(globalAckingStatusVector, to.k)
}
- globalAns := globalHeightVector.getAckingNodeSet(globalHeightVector, to.k)
+ globalAns := globalAckingStatusVector.getAckingNodeSet(
+ globalAckingStatusVector, to.k)
precedings := make(map[common.Hash]struct{})
CheckNextCandidateLoop:
@@ -460,7 +464,7 @@ CheckNextCandidateLoop:
checkANS := func() bool {
for p := range precedings {
validatorAns := to.candidateAckingStatusVectors[p].getAckingNodeSet(
- globalHeightVector, to.k)
+ globalAckingStatusVector, to.k)
if uint64(len(validatorAns)) < to.validatorCount-to.phi {
return false
}
@@ -492,8 +496,7 @@ CheckNextCandidateLoop:
// processBlock is the entry point of totalOrdering.
func (to *totalOrdering) processBlock(b *types.Block) (
- delivered common.Hashes, early bool, err error) {
-
+ delivered []*types.Block, early bool, err error) {
// NOTE: I assume the block 'b' is already safe for total ordering.
// That means all the blocks it acks are already in or past the
// total ordering stage.
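
types.ByHash, used by output above to keep each delivered preceding set
deterministic, is not shown in this diff. Presumably it is a
sort.Interface adapter over []*types.Block ordered by block hash, roughly
as follows (a reconstruction, assuming common.Hash is a byte array):

// Hypothetical reconstruction of types.ByHash.
type ByHash []*Block

func (b ByHash) Len() int      { return len(b) }
func (b ByHash) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b ByHash) Less(i, j int) bool {
	return bytes.Compare(b[i].Hash[:], b[j].Hash[:]) < 0
}
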
diff --git a/core/total-ordering_test.go b/core/total-ordering_test.go
index 11e253a..b4fe13c 100644
--- a/core/total-ordering_test.go
+++ b/core/total-ordering_test.go
@@ -59,12 +59,19 @@ func (s *TotalOrderingTestSuite) genRootBlock(
}
func (s *TotalOrderingTestSuite) checkNotDeliver(to *totalOrdering, b *types.Block) {
- hashes, eqrly, err := to.processBlock(b)
- s.Empty(hashes)
+ blocks, eqrly, err := to.processBlock(b)
+ s.Empty(blocks)
s.False(eqrly)
s.Nil(err)
}
+func (s *TotalOrderingTestSuite) checkHashSequence(blocks []*types.Block, hashes common.Hashes) {
+ sort.Sort(hashes)
+ for i, h := range hashes {
+ s.Equal(blocks[i].Hash, h)
+ }
+}
+
func (s *TotalOrderingTestSuite) checkNotInWorkingSet(
to *totalOrdering, b *types.Block) {
@@ -407,11 +414,11 @@ func (s *TotalOrderingTestSuite) TestEarlyDeliver() {
s.Equal(vec[validators[3]].minHeight, b30.Height)
s.Equal(vec[validators[3]].count, uint64(2))
- hashes, early, err := to.processBlock(b32)
- s.Require().Len(hashes, 1)
+ blocks, early, err := to.processBlock(b32)
+ s.Require().Len(blocks, 1)
s.True(early)
s.Nil(err)
- s.Equal(hashes[0], b00.Hash)
+ s.checkHashSequence(blocks, common.Hashes{b00.Hash})
// Check the internal state after delivered.
s.Len(to.candidateAckingStatusVectors, 4) // b01, b10, b20, b30 are candidates.
@@ -663,12 +670,10 @@ func (s *TotalOrderingTestSuite) TestBasicCaseForK2() {
s.Equal(vec[validators[3]].count, uint64(3))
// Check the first deliver.
- hashes, early, err := to.processBlock(b02)
+ blocks, early, err := to.processBlock(b02)
s.True(early)
s.Nil(err)
- expected := common.Hashes{b00.Hash, b10.Hash}
- sort.Sort(expected)
- s.Equal(hashes, expected)
+ s.checkHashSequence(blocks, common.Hashes{b00.Hash, b10.Hash})
// Make sure b00, b10 are removed from current working set.
s.checkNotInWorkingSet(to, b00)
@@ -706,12 +711,10 @@ func (s *TotalOrderingTestSuite) TestBasicCaseForK2() {
s.checkNotDeliver(to, b13)
// Check the second deliver.
- hashes, early, err = to.processBlock(b03)
+ blocks, early, err = to.processBlock(b03)
s.True(early)
s.Nil(err)
- expected = common.Hashes{b11.Hash, b20.Hash}
- sort.Sort(expected)
- s.Equal(hashes, expected)
+ s.checkHashSequence(blocks, common.Hashes{b11.Hash, b20.Hash})
// Make sure b11, b20 are removed from current working set.
s.checkNotInWorkingSet(to, b11)
@@ -760,12 +763,10 @@ func (s *TotalOrderingTestSuite) TestBasicCaseForK2() {
// Make 'Acking Node Set' contain blocks from all validators;
// this should trigger a non-early deliver.
- hashes, early, err = to.processBlock(b23)
+ blocks, early, err = to.processBlock(b23)
s.False(early)
s.Nil(err)
- expected = common.Hashes{b01.Hash, b30.Hash}
- sort.Sort(expected)
- s.Equal(expected, hashes)
+ s.checkHashSequence(blocks, common.Hashes{b01.Hash, b30.Hash})
// Make sure b01, b30 are not in the working set.
s.checkNotInWorkingSet(to, b01)
@@ -874,12 +875,10 @@ func (s *TotalOrderingTestSuite) TestBasicCaseForK0() {
s.Equal(vec[validators[3]].count, uint64(2))
// This new block should trigger non-early deliver.
- hashes, early, err := to.processBlock(b40)
+ blocks, early, err := to.processBlock(b40)
s.False(early)
s.Nil(err)
- expected := common.Hashes{b20.Hash}
- sort.Sort(expected)
- s.Equal(expected, hashes)
+ s.checkHashSequence(blocks, common.Hashes{b20.Hash})
// Make sure b20 no longer exists in the working set.
s.checkNotInWorkingSet(to, b20)
diff --git a/core/types/block.go b/core/types/block.go
index 8a27ab0..55e82da 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -91,7 +91,17 @@ func (b *Block) Clone() *Block {
Height: b.Height,
Timestamps: make(map[ValidatorID]time.Time),
Acks: make(map[common.Hash]struct{}),
+ CompactionChainAck: CompactionChainAck{
+ AckingBlockHash: b.CompactionChainAck.AckingBlockHash,
+ },
+ ConsensusInfo: ConsensusInfo{
+ Timestamp: b.ConsensusInfo.Timestamp,
+ Height: b.ConsensusInfo.Height,
+ },
+ ConsensusInfoParentHash: b.ConsensusInfoParentHash,
}
+ bcopy.CompactionChainAck.ConsensusSignature = append(
+ crypto.Signature(nil), b.CompactionChainAck.ConsensusSignature...)
for k, v := range b.Timestamps {
bcopy.Timestamps[k] = v
}
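
The append-to-a-nil-slice idiom above copies the signature bytes, so the
clone never aliases the original's backing array. A minimal check of that
property could look like this (a hypothetical snippet, assuming
crypto.Signature is a byte slice as the idiom implies):

// Mutating the clone's signature must not be visible via the original.
orig := &Block{}
orig.CompactionChainAck.ConsensusSignature = crypto.Signature{0x01}
clone := orig.Clone()
clone.CompactionChainAck.ConsensusSignature[0] = 0xff
if orig.CompactionChainAck.ConsensusSignature[0] != 0x01 {
	panic("Clone must deep-copy the consensus signature")
}
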
diff --git a/core/types/membership-event.go b/core/types/membership-event.go
new file mode 100644
index 0000000..8d05039
--- /dev/null
+++ b/core/types/membership-event.go
@@ -0,0 +1,16 @@
+package types
+
+// MembershipActionType specifies the action of a membership event.
+type MembershipActionType int
+
+// Membership action types.
+const (
+ MembershipActionAdd MembershipActionType = iota
+ MembershipActionDelete
+)
+
+// MembershipEvent specifies the event of membership changes.
+type MembershipEvent struct {
+ Epoch int
+ Action MembershipActionType
+}
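
A governance backend would report membership changes as a slice of these
events; for instance (hypothetical values), one join and one leave during
epoch 3:

// Hypothetical example of membership events for epoch 3.
events := []MembershipEvent{
	{Epoch: 3, Action: MembershipActionAdd},
	{Epoch: 3, Action: MembershipActionDelete},
}
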
diff --git a/core/utils.go b/core/utils.go
index 2f87f97..d023d2e 100644
--- a/core/utils.go
+++ b/core/utils.go
@@ -18,11 +18,21 @@
package core
import (
+ "errors"
"fmt"
"os"
+ "sort"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus-core/common"
+ "github.com/dexon-foundation/dexon-consensus-core/core/types"
)
-var debug = false
+var (
+	debug = false
+	// ErrEmptyTimestamps would be reported if Block.Timestamps is empty.
+	ErrEmptyTimestamps = errors.New("timestamp vector should not be empty")
+)
func init() {
if os.Getenv("DEBUG") != "" {
@@ -43,3 +53,42 @@ func Debugln(args ...interface{}) {
fmt.Println(args)
}
}
+
+func interpoTime(t1 time.Time, t2 time.Time, sep int) []time.Time {
+ if sep == 0 {
+ return []time.Time{}
+ }
+ if t1.After(t2) {
+ return interpoTime(t2, t1, sep)
+ }
+ timestamps := make([]time.Time, sep)
+ duration := t2.Sub(t1)
+ period := time.Duration(
+ (duration.Nanoseconds() / int64(sep+1))) * time.Nanosecond
+ prevTime := t1
+ for idx := range timestamps {
+ prevTime = prevTime.Add(period)
+ timestamps[idx] = prevTime
+ }
+ return timestamps
+}
+
+func getMedianTime(block *types.Block) (t time.Time, err error) {
+ timestamps := []time.Time{}
+ for _, timestamp := range block.Timestamps {
+ timestamps = append(timestamps, timestamp)
+ }
+ if len(timestamps) == 0 {
+ err = ErrEmptyTimestamps
+ return
+ }
+ sort.Sort(common.ByTime(timestamps))
+ if len(timestamps)%2 == 0 {
+ t1 := timestamps[len(timestamps)/2-1]
+ t2 := timestamps[len(timestamps)/2]
+ t = interpoTime(t1, t2, 1)[0]
+ } else {
+ t = timestamps[len(timestamps)/2]
+ }
+	return
+}
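
To see the two helpers working together: interpoTime splits the interval
between t1 and t2 into sep+1 equal periods and returns the sep interior
points; for an even timestamp count, getMedianTime takes the single
interpolated midpoint of the two middle values. A worked example with
hypothetical values, from inside package core:

// Four timestamps at t0, t0+2s, t0+6s, t0+40s. Sorted, the two middle
// values are t0+2s and t0+6s, so the median is their midpoint t0+4s,
// which is exactly interpoTime(t1, t2, 1)[0].
t0 := time.Unix(0, 0)
b := &types.Block{Timestamps: make(map[types.ValidatorID]time.Time)}
for _, offset := range []time.Duration{
	0, 2 * time.Second, 6 * time.Second, 40 * time.Second,
} {
	vID := types.ValidatorID{Hash: common.NewRandomHash()}
	b.Timestamps[vID] = t0.Add(offset)
}
median, err := getMedianTime(b) // median == t0.Add(4 * time.Second), err == nil
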