path: root/vendor/github.com/dexon-foundation/dexon-consensus
author    Wei-Ning Huang <w@dexon.org>  2019-01-24 15:05:24 +0800
committer Wei-Ning Huang <w@dexon.org>  2019-04-09 21:32:56 +0800
commit    87db7f44d06ff4610b7a7a0b3bc845b8d8e342d0 (patch)
tree      7243672ff3235c5fc741b09e0cadf7c69a7c6ec8 /vendor/github.com/dexon-foundation/dexon-consensus
parent    6f9f78e54ceb93db5495a3e15568efbfa9e5b25b (diff)
consensus: dexcon: snapshot round height when finalizing block (#170)
Instead of having the BP send a tx to register the round height, just modify the state when finalizing the block.
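For context, a minimal sketch of the new flow in Go, assuming a simplified state type; roundState, roundHeight, and finalizeBlock are illustrative names, not the actual dexon-consensus API:

package sketch

// Illustrative types only; the real structures live in dexon-consensus and
// the full node's governance state.
type roundState struct {
    // roundHeight records, per round, the finalized height of the round's first block.
    roundHeight map[uint64]uint64
}

func newRoundState() *roundState {
    return &roundState{roundHeight: make(map[uint64]uint64)}
}

// finalizeBlock sketches the idea of this commit: snapshot the round's starting
// height directly while a block is being finalized, instead of having the block
// proposer send a tx via the removed Governance.NotifyRoundHeight call.
func (s *roundState) finalizeBlock(round, finalizedHeight uint64) {
    if _, done := s.roundHeight[round]; !done {
        // Only the first finalized block of a round defines its height.
        s.roundHeight[round] = finalizedHeight
    }
}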
Diffstat (limited to 'vendor/github.com/dexon-foundation/dexon-consensus')
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go | 152
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go | 18
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go | 72
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go | 63
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go | 4
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/lattice-data.go | 32
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/lattice.go | 7
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go | 12
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/utils/vote-filter.go | 61
9 files changed, 301 insertions, 120 deletions
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
index 9e863696a..a8fab7c69 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
@@ -94,6 +94,7 @@ type agreementMgr struct {
initRound uint64
configs []*agreementMgrConfig
baModules []*agreement
+ voteFilters []*utils.VoteFilter
waitGroup sync.WaitGroup
pendingVotes map[uint64][]*types.Vote
pendingBlocks map[uint64][]*types.Block
@@ -201,6 +202,7 @@ func (mgr *agreementMgr) appendConfig(
// Hacky way to make agreement module self contained.
recv.agreementModule = agrModule
mgr.baModules = append(mgr.baModules, agrModule)
+ mgr.voteFilters = append(mgr.voteFilters, utils.NewVoteFilter())
if mgr.isRunning {
mgr.waitGroup.Add(1)
go func(idx uint32) {
@@ -213,7 +215,6 @@ func (mgr *agreementMgr) appendConfig(
}
func (mgr *agreementMgr) processVote(v *types.Vote) error {
- v = v.Clone()
mgr.lock.RLock()
defer mgr.lock.RUnlock()
if v.Position.ChainID >= uint32(len(mgr.baModules)) {
@@ -224,7 +225,16 @@ func (mgr *agreementMgr) processVote(v *types.Vote) error {
"initRound", mgr.initRound)
return utils.ErrInvalidChainID
}
- return mgr.baModules[v.Position.ChainID].processVote(v)
+ filter := mgr.voteFilters[v.Position.ChainID]
+ if filter.Filter(v) {
+ return nil
+ }
+ v = v.Clone()
+ err := mgr.baModules[v.Position.ChainID].processVote(v)
+ if err == nil {
+ mgr.baModules[v.Position.ChainID].updateFilter(filter)
+ }
+ return err
}
func (mgr *agreementMgr) processBlock(b *types.Block) error {
@@ -419,7 +429,11 @@ Loop:
// Run BA for this round.
recv.roundValue.Store(currentRound)
recv.changeNotaryTime = roundEndTime
- recv.restartNotary <- types.Position{ChainID: math.MaxUint32}
+ recv.restartNotary <- types.Position{
+ Round: setting.recv.round(),
+ ChainID: math.MaxUint32,
+ }
+ mgr.voteFilters[chainID] = utils.NewVoteFilter()
if err := mgr.baRoutineForOneRound(&setting); err != nil {
mgr.logger.Error("BA routine failed",
"error", err,
@@ -435,6 +449,79 @@ func (mgr *agreementMgr) baRoutineForOneRound(
agr := setting.agr
recv := setting.recv
oldPos := agr.agreementID()
+ restart := func(restartPos types.Position) (breakLoop bool, err error) {
+ if !isStop(restartPos) {
+ if restartPos.Round > oldPos.Round {
+ for {
+ select {
+ case <-mgr.ctx.Done():
+ break
+ default:
+ }
+ tipRound := mgr.lattice.TipRound(setting.chainID)
+ if tipRound > restartPos.Round {
+ // It's very rare that this goroutine sleeps for an entire round.
+ break
+ } else if tipRound != restartPos.Round {
+ mgr.logger.Debug("Waiting lattice to change round...",
+ "pos", &restartPos)
+ } else {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ // This round is finished.
+ breakLoop = true
+ return
+ }
+ if restartPos.Older(&oldPos) {
+ // The restartNotary event is triggered by 'BlockConfirmed'
+ // of some older block.
+ return
+ }
+ }
+ var nextHeight uint64
+ var nextTime time.Time
+ for {
+ nextHeight, nextTime, err =
+ mgr.lattice.NextBlock(recv.round(), setting.chainID)
+ if err != nil {
+ mgr.logger.Debug("Error getting next height",
+ "error", err,
+ "round", recv.round(),
+ "chainID", setting.chainID)
+ err = nil
+ nextHeight = restartPos.Height
+ }
+ if isStop(oldPos) && nextHeight == 0 {
+ break
+ }
+ if isStop(restartPos) && nextHeight == 0 {
+ break
+ }
+ if nextHeight > restartPos.Height {
+ break
+ }
+ mgr.logger.Debug("Lattice not ready!!!",
+ "old", &oldPos, "restart", &restartPos, "next", nextHeight)
+ time.Sleep(100 * time.Millisecond)
+ }
+ nextPos := types.Position{
+ Round: recv.round(),
+ ChainID: setting.chainID,
+ Height: nextHeight,
+ }
+ oldPos = nextPos
+ var leader types.NodeID
+ leader, err = mgr.cache.GetLeaderNode(nextPos)
+ if err != nil {
+ return
+ }
+ time.Sleep(nextTime.Sub(time.Now()))
+ setting.ticker.Restart()
+ agr.restart(setting.notarySet, nextPos, leader, setting.crs)
+ return
+ }
Loop:
for {
select {
@@ -442,55 +529,30 @@ Loop:
break Loop
default:
}
- select {
- case restartPos := <-recv.restartNotary:
- if !isStop(restartPos) {
- if restartPos.Round > oldPos.Round {
- // This round is finished.
- break Loop
- }
- if restartPos.Older(&oldPos) {
- // The restartNotary event is triggered by 'BlockConfirmed'
- // of some older block.
- break
- }
- }
- var nextHeight uint64
- var nextTime time.Time
- for {
- nextHeight, nextTime, err =
- mgr.lattice.NextBlock(recv.round(), setting.chainID)
+ if agr.confirmed() {
+ // Block until restartPos is received
+ select {
+ case restartPos := <-recv.restartNotary:
+ breakLoop, err := restart(restartPos)
if err != nil {
- mgr.logger.Debug("Error getting next height",
- "error", err,
- "round", recv.round(),
- "chainID", setting.chainID)
- err = nil
- nextHeight = restartPos.Height
+ return err
}
- if isStop(restartPos) || nextHeight == 0 {
- break
- }
- if nextHeight > restartPos.Height {
- break
+ if breakLoop {
+ break Loop
}
- mgr.logger.Debug("Lattice not ready!!!",
- "old", &restartPos, "next", nextHeight)
- time.Sleep(100 * time.Millisecond)
- }
- nextPos := types.Position{
- Round: recv.round(),
- ChainID: setting.chainID,
- Height: nextHeight,
+ case <-mgr.ctx.Done():
+ break Loop
}
- oldPos = nextPos
- leader, err := mgr.cache.GetLeaderNode(nextPos)
+ }
+ select {
+ case restartPos := <-recv.restartNotary:
+ breakLoop, err := restart(restartPos)
if err != nil {
return err
}
- time.Sleep(nextTime.Sub(time.Now()))
- setting.ticker.Restart()
- agr.restart(setting.notarySet, nextPos, leader, setting.crs)
+ if breakLoop {
+ break Loop
+ }
default:
}
if agr.pullVotes() {
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go
index 5b2ce52e7..73d7b7ada 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go
@@ -20,7 +20,6 @@ package core
import (
"fmt"
- "github.com/dexon-foundation/dexon-consensus/common"
"github.com/dexon-foundation/dexon-consensus/core/types"
)
@@ -45,15 +44,6 @@ const (
stateSleep
)
-var nullBlockHash common.Hash
-var skipBlockHash common.Hash
-
-func init() {
- for idx := range skipBlockHash {
- skipBlockHash[idx] = 0xff
- }
-}
-
type agreementState interface {
state() agreementStateType
nextState() (agreementState, error)
@@ -78,7 +68,7 @@ func (s *fastState) nextState() (agreementState, error) {
return s.a.isLeader
}() {
hash := s.a.recv.ProposeBlock()
- if hash != nullBlockHash {
+ if hash != types.NullBlockHash {
s.a.lock.Lock()
defer s.a.lock.Unlock()
s.a.recv.ProposeVote(types.NewVote(types.VoteFast, hash, s.a.period))
@@ -143,7 +133,7 @@ func (s *preCommitState) nextState() (agreementState, error) {
s.a.lock.RLock()
defer s.a.lock.RUnlock()
hash := s.a.lockValue
- if hash == nullBlockHash {
+ if hash == types.NullBlockHash {
hash = s.a.leader.leaderBlockHash()
}
s.a.recv.ProposeVote(types.NewVote(types.VotePreCom, hash, s.a.period))
@@ -165,13 +155,13 @@ func (s *commitState) nextState() (agreementState, error) {
s.a.lock.Lock()
defer s.a.lock.Unlock()
hash, ok := s.a.countVoteNoLock(s.a.period, types.VotePreCom)
- if ok && hash != skipBlockHash {
+ if ok && hash != types.SkipBlockHash {
if s.a.period > s.a.lockIter {
s.a.lockValue = hash
s.a.lockIter = s.a.period
}
} else {
- hash = skipBlockHash
+ hash = types.SkipBlockHash
}
s.a.recv.ProposeVote(types.NewVote(types.VoteCom, hash, s.a.period))
return newForwardState(s.a), nil
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
index 97848c5e4..c08518ad8 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
@@ -29,6 +29,13 @@ import (
"github.com/dexon-foundation/dexon-consensus/core/utils"
)
+// closedchan is a reusable closed channel.
+var closedchan = make(chan struct{})
+
+func init() {
+ close(closedchan)
+}
+
// Errors for agreement module.
var (
ErrInvalidVote = fmt.Errorf("invalid vote")
@@ -110,6 +117,7 @@ type agreement struct {
state agreementState
data *agreementData
aID *atomic.Value
+ doneChan chan struct{}
notarySet map[types.NodeID]struct{}
hasVoteFast bool
hasOutput bool
@@ -168,9 +176,13 @@ func (a *agreement) restart(
a.data.blocks = make(map[types.NodeID]*types.Block)
a.data.requiredVote = len(notarySet)/3*2 + 1
a.data.leader.restart(crs)
- a.data.lockValue = nullBlockHash
+ a.data.lockValue = types.NullBlockHash
a.data.lockIter = 0
a.data.isLeader = a.data.ID == leader
+ if a.doneChan != nil {
+ close(a.doneChan)
+ }
+ a.doneChan = make(chan struct{})
a.fastForward = make(chan uint64, 1)
a.hasVoteFast = false
a.hasOutput = false
@@ -340,6 +352,17 @@ func (a *agreement) prepareVote(vote *types.Vote) (err error) {
return
}
+func (a *agreement) updateFilter(filter *utils.VoteFilter) {
+ a.lock.RLock()
+ defer a.lock.RUnlock()
+ a.data.lock.RLock()
+ defer a.data.lock.RUnlock()
+ filter.Confirm = a.hasOutput
+ filter.LockIter = a.data.lockIter
+ filter.Period = a.data.period
+ filter.Height = a.agreementID().Height
+}
+
// processVote is the entry point for processing Vote.
func (a *agreement) processVote(vote *types.Vote) error {
a.lock.Lock()
@@ -382,13 +405,16 @@ func (a *agreement) processVote(vote *types.Vote) error {
if _, exist := a.data.votes[vote.Period]; !exist {
a.data.votes[vote.Period] = newVoteListMap()
}
+ if _, exist := a.data.votes[vote.Period][vote.Type][vote.ProposerID]; exist {
+ return nil
+ }
a.data.votes[vote.Period][vote.Type][vote.ProposerID] = vote
if !a.hasOutput &&
(vote.Type == types.VoteCom ||
vote.Type == types.VoteFast ||
vote.Type == types.VoteFastCom) {
if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok &&
- hash != skipBlockHash {
+ hash != types.SkipBlockHash {
if vote.Type == types.VoteFast {
if !a.hasVoteFast {
a.data.recv.ProposeVote(
@@ -401,6 +427,8 @@ func (a *agreement) processVote(vote *types.Vote) error {
a.hasOutput = true
a.data.recv.ConfirmBlock(hash,
a.data.votes[vote.Period][vote.Type])
+ close(a.doneChan)
+ a.doneChan = nil
}
return nil
}
@@ -413,8 +441,12 @@ func (a *agreement) processVote(vote *types.Vote) error {
return nil
}
if vote.Type == types.VotePreCom {
+ if vote.Period < a.data.lockIter {
+ // This PreCom is useless for us.
+ return nil
+ }
if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok &&
- hash != skipBlockHash {
+ hash != types.SkipBlockHash {
// Condition 1.
if a.data.period >= vote.Period && vote.Period > a.data.lockIter &&
vote.BlockHash != a.data.lockValue {
@@ -439,7 +471,8 @@ func (a *agreement) processVote(vote *types.Vote) error {
hashes := common.Hashes{}
addPullBlocks := func(voteType types.VoteType) {
for _, vote := range a.data.votes[vote.Period][voteType] {
- if vote.BlockHash == nullBlockHash || vote.BlockHash == skipBlockHash {
+ if vote.BlockHash == types.NullBlockHash ||
+ vote.BlockHash == types.SkipBlockHash {
continue
}
if _, found := a.findCandidateBlockNoLock(vote.BlockHash); !found {
@@ -447,7 +480,6 @@ func (a *agreement) processVote(vote *types.Vote) error {
}
}
}
- addPullBlocks(types.VoteInit)
addPullBlocks(types.VotePreCom)
addPullBlocks(types.VoteCom)
if len(hashes) > 0 {
@@ -462,24 +494,24 @@ func (a *agreement) processVote(vote *types.Vote) error {
func (a *agreement) done() <-chan struct{} {
a.lock.Lock()
defer a.lock.Unlock()
+ if a.doneChan == nil {
+ return closedchan
+ }
a.data.lock.Lock()
defer a.data.lock.Unlock()
- ch := make(chan struct{}, 1)
- if a.hasOutput {
- ch <- struct{}{}
- } else {
- select {
- case period := <-a.fastForward:
- if period <= a.data.period {
- break
- }
- a.data.setPeriod(period)
- a.state = newPreCommitState(a.data)
- ch <- struct{}{}
- default:
+ select {
+ case period := <-a.fastForward:
+ if period <= a.data.period {
+ break
}
- }
- return ch
+ a.data.setPeriod(period)
+ a.state = newPreCommitState(a.data)
+ close(a.doneChan)
+ a.doneChan = make(chan struct{})
+ return closedchan
+ default:
+ }
+ return a.doneChan
}
func (a *agreement) confirmed() bool {
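The reworked done() above relies on the pre-closed channel idiom: closedchan is closed once in init() and handed out whenever the agreement has already produced output (doneChan set to nil), so callers never block and no per-call channel allocation is needed. A minimal standalone sketch of the idiom; the worker type is illustrative, not the agreement struct:

package main

import "fmt"

// closedchan is a reusable, already-closed channel: receiving from it
// always succeeds immediately.
var closedchan = make(chan struct{})

func init() { close(closedchan) }

type worker struct {
    doneChan chan struct{} // set to nil once the result has been produced
}

func (w *worker) done() <-chan struct{} {
    if w.doneChan == nil {
        return closedchan // already finished: caller does not block
    }
    return w.doneChan // still running: caller blocks until close(w.doneChan)
}

func main() {
    w := &worker{} // doneChan is nil, i.e. "already finished"
    <-w.done()     // returns immediately via closedchan
    fmt.Println("already done")
}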
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
index 413f16caa..3a27b5fc1 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
@@ -99,7 +99,7 @@ func (recv *consensusBAReceiver) ProposeBlock() common.Hash {
block := recv.consensus.proposeBlock(recv.chainID, recv.round())
if block == nil {
recv.consensus.logger.Error("unable to propose block")
- return nullBlockHash
+ return types.NullBlockHash
}
go func() {
if err := recv.consensus.preProcessBlock(block); err != nil {
@@ -203,12 +203,7 @@ func (recv *consensusBAReceiver) ConfirmBlock(
"cur-position", &block.Position,
"chainID", recv.chainID)
recv.consensus.ccModule.registerBlock(block)
- if err := recv.consensus.processBlock(block); err != nil {
- recv.consensus.logger.Error("Failed to process block",
- "block", block,
- "error", err)
- return
- }
+ recv.consensus.processBlockChan <- block
parentHash = block.ParentHash
if block.Position.Height == 0 ||
recv.consensus.lattice.Exist(parentHash) {
@@ -235,12 +230,7 @@ func (recv *consensusBAReceiver) ConfirmBlock(
"result", result)
recv.consensus.network.BroadcastAgreementResult(result)
}
- if err := recv.consensus.processBlock(block); err != nil {
- recv.consensus.logger.Error("Failed to process block",
- "block", block,
- "error", err)
- return
- }
+ recv.consensus.processBlockChan <- block
// Clean the restartNotary channel so BA will not stuck by deadlock.
CleanChannelLoop:
for {
@@ -252,8 +242,8 @@ CleanChannelLoop:
}
newPos := block.Position
if block.Timestamp.After(recv.changeNotaryTime) {
- recv.roundValue.Store(recv.round() + 1)
newPos.Round++
+ recv.roundValue.Store(newPos.Round)
}
recv.restartNotary <- newPos
}
@@ -398,7 +388,7 @@ type Consensus struct {
dMoment time.Time
nodeSetCache *utils.NodeSetCache
round uint64
- roundToNotify uint64
+ roundForNewConfig uint64
lock sync.RWMutex
ctx context.Context
ctxCancel context.CancelFunc
@@ -409,6 +399,7 @@ type Consensus struct {
resetDeliveryGuardTicker chan struct{}
msgChan chan interface{}
waitGroup sync.WaitGroup
+ processBlockChan chan *types.Block
// Context of Dummy receiver during switching from syncer.
dummyCancel context.CancelFunc
@@ -577,7 +568,8 @@ func newConsensusForRound(
logger: logger,
resetRandomnessTicker: make(chan struct{}),
resetDeliveryGuardTicker: make(chan struct{}),
- msgChan: make(chan interface{}, 10240),
+ msgChan: make(chan interface{}, 1024),
+ processBlockChan: make(chan *types.Block, 1024),
}
con.ctx, con.ctxCancel = context.WithCancel(context.Background())
con.baMgr = newAgreementMgr(con, initRound, initRoundBeginTime)
@@ -594,7 +586,7 @@ func newConsensusForRound(
func (con *Consensus) prepare(initBlock *types.Block) error {
// The block past from full node should be delivered already or known by
// full node. We don't have to notify it.
- con.roundToNotify = initBlock.Position.Round + 1
+ con.roundForNewConfig = initBlock.Position.Round + 1
initRound := initBlock.Position.Round
initConfig := utils.GetConfigWithPanic(con.gov, initRound, con.logger)
// Setup context.
@@ -647,6 +639,7 @@ func (con *Consensus) Run() {
go con.deliverNetworkMsg()
con.waitGroup.Add(1)
go con.processMsg()
+ go con.processBlockLoop()
// Sleep until dMoment come.
time.Sleep(con.dMoment.Sub(time.Now().UTC()))
// Take some time to bootstrap.
@@ -1192,13 +1185,13 @@ func (con *Consensus) deliverBlock(b *types.Block) {
con.cfgModule.untouchTSigHash(b.Hash)
con.logger.Debug("Calling Application.BlockDelivered", "block", b)
con.app.BlockDelivered(b.Hash, b.Position, b.Finalization.Clone())
- if b.Position.Round == con.roundToNotify {
+ if b.Position.Round == con.roundForNewConfig {
// Get configuration for the round next to next round. Configuration
// for that round should be ready at this moment and is required for
// lattice module. This logic is related to:
// - roundShift
// - notifyGenesisRound
- futureRound := con.roundToNotify + 1
+ futureRound := con.roundForNewConfig + 1
futureConfig := utils.GetConfigWithPanic(con.gov, futureRound, con.logger)
con.logger.Debug("Append Config", "round", futureRound)
if err := con.lattice.AppendConfig(
@@ -1208,14 +1201,7 @@ func (con *Consensus) deliverBlock(b *types.Block) {
"error", err)
panic(err)
}
- // Only the first block delivered of that round would
- // trigger this noitification.
- con.logger.Debug("Calling Governance.NotifyRoundHeight",
- "round", con.roundToNotify,
- "height", b.Finalization.Height)
- con.gov.NotifyRoundHeight(
- con.roundToNotify, b.Finalization.Height)
- con.roundToNotify++
+ con.roundForNewConfig++
}
if con.debugApp != nil {
con.debugApp.BlockReady(b.Hash)
@@ -1242,11 +1228,28 @@ func (con *Consensus) deliverFinalizedBlocksWithoutLock() (err error) {
return
}
+func (con *Consensus) processBlockLoop() {
+ for {
+ select {
+ case <-con.ctx.Done():
+ return
+ default:
+ }
+ select {
+ case <-con.ctx.Done():
+ return
+ case block := <-con.processBlockChan:
+ if err := con.processBlock(block); err != nil {
+ con.logger.Error("Error processing block",
+ "block", block,
+ "error", err)
+ }
+ }
+ }
+}
+
// processBlock is the entry point to submit one block to a Consensus instance.
func (con *Consensus) processBlock(block *types.Block) (err error) {
- if err = con.db.PutBlock(*block); err != nil && err != db.ErrBlockExists {
- return
- }
con.lock.Lock()
defer con.lock.Unlock()
// Block processed by lattice can be out-of-order. But the output of lattice
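processBlockLoop above uses a double select so that cancellation keeps priority: with a single blocking select, Go chooses randomly among ready cases, and a processBlockChan that always has work queued could starve shutdown. A minimal sketch of the pattern using only the standard library; consumeLoop and handle are illustrative names:

package sketch

import "context"

// consumeLoop drains a work channel until the context is cancelled.
func consumeLoop(ctx context.Context, work <-chan int, handle func(int)) {
    for {
        // Non-blocking check first: a cancelled context wins even if the
        // work channel is never empty.
        select {
        case <-ctx.Done():
            return
        default:
        }
        // Then block until either cancellation or the next item arrives.
        select {
        case <-ctx.Done():
            return
        case item := <-work:
            handle(item)
        }
    }
}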
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
index a77ec9385..408343f3b 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
@@ -115,10 +115,6 @@ type Governance interface {
// Return the genesis node set if round == 0.
NodeSet(round uint64) []crypto.PublicKey
- // NotifyRoundHeight notifies governance contract the consensus height of
- // the first block of the given round.
- NotifyRoundHeight(targetRound, consensusHeight uint64)
-
//// DKG-related methods.
// AddDKGComplaint adds a DKGComplaint.
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice-data.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice-data.go
index cf81a1161..0bbe8902a 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice-data.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice-data.go
@@ -106,7 +106,7 @@ func newLatticeDataConfig(
// latticeData is a module for storing lattice.
type latticeData struct {
// DB for getting blocks purged in memory.
- db db.Reader
+ db db.Database
// chains stores chains' blocks and other info.
chains []*chainStatus
// blockByHash stores blocks, indexed by block hash.
@@ -119,7 +119,7 @@ type latticeData struct {
// newLatticeData creates a new latticeData instance.
func newLatticeData(
- db db.Reader,
+ db db.Database,
dMoment time.Time,
round uint64,
config *types.Config) (data *latticeData) {
@@ -291,21 +291,26 @@ func (data *latticeData) addBlock(
bAck *types.Block
updated bool
)
+ if err = data.db.PutBlock(*block); err != nil {
+ if err == db.ErrBlockExists {
+ // If a node crashed and restarted, we might encounter some blocks
+ // that were already confirmed but not yet delivered. The syncer
+ // might then still try to add such a block in this way.
+ err = nil
+ } else {
+ return
+ }
+ }
data.chains[block.Position.ChainID].addBlock(block)
data.blockByHash[block.Hash] = block
// Update lastAckPos.
for _, ack := range block.Acks {
if bAck, err = data.findBlock(ack); err != nil {
- if err == db.ErrBlockDoesNotExist {
- err = nil
- continue
- }
return
}
data.chains[bAck.Position.ChainID].lastAckPos[block.Position.ChainID] =
bAck.Position.Clone()
}
-
// Extract deliverable blocks to total ordering. A block is deliverable to
// total ordering iff all its ackings blocks were delivered to total ordering.
for {
@@ -382,6 +387,19 @@ func (data *latticeData) addFinalizedBlock(block *types.Block) (err error) {
return
}
+func (data *latticeData) tipRound(chainID uint32) uint64 {
+ if tip := data.chains[chainID].tip; tip != nil {
+ tipConfig := data.getConfig(tip.Position.Round)
+ offset := uint64(0)
+ if tip.Timestamp.After(tipConfig.roundEndTime) {
+ offset++
+ }
+ return tip.Position.Round + offset
+ }
+ return uint64(0)
+
+}
+
// isBindTip checks if a block's fields should follow up its parent block.
func (data *latticeData) isBindTip(
pos types.Position, tip *types.Block) (bindTip bool, err error) {
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice.go
index d531639b9..de0e54910 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice.go
@@ -290,6 +290,13 @@ func (l *Lattice) NextBlock(round uint64, chainID uint32) (
return l.data.nextBlock(round, chainID)
}
+// TipRound returns the round of the tip of given chain.
+func (l *Lattice) TipRound(chainID uint32) uint64 {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+ return l.data.tipRound(chainID)
+}
+
// PurgeBlocks purges blocks' cache in memory, this is called when the caller
// makes sure those blocks are already saved in db.
func (l *Lattice) PurgeBlocks(blocks []*types.Block) error {
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go
index ae86e51cc..46ea1dfb0 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go
@@ -38,6 +38,18 @@ const (
MaxVoteType
)
+// NullBlockHash is the blockHash for ⊥ value.
+var NullBlockHash common.Hash
+
+// SkipBlockHash is the blockHash for SKIP value.
+var SkipBlockHash common.Hash
+
+func init() {
+ for idx := range SkipBlockHash {
+ SkipBlockHash[idx] = 0xff
+ }
+}
+
// VoteHeader is the header for vote, which can be used as map keys.
type VoteHeader struct {
ProposerID NodeID `json:"proposer_id"`
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/vote-filter.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/vote-filter.go
new file mode 100644
index 000000000..a19902758
--- /dev/null
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/vote-filter.go
@@ -0,0 +1,61 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+// VoteFilter filters votes that are useless for now.
+// To maximize performance, this structure is not thread-safe and will never be.
+type VoteFilter struct {
+ Height uint64
+ LockIter uint64
+ Period uint64
+ Confirm bool
+}
+
+// NewVoteFilter creates a new vote filter instance.
+func NewVoteFilter() *VoteFilter {
+ return &VoteFilter{}
+}
+
+// Filter checks if the vote should be filtered out.
+func (vf *VoteFilter) Filter(vote *types.Vote) bool {
+ if vote.Type == types.VoteInit {
+ return true
+ }
+ if vote.Position.Height < vf.Height {
+ return true
+ } else if vote.Position.Height > vf.Height {
+ // It's impossible to check votes of other heights.
+ return false
+ }
+ if vf.Confirm {
+ return true
+ }
+ if vote.Type == types.VotePreCom && vote.Period < vf.LockIter {
+ return true
+ }
+ if vote.Type == types.VoteCom &&
+ vote.Period < vf.Period &&
+ vote.BlockHash == types.SkipBlockHash {
+ return true
+ }
+ return false
+}
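A short usage sketch of the new filter, mirroring how agreement-mgr.go's processVote consults it: the cheap Filter check runs before the vote is cloned and fully processed, and the filter is refreshed once a vote is accepted. processIncomingVote, handleVote, and refreshFilter below are illustrative stand-ins for the manager loop, agreement.processVote, and agreement.updateFilter:

package sketch

import (
    "github.com/dexon-foundation/dexon-consensus/core/types"
    "github.com/dexon-foundation/dexon-consensus/core/utils"
)

// processIncomingVote sketches the manager-side fast path added in this
// commit: filter first, clone only the votes that survive, and update the
// filter after a vote is accepted so later duplicates are dropped early.
func processIncomingVote(
    filter *utils.VoteFilter,
    v *types.Vote,
    handleVote func(*types.Vote) error,
    refreshFilter func(*utils.VoteFilter),
) error {
    if filter.Filter(v) {
        return nil // known to be useless for the current BA state
    }
    v = v.Clone() // clone only after the cheap filter check
    if err := handleVote(v); err != nil {
        return err
    }
    refreshFilter(filter) // e.g. bump Height, Period, LockIter, Confirm
    return nil
}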