path: root/vendor/github.com/byzantine-lab/dexon-consensus/core
author    Wei-Ning Huang <w@byzantine-lab.io>  2019-06-12 17:31:08 +0800
committer Wei-Ning Huang <w@byzantine-lab.io>  2019-09-17 16:57:29 +0800
commit    ac088de6322fc16ebe75c2e5554be73754bf1fe2 (patch)
tree      086b7827d46a4d07b834cd94be73beaabb77b734 /vendor/github.com/byzantine-lab/dexon-consensus/core
parent    67d565f3f0e398e99bef96827f729e3e4b0edf31 (diff)
Rebrand as tangerine-network/go-tangerine
Diffstat (limited to 'vendor/github.com/byzantine-lab/dexon-consensus/core')
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-mgr.go  676
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-state.go  213
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/agreement.go  797
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/blockchain.go  681
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/interfaces.go  70
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/level-db.go  127
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/memory.go  183
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/configuration-chain.go  795
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/consensus.go  1567
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/constant.go  41
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/constant.go  26
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/dkg.go  637
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/utils.go  92
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/ecdsa/ecdsa.go  135
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/interfaces.go  48
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/utils.go  80
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/db/interfaces.go  100
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/db/level-db.go  573
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/db/memory.go  262
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/dkg-tsig-protocol.go  709
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/interfaces.go  182
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/leader-selector.go  149
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/nonblocking.go  137
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/agreement.go  301
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/consensus.go  543
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/watch-cat.go  156
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/ticker.go  127
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/types/block-randomness.go  44
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/types/block.go  227
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/types/config.go  75
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/types/dkg/dkg.go  485
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/types/message.go  24
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/types/node.go  61
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/types/nodeset.go  162
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/types/position.go  51
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/types/vote.go  100
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/utils.go  255
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/utils/crypto.go  376
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/utils/nodeset-cache.go  245
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/utils/penalty-helper.go  131
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-based-config.go  112
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-event.go  358
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/utils/signer.go  154
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/utils/utils.go  207
-rw-r--r--  vendor/github.com/byzantine-lab/dexon-consensus/core/utils/vote-filter.go  72
45 files changed, 12546 insertions, 0 deletions
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-mgr.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-mgr.go
new file mode 100644
index 000000000..cdbfadf13
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-mgr.go
@@ -0,0 +1,676 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "errors"
+ "math"
+ "sync"
+ "time"
+
+ lru "github.com/hashicorp/golang-lru"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+// Errors returned from BA modules
+var (
+ ErrPreviousRoundIsNotFinished = errors.New("previous round is not finished")
+ ErrRoundOutOfRange = errors.New("round out of range")
+ ErrInvalidBlock = errors.New("invalid block")
+ ErrNoValidLeader = errors.New("no valid leader")
+ ErrIncorrectCRSSignature = errors.New("incorrect CRS signature")
+ ErrBlockTooOld = errors.New("block too old")
+)
+
+const maxResultCache = 100
+const settingLimit = 3
+
+// genValidLeader generates a validLeader function for agreement modules.
+func genValidLeader(
+ mgr *agreementMgr) validLeaderFn {
+ return func(block *types.Block, crs common.Hash) (bool, error) {
+ if block.Timestamp.After(time.Now()) {
+ return false, nil
+ }
+ if block.Position.Round >= DKGDelayRound {
+ if mgr.recv.npks == nil {
+ return false, nil
+ }
+ if block.Position.Round > mgr.recv.npks.Round {
+ return false, nil
+ }
+ if block.Position.Round < mgr.recv.npks.Round {
+ return false, ErrBlockTooOld
+ }
+ }
+ if !utils.VerifyCRSSignature(block, crs, mgr.recv.npks) {
+ return false, ErrIncorrectCRSSignature
+ }
+ if err := mgr.bcModule.sanityCheck(block); err != nil {
+ if err == ErrRetrySanityCheckLater {
+ return false, nil
+ }
+ return false, err
+ }
+ mgr.logger.Debug("Calling Application.VerifyBlock", "block", block)
+ switch mgr.app.VerifyBlock(block) {
+ case types.VerifyInvalidBlock:
+ return false, ErrInvalidBlock
+ case types.VerifyRetryLater:
+ return false, nil
+ default:
+ }
+ return true, nil
+ }
+}
+
+type agreementMgrConfig struct {
+ utils.RoundBasedConfig
+
+ notarySetSize uint32
+ lambdaBA time.Duration
+ crs common.Hash
+}
+
+func (c *agreementMgrConfig) from(
+ round uint64, config *types.Config, crs common.Hash) {
+ c.notarySetSize = config.NotarySetSize
+ c.lambdaBA = config.LambdaBA
+ c.crs = crs
+ c.SetupRoundBasedFields(round, config)
+}
+
+func newAgreementMgrConfig(prev agreementMgrConfig, config *types.Config,
+ crs common.Hash) (c agreementMgrConfig) {
+ c = agreementMgrConfig{}
+ c.from(prev.RoundID()+1, config, crs)
+ c.AppendTo(prev.RoundBasedConfig)
+ return
+}
+
+type baRoundSetting struct {
+ round uint64
+ dkgSet map[types.NodeID]struct{}
+ threshold int
+ ticker Ticker
+ crs common.Hash
+}
+
+type agreementMgr struct {
+ // TODO(mission): unbind the Consensus instance from this module.
+ con *Consensus
+ ID types.NodeID
+ app Application
+ gov Governance
+ network Network
+ logger common.Logger
+ cache *utils.NodeSetCache
+ signer *utils.Signer
+ bcModule *blockChain
+ ctx context.Context
+ configs []agreementMgrConfig
+ baModule *agreement
+ recv *consensusBAReceiver
+ processedBAResult map[types.Position]struct{}
+ voteFilter *utils.VoteFilter
+ settingCache *lru.Cache
+ curRoundSetting *baRoundSetting
+ waitGroup sync.WaitGroup
+ isRunning bool
+ lock sync.RWMutex
+}
+
+func newAgreementMgr(con *Consensus) (mgr *agreementMgr, err error) {
+ settingCache, _ := lru.New(settingLimit)
+ mgr = &agreementMgr{
+ con: con,
+ ID: con.ID,
+ app: con.app,
+ gov: con.gov,
+ network: con.network,
+ logger: con.logger,
+ cache: con.nodeSetCache,
+ signer: con.signer,
+ bcModule: con.bcModule,
+ ctx: con.ctx,
+ processedBAResult: make(map[types.Position]struct{}, maxResultCache),
+ voteFilter: utils.NewVoteFilter(),
+ settingCache: settingCache,
+ }
+ mgr.recv = &consensusBAReceiver{
+ consensus: con,
+ restartNotary: make(chan types.Position, 1),
+ }
+ return mgr, nil
+}
+
+func (mgr *agreementMgr) prepare() {
+ round := mgr.bcModule.tipRound()
+ agr := newAgreement(
+ mgr.ID,
+ mgr.recv,
+ newLeaderSelector(genValidLeader(mgr), mgr.logger),
+ mgr.signer,
+ mgr.logger)
+ setting := mgr.generateSetting(round)
+ if setting == nil {
+ mgr.logger.Warn("Unable to prepare init setting", "round", round)
+ return
+ }
+ mgr.curRoundSetting = setting
+ agr.notarySet = mgr.curRoundSetting.dkgSet
+ // Hacky way to make agreement module self contained.
+ mgr.recv.agreementModule = agr
+ mgr.baModule = agr
+ if round >= DKGDelayRound {
+ if _, exist := setting.dkgSet[mgr.ID]; exist {
+ mgr.logger.Debug("Preparing signer and npks.", "round", round)
+ npk, signer, err := mgr.con.cfgModule.getDKGInfo(round, false)
+ if err != nil {
+ mgr.logger.Error("Failed to prepare signer and npks.",
+ "round", round,
+ "error", err)
+ }
+ mgr.logger.Debug("Prepared signer and npks.",
+ "round", round, "signer", signer != nil, "npks", npk != nil)
+ }
+ }
+ return
+}
+
+func (mgr *agreementMgr) run() {
+ mgr.lock.Lock()
+ defer mgr.lock.Unlock()
+ if mgr.isRunning {
+ return
+ }
+ mgr.isRunning = true
+ mgr.waitGroup.Add(1)
+ go func() {
+ defer mgr.waitGroup.Done()
+ mgr.runBA(mgr.bcModule.tipRound())
+ }()
+}
+
+func (mgr *agreementMgr) calcLeader(
+ dkgSet map[types.NodeID]struct{},
+ crs common.Hash, pos types.Position) (
+ types.NodeID, error) {
+ nodeSet := types.NewNodeSetFromMap(dkgSet)
+ leader := nodeSet.GetSubSet(1, types.NewNodeLeaderTarget(
+ crs, pos.Height))
+ for nID := range leader {
+ return nID, nil
+ }
+ return types.NodeID{}, ErrNoValidLeader
+}
+
+func (mgr *agreementMgr) config(round uint64) *agreementMgrConfig {
+ mgr.lock.RLock()
+ defer mgr.lock.RUnlock()
+ if round < mgr.configs[0].RoundID() {
+ panic(ErrRoundOutOfRange)
+ }
+ roundIndex := round - mgr.configs[0].RoundID()
+ if roundIndex >= uint64(len(mgr.configs)) {
+ return nil
+ }
+ return &mgr.configs[roundIndex]
+}
+
+func (mgr *agreementMgr) notifyRoundEvents(evts []utils.RoundEventParam) error {
+ mgr.lock.Lock()
+ defer mgr.lock.Unlock()
+ apply := func(e utils.RoundEventParam) error {
+ if len(mgr.configs) > 0 {
+ lastCfg := mgr.configs[len(mgr.configs)-1]
+ if e.BeginHeight != lastCfg.RoundEndHeight() {
+ return ErrInvalidBlockHeight
+ }
+ if lastCfg.RoundID() == e.Round {
+ mgr.configs[len(mgr.configs)-1].ExtendLength()
+ } else if lastCfg.RoundID()+1 == e.Round {
+ mgr.configs = append(mgr.configs, newAgreementMgrConfig(
+ lastCfg, e.Config, e.CRS))
+ } else {
+ return ErrInvalidRoundID
+ }
+ } else {
+ c := agreementMgrConfig{}
+ c.from(e.Round, e.Config, e.CRS)
+ c.SetRoundBeginHeight(e.BeginHeight)
+ mgr.configs = append(mgr.configs, c)
+ }
+ return nil
+ }
+ for _, e := range evts {
+ if err := apply(e); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (mgr *agreementMgr) checkProposer(
+ round uint64, proposerID types.NodeID) error {
+ if round == mgr.curRoundSetting.round {
+ if _, exist := mgr.curRoundSetting.dkgSet[proposerID]; !exist {
+ return ErrNotInNotarySet
+ }
+ } else if round == mgr.curRoundSetting.round+1 {
+ setting := mgr.generateSetting(round)
+ if setting == nil {
+ return ErrConfigurationNotReady
+ }
+ if _, exist := setting.dkgSet[proposerID]; !exist {
+ return ErrNotInNotarySet
+ }
+ }
+ return nil
+}
+
+func (mgr *agreementMgr) processVote(v *types.Vote) (err error) {
+ if !mgr.recv.isNotary {
+ return nil
+ }
+ if mgr.voteFilter.Filter(v) {
+ return nil
+ }
+ if err := mgr.checkProposer(v.Position.Round, v.ProposerID); err != nil {
+ return err
+ }
+ if err = mgr.baModule.processVote(v); err == nil {
+ mgr.baModule.updateFilter(mgr.voteFilter)
+ mgr.voteFilter.AddVote(v)
+ }
+ if err == ErrSkipButNoError {
+ err = nil
+ }
+ return
+}
+
+func (mgr *agreementMgr) processBlock(b *types.Block) error {
+ if err := mgr.checkProposer(b.Position.Round, b.ProposerID); err != nil {
+ return err
+ }
+ return mgr.baModule.processBlock(b)
+}
+
+func (mgr *agreementMgr) touchAgreementResult(
+ result *types.AgreementResult) (first bool) {
+ // DO NOT LOCK THIS FUNCTION!!!!!!!! YOU WILL REGRET IT!!!!!
+ if _, exist := mgr.processedBAResult[result.Position]; !exist {
+ first = true
+ if len(mgr.processedBAResult) > maxResultCache {
+ for k := range mgr.processedBAResult {
+ // Randomly drop one element.
+ delete(mgr.processedBAResult, k)
+ break
+ }
+ }
+ mgr.processedBAResult[result.Position] = struct{}{}
+ }
+ return
+}
+
+func (mgr *agreementMgr) untouchAgreementResult(
+ result *types.AgreementResult) {
+ // DO NOT LOCK THIS FUNCTION!!!!!!!! YOU WILL REGRET IT!!!!!
+ delete(mgr.processedBAResult, result.Position)
+}
+
+func (mgr *agreementMgr) processAgreementResult(
+ result *types.AgreementResult) error {
+ aID := mgr.baModule.agreementID()
+ if isStop(aID) {
+ return nil
+ }
+ if result.Position == aID && !mgr.baModule.confirmed() {
+ mgr.logger.Info("Syncing BA", "position", result.Position)
+ if result.Position.Round >= DKGDelayRound {
+ return mgr.baModule.processAgreementResult(result)
+ }
+ for key := range result.Votes {
+ if err := mgr.baModule.processVote(&result.Votes[key]); err != nil {
+ return err
+ }
+ }
+ } else if result.Position.Newer(aID) {
+ mgr.logger.Info("Fast syncing BA", "position", result.Position)
+ if result.Position.Round < DKGDelayRound {
+ mgr.logger.Debug("Calling Network.PullBlocks for fast syncing BA",
+ "hash", result.BlockHash)
+ mgr.network.PullBlocks(common.Hashes{result.BlockHash})
+ for key := range result.Votes {
+ if err := mgr.baModule.processVote(&result.Votes[key]); err != nil {
+ return err
+ }
+ }
+ }
+ setting := mgr.generateSetting(result.Position.Round)
+ if setting == nil {
+ mgr.logger.Warn("unable to get setting", "round",
+ result.Position.Round)
+ return ErrConfigurationNotReady
+ }
+ mgr.curRoundSetting = setting
+ leader, err := mgr.calcLeader(setting.dkgSet, setting.crs, result.Position)
+ if err != nil {
+ return err
+ }
+ mgr.baModule.restart(
+ setting.dkgSet, setting.threshold,
+ result.Position, leader, setting.crs)
+ if result.Position.Round >= DKGDelayRound {
+ return mgr.baModule.processAgreementResult(result)
+ }
+ }
+ return nil
+}
+
+func (mgr *agreementMgr) processFinalizedBlock(block *types.Block) error {
+ aID := mgr.baModule.agreementID()
+ if block.Position.Older(aID) {
+ return nil
+ }
+ mgr.baModule.processFinalizedBlock(block)
+ return nil
+}
+
+func (mgr *agreementMgr) stop() {
+ // Stop all running agreement modules.
+ func() {
+ mgr.lock.Lock()
+ defer mgr.lock.Unlock()
+ mgr.baModule.stop()
+ }()
+ // Block until all routines are done.
+ mgr.waitGroup.Wait()
+}
+
+func (mgr *agreementMgr) generateSetting(round uint64) *baRoundSetting {
+ if setting, exist := mgr.settingCache.Get(round); exist {
+ return setting.(*baRoundSetting)
+ }
+ curConfig := mgr.config(round)
+ if curConfig == nil {
+ return nil
+ }
+ var dkgSet map[types.NodeID]struct{}
+ if round >= DKGDelayRound {
+ _, qualified, err := typesDKG.CalcQualifyNodes(
+ mgr.gov.DKGMasterPublicKeys(round),
+ mgr.gov.DKGComplaints(round),
+ utils.GetDKGThreshold(mgr.gov.Configuration(round)),
+ )
+ if err != nil {
+ mgr.logger.Error("Failed to get gpk", "round", round, "error", err)
+ return nil
+ }
+ dkgSet = qualified
+ }
+ if len(dkgSet) == 0 {
+ var err error
+ dkgSet, err = mgr.cache.GetNotarySet(round)
+ if err != nil {
+ mgr.logger.Error("Failed to get notarySet", "round", round, "error", err)
+ return nil
+ }
+ }
+ setting := &baRoundSetting{
+ crs: curConfig.crs,
+ dkgSet: dkgSet,
+ round: round,
+ threshold: utils.GetBAThreshold(&types.Config{
+ NotarySetSize: curConfig.notarySetSize}),
+ }
+ mgr.settingCache.Add(round, setting)
+ return setting
+}
+
+func (mgr *agreementMgr) runBA(initRound uint64) {
+ // These are round based variables.
+ var (
+ currentRound uint64
+ nextRound = initRound
+ curConfig = mgr.config(initRound)
+ setting = &baRoundSetting{}
+ tickDuration time.Duration
+ ticker Ticker
+ )
+
+ // Check if this routine needs to be awake in this round, and prepare the
+ // essential variables if so.
+ checkRound := func() (isDKG bool) {
+ defer func() {
+ currentRound = nextRound
+ nextRound++
+ }()
+ // Wait until the configuration for the next round is ready.
+ for {
+ if setting = mgr.generateSetting(nextRound); setting != nil {
+ break
+ } else {
+ mgr.logger.Debug("Round is not ready", "round", nextRound)
+ time.Sleep(1 * time.Second)
+ }
+ }
+ _, isDKG = setting.dkgSet[mgr.ID]
+ if isDKG {
+ mgr.logger.Info("Selected as dkg set",
+ "ID", mgr.ID,
+ "round", nextRound)
+ } else {
+ mgr.logger.Info("Not selected as dkg set",
+ "ID", mgr.ID,
+ "round", nextRound)
+ }
+ // Setup ticker
+ if tickDuration != curConfig.lambdaBA {
+ if ticker != nil {
+ ticker.Stop()
+ }
+ ticker = newTicker(mgr.gov, nextRound, TickerBA)
+ tickDuration = curConfig.lambdaBA
+ }
+ setting.ticker = ticker
+ return
+ }
+Loop:
+ for {
+ select {
+ case <-mgr.ctx.Done():
+ break Loop
+ default:
+ }
+ mgr.recv.isNotary = checkRound()
+ mgr.voteFilter = utils.NewVoteFilter()
+ mgr.voteFilter.Position.Round = currentRound
+ mgr.recv.emptyBlockHashMap = &sync.Map{}
+ if currentRound >= DKGDelayRound && mgr.recv.isNotary {
+ var err error
+ mgr.recv.npks, mgr.recv.psigSigner, err =
+ mgr.con.cfgModule.getDKGInfo(currentRound, false)
+ if err != nil {
+ mgr.logger.Warn("cannot get dkg info",
+ "round", currentRound, "error", err)
+ }
+ } else {
+ mgr.recv.npks = nil
+ mgr.recv.psigSigner = nil
+ }
+ // Run BA for this round.
+ mgr.recv.restartNotary <- types.Position{
+ Round: currentRound,
+ Height: math.MaxUint64,
+ }
+ if err := mgr.baRoutineForOneRound(setting); err != nil {
+ mgr.logger.Error("BA routine failed",
+ "error", err,
+ "nodeID", mgr.ID)
+ break Loop
+ }
+ }
+}
+
+func (mgr *agreementMgr) baRoutineForOneRound(
+ setting *baRoundSetting) (err error) {
+ agr := mgr.baModule
+ recv := mgr.recv
+ oldPos := agr.agreementID()
+ restart := func(restartPos types.Position) (breakLoop bool, err error) {
+ if !isStop(restartPos) {
+ if restartPos.Height+1 >= mgr.config(setting.round).RoundEndHeight() {
+ for {
+ select {
+ case <-mgr.ctx.Done():
+ // Stop waiting when the context is cancelled so this routine can
+ // shut down instead of spinning here.
+ breakLoop = true
+ return
+ default:
+ }
+ tipRound := mgr.bcModule.tipRound()
+ if tipRound > setting.round {
+ break
+ } else {
+ mgr.logger.Debug("Waiting blockChain to change round...",
+ "curRound", setting.round,
+ "tipRound", tipRound)
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ // This round is finished.
+ breakLoop = true
+ return
+ }
+ if restartPos.Older(oldPos) {
+ // The restartNotary event is triggered by 'BlockConfirmed'
+ // of some older block.
+ return
+ }
+ }
+ var nextHeight uint64
+ var nextTime time.Time
+ for {
+ // Make sure we are stoppable.
+ select {
+ case <-mgr.ctx.Done():
+ breakLoop = true
+ return
+ default:
+ }
+ nextHeight, nextTime = mgr.bcModule.nextBlock()
+ if nextHeight != notReadyHeight {
+ if isStop(restartPos) {
+ break
+ }
+ if nextHeight > restartPos.Height {
+ break
+ }
+ }
+ mgr.logger.Debug("BlockChain not ready!!!",
+ "old", oldPos, "restart", restartPos, "next", nextHeight)
+ time.Sleep(100 * time.Millisecond)
+ }
+ nextPos := types.Position{
+ Round: setting.round,
+ Height: nextHeight,
+ }
+ oldPos = nextPos
+ var leader types.NodeID
+ leader, err = mgr.calcLeader(setting.dkgSet, setting.crs, nextPos)
+ if err != nil {
+ return
+ }
+ time.Sleep(nextTime.Sub(time.Now()))
+ setting.ticker.Restart()
+ agr.restart(setting.dkgSet, setting.threshold, nextPos, leader, setting.crs)
+ return
+ }
+Loop:
+ for {
+ select {
+ case <-mgr.ctx.Done():
+ break Loop
+ default:
+ }
+ if agr.confirmed() {
+ // Block until receive restartPos
+ select {
+ case restartPos := <-recv.restartNotary:
+ breakLoop, err := restart(restartPos)
+ if err != nil {
+ return err
+ }
+ if breakLoop {
+ break Loop
+ }
+ case <-mgr.ctx.Done():
+ break Loop
+ }
+ }
+ select {
+ case restartPos := <-recv.restartNotary:
+ breakLoop, err := restart(restartPos)
+ if err != nil {
+ return err
+ }
+ if breakLoop {
+ break Loop
+ }
+ default:
+ }
+ if !mgr.recv.isNotary {
+ select {
+ case <-setting.ticker.Tick():
+ continue Loop
+ case <-mgr.ctx.Done():
+ break Loop
+ }
+ }
+ if err = agr.nextState(); err != nil {
+ mgr.logger.Error("Failed to proceed to next state",
+ "nodeID", mgr.ID.String(),
+ "error", err)
+ break Loop
+ }
+ if agr.pullVotes() {
+ pos := agr.agreementID()
+ mgr.logger.Debug("Calling Network.PullVotes for syncing votes",
+ "position", pos)
+ mgr.network.PullVotes(pos)
+ }
+ for i := 0; i < agr.clocks(); i++ {
+ // Priority select for agreement.done().
+ select {
+ case <-agr.done():
+ continue Loop
+ default:
+ }
+ select {
+ case <-agr.done():
+ continue Loop
+ case <-setting.ticker.Tick():
+ }
+ }
+ }
+ return nil
+}
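
Note on the leader selection used by calcLeader above: every notary derives the same leader locally by ranking the notary set against a target seeded by the round CRS and the block height, so no extra communication round is needed. The standalone Go sketch below illustrates that idea only; the SHA-256 distance ranking is an assumption for illustration and is not necessarily what types.NewNodeLeaderTarget and GetSubSet actually compute.

// Sketch: deterministic leader selection over a notary set, seeded by the
// round CRS and the block height. The SHA-256 ranking is an assumption made
// for this example, not the library's actual target function.
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// nodeID stands in for types.NodeID (a 32-byte hash) in this sketch.
type nodeID = [32]byte

// pickLeader returns the member of set whose SHA-256 rank against the
// (crs, height) target is smallest. Any node evaluating the same inputs
// obtains the same leader, which is the property calcLeader relies on.
func pickLeader(set map[nodeID]struct{}, crs [32]byte, height uint64) nodeID {
	var best nodeID
	var bestRank []byte
	for id := range set {
		h := sha256.New()
		h.Write(crs[:])
		var hb [8]byte
		binary.LittleEndian.PutUint64(hb[:], height)
		h.Write(hb[:])
		h.Write(id[:])
		rank := h.Sum(nil)
		if bestRank == nil || bytes.Compare(rank, bestRank) < 0 {
			best, bestRank = id, rank
		}
	}
	return best
}

func main() {
	set := map[nodeID]struct{}{
		sha256.Sum256([]byte("node-a")): {},
		sha256.Sum256([]byte("node-b")): {},
		sha256.Sum256([]byte("node-c")): {},
	}
	crs := sha256.Sum256([]byte("round-42-crs"))
	fmt.Printf("leader at height 7: %x\n", pickLeader(set, crs, 7))
}

Because the ranking depends only on (crs, height, nodeID), two honest notaries with the same inputs always agree on the leader, and the leader rotates as the height changes.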
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-state.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-state.go
new file mode 100644
index 000000000..fc2b6f3d5
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement-state.go
@@ -0,0 +1,213 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "fmt"
+
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+// Errors for agreement state module.
+var (
+ ErrNoEnoughVoteInPrepareState = fmt.Errorf("not enough votes in prepare state")
+ ErrNoEnoughVoteInAckState = fmt.Errorf("not enough votes in ack state")
+)
+
+// agreementStateType is the state of agreement
+type agreementStateType int
+
+// agreementStateType enum.
+const (
+ stateFast agreementStateType = iota
+ stateFastVote
+ stateInitial
+ statePreCommit
+ stateCommit
+ stateForward
+ statePullVote
+ stateSleep
+)
+
+type agreementState interface {
+ state() agreementStateType
+ nextState() (agreementState, error)
+ clocks() int
+}
+
+//----- FastState -----
+type fastState struct {
+ a *agreementData
+}
+
+func newFastState(a *agreementData) *fastState {
+ return &fastState{a: a}
+}
+
+func (s *fastState) state() agreementStateType { return stateFast }
+func (s *fastState) clocks() int { return 0 }
+func (s *fastState) nextState() (agreementState, error) {
+ if func() bool {
+ s.a.lock.Lock()
+ defer s.a.lock.Unlock()
+ return s.a.isLeader
+ }() {
+ hash := s.a.recv.ProposeBlock()
+ if hash != types.NullBlockHash {
+ s.a.lock.Lock()
+ defer s.a.lock.Unlock()
+ s.a.recv.ProposeVote(types.NewVote(types.VoteFast, hash, s.a.period))
+ }
+ }
+ return newFastVoteState(s.a), nil
+}
+
+//----- FastVoteState -----
+type fastVoteState struct {
+ a *agreementData
+}
+
+func newFastVoteState(a *agreementData) *fastVoteState {
+ return &fastVoteState{a: a}
+}
+
+func (s *fastVoteState) state() agreementStateType { return stateFastVote }
+func (s *fastVoteState) clocks() int { return 3 }
+func (s *fastVoteState) nextState() (agreementState, error) {
+ return newInitialState(s.a), nil
+}
+
+//----- InitialState -----
+type initialState struct {
+ a *agreementData
+}
+
+func newInitialState(a *agreementData) *initialState {
+ return &initialState{a: a}
+}
+
+func (s *initialState) state() agreementStateType { return stateInitial }
+func (s *initialState) clocks() int { return 0 }
+func (s *initialState) nextState() (agreementState, error) {
+ if func() bool {
+ s.a.lock.Lock()
+ defer s.a.lock.Unlock()
+ return !s.a.isLeader
+ }() {
+ // Leader already proposed block in fastState.
+ hash := s.a.recv.ProposeBlock()
+ s.a.lock.Lock()
+ defer s.a.lock.Unlock()
+ s.a.recv.ProposeVote(types.NewVote(types.VoteInit, hash, s.a.period))
+ }
+ return newPreCommitState(s.a), nil
+}
+
+//----- PreCommitState -----
+type preCommitState struct {
+ a *agreementData
+}
+
+func newPreCommitState(a *agreementData) *preCommitState {
+ return &preCommitState{a: a}
+}
+
+func (s *preCommitState) state() agreementStateType { return statePreCommit }
+func (s *preCommitState) clocks() int { return 2 }
+func (s *preCommitState) nextState() (agreementState, error) {
+ s.a.lock.RLock()
+ defer s.a.lock.RUnlock()
+ if s.a.lockValue == types.SkipBlockHash ||
+ s.a.lockValue == types.NullBlockHash {
+ hash := s.a.leader.leaderBlockHash()
+ s.a.recv.ProposeVote(types.NewVote(types.VotePreCom, hash, s.a.period))
+ } else {
+ s.a.recv.ProposeVote(types.NewVote(
+ types.VotePreCom, s.a.lockValue, s.a.period))
+ }
+ return newCommitState(s.a), nil
+}
+
+//----- CommitState -----
+type commitState struct {
+ a *agreementData
+}
+
+func newCommitState(a *agreementData) *commitState {
+ return &commitState{a: a}
+}
+
+func (s *commitState) state() agreementStateType { return stateCommit }
+func (s *commitState) clocks() int { return 2 }
+func (s *commitState) nextState() (agreementState, error) {
+ s.a.lock.Lock()
+ defer s.a.lock.Unlock()
+ s.a.recv.ProposeVote(types.NewVote(types.VoteCom, s.a.lockValue, s.a.period))
+ return newForwardState(s.a), nil
+}
+
+// ----- ForwardState -----
+type forwardState struct {
+ a *agreementData
+}
+
+func newForwardState(a *agreementData) *forwardState {
+ return &forwardState{a: a}
+}
+
+func (s *forwardState) state() agreementStateType { return stateForward }
+func (s *forwardState) clocks() int { return 4 }
+
+func (s *forwardState) nextState() (agreementState, error) {
+ return newPullVoteState(s.a), nil
+}
+
+// ----- PullVoteState -----
+// pullVoteState is a special state to ensure the assumption in the consensus
+// algorithm that every vote will eventually arrive for all nodes.
+type pullVoteState struct {
+ a *agreementData
+}
+
+func newPullVoteState(a *agreementData) *pullVoteState {
+ return &pullVoteState{a: a}
+}
+
+func (s *pullVoteState) state() agreementStateType { return statePullVote }
+func (s *pullVoteState) clocks() int { return 4 }
+
+func (s *pullVoteState) nextState() (agreementState, error) {
+ return s, nil
+}
+
+// ----- SleepState -----
+// sleepState is a special state after BA has output and waits for restart.
+type sleepState struct {
+ a *agreementData
+}
+
+func newSleepState(a *agreementData) *sleepState {
+ return &sleepState{a: a}
+}
+
+func (s *sleepState) state() agreementStateType { return stateSleep }
+func (s *sleepState) clocks() int { return 65536 }
+
+func (s *sleepState) nextState() (agreementState, error) {
+ return s, nil
+}
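
The states above only declare how many clock ticks each phase consumes (clocks()) and how to transition (nextState()); the actual driver loop lives in agreement-mgr.go's baRoutineForOneRound, which scales clocks() by the current period and waits on a lambdaBA ticker. The following stripped-down sketch shows that tick-driven progression; the phase interface and time.Ticker here are placeholders standing in for the library's agreementState and Ticker types, and no voting or locking is modeled.

// Sketch: a clocks()-based state machine driven by a ticker, in the spirit
// of baRoutineForOneRound. Placeholder types only; not the library's API.
package main

import (
	"fmt"
	"time"
)

// phase is a stand-in for the agreementState interface.
type phase interface {
	name() string
	clocks() int          // ticks to wait in this phase before advancing
	next() (phase, error) // transition; the real states also emit votes here
}

type simplePhase struct {
	label string
	ticks int
	succ  func() phase
}

func (p simplePhase) name() string         { return p.label }
func (p simplePhase) clocks() int          { return p.ticks }
func (p simplePhase) next() (phase, error) { return p.succ(), nil }

func main() {
	// initial -> preCommit -> commit -> commit ..., mirroring the tail of
	// the real state machine without the fast path.
	var preCommit, commit phase
	commit = simplePhase{"commit", 2, func() phase { return commit }}
	preCommit = simplePhase{"preCommit", 2, func() phase { return commit }}
	cur := phase(simplePhase{"initial", 0, func() phase { return preCommit }})

	tick := time.NewTicker(10 * time.Millisecond) // stands in for the lambdaBA ticker
	defer tick.Stop()
	for i := 0; i < 3; i++ {
		// Wait clocks() ticks; in the real loop this wait can be cut short
		// by agreement.done() (fast-forward or confirmation).
		for c := 0; c < cur.clocks(); c++ {
			<-tick.C
		}
		next, err := cur.next()
		if err != nil {
			fmt.Println("state transition failed:", err)
			return
		}
		fmt.Printf("%s -> %s\n", cur.name(), next.name())
		cur = next
	}
}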
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement.go
new file mode 100644
index 000000000..bad6afa2b
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/agreement.go
@@ -0,0 +1,797 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+// closedchan is a reusable closed channel.
+var closedchan = make(chan struct{})
+
+func init() {
+ close(closedchan)
+}
+
+// Errors for agreement module.
+var (
+ ErrInvalidVote = fmt.Errorf("invalid vote")
+ ErrNotInNotarySet = fmt.Errorf("not in notary set")
+ ErrIncorrectVoteSignature = fmt.Errorf("incorrect vote signature")
+ ErrIncorrectVotePartialSignature = fmt.Errorf("incorrect vote psig")
+ ErrMismatchBlockPosition = fmt.Errorf("mismatch block position")
+)
+
+// ErrFork for fork error in agreement.
+type ErrFork struct {
+ nID types.NodeID
+ old, new common.Hash
+}
+
+func (e *ErrFork) Error() string {
+ return fmt.Sprintf("fork is found for %s, old %s, new %s",
+ e.nID.String(), e.old, e.new)
+}
+
+// ErrForkVote for fork vote error in agreement.
+type ErrForkVote struct {
+ nID types.NodeID
+ old, new *types.Vote
+}
+
+func (e *ErrForkVote) Error() string {
+ return fmt.Sprintf("fork vote is found for %s, old %s, new %s",
+ e.nID.String(), e.old, e.new)
+}
+
+func newVoteListMap() []map[types.NodeID]*types.Vote {
+ listMap := make([]map[types.NodeID]*types.Vote, types.MaxVoteType)
+ for idx := range listMap {
+ listMap[idx] = make(map[types.NodeID]*types.Vote)
+ }
+ return listMap
+}
+
+// agreementReceiver is the interface receiving agreement event.
+type agreementReceiver interface {
+ ProposeVote(vote *types.Vote)
+ ProposeBlock() common.Hash
+ // ConfirmBlock is called with the lock held. Users can safely access all
+ // data within the agreement module.
+ ConfirmBlock(common.Hash, map[types.NodeID]*types.Vote)
+ PullBlocks(common.Hashes)
+ ReportForkVote(v1, v2 *types.Vote)
+ ReportForkBlock(b1, b2 *types.Block)
+ VerifyPartialSignature(vote *types.Vote) (bool, bool)
+}
+
+type pendingBlock struct {
+ block *types.Block
+ receivedTime time.Time
+}
+
+type pendingVote struct {
+ vote *types.Vote
+ receivedTime time.Time
+}
+
+// agreementData is the data for agreementState.
+type agreementData struct {
+ recv agreementReceiver
+
+ ID types.NodeID
+ isLeader bool
+ leader *leaderSelector
+ lockValue common.Hash
+ lockIter uint64
+ period uint64
+ requiredVote int
+ votes map[uint64][]map[types.NodeID]*types.Vote
+ lock sync.RWMutex
+ blocks map[types.NodeID]*types.Block
+ blocksLock sync.Mutex
+}
+
+// agreement is the agreement protocol described in the Crypto Shuffle Algorithm.
+type agreement struct {
+ state agreementState
+ data *agreementData
+ aID *atomic.Value
+ doneChan chan struct{}
+ notarySet map[types.NodeID]struct{}
+ hasVoteFast bool
+ hasOutput bool
+ lock sync.RWMutex
+ pendingBlock []pendingBlock
+ pendingVote []pendingVote
+ pendingAgreementResult map[types.Position]*types.AgreementResult
+ candidateBlock map[common.Hash]*types.Block
+ fastForward chan uint64
+ signer *utils.Signer
+ logger common.Logger
+}
+
+// newAgreement creates an agreement instance.
+func newAgreement(
+ ID types.NodeID,
+ recv agreementReceiver,
+ leader *leaderSelector,
+ signer *utils.Signer,
+ logger common.Logger) *agreement {
+ agreement := &agreement{
+ data: &agreementData{
+ recv: recv,
+ ID: ID,
+ leader: leader,
+ },
+ aID: &atomic.Value{},
+ pendingAgreementResult: make(map[types.Position]*types.AgreementResult),
+ candidateBlock: make(map[common.Hash]*types.Block),
+ fastForward: make(chan uint64, 1),
+ signer: signer,
+ logger: logger,
+ }
+ agreement.stop()
+ return agreement
+}
+
+// restart the agreement
+func (a *agreement) restart(
+ notarySet map[types.NodeID]struct{},
+ threshold int, aID types.Position, leader types.NodeID,
+ crs common.Hash) {
+ if !func() bool {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ if !isStop(aID) {
+ oldAID := a.agreementID()
+ if !isStop(oldAID) && !aID.Newer(oldAID) {
+ return false
+ }
+ }
+ a.logger.Debug("Restarting BA",
+ "notarySet", notarySet, "position", aID, "leader", leader)
+ a.data.lock.Lock()
+ defer a.data.lock.Unlock()
+ a.data.blocksLock.Lock()
+ defer a.data.blocksLock.Unlock()
+ a.data.votes = make(map[uint64][]map[types.NodeID]*types.Vote)
+ a.data.votes[1] = newVoteListMap()
+ a.data.period = 2
+ a.data.blocks = make(map[types.NodeID]*types.Block)
+ a.data.requiredVote = threshold
+ a.data.leader.restart(crs)
+ a.data.lockValue = types.SkipBlockHash
+ a.data.lockIter = 0
+ a.data.isLeader = a.data.ID == leader
+ if a.doneChan != nil {
+ close(a.doneChan)
+ }
+ a.doneChan = make(chan struct{})
+ a.fastForward = make(chan uint64, 1)
+ a.hasVoteFast = false
+ a.hasOutput = false
+ a.state = newFastState(a.data)
+ a.notarySet = notarySet
+ a.candidateBlock = make(map[common.Hash]*types.Block)
+ a.aID.Store(struct {
+ pos types.Position
+ leader types.NodeID
+ }{aID, leader})
+ return true
+ }() {
+ return
+ }
+
+ if isStop(aID) {
+ return
+ }
+
+ var result *types.AgreementResult
+ func() {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ newPendingAgreementResult := make(
+ map[types.Position]*types.AgreementResult)
+ for pos, agr := range a.pendingAgreementResult {
+ if pos.Newer(aID) {
+ newPendingAgreementResult[pos] = agr
+ } else if pos == aID {
+ result = agr
+ }
+ }
+ a.pendingAgreementResult = newPendingAgreementResult
+ }()
+
+ expireTime := time.Now().Add(-10 * time.Second)
+ replayBlock := make([]*types.Block, 0)
+ func() {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ newPendingBlock := make([]pendingBlock, 0)
+ for _, pending := range a.pendingBlock {
+ if aID.Newer(pending.block.Position) {
+ continue
+ } else if pending.block.Position == aID {
+ if result == nil ||
+ result.Position.Round < DKGDelayRound ||
+ result.BlockHash == pending.block.Hash {
+ replayBlock = append(replayBlock, pending.block)
+ }
+ } else if pending.receivedTime.After(expireTime) {
+ newPendingBlock = append(newPendingBlock, pending)
+ }
+ }
+ a.pendingBlock = newPendingBlock
+ }()
+
+ replayVote := make([]*types.Vote, 0)
+ func() {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ newPendingVote := make([]pendingVote, 0)
+ for _, pending := range a.pendingVote {
+ if aID.Newer(pending.vote.Position) {
+ continue
+ } else if pending.vote.Position == aID {
+ if result == nil || result.Position.Round < DKGDelayRound {
+ replayVote = append(replayVote, pending.vote)
+ }
+ } else if pending.receivedTime.After(expireTime) {
+ newPendingVote = append(newPendingVote, pending)
+ }
+ }
+ a.pendingVote = newPendingVote
+ }()
+
+ for _, block := range replayBlock {
+ if err := a.processBlock(block); err != nil {
+ a.logger.Error("Failed to process block when restarting agreement",
+ "block", block)
+ }
+ }
+
+ if result != nil {
+ if err := a.processAgreementResult(result); err != nil {
+ a.logger.Error("Failed to process agreement result when retarting",
+ "result", result)
+ }
+ }
+
+ for _, vote := range replayVote {
+ if err := a.processVote(vote); err != nil {
+ a.logger.Error("Failed to process vote when restarting agreement",
+ "vote", vote)
+ }
+ }
+}
+
+func (a *agreement) stop() {
+ a.restart(make(map[types.NodeID]struct{}), int(math.MaxInt32),
+ types.Position{
+ Height: math.MaxUint64,
+ },
+ types.NodeID{}, common.Hash{})
+}
+
+func isStop(aID types.Position) bool {
+ return aID.Height == math.MaxUint64
+}
+
+// clocks returns the number of ticks required by the current state.
+func (a *agreement) clocks() int {
+ a.data.lock.RLock()
+ defer a.data.lock.RUnlock()
+ scale := int(a.data.period) - 1
+ if a.state.state() == stateForward {
+ scale = 1
+ }
+ if scale < 1 {
+ // just in case.
+ scale = 1
+ }
+ // 10 is a magic number derived from many years of experience.
+ if scale > 10 {
+ scale = 10
+ }
+ return a.state.clocks() * scale
+}
+
+// pullVotes returns if current agreement requires more votes to continue.
+func (a *agreement) pullVotes() bool {
+ a.data.lock.RLock()
+ defer a.data.lock.RUnlock()
+ return a.state.state() == statePullVote ||
+ a.state.state() == stateInitial ||
+ (a.state.state() == statePreCommit && (a.data.period%3) == 0)
+}
+
+// agreementID returns the current agreementID.
+func (a *agreement) agreementID() types.Position {
+ return a.aID.Load().(struct {
+ pos types.Position
+ leader types.NodeID
+ }).pos
+}
+
+// leader returns the current leader.
+func (a *agreement) leader() types.NodeID {
+ return a.aID.Load().(struct {
+ pos types.Position
+ leader types.NodeID
+ }).leader
+}
+
+// nextState is called at the specific clock time.
+func (a *agreement) nextState() (err error) {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ if a.hasOutput {
+ a.state = newSleepState(a.data)
+ return
+ }
+ a.state, err = a.state.nextState()
+ return
+}
+
+func (a *agreement) sanityCheck(vote *types.Vote) error {
+ if vote.Type >= types.MaxVoteType {
+ return ErrInvalidVote
+ }
+ ok, err := utils.VerifyVoteSignature(vote)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return ErrIncorrectVoteSignature
+ }
+ if vote.Position.Round != a.agreementID().Round {
+ // TODO(jimmy): maybe we can verify partial signature at agreement-mgr.
+ return nil
+ }
+ if ok, report := a.data.recv.VerifyPartialSignature(vote); !ok {
+ if report {
+ return ErrIncorrectVotePartialSignature
+ }
+ return ErrSkipButNoError
+ }
+ return nil
+}
+
+func (a *agreement) checkForkVote(vote *types.Vote) (
+ alreadyExist bool, err error) {
+ a.data.lock.RLock()
+ defer a.data.lock.RUnlock()
+ if votes, exist := a.data.votes[vote.Period]; exist {
+ if oldVote, exist := votes[vote.Type][vote.ProposerID]; exist {
+ alreadyExist = true
+ if vote.BlockHash != oldVote.BlockHash {
+ a.data.recv.ReportForkVote(oldVote, vote)
+ err = &ErrForkVote{vote.ProposerID, oldVote, vote}
+ return
+ }
+ }
+ }
+ return
+}
+
+// prepareVote prepares a vote.
+func (a *agreement) prepareVote(vote *types.Vote) (err error) {
+ vote.Position = a.agreementID()
+ err = a.signer.SignVote(vote)
+ return
+}
+
+func (a *agreement) updateFilter(filter *utils.VoteFilter) {
+ if isStop(a.agreementID()) {
+ return
+ }
+ a.lock.RLock()
+ defer a.lock.RUnlock()
+ a.data.lock.RLock()
+ defer a.data.lock.RUnlock()
+ filter.Confirm = a.hasOutput
+ filter.LockIter = a.data.lockIter
+ filter.Period = a.data.period
+ filter.Position.Height = a.agreementID().Height
+}
+
+// processVote is the entry point for processing Vote.
+func (a *agreement) processVote(vote *types.Vote) error {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ if err := a.sanityCheck(vote); err != nil {
+ return err
+ }
+ aID := a.agreementID()
+
+ // Agreement module has stopped.
+ if isStop(aID) {
+ // Hacky way to not drop first votes when round just begins.
+ if vote.Position.Round == aID.Round {
+ a.pendingVote = append(a.pendingVote, pendingVote{
+ vote: vote,
+ receivedTime: time.Now().UTC(),
+ })
+ return nil
+ }
+ return ErrSkipButNoError
+ }
+ if vote.Position != aID {
+ if aID.Newer(vote.Position) {
+ return nil
+ }
+ a.pendingVote = append(a.pendingVote, pendingVote{
+ vote: vote,
+ receivedTime: time.Now().UTC(),
+ })
+ return nil
+ }
+ exist, err := a.checkForkVote(vote)
+ if err != nil {
+ return err
+ }
+ if exist {
+ return nil
+ }
+
+ a.data.lock.Lock()
+ defer a.data.lock.Unlock()
+ if _, exist := a.data.votes[vote.Period]; !exist {
+ a.data.votes[vote.Period] = newVoteListMap()
+ }
+ if _, exist := a.data.votes[vote.Period][vote.Type][vote.ProposerID]; exist {
+ return nil
+ }
+ a.data.votes[vote.Period][vote.Type][vote.ProposerID] = vote
+ if !a.hasOutput &&
+ (vote.Type == types.VoteCom ||
+ vote.Type == types.VoteFast ||
+ vote.Type == types.VoteFastCom) {
+ if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok &&
+ hash != types.SkipBlockHash {
+ if vote.Type == types.VoteFast {
+ if !a.hasVoteFast {
+ if a.state.state() == stateFast ||
+ a.state.state() == stateFastVote {
+ a.data.recv.ProposeVote(
+ types.NewVote(types.VoteFastCom, hash, vote.Period))
+ a.hasVoteFast = true
+
+ }
+ if a.data.lockIter == 0 {
+ a.data.lockValue = hash
+ a.data.lockIter = 1
+ }
+ }
+ } else {
+ a.hasOutput = true
+ a.data.recv.ConfirmBlock(hash,
+ a.data.votes[vote.Period][vote.Type])
+ if a.doneChan != nil {
+ close(a.doneChan)
+ a.doneChan = nil
+ }
+ }
+ return nil
+ }
+ } else if a.hasOutput {
+ return nil
+ }
+
+ // Check if the agreement requires fast-forwarding.
+ if len(a.fastForward) > 0 {
+ return nil
+ }
+ if vote.Type == types.VotePreCom {
+ if vote.Period < a.data.lockIter {
+ // This PreCom is useless for us.
+ return nil
+ }
+ if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok &&
+ hash != types.SkipBlockHash {
+ // Condition 1.
+ if vote.Period > a.data.lockIter {
+ a.data.lockValue = hash
+ a.data.lockIter = vote.Period
+ }
+ // Condition 2.
+ if vote.Period > a.data.period {
+ a.fastForward <- vote.Period
+ if a.doneChan != nil {
+ close(a.doneChan)
+ a.doneChan = nil
+ }
+ return nil
+ }
+ }
+ }
+ // Condition 3.
+ if vote.Type == types.VoteCom && vote.Period >= a.data.period &&
+ len(a.data.votes[vote.Period][types.VoteCom]) >= a.data.requiredVote {
+ hashes := common.Hashes{}
+ addPullBlocks := func(voteType types.VoteType) {
+ for _, vote := range a.data.votes[vote.Period][voteType] {
+ if vote.BlockHash == types.NullBlockHash ||
+ vote.BlockHash == types.SkipBlockHash {
+ continue
+ }
+ if _, found := a.findCandidateBlockNoLock(vote.BlockHash); !found {
+ hashes = append(hashes, vote.BlockHash)
+ }
+ }
+ }
+ addPullBlocks(types.VotePreCom)
+ addPullBlocks(types.VoteCom)
+ if len(hashes) > 0 {
+ a.data.recv.PullBlocks(hashes)
+ }
+ a.fastForward <- vote.Period + 1
+ if a.doneChan != nil {
+ close(a.doneChan)
+ a.doneChan = nil
+ }
+ return nil
+ }
+ return nil
+}
+
+func (a *agreement) processFinalizedBlock(block *types.Block) {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ if a.hasOutput {
+ return
+ }
+ aID := a.agreementID()
+ if aID.Older(block.Position) {
+ return
+ }
+ a.addCandidateBlockNoLock(block)
+ a.hasOutput = true
+ a.data.lock.Lock()
+ defer a.data.lock.Unlock()
+ a.data.recv.ConfirmBlock(block.Hash, nil)
+ if a.doneChan != nil {
+ close(a.doneChan)
+ a.doneChan = nil
+ }
+}
+
+func (a *agreement) processAgreementResult(result *types.AgreementResult) error {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ aID := a.agreementID()
+ if result.Position.Older(aID) {
+ return nil
+ } else if result.Position.Newer(aID) {
+ a.pendingAgreementResult[result.Position] = result
+ return nil
+ }
+ if a.hasOutput {
+ return nil
+ }
+ a.data.lock.Lock()
+ defer a.data.lock.Unlock()
+ if _, exist := a.findCandidateBlockNoLock(result.BlockHash); !exist {
+ a.data.recv.PullBlocks(common.Hashes{result.BlockHash})
+ }
+ a.hasOutput = true
+ a.data.recv.ConfirmBlock(result.BlockHash, nil)
+ if a.doneChan != nil {
+ close(a.doneChan)
+ a.doneChan = nil
+ }
+ return nil
+}
+
+func (a *agreement) done() <-chan struct{} {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ select {
+ case period := <-a.fastForward:
+ a.data.lock.Lock()
+ defer a.data.lock.Unlock()
+ if period <= a.data.period {
+ break
+ }
+ a.data.setPeriod(period)
+ a.state = newPreCommitState(a.data)
+ a.doneChan = make(chan struct{})
+ return closedchan
+ default:
+ }
+ if a.doneChan == nil {
+ return closedchan
+ }
+ return a.doneChan
+}
+
+func (a *agreement) confirmed() bool {
+ a.lock.RLock()
+ defer a.lock.RUnlock()
+ return a.confirmedNoLock()
+}
+
+func (a *agreement) confirmedNoLock() bool {
+ return a.hasOutput
+}
+
+// processBlock is the entry point for processing Block.
+func (a *agreement) processBlock(block *types.Block) error {
+ checkSkip := func() bool {
+ aID := a.agreementID()
+ if block.Position != aID {
+ // Agreement module has stopped.
+ if !isStop(aID) {
+ if aID.Newer(block.Position) {
+ return true
+ }
+ }
+ }
+ return false
+ }
+ if checkSkip() {
+ return nil
+ }
+ if err := utils.VerifyBlockSignature(block); err != nil {
+ return err
+ }
+
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ a.data.blocksLock.Lock()
+ defer a.data.blocksLock.Unlock()
+ aID := a.agreementID()
+ // a.agreementID might change while acquiring the lock, so run checkSkip again.
+ if checkSkip() {
+ return nil
+ } else if aID != block.Position {
+ a.pendingBlock = append(a.pendingBlock, pendingBlock{
+ block: block,
+ receivedTime: time.Now().UTC(),
+ })
+ return nil
+ } else if a.confirmedNoLock() {
+ return nil
+ }
+ if b, exist := a.data.blocks[block.ProposerID]; exist {
+ if b.Hash != block.Hash {
+ a.data.recv.ReportForkBlock(b, block)
+ return &ErrFork{block.ProposerID, b.Hash, block.Hash}
+ }
+ return nil
+ }
+ if err := a.data.leader.processBlock(block); err != nil {
+ return err
+ }
+ a.data.blocks[block.ProposerID] = block
+ a.addCandidateBlockNoLock(block)
+ if block.ProposerID != a.data.ID &&
+ (a.state.state() == stateFast || a.state.state() == stateFastVote) &&
+ block.ProposerID == a.leader() {
+ go func() {
+ for func() bool {
+ if aID != a.agreementID() {
+ return false
+ }
+ a.lock.RLock()
+ defer a.lock.RUnlock()
+ if a.state.state() != stateFast && a.state.state() != stateFastVote {
+ return false
+ }
+ a.data.lock.RLock()
+ defer a.data.lock.RUnlock()
+ a.data.blocksLock.Lock()
+ defer a.data.blocksLock.Unlock()
+ block, exist := a.data.blocks[a.leader()]
+ if !exist {
+ return true
+ }
+ ok, err := a.data.leader.validLeader(block, a.data.leader.hashCRS)
+ if err != nil {
+ fmt.Println("Error checking validLeader for Fast BA",
+ "error", err, "block", block)
+ return false
+ }
+ if ok {
+ a.data.recv.ProposeVote(
+ types.NewVote(types.VoteFast, block.Hash, a.data.period))
+ return false
+ }
+ return true
+ }() {
+ // TODO(jimmy): retry interval should be related to configurations.
+ time.Sleep(250 * time.Millisecond)
+ }
+ }()
+ }
+ return nil
+}
+
+func (a *agreement) addCandidateBlock(block *types.Block) {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ a.addCandidateBlockNoLock(block)
+}
+
+func (a *agreement) addCandidateBlockNoLock(block *types.Block) {
+ a.candidateBlock[block.Hash] = block
+}
+
+func (a *agreement) findCandidateBlockNoLock(
+ hash common.Hash) (*types.Block, bool) {
+ b, e := a.candidateBlock[hash]
+ return b, e
+}
+
+// findBlockNoLock finds a block in both the candidate blocks and the pending
+// blocks kept by the leader-selector. A block might be confirmed by other
+// nodes even though we cannot verify its validity yet.
+func (a *agreement) findBlockNoLock(hash common.Hash) (*types.Block, bool) {
+ b, e := a.findCandidateBlockNoLock(hash)
+ if !e {
+ b, e = a.data.leader.findPendingBlock(hash)
+ }
+ return b, e
+}
+
+func (a *agreementData) countVote(period uint64, voteType types.VoteType) (
+ blockHash common.Hash, ok bool) {
+ a.lock.RLock()
+ defer a.lock.RUnlock()
+ return a.countVoteNoLock(period, voteType)
+}
+
+func (a *agreementData) countVoteNoLock(
+ period uint64, voteType types.VoteType) (blockHash common.Hash, ok bool) {
+ votes, exist := a.votes[period]
+ if !exist {
+ return
+ }
+ candidate := make(map[common.Hash]int)
+ for _, vote := range votes[voteType] {
+ if _, exist := candidate[vote.BlockHash]; !exist {
+ candidate[vote.BlockHash] = 0
+ }
+ candidate[vote.BlockHash]++
+ }
+ for candidateHash, votes := range candidate {
+ if votes >= a.requiredVote {
+ blockHash = candidateHash
+ ok = true
+ return
+ }
+ }
+ return
+}
+
+func (a *agreementData) setPeriod(period uint64) {
+ for i := a.period + 1; i <= period; i++ {
+ if _, exist := a.votes[i]; !exist {
+ a.votes[i] = newVoteListMap()
+ }
+ }
+ a.period = period
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/blockchain.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockchain.go
new file mode 100644
index 000000000..579ccd44c
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockchain.go
@@ -0,0 +1,681 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+// Errors for sanity check error.
+var (
+ ErrBlockFromOlderPosition = errors.New("block from older position")
+ ErrNotGenesisBlock = errors.New("not a genesis block")
+ ErrIsGenesisBlock = errors.New("is a genesis block")
+ ErrIncorrectParentHash = errors.New("incorrect parent hash")
+ ErrInvalidBlockHeight = errors.New("invalid block height")
+ ErrInvalidRoundID = errors.New("invalid round id")
+ ErrInvalidTimestamp = errors.New("invalid timestamp")
+ ErrNotFollowTipPosition = errors.New("not follow tip position")
+ ErrDuplicatedPendingBlock = errors.New("duplicated pending block")
+ ErrRetrySanityCheckLater = errors.New("retry sanity check later")
+ ErrRoundNotSwitch = errors.New("round not switch")
+ ErrIncorrectAgreementResult = errors.New(
+ "incorrect block randomness result")
+ ErrMissingRandomness = errors.New("missing block randomness")
+)
+
+const notReadyHeight uint64 = math.MaxUint64
+
+type pendingBlockRecord struct {
+ position types.Position
+ block *types.Block
+}
+
+type pendingBlockRecords []pendingBlockRecord
+
+func (pb *pendingBlockRecords) insert(p pendingBlockRecord) error {
+ idx := sort.Search(len(*pb), func(i int) bool {
+ return !(*pb)[i].position.Older(p.position)
+ })
+ switch idx {
+ case len(*pb):
+ *pb = append(*pb, p)
+ default:
+ if (*pb)[idx].position.Equal(p.position) {
+ // Allow overwriting a pending block record for an empty block; we may
+ // need to pull that block from other nodes when its parent is not
+ // found locally.
+ if (*pb)[idx].block == nil && p.block != nil {
+ (*pb)[idx].block = p.block
+ return nil
+ }
+ return ErrDuplicatedPendingBlock
+ }
+ // Insert the value to that index.
+ *pb = append((*pb), pendingBlockRecord{})
+ copy((*pb)[idx+1:], (*pb)[idx:])
+ (*pb)[idx] = p
+ }
+ return nil
+}
+
+func (pb pendingBlockRecords) searchByHeight(h uint64) (
+ pendingBlockRecord, bool) {
+ idx := sort.Search(len(pb), func(i int) bool {
+ return pb[i].position.Height >= h
+ })
+ if idx == len(pb) || pb[idx].position.Height != h {
+ return pendingBlockRecord{}, false
+ }
+ return pb[idx], true
+}
+
+func (pb pendingBlockRecords) searchByPosition(p types.Position) (
+ pendingBlockRecord, bool) {
+ idx := sort.Search(len(pb), func(i int) bool {
+ // Compare by the record's position; the block pointer may be nil for
+ // pending empty-block records.
+ return !pb[i].position.Older(p)
+ })
+ if idx == len(pb) || !pb[idx].position.Equal(p) {
+ return pendingBlockRecord{}, false
+ }
+ return pb[idx], true
+}
+
+type blockChainConfig struct {
+ utils.RoundBasedConfig
+
+ minBlockInterval time.Duration
+}
+
+func (c *blockChainConfig) fromConfig(round uint64, config *types.Config) {
+ c.minBlockInterval = config.MinBlockInterval
+ c.SetupRoundBasedFields(round, config)
+}
+
+func newBlockChainConfig(prev blockChainConfig, config *types.Config) (
+ c blockChainConfig) {
+ c = blockChainConfig{}
+ c.fromConfig(prev.RoundID()+1, config)
+ c.AppendTo(prev.RoundBasedConfig)
+ return
+}
+
+type tsigVerifierGetter interface {
+ UpdateAndGet(uint64) (TSigVerifier, bool, error)
+ Purge(uint64)
+}
+
+type blockChain struct {
+ lock sync.RWMutex
+ ID types.NodeID
+ lastConfirmed *types.Block
+ lastDelivered *types.Block
+ signer *utils.Signer
+ vGetter tsigVerifierGetter
+ app Application
+ logger common.Logger
+ pendingRandomnesses map[types.Position][]byte
+ configs []blockChainConfig
+ pendingBlocks pendingBlockRecords
+ confirmedBlocks types.BlocksByPosition
+ dMoment time.Time
+
+ // Do not access this variable besides processAgreementResult.
+ lastPosition types.Position
+}
+
+func newBlockChain(nID types.NodeID, dMoment time.Time, initBlock *types.Block,
+ app Application, vGetter tsigVerifierGetter, signer *utils.Signer,
+ logger common.Logger) *blockChain {
+ return &blockChain{
+ ID: nID,
+ lastConfirmed: initBlock,
+ lastDelivered: initBlock,
+ signer: signer,
+ vGetter: vGetter,
+ app: app,
+ logger: logger,
+ dMoment: dMoment,
+ pendingRandomnesses: make(
+ map[types.Position][]byte),
+ }
+}
+
+func (bc *blockChain) notifyRoundEvents(evts []utils.RoundEventParam) error {
+ bc.lock.Lock()
+ defer bc.lock.Unlock()
+ apply := func(e utils.RoundEventParam) error {
+ if len(bc.configs) > 0 {
+ lastCfg := bc.configs[len(bc.configs)-1]
+ if e.BeginHeight != lastCfg.RoundEndHeight() {
+ return ErrInvalidBlockHeight
+ }
+ if lastCfg.RoundID() == e.Round {
+ bc.configs[len(bc.configs)-1].ExtendLength()
+ } else if lastCfg.RoundID()+1 == e.Round {
+ bc.configs = append(bc.configs, newBlockChainConfig(
+ lastCfg, e.Config))
+ } else {
+ return ErrInvalidRoundID
+ }
+ } else {
+ c := blockChainConfig{}
+ c.fromConfig(e.Round, e.Config)
+ c.SetRoundBeginHeight(e.BeginHeight)
+ if bc.lastConfirmed == nil {
+ if c.RoundID() != 0 {
+ panic(fmt.Errorf(
+ "genesis config should from round 0, but %d",
+ c.RoundID()))
+ }
+ } else {
+ if c.RoundID() != bc.lastConfirmed.Position.Round {
+ panic(fmt.Errorf("incompatible config/block round %s %d",
+ bc.lastConfirmed, c.RoundID()))
+ }
+ if !c.Contains(bc.lastConfirmed.Position.Height) {
+ panic(fmt.Errorf(
+ "unmatched round-event with block %s %d %d %d",
+ bc.lastConfirmed, e.Round, e.Reset, e.BeginHeight))
+ }
+ }
+ bc.configs = append(bc.configs, c)
+ }
+ return nil
+ }
+ for _, e := range evts {
+ if err := apply(e); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (bc *blockChain) proposeBlock(position types.Position,
+ proposeTime time.Time, isEmpty bool) (b *types.Block, err error) {
+ bc.lock.RLock()
+ defer bc.lock.RUnlock()
+ return bc.prepareBlock(position, proposeTime, isEmpty)
+}
+
+func (bc *blockChain) extractBlocks() (ret []*types.Block) {
+ bc.lock.Lock()
+ defer bc.lock.Unlock()
+ for len(bc.confirmedBlocks) > 0 {
+ c := bc.confirmedBlocks[0]
+ if c.Position.Round >= DKGDelayRound &&
+ len(c.Randomness) == 0 &&
+ !bc.setRandomnessFromPending(c) {
+ break
+ }
+ c, bc.confirmedBlocks = bc.confirmedBlocks[0], bc.confirmedBlocks[1:]
+ ret = append(ret, c)
+ bc.lastDelivered = c
+ }
+ return
+}
+
+func (bc *blockChain) sanityCheck(b *types.Block) error {
+ bc.lock.RLock()
+ defer bc.lock.RUnlock()
+ if bc.lastConfirmed == nil {
+ // It should be a genesis block.
+ if !b.IsGenesis() {
+ return ErrNotGenesisBlock
+ }
+ if b.Timestamp.Before(bc.dMoment.Add(bc.configs[0].minBlockInterval)) {
+ return ErrInvalidTimestamp
+ }
+ return nil
+ }
+ if b.IsGenesis() {
+ return ErrIsGenesisBlock
+ }
+ if b.Position.Height != bc.lastConfirmed.Position.Height+1 {
+ if b.Position.Height > bc.lastConfirmed.Position.Height {
+ return ErrRetrySanityCheckLater
+ }
+ return ErrInvalidBlockHeight
+ }
+ tipConfig := bc.tipConfig()
+ if tipConfig.IsLastBlock(bc.lastConfirmed) {
+ if b.Position.Round != bc.lastConfirmed.Position.Round+1 {
+ return ErrRoundNotSwitch
+ }
+ } else {
+ if b.Position.Round != bc.lastConfirmed.Position.Round {
+ return ErrInvalidRoundID
+ }
+ }
+ if !b.ParentHash.Equal(bc.lastConfirmed.Hash) {
+ return ErrIncorrectParentHash
+ }
+ if b.Timestamp.Before(bc.lastConfirmed.Timestamp.Add(
+ tipConfig.minBlockInterval)) {
+ return ErrInvalidTimestamp
+ }
+ if err := utils.VerifyBlockSignature(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+// addEmptyBlock is called when an empty block is confirmed by BA.
+func (bc *blockChain) addEmptyBlock(position types.Position) (
+ *types.Block, error) {
+ bc.lock.Lock()
+ defer bc.lock.Unlock()
+ add := func() *types.Block {
+ emptyB, err := bc.prepareBlock(position, time.Time{}, true)
+ if err != nil || emptyB == nil {
+ // This helper is expected to be called when an empty block is ready
+ // to be confirmed.
+ panic(err)
+ }
+ bc.confirmBlock(emptyB)
+ bc.checkIfBlocksConfirmed()
+ return emptyB
+ }
+ if bc.lastConfirmed != nil {
+ if !position.Newer(bc.lastConfirmed.Position) {
+ bc.logger.Warn("Dropping empty block: older than tip",
+ "position", &position,
+ "last-confirmed", bc.lastConfirmed)
+ return nil, ErrBlockFromOlderPosition
+ }
+ if bc.lastConfirmed.Position.Height+1 == position.Height {
+ return add(), nil
+ }
+ } else if position.Height == types.GenesisHeight && position.Round == 0 {
+ return add(), nil
+ } else {
+ return nil, ErrInvalidBlockHeight
+ }
+ return nil, bc.addPendingBlockRecord(pendingBlockRecord{position, nil})
+}
+
+// addBlock should be called when the block is confirmed by BA. We do not
+// perform a sanity check against this block; it is fine to add a block that
+// skips heights.
+func (bc *blockChain) addBlock(b *types.Block) error {
+ if b.Position.Round >= DKGDelayRound &&
+ len(b.Randomness) == 0 &&
+ !bc.setRandomnessFromPending(b) {
+ return ErrMissingRandomness
+ }
+ bc.lock.Lock()
+ defer bc.lock.Unlock()
+ confirmed := false
+ if bc.lastConfirmed != nil {
+ if !b.Position.Newer(bc.lastConfirmed.Position) {
+ bc.logger.Warn("Dropping block: older than tip",
+ "block", b, "last-confirmed", bc.lastConfirmed)
+ return nil
+ }
+ if bc.lastConfirmed.Position.Height+1 == b.Position.Height {
+ confirmed = true
+ }
+ } else if b.IsGenesis() {
+ confirmed = true
+ }
+ delete(bc.pendingRandomnesses, b.Position)
+ if !confirmed {
+ return bc.addPendingBlockRecord(pendingBlockRecord{b.Position, b})
+ }
+ bc.confirmBlock(b)
+ bc.checkIfBlocksConfirmed()
+ return nil
+}
+
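+// tipRound returns the round of the next block to propose: the round of the
+// last confirmed block, plus one when that block is the last block of its
+// round.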
+func (bc *blockChain) tipRound() uint64 {
+ bc.lock.RLock()
+ defer bc.lock.RUnlock()
+ if bc.lastConfirmed == nil {
+ return 0
+ }
+ offset, tipConfig := uint64(0), bc.tipConfig()
+ if tipConfig.IsLastBlock(bc.lastConfirmed) {
+ offset++
+ }
+ return bc.lastConfirmed.Position.Round + offset
+}
+
+func (bc *blockChain) confirmed(h uint64) bool {
+ bc.lock.RLock()
+ defer bc.lock.RUnlock()
+ if bc.lastConfirmed != nil && bc.lastConfirmed.Position.Height >= h {
+ return true
+ }
+ r, found := bc.pendingBlocks.searchByHeight(h)
+ if !found {
+ return false
+ }
+ return r.block != nil
+}
+
+func (bc *blockChain) nextBlock() (uint64, time.Time) {
+ bc.lock.RLock()
+ defer bc.lock.RUnlock()
+ // It is fine to access the tip config directly in nextBlock without
+ // checking whether the lastConfirmed block exists.
+ tip, config := bc.lastConfirmed, bc.configs[0]
+ if tip == nil {
+ return types.GenesisHeight, bc.dMoment
+ }
+ if tip != bc.lastDelivered {
+ // If tip is not delivered, we should not proceed to next block.
+ return notReadyHeight, time.Time{}
+ }
+ return tip.Position.Height + 1, tip.Timestamp.Add(config.minBlockInterval)
+}
+
+func (bc *blockChain) pendingBlocksWithoutRandomness() []*types.Block {
+ bc.lock.RLock()
+ defer bc.lock.RUnlock()
+ blocks := make([]*types.Block, 0)
+ for _, b := range bc.confirmedBlocks {
+ if b.Position.Round < DKGDelayRound ||
+ len(b.Randomness) > 0 ||
+ bc.setRandomnessFromPending(b) {
+ continue
+ }
+ blocks = append(blocks, b)
+ }
+ for _, r := range bc.pendingBlocks {
+ if r.position.Round < DKGDelayRound {
+ continue
+ }
+ if r.block != nil &&
+ len(r.block.Randomness) == 0 &&
+ !bc.setRandomnessFromPending(r.block) {
+ blocks = append(blocks, r.block)
+ }
+ }
+ return blocks
+}
+
+func (bc *blockChain) lastDeliveredBlock() *types.Block {
+ bc.lock.RLock()
+ defer bc.lock.RUnlock()
+ return bc.lastDelivered
+}
+
+func (bc *blockChain) lastPendingBlock() *types.Block {
+ bc.lock.RLock()
+ defer bc.lock.RUnlock()
+ if len(bc.confirmedBlocks) == 0 {
+ return nil
+ }
+ return bc.confirmedBlocks[0]
+}
+
+/////////////////////////////////////////////
+//
+// internal helpers
+//
+/////////////////////////////////////////////
+
+// findPendingBlock is a helper to find a block in either pending or confirmed
+// state by position.
+func (bc *blockChain) findPendingBlock(p types.Position) *types.Block {
+ if idx := sort.Search(len(bc.confirmedBlocks), func(i int) bool {
+ return !bc.confirmedBlocks[i].Position.Older(p)
+ }); idx != len(bc.confirmedBlocks) &&
+ bc.confirmedBlocks[idx].Position.Equal(p) {
+ return bc.confirmedBlocks[idx]
+ }
+ pendingRec, _ := bc.pendingBlocks.searchByPosition(p)
+ return pendingRec.block
+}
+
+func (bc *blockChain) addPendingBlockRecord(p pendingBlockRecord) error {
+ if err := bc.pendingBlocks.insert(p); err != nil {
+ if err == ErrDuplicatedPendingBlock {
+ // We need to ignore this error because BA might confirm duplicated
+ // blocks at the same position.
+ err = nil
+ }
+ return err
+ }
+ return nil
+}
+
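+// checkIfBlocksConfirmed confirms pending block records that directly extend
+// the last confirmed block, preparing an empty block for records carrying no
+// block.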
+func (bc *blockChain) checkIfBlocksConfirmed() {
+ var err error
+ for len(bc.pendingBlocks) > 0 {
+ if bc.pendingBlocks[0].position.Height <
+ bc.lastConfirmed.Position.Height+1 {
+ panic(fmt.Errorf("unexpected case %s %s", bc.lastConfirmed,
+ bc.pendingBlocks[0].position))
+ }
+ if bc.pendingBlocks[0].position.Height >
+ bc.lastConfirmed.Position.Height+1 {
+ break
+ }
+ var pending pendingBlockRecord
+ pending, bc.pendingBlocks = bc.pendingBlocks[0], bc.pendingBlocks[1:]
+ nextTip := pending.block
+ if nextTip == nil {
+ if nextTip, err = bc.prepareBlock(
+ pending.position, time.Time{}, true); err != nil {
+ // Preparing an empty block for a correct position should never
+ // return an error.
+ panic(err)
+ }
+ }
+ bc.confirmBlock(nextTip)
+ }
+}
+
+func (bc *blockChain) purgeConfig() {
+ for bc.configs[0].RoundID() < bc.lastConfirmed.Position.Round {
+ bc.configs = bc.configs[1:]
+ }
+ if bc.configs[0].RoundID() != bc.lastConfirmed.Position.Round {
+ panic(fmt.Errorf("mismatched tip config: %d %d",
+ bc.configs[0].RoundID(), bc.lastConfirmed.Position.Round))
+ }
+}
+
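+// verifyRandomness checks a block's randomness: before DKGDelayRound it must
+// equal NoRand; from DKGDelayRound on it must be a valid threshold signature
+// of the block hash for the given round.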
+func (bc *blockChain) verifyRandomness(
+ blockHash common.Hash, round uint64, randomness []byte) (bool, error) {
+ if round < DKGDelayRound {
+ return bytes.Equal(randomness, NoRand), nil
+ }
+ v, ok, err := bc.vGetter.UpdateAndGet(round)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, ErrTSigNotReady
+ }
+ return v.VerifySignature(blockHash, crypto.Signature{
+ Type: "bls",
+ Signature: randomness}), nil
+}
+
+func (bc *blockChain) prepareBlock(position types.Position,
+ proposeTime time.Time, empty bool) (b *types.Block, err error) {
+ b = &types.Block{Position: position, Timestamp: proposeTime}
+ tip := bc.lastConfirmed
+ // Make sure we can propose a block at the position expected by callers.
+ if tip == nil {
+ if bc.configs[0].RoundID() != uint64(0) {
+ panic(fmt.Errorf(
+ "Genesis config should be ready when preparing genesis: %d",
+ bc.configs[0].RoundID()))
+ }
+ // It should be the case for genesis block.
+ if !position.Equal(types.Position{Height: types.GenesisHeight}) {
+ b, err = nil, ErrNotGenesisBlock
+ return
+ }
+ minExpectedTime := bc.dMoment.Add(bc.configs[0].minBlockInterval)
+ if empty {
+ b.Timestamp = minExpectedTime
+ } else {
+ bc.logger.Debug("Calling genesis Application.PreparePayload")
+ if b.Payload, err = bc.app.PreparePayload(b.Position); err != nil {
+ b = nil
+ return
+ }
+ bc.logger.Debug("Calling genesis Application.PrepareWitness")
+ if b.Witness, err = bc.app.PrepareWitness(0); err != nil {
+ b = nil
+ return
+ }
+ if proposeTime.Before(minExpectedTime) {
+ b.Timestamp = minExpectedTime
+ }
+ }
+ } else {
+ tipConfig := bc.tipConfig()
+ if tip.Position.Height+1 != position.Height {
+ b, err = nil, ErrNotFollowTipPosition
+ return
+ }
+ if tipConfig.IsLastBlock(tip) {
+ if tip.Position.Round+1 != position.Round {
+ b, err = nil, ErrRoundNotSwitch
+ return
+ }
+ } else {
+ if tip.Position.Round != position.Round {
+ b, err = nil, ErrInvalidRoundID
+ return
+ }
+ }
+ minExpectedTime := tip.Timestamp.Add(bc.configs[0].minBlockInterval)
+ b.ParentHash = tip.Hash
+ if !empty {
+ bc.logger.Debug("Calling Application.PreparePayload",
+ "position", b.Position)
+ if b.Payload, err = bc.app.PreparePayload(b.Position); err != nil {
+ b = nil
+ return
+ }
+ bc.logger.Debug("Calling Application.PrepareWitness",
+ "height", tip.Witness.Height)
+ if b.Witness, err = bc.app.PrepareWitness(
+ tip.Witness.Height); err != nil {
+ b = nil
+ return
+ }
+ if b.Timestamp.Before(minExpectedTime) {
+ b.Timestamp = minExpectedTime
+ }
+ } else {
+ b.Witness.Height = tip.Witness.Height
+ b.Witness.Data = make([]byte, len(tip.Witness.Data))
+ copy(b.Witness.Data, tip.Witness.Data)
+ b.Timestamp = minExpectedTime
+ }
+ }
+ if empty {
+ if b.Hash, err = utils.HashBlock(b); err != nil {
+ b = nil
+ return
+ }
+ } else {
+ if err = bc.signer.SignBlock(b); err != nil {
+ b = nil
+ return
+ }
+ }
+ return
+}
+
+func (bc *blockChain) tipConfig() blockChainConfig {
+ if bc.lastConfirmed == nil {
+ panic(fmt.Errorf("attempting to access config without tip"))
+ }
+ if bc.lastConfirmed.Position.Round != bc.configs[0].RoundID() {
+ panic(fmt.Errorf("inconsist config and tip: %d %d",
+ bc.lastConfirmed.Position.Round, bc.configs[0].RoundID()))
+ }
+ return bc.configs[0]
+}
+
+func (bc *blockChain) confirmBlock(b *types.Block) {
+ if bc.lastConfirmed != nil &&
+ bc.lastConfirmed.Position.Height+1 != b.Position.Height {
+ panic(fmt.Errorf("confirmed blocks not continuous in height: %s %s",
+ bc.lastConfirmed, b))
+ }
+ bc.logger.Debug("Calling Application.BlockConfirmed", "block", b)
+ bc.app.BlockConfirmed(*b)
+ bc.lastConfirmed = b
+ bc.confirmedBlocks = append(bc.confirmedBlocks, b)
+ bc.purgeConfig()
+}
+
+func (bc *blockChain) setRandomnessFromPending(b *types.Block) bool {
+ if r, exist := bc.pendingRandomnesses[b.Position]; exist {
+ b.Randomness = r
+ delete(bc.pendingRandomnesses, b.Position)
+ return true
+ }
+ return false
+}
+
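+// processAgreementResult verifies the randomness carried by an agreement
+// result and, if the result is newer than the last delivered block, caches the
+// randomness for its position.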
+func (bc *blockChain) processAgreementResult(result *types.AgreementResult) error {
+ if result.Position.Round < DKGDelayRound {
+ return nil
+ }
+ if !result.Position.Newer(bc.lastPosition) {
+ return ErrSkipButNoError
+ }
+ ok, err := bc.verifyRandomness(
+ result.BlockHash, result.Position.Round, result.Randomness)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return ErrIncorrectAgreementResult
+ }
+ bc.lock.Lock()
+ defer bc.lock.Unlock()
+ if !result.Position.Newer(bc.lastDelivered.Position) {
+ return nil
+ }
+ bc.pendingRandomnesses[result.Position] = result.Randomness
+ bc.lastPosition = bc.lastDelivered.Position
+ return nil
+}
+
+func (bc *blockChain) addBlockRandomness(pos types.Position, rand []byte) {
+ if pos.Round < DKGDelayRound {
+ return
+ }
+ bc.lock.Lock()
+ defer bc.lock.Unlock()
+ if !pos.Newer(bc.lastDelivered.Position) {
+ return
+ }
+ bc.pendingRandomnesses[pos] = rand
+}
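+
+// exampleDrainConfirmed is a minimal usage sketch (not part of the upstream
+// code; the helper name is hypothetical) of the flow around BA confirmation:
+// addBlock stores or confirms each block, and extractBlocks then returns the
+// prefix of confirmed blocks whose randomness is already available, ready to
+// be delivered to the application.
+func exampleDrainConfirmed(bc *blockChain, confirmed []*types.Block) (
+ []*types.Block, error) {
+ for _, b := range confirmed {
+ if err := bc.addBlock(b); err != nil {
+ return nil, err
+ }
+ }
+ return bc.extractBlocks(), nil
+}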
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/interfaces.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/interfaces.go
new file mode 100644
index 000000000..c85630775
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/interfaces.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package blockdb
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+var (
+ // ErrBlockExists is the error when the block exists.
+ ErrBlockExists = errors.New("block exists")
+ // ErrBlockDoesNotExist is the error when the block does not exist.
+ ErrBlockDoesNotExist = errors.New("block does not exist")
+ // ErrIterationFinished is the error returned when the iteration is finished.
+ ErrIterationFinished = errors.New("iteration finished")
+ // ErrEmptyPath is the error when the required path is empty.
+ ErrEmptyPath = fmt.Errorf("empty path")
+ // ErrClosed is the error when using DB after it's closed.
+ ErrClosed = fmt.Errorf("db closed")
+ // ErrNotImplemented is the error that some interface is not implemented.
+ ErrNotImplemented = fmt.Errorf("not implemented")
+)
+
+// BlockDatabase is the interface for a block database.
+type BlockDatabase interface {
+ Reader
+ Writer
+
+ // Close allows the database implementation to release resources when
+ // finishing.
+ Close() error
+}
+
+// Reader defines the interface for reading blocks from the DB.
+type Reader interface {
+ Has(hash common.Hash) bool
+ Get(hash common.Hash) (types.Block, error)
+ GetAll() (BlockIterator, error)
+}
+
+// Writer defines the interface for writing blocks into the DB.
+type Writer interface {
+ Update(block types.Block) error
+ Put(block types.Block) error
+}
+
+// BlockIterator defines an iterator over blocks held
+// in a DB.
+type BlockIterator interface {
+ Next() (types.Block, error)
+}
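+
+// exampleReadAllBlocks is a minimal usage sketch (not part of the upstream
+// code; the helper name is hypothetical) showing how a BlockIterator obtained
+// from Reader.GetAll is drained until ErrIterationFinished is returned.
+func exampleReadAllBlocks(r Reader) ([]types.Block, error) {
+ iter, err := r.GetAll()
+ if err != nil {
+ return nil, err
+ }
+ var blocks []types.Block
+ for {
+ b, err := iter.Next()
+ if err == ErrIterationFinished {
+ return blocks, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ blocks = append(blocks, b)
+ }
+}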
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/level-db.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/level-db.go
new file mode 100644
index 000000000..76730fc9c
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/level-db.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package blockdb
+
+import (
+ "encoding/json"
+
+ "github.com/syndtr/goleveldb/leveldb"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+// LevelDBBackedBlockDB is a leveldb backed BlockDB implementation.
+type LevelDBBackedBlockDB struct {
+ db *leveldb.DB
+}
+
+// NewLevelDBBackedBlockDB initializes a leveldb-backed block database.
+func NewLevelDBBackedBlockDB(
+ path string) (lvl *LevelDBBackedBlockDB, err error) {
+
+ db, err := leveldb.OpenFile(path, nil)
+ if err != nil {
+ return
+ }
+ lvl = &LevelDBBackedBlockDB{db: db}
+ return
+}
+
+// Close implements the Closer interface and releases allocated resources.
+func (lvl *LevelDBBackedBlockDB) Close() error {
+ return lvl.db.Close()
+}
+
+// Has implements the Reader.Has method.
+func (lvl *LevelDBBackedBlockDB) Has(hash common.Hash) bool {
+ exists, err := lvl.db.Has([]byte(hash[:]), nil)
+ if err != nil {
+ // TODO(missionliao): Modify the interface to return error.
+ panic(err)
+ }
+ return exists
+}
+
+// Get implements the Reader.Get method.
+func (lvl *LevelDBBackedBlockDB) Get(
+ hash common.Hash) (block types.Block, err error) {
+
+ queried, err := lvl.db.Get([]byte(hash[:]), nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ err = ErrBlockDoesNotExist
+ }
+ return
+ }
+ err = json.Unmarshal(queried, &block)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// Update implements the Writer.Update method.
+func (lvl *LevelDBBackedBlockDB) Update(block types.Block) (err error) {
+ // NOTE: we do not handle changes of the block hash (and they should not
+ // happen).
+ marshaled, err := json.Marshal(&block)
+ if err != nil {
+ return
+ }
+
+ if !lvl.Has(block.Hash) {
+ err = ErrBlockDoesNotExist
+ return
+ }
+ err = lvl.db.Put(
+ []byte(block.Hash[:]),
+ marshaled,
+ nil)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// Put implements the Writer.Put method.
+func (lvl *LevelDBBackedBlockDB) Put(block types.Block) (err error) {
+ marshaled, err := json.Marshal(&block)
+ if err != nil {
+ return
+ }
+ if lvl.Has(block.Hash) {
+ err = ErrBlockExists
+ return
+ }
+ err = lvl.db.Put(
+ []byte(block.Hash[:]),
+ marshaled,
+ nil)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// GetAll implements the Reader.GetAll method, which allows callers
+// to retrieve all blocks in the DB.
+func (lvl *LevelDBBackedBlockDB) GetAll() (BlockIterator, error) {
+ // TODO (mission): Implement this part via goleveldb's iterator.
+ return nil, ErrNotImplemented
+}
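+
+// exampleRoundTrip is a minimal usage sketch (not part of the upstream code;
+// the helper name is hypothetical) of the Put/Get round trip against a
+// leveldb-backed database: Put rejects duplicated hashes with ErrBlockExists,
+// and Get returns ErrBlockDoesNotExist for unknown hashes.
+func exampleRoundTrip(path string, block types.Block) (types.Block, error) {
+ dbInst, err := NewLevelDBBackedBlockDB(path)
+ if err != nil {
+ return types.Block{}, err
+ }
+ defer dbInst.Close()
+ if err := dbInst.Put(block); err != nil && err != ErrBlockExists {
+ return types.Block{}, err
+ }
+ return dbInst.Get(block.Hash)
+}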
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/memory.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/memory.go
new file mode 100644
index 000000000..b45af229b
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/blockdb/memory.go
@@ -0,0 +1,183 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package blockdb
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "sync"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+type seqIterator struct {
+ idx int
+ db *MemBackedBlockDB
+}
+
+func (seq *seqIterator) Next() (types.Block, error) {
+ curIdx := seq.idx
+ seq.idx++
+ return seq.db.getByIndex(curIdx)
+}
+
+// MemBackedBlockDB is a memory-backed BlockDB implementation.
+type MemBackedBlockDB struct {
+ blocksMutex sync.RWMutex
+ blockHashSequence common.Hashes
+ blocksByHash map[common.Hash]*types.Block
+ persistantFilePath string
+}
+
+// NewMemBackedBlockDB initializes a memory-backed block database.
+func NewMemBackedBlockDB(persistantFilePath ...string) (db *MemBackedBlockDB, err error) {
+ db = &MemBackedBlockDB{
+ blockHashSequence: common.Hashes{},
+ blocksByHash: make(map[common.Hash]*types.Block),
+ }
+ if len(persistantFilePath) == 0 || len(persistantFilePath[0]) == 0 {
+ return
+ }
+ db.persistantFilePath = persistantFilePath[0]
+ buf, err := ioutil.ReadFile(db.persistantFilePath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ // Something unexpected happened.
+ return
+ }
+ // It is expected that the file may not exist; we should not
+ // report an error in that case.
+ err = nil
+ return
+ }
+
+ // Initialize this instance from the file content; the anonymous struct is
+ // a temporary way to expose the private fields for JSON decoding.
+ toLoad := struct {
+ Sequence common.Hashes
+ ByHash map[common.Hash]*types.Block
+ }{}
+ err = json.Unmarshal(buf, &toLoad)
+ if err != nil {
+ return
+ }
+ db.blockHashSequence = toLoad.Sequence
+ db.blocksByHash = toLoad.ByHash
+ return
+}
+
+// Has returns whether or not the DB has a block identified by the hash.
+func (m *MemBackedBlockDB) Has(hash common.Hash) bool {
+ m.blocksMutex.RLock()
+ defer m.blocksMutex.RUnlock()
+
+ _, ok := m.blocksByHash[hash]
+ return ok
+}
+
+// Get returns a block given a hash.
+func (m *MemBackedBlockDB) Get(hash common.Hash) (types.Block, error) {
+ m.blocksMutex.RLock()
+ defer m.blocksMutex.RUnlock()
+
+ return m.internalGet(hash)
+}
+
+func (m *MemBackedBlockDB) internalGet(hash common.Hash) (types.Block, error) {
+ b, ok := m.blocksByHash[hash]
+ if !ok {
+ return types.Block{}, ErrBlockDoesNotExist
+ }
+ return *b, nil
+}
+
+// Put inserts a new block into the database.
+func (m *MemBackedBlockDB) Put(block types.Block) error {
+ if m.Has(block.Hash) {
+ return ErrBlockExists
+ }
+
+ m.blocksMutex.Lock()
+ defer m.blocksMutex.Unlock()
+
+ m.blockHashSequence = append(m.blockHashSequence, block.Hash)
+ m.blocksByHash[block.Hash] = &block
+ return nil
+}
+
+// Update updates a block in the database.
+func (m *MemBackedBlockDB) Update(block types.Block) error {
+ if !m.Has(block.Hash) {
+ return ErrBlockDoesNotExist
+ }
+
+ m.blocksMutex.Lock()
+ defer m.blocksMutex.Unlock()
+
+ m.blocksByHash[block.Hash] = &block
+ return nil
+}
+
+// Close implements the Closer interface and releases allocated resources.
+func (m *MemBackedBlockDB) Close() (err error) {
+ // Save the internal state to a JSON file; the anonymous struct is a
+ // temporary way to dump the private fields via JSON encoding.
+ if len(m.persistantFilePath) == 0 {
+ return
+ }
+
+ m.blocksMutex.RLock()
+ defer m.blocksMutex.RUnlock()
+
+ toDump := struct {
+ Sequence common.Hashes
+ ByHash map[common.Hash]*types.Block
+ }{
+ Sequence: m.blockHashSequence,
+ ByHash: m.blocksByHash,
+ }
+
+ // Dump to JSON.
+ buf, err := json.Marshal(&toDump)
+ if err != nil {
+ return
+ }
+
+ err = ioutil.WriteFile(m.persistantFilePath, buf, 0644)
+ return
+}
+
+func (m *MemBackedBlockDB) getByIndex(idx int) (types.Block, error) {
+ m.blocksMutex.RLock()
+ defer m.blocksMutex.RUnlock()
+
+ if idx >= len(m.blockHashSequence) {
+ return types.Block{}, ErrIterationFinished
+ }
+
+ hash := m.blockHashSequence[idx]
+ return m.internalGet(hash)
+}
+
+// GetAll implements the Reader.GetAll method, which allows callers
+// to retrieve all blocks in the DB.
+func (m *MemBackedBlockDB) GetAll() (BlockIterator, error) {
+ return &seqIterator{db: m}, nil
+}
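+
+// examplePersist is a minimal usage sketch (not part of the upstream code; the
+// helper name is hypothetical): a memory-backed database only persists its
+// content when Close succeeds, and a later NewMemBackedBlockDB call with the
+// same path reloads that content.
+func examplePersist(path string, block types.Block) error {
+ dbInst, err := NewMemBackedBlockDB(path)
+ if err != nil {
+ return err
+ }
+ if err := dbInst.Put(block); err != nil && err != ErrBlockExists {
+ return err
+ }
+ return dbInst.Close()
+}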
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/configuration-chain.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/configuration-chain.go
new file mode 100644
index 000000000..0f1400cb5
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/configuration-chain.go
@@ -0,0 +1,795 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/db"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+// Errors for configuration chain.
+var (
+ ErrDKGNotRegistered = fmt.Errorf(
+ "not yet registered in DKG protocol")
+ ErrTSigAlreadyRunning = fmt.Errorf(
+ "tsig is already running")
+ ErrDKGNotReady = fmt.Errorf(
+ "DKG is not ready")
+ ErrSkipButNoError = fmt.Errorf(
+ "skip but no error")
+ ErrDKGAborted = fmt.Errorf(
+ "DKG is aborted")
+)
+
+// ErrMismatchDKG represents a failed attempt to run the DKG protocol because
+// the registered DKG protocol mismatches, in terms of round and resetCount.
+type ErrMismatchDKG struct {
+ expectRound, expectReset uint64
+ actualRound, actualReset uint64
+}
+
+func (e ErrMismatchDKG) Error() string {
+ return fmt.Sprintf(
+ "mismatch DKG, abort running: expect(%d %d) actual(%d %d)",
+ e.expectRound, e.expectReset, e.actualRound, e.actualReset)
+}
+
+type dkgStepFn func(round uint64, reset uint64) error
+
+type configurationChain struct {
+ ID types.NodeID
+ recv dkgReceiver
+ gov Governance
+ dkg *dkgProtocol
+ dkgRunPhases []dkgStepFn
+ logger common.Logger
+ dkgLock sync.RWMutex
+ dkgSigner map[uint64]*dkgShareSecret
+ npks map[uint64]*typesDKG.NodePublicKeys
+ complaints []*typesDKG.Complaint
+ dkgResult sync.RWMutex
+ tsig map[common.Hash]*tsigProtocol
+ tsigTouched map[common.Hash]struct{}
+ tsigReady *sync.Cond
+ cache *utils.NodeSetCache
+ db db.Database
+ notarySet map[types.NodeID]struct{}
+ mpkReady bool
+ pendingPrvShare map[types.NodeID]*typesDKG.PrivateShare
+ // TODO(jimmy-dexon): add timeout to pending psig.
+ pendingPsig map[common.Hash][]*typesDKG.PartialSignature
+ prevHash common.Hash
+ dkgCtx context.Context
+ dkgCtxCancel context.CancelFunc
+ dkgRunning bool
+}
+
+func newConfigurationChain(
+ ID types.NodeID,
+ recv dkgReceiver,
+ gov Governance,
+ cache *utils.NodeSetCache,
+ dbInst db.Database,
+ logger common.Logger) *configurationChain {
+ configurationChain := &configurationChain{
+ ID: ID,
+ recv: recv,
+ gov: gov,
+ logger: logger,
+ dkgSigner: make(map[uint64]*dkgShareSecret),
+ npks: make(map[uint64]*typesDKG.NodePublicKeys),
+ tsig: make(map[common.Hash]*tsigProtocol),
+ tsigTouched: make(map[common.Hash]struct{}),
+ tsigReady: sync.NewCond(&sync.Mutex{}),
+ cache: cache,
+ db: dbInst,
+ pendingPsig: make(map[common.Hash][]*typesDKG.PartialSignature),
+ }
+ configurationChain.initDKGPhasesFunc()
+ return configurationChain
+}
+
+func (cc *configurationChain) abortDKG(
+ parentCtx context.Context,
+ round, reset uint64) bool {
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ if cc.dkg != nil {
+ return cc.abortDKGNoLock(parentCtx, round, reset)
+ }
+ return false
+}
+
+func (cc *configurationChain) abortDKGNoLock(
+ ctx context.Context,
+ round, reset uint64) bool {
+ if cc.dkg.round > round ||
+ (cc.dkg.round == round && cc.dkg.reset > reset) {
+ cc.logger.Error("Newer DKG already is registered",
+ "round", round,
+ "reset", reset)
+ return false
+ }
+ cc.logger.Error("Previous DKG is not finished",
+ "round", round,
+ "reset", reset,
+ "previous-round", cc.dkg.round,
+ "previous-reset", cc.dkg.reset)
+ // Abort DKG routine in previous round.
+ cc.logger.Error("Aborting DKG in previous round",
+ "round", round,
+ "previous-round", cc.dkg.round)
+ // Notify current running DKG protocol to abort.
+ if cc.dkgCtxCancel != nil {
+ cc.dkgCtxCancel()
+ }
+ cc.dkgLock.Unlock()
+ // Wait for current running DKG protocol aborting.
+ for {
+ cc.dkgLock.Lock()
+ if !cc.dkgRunning {
+ cc.dkg = nil
+ break
+ }
+ select {
+ case <-ctx.Done():
+ return false
+ case <-time.After(100 * time.Millisecond):
+ }
+ cc.dkgLock.Unlock()
+ }
+ cc.logger.Error("Previous DKG aborted",
+ "round", round,
+ "reset", reset)
+ return cc.dkg == nil
+}
+
+func (cc *configurationChain) registerDKG(
+ parentCtx context.Context,
+ round, reset uint64,
+ threshold int) {
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ if cc.dkg != nil {
+ // Make sure we only proceed when cc.dkg is nil.
+ if !cc.abortDKGNoLock(parentCtx, round, reset) {
+ return
+ }
+ select {
+ case <-parentCtx.Done():
+ return
+ default:
+ }
+ if cc.dkg != nil {
+ // This panic is only raised when there are multiple attempts to register
+ // a DKG protocol at the same time.
+ panic(ErrMismatchDKG{
+ expectRound: round,
+ expectReset: reset,
+ actualRound: cc.dkg.round,
+ actualReset: cc.dkg.reset,
+ })
+ }
+ }
+ notarySet, err := cc.cache.GetNotarySet(round)
+ if err != nil {
+ cc.logger.Error("Error getting notary set from cache", "error", err)
+ return
+ }
+ cc.notarySet = notarySet
+ cc.pendingPrvShare = make(map[types.NodeID]*typesDKG.PrivateShare)
+ cc.mpkReady = false
+ cc.dkg, err = recoverDKGProtocol(cc.ID, cc.recv, round, reset, cc.db)
+ cc.dkgCtx, cc.dkgCtxCancel = context.WithCancel(parentCtx)
+ if err != nil {
+ panic(err)
+ }
+ if cc.dkg == nil {
+ cc.dkg = newDKGProtocol(
+ cc.ID,
+ cc.recv,
+ round,
+ reset,
+ threshold)
+
+ err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo())
+ if err != nil {
+ cc.logger.Error("Error put or update DKG protocol", "error",
+ err)
+ return
+ }
+ }
+
+ go func() {
+ ticker := newTicker(cc.gov, round, TickerDKG)
+ defer ticker.Stop()
+ <-ticker.Tick()
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ if cc.dkg != nil && cc.dkg.round == round && cc.dkg.reset == reset {
+ cc.dkg.proposeMPKReady()
+ }
+ }()
+}
+
+func (cc *configurationChain) runDKGPhaseOne(round uint64, reset uint64) error {
+ if cc.dkg.round < round ||
+ (cc.dkg.round == round && cc.dkg.reset < reset) {
+ return ErrDKGNotRegistered
+ }
+ if cc.dkg.round != round || cc.dkg.reset != reset {
+ cc.logger.Warn("DKG canceled", "round", round, "reset", reset)
+ return ErrSkipButNoError
+ }
+ cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
+ if cc.gov.IsDKGFinal(round) {
+ cc.logger.Warn("DKG already final", "round", round)
+ return ErrSkipButNoError
+ }
+ cc.logger.Debug("Calling Governance.IsDKGMPKReady", "round", round)
+ var err error
+ for err == nil && !cc.gov.IsDKGMPKReady(round) {
+ cc.dkgLock.Unlock()
+ cc.logger.Debug("DKG MPKs are not ready yet. Try again later...",
+ "nodeID", cc.ID,
+ "round", round)
+ select {
+ case <-cc.dkgCtx.Done():
+ err = ErrDKGAborted
+ case <-time.After(500 * time.Millisecond):
+ }
+ cc.dkgLock.Lock()
+ }
+ return err
+}
+
+func (cc *configurationChain) runDKGPhaseTwoAndThree(
+ round uint64, reset uint64) error {
+ // Check if this node successfully joined the protocol.
+ cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
+ mpks := cc.gov.DKGMasterPublicKeys(round)
+ inProtocol := false
+ for _, mpk := range mpks {
+ if mpk.ProposerID == cc.ID {
+ inProtocol = true
+ break
+ }
+ }
+ if !inProtocol {
+ cc.logger.Warn("Failed to join DKG protocol",
+ "round", round,
+ "reset", reset)
+ return ErrSkipButNoError
+ }
+ // Phase 2(T = 0): Exchange DKG secret key share.
+ if err := cc.dkg.processMasterPublicKeys(mpks); err != nil {
+ cc.logger.Error("Failed to process master public key",
+ "round", round,
+ "reset", reset,
+ "error", err)
+ }
+ cc.mpkReady = true
+ // Processing private shares might take a long time; check for abort before
+ // entering that loop.
+ select {
+ case <-cc.dkgCtx.Done():
+ return ErrDKGAborted
+ default:
+ }
+ for _, prvShare := range cc.pendingPrvShare {
+ if err := cc.dkg.processPrivateShare(prvShare); err != nil {
+ cc.logger.Error("Failed to process private share",
+ "round", round,
+ "reset", reset,
+ "error", err)
+ }
+ }
+
+ // Phase 3(T = 0~λ): Propose complaints.
+ // Proposing complaints is done in `processMasterPublicKeys`.
+ return nil
+}
+
+func (cc *configurationChain) runDKGPhaseFour() {
+ // Phase 4(T = λ): Propose nack complaints.
+ cc.dkg.proposeNackComplaints()
+}
+
+func (cc *configurationChain) runDKGPhaseFiveAndSix(round uint64, reset uint64) {
+ // Phase 5(T = 2λ): Propose anti-nack complaints.
+ cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
+ cc.complaints = cc.gov.DKGComplaints(round)
+ if err := cc.dkg.processNackComplaints(cc.complaints); err != nil {
+ cc.logger.Error("Failed to process NackComplaint",
+ "round", round,
+ "reset", reset,
+ "error", err)
+ }
+
+ // Phase 6(T = 3λ): Rebroadcast anti-nack complaints.
+ // Rebroadcast is done in `processPrivateShare`.
+}
+
+func (cc *configurationChain) runDKGPhaseSeven() {
+ // Phase 7(T = 4λ): Enforce complaints and nack complaints.
+ cc.dkg.enforceNackComplaints(cc.complaints)
+ // Enforcing complaints is done in `processPrivateShare`.
+}
+
+func (cc *configurationChain) runDKGPhaseEight() {
+ // Phase 8(T = 5λ): DKG finalize.
+ cc.dkg.proposeFinalize()
+}
+
+func (cc *configurationChain) runDKGPhaseNine(round uint64, reset uint64) error {
+ // Phase 9(T = 6λ): DKG is ready.
+ // Normally, IsDKGFinal would return true here. Keep polling in case of
+ // unexpected network fluctuation to ensure the robustness of the DKG
+ // protocol.
+ cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
+ var err error
+ for err == nil && !cc.gov.IsDKGFinal(round) {
+ cc.dkgLock.Unlock()
+ cc.logger.Debug("DKG is not ready yet. Try again later...",
+ "nodeID", cc.ID.String()[:6],
+ "round", round,
+ "reset", reset)
+ select {
+ case <-cc.dkgCtx.Done():
+ err = ErrDKGAborted
+ case <-time.After(500 * time.Millisecond):
+ }
+ cc.dkgLock.Lock()
+ }
+ if err != nil {
+ return err
+ }
+ cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
+ cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
+ npks, err := typesDKG.NewNodePublicKeys(round,
+ cc.gov.DKGMasterPublicKeys(round),
+ cc.gov.DKGComplaints(round),
+ cc.dkg.threshold)
+ if err != nil {
+ return err
+ }
+ qualifies := ""
+ for nID := range npks.QualifyNodeIDs {
+ qualifies += fmt.Sprintf("%s ", nID.String()[:6])
+ }
+ cc.logger.Info("Qualify Nodes",
+ "nodeID", cc.ID,
+ "round", round,
+ "reset", reset,
+ "count", len(npks.QualifyIDs),
+ "qualifies", qualifies)
+ if _, exist := npks.QualifyNodeIDs[cc.ID]; !exist {
+ cc.logger.Warn("Self is not in Qualify Nodes",
+ "round", round,
+ "reset", reset)
+ return nil
+ }
+ signer, err := cc.dkg.recoverShareSecret(npks.QualifyIDs)
+ if err != nil {
+ return err
+ }
+ // Save the private key share to DB.
+ if err =
+ cc.db.PutDKGPrivateKey(round, reset, *signer.privateKey); err != nil {
+ return err
+ }
+ cc.dkg.proposeSuccess()
+ cc.dkgResult.Lock()
+ defer cc.dkgResult.Unlock()
+ cc.dkgSigner[round] = signer
+ cc.npks[round] = npks
+ return nil
+}
+
+func (cc *configurationChain) initDKGPhasesFunc() {
+ cc.dkgRunPhases = []dkgStepFn{
+ func(round uint64, reset uint64) error {
+ return cc.runDKGPhaseOne(round, reset)
+ },
+ func(round uint64, reset uint64) error {
+ return cc.runDKGPhaseTwoAndThree(round, reset)
+ },
+ func(round uint64, reset uint64) error {
+ cc.runDKGPhaseFour()
+ return nil
+ },
+ func(round uint64, reset uint64) error {
+ cc.runDKGPhaseFiveAndSix(round, reset)
+ return nil
+ },
+ func(round uint64, reset uint64) error {
+ cc.runDKGPhaseSeven()
+ return nil
+ },
+ func(round uint64, reset uint64) error {
+ cc.runDKGPhaseEight()
+ return nil
+ },
+ func(round uint64, reset uint64) error {
+ return cc.runDKGPhaseNine(round, reset)
+ },
+ }
+}
+
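+// runDKG drives the registered dkgProtocol through its phase functions. Each
+// phase lasts phaseHeight = LambdaDKG / MinBlockInterval blocks and phase i is
+// scheduled at height dkgBeginHeight + i*phaseHeight; for example, with
+// LambdaDKG = 10s and MinBlockInterval = 1s, phase 3 starts 30 blocks after
+// dkgBeginHeight. Phases already passed according to dkgHeight are skipped.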
+func (cc *configurationChain) runDKG(
+ round uint64, reset uint64, event *common.Event,
+ dkgBeginHeight, dkgHeight uint64) (err error) {
+ // Check if corresponding DKG signer is ready.
+ if _, _, err = cc.getDKGInfo(round, false); err == nil {
+ return ErrSkipButNoError
+ }
+ cfg := utils.GetConfigWithPanic(cc.gov, round, cc.logger)
+ phaseHeight := uint64(
+ cfg.LambdaDKG.Nanoseconds() / cfg.MinBlockInterval.Nanoseconds())
+ skipPhase := int(dkgHeight / phaseHeight)
+ cc.logger.Info("Skipping DKG phase", "phase", skipPhase)
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ if cc.dkg == nil {
+ return ErrDKGNotRegistered
+ }
+ // Make sure the existing dkgProtocol is the expected one.
+ if cc.dkg.round != round || cc.dkg.reset != reset {
+ return ErrMismatchDKG{
+ expectRound: round,
+ expectReset: reset,
+ actualRound: cc.dkg.round,
+ actualReset: cc.dkg.reset,
+ }
+ }
+ if cc.dkgRunning {
+ panic(fmt.Errorf("duplicated call to runDKG: %d %d", round, reset))
+ }
+ cc.dkgRunning = true
+ defer func() {
+ // We hold cc.dkgLock here; reset cc.dkg to nil when done.
+ if cc.dkg != nil {
+ cc.dkg = nil
+ }
+ cc.dkgRunning = false
+ }()
+ wg := sync.WaitGroup{}
+ var dkgError error
+ // Make a copy of cc.dkgCtx so each phase function can refer to the correct
+ // context.
+ ctx := cc.dkgCtx
+ cc.dkg.step = skipPhase
+ for i := skipPhase; i < len(cc.dkgRunPhases); i++ {
+ wg.Add(1)
+ event.RegisterHeight(dkgBeginHeight+phaseHeight*uint64(i), func(uint64) {
+ go func() {
+ defer wg.Done()
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ if dkgError != nil {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ dkgError = ErrDKGAborted
+ return
+ default:
+ }
+
+ err := cc.dkgRunPhases[cc.dkg.step](round, reset)
+ if err == nil || err == ErrSkipButNoError {
+ err = nil
+ cc.dkg.step++
+ err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo())
+ if err != nil {
+ cc.logger.Error("Failed to save DKG Protocol",
+ "step", cc.dkg.step,
+ "error", err)
+ }
+ }
+ if err != nil && dkgError == nil {
+ dkgError = err
+ }
+ }()
+ })
+ }
+ cc.dkgLock.Unlock()
+ wgChan := make(chan struct{}, 1)
+ go func() {
+ wg.Wait()
+ wgChan <- struct{}{}
+ }()
+ select {
+ case <-cc.dkgCtx.Done():
+ case <-wgChan:
+ }
+ cc.dkgLock.Lock()
+ select {
+ case <-cc.dkgCtx.Done():
+ return ErrDKGAborted
+ default:
+ }
+ return dkgError
+}
+
+func (cc *configurationChain) isDKGFinal(round uint64) bool {
+ if !cc.gov.IsDKGFinal(round) {
+ return false
+ }
+ _, _, err := cc.getDKGInfo(round, false)
+ return err == nil
+}
+
+func (cc *configurationChain) getDKGInfo(
+ round uint64, ignoreSigner bool) (
+ *typesDKG.NodePublicKeys, *dkgShareSecret, error) {
+ getFromCache := func() (*typesDKG.NodePublicKeys, *dkgShareSecret) {
+ cc.dkgResult.RLock()
+ defer cc.dkgResult.RUnlock()
+ npks := cc.npks[round]
+ signer := cc.dkgSigner[round]
+ return npks, signer
+ }
+ npks, signer := getFromCache()
+ if npks == nil || (!ignoreSigner && signer == nil) {
+ if err := cc.recoverDKGInfo(round, ignoreSigner); err != nil {
+ return nil, nil, err
+ }
+ npks, signer = getFromCache()
+ }
+ if npks == nil || (!ignoreSigner && signer == nil) {
+ return nil, nil, ErrDKGNotReady
+ }
+ return npks, signer, nil
+}
+
+func (cc *configurationChain) recoverDKGInfo(
+ round uint64, ignoreSigner bool) error {
+ var npksExists, signerExists bool
+ func() {
+ cc.dkgResult.Lock()
+ defer cc.dkgResult.Unlock()
+ _, signerExists = cc.dkgSigner[round]
+ _, npksExists = cc.npks[round]
+ }()
+ if signerExists && npksExists {
+ return nil
+ }
+ if !cc.gov.IsDKGFinal(round) {
+ return ErrDKGNotReady
+ }
+
+ threshold := utils.GetDKGThreshold(
+ utils.GetConfigWithPanic(cc.gov, round, cc.logger))
+ cc.logger.Debug("Calling Governance.DKGMasterPublicKeys for recoverDKGInfo",
+ "round", round)
+ mpk := cc.gov.DKGMasterPublicKeys(round)
+ cc.logger.Debug("Calling Governance.DKGComplaints for recoverDKGInfo",
+ "round", round)
+ comps := cc.gov.DKGComplaints(round)
+ qualifies, _, err := typesDKG.CalcQualifyNodes(mpk, comps, threshold)
+ if err != nil {
+ return err
+ }
+ if len(qualifies) <
+ utils.GetDKGValidThreshold(utils.GetConfigWithPanic(
+ cc.gov, round, cc.logger)) {
+ return typesDKG.ErrNotReachThreshold
+ }
+
+ if !npksExists {
+ npks, err := typesDKG.NewNodePublicKeys(round,
+ cc.gov.DKGMasterPublicKeys(round),
+ cc.gov.DKGComplaints(round),
+ threshold)
+ if err != nil {
+ cc.logger.Warn("Failed to create DKGNodePublicKeys",
+ "round", round, "error", err)
+ return err
+ }
+ func() {
+ cc.dkgResult.Lock()
+ defer cc.dkgResult.Unlock()
+ cc.npks[round] = npks
+ }()
+ }
+ if !signerExists && !ignoreSigner {
+ reset := cc.gov.DKGResetCount(round)
+ // Check if we have private shares in DB.
+ prvKey, err := cc.db.GetDKGPrivateKey(round, reset)
+ if err != nil {
+ cc.logger.Warn("Failed to create DKGPrivateKey",
+ "round", round, "error", err)
+ dkgProtocolInfo, err := cc.db.GetDKGProtocol()
+ if err != nil {
+ cc.logger.Warn("Unable to recover DKGProtocolInfo",
+ "round", round, "error", err)
+ return err
+ }
+ if dkgProtocolInfo.Round != round {
+ cc.logger.Warn("DKGProtocolInfo round mismatch",
+ "round", round, "infoRound", dkgProtocolInfo.Round)
+ return err
+ }
+ prvKeyRecover, err :=
+ dkgProtocolInfo.PrvShares.RecoverPrivateKey(qualifies)
+ if err != nil {
+ cc.logger.Warn("Failed to recover DKGPrivateKey",
+ "round", round, "error", err)
+ return err
+ }
+ if err = cc.db.PutDKGPrivateKey(
+ round, reset, *prvKeyRecover); err != nil {
+ cc.logger.Warn("Failed to save DKGPrivateKey",
+ "round", round, "error", err)
+ }
+ prvKey = *prvKeyRecover
+ }
+ func() {
+ cc.dkgResult.Lock()
+ defer cc.dkgResult.Unlock()
+ cc.dkgSigner[round] = &dkgShareSecret{
+ privateKey: &prvKey,
+ }
+ }()
+ }
+ return nil
+}
+
+func (cc *configurationChain) preparePartialSignature(
+ round uint64, hash common.Hash) (*typesDKG.PartialSignature, error) {
+ _, signer, _ := cc.getDKGInfo(round, false)
+ if signer == nil {
+ return nil, ErrDKGNotReady
+ }
+ return &typesDKG.PartialSignature{
+ ProposerID: cc.ID,
+ Round: round,
+ Hash: hash,
+ PartialSignature: signer.sign(hash),
+ }, nil
+}
+
+func (cc *configurationChain) touchTSigHash(hash common.Hash) (first bool) {
+ cc.tsigReady.L.Lock()
+ defer cc.tsigReady.L.Unlock()
+ _, exist := cc.tsigTouched[hash]
+ cc.tsigTouched[hash] = struct{}{}
+ return !exist
+}
+
+func (cc *configurationChain) untouchTSigHash(hash common.Hash) {
+ cc.tsigReady.L.Lock()
+ defer cc.tsigReady.L.Unlock()
+ delete(cc.tsigTouched, hash)
+}
+
+func (cc *configurationChain) runTSig(
+ round uint64, hash common.Hash, wait time.Duration) (
+ crypto.Signature, error) {
+ npks, _, _ := cc.getDKGInfo(round, false)
+ if npks == nil {
+ return crypto.Signature{}, ErrDKGNotReady
+ }
+ cc.tsigReady.L.Lock()
+ defer cc.tsigReady.L.Unlock()
+ if _, exist := cc.tsig[hash]; exist {
+ return crypto.Signature{}, ErrTSigAlreadyRunning
+ }
+ cc.tsig[hash] = newTSigProtocol(npks, hash)
+ pendingPsig := cc.pendingPsig[hash]
+ delete(cc.pendingPsig, hash)
+ go func() {
+ for _, psig := range pendingPsig {
+ if err := cc.processPartialSignature(psig); err != nil {
+ cc.logger.Error("Failed to process partial signature",
+ "nodeID", cc.ID,
+ "error", err)
+ }
+ }
+ }()
+ timeout := make(chan struct{}, 1)
+ go func() {
+ time.Sleep(wait)
+ timeout <- struct{}{}
+ cc.tsigReady.Broadcast()
+ }()
+ var signature crypto.Signature
+ var err error
+ for func() bool {
+ signature, err = cc.tsig[hash].signature()
+ select {
+ case <-timeout:
+ return false
+ default:
+ }
+ return err == ErrNotEnoughtPartialSignatures
+ }() {
+ cc.tsigReady.Wait()
+ }
+ delete(cc.tsig, hash)
+ if err != nil {
+ return crypto.Signature{}, err
+ }
+ return signature, nil
+}
+
+func (cc *configurationChain) runCRSTSig(
+ round uint64, crs common.Hash) ([]byte, error) {
+ sig, err := cc.runTSig(round, crs, cc.gov.Configuration(round).LambdaDKG*5)
+ cc.logger.Info("CRS",
+ "nodeID", cc.ID,
+ "round", round+1,
+ "signature", sig)
+ return sig.Signature[:], err
+}
+
+func (cc *configurationChain) processPrivateShare(
+ prvShare *typesDKG.PrivateShare) error {
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ if cc.dkg == nil {
+ return nil
+ }
+ if _, exist := cc.notarySet[prvShare.ProposerID]; !exist {
+ return ErrNotDKGParticipant
+ }
+ if !cc.mpkReady {
+ // TODO(jimmy-dexon): remove duplicated signature check in dkg module.
+ ok, err := utils.VerifyDKGPrivateShareSignature(prvShare)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return ErrIncorrectPrivateShareSignature
+ }
+ cc.pendingPrvShare[prvShare.ProposerID] = prvShare
+ return nil
+ }
+ return cc.dkg.processPrivateShare(prvShare)
+}
+
+func (cc *configurationChain) processPartialSignature(
+ psig *typesDKG.PartialSignature) error {
+ cc.tsigReady.L.Lock()
+ defer cc.tsigReady.L.Unlock()
+ if _, exist := cc.tsig[psig.Hash]; !exist {
+ ok, err := utils.VerifyDKGPartialSignatureSignature(psig)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return ErrIncorrectPartialSignatureSignature
+ }
+ cc.pendingPsig[psig.Hash] = append(cc.pendingPsig[psig.Hash], psig)
+ return nil
+ }
+ if err := cc.tsig[psig.Hash].processPartialSignature(psig); err != nil {
+ return err
+ }
+ cc.tsigReady.Broadcast()
+ return nil
+}
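+
+// exampleSignCRS is a minimal sketch (not part of the upstream code; the
+// helper name is hypothetical) of the threshold-signature flow in this file:
+// the first caller touches the hash so duplicated requests can be skipped,
+// partial signatures received via processPartialSignature are buffered until
+// runTSig starts, and runCRSTSig blocks until enough partial signatures arrive
+// or the wait duration elapses.
+func exampleSignCRS(cc *configurationChain, round uint64, crs common.Hash) (
+ []byte, error) {
+ if !cc.touchTSigHash(crs) {
+ return nil, ErrTSigAlreadyRunning
+ }
+ defer cc.untouchTSigHash(crs)
+ return cc.runCRSTSig(round, crs)
+}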
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/consensus.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/consensus.go
new file mode 100644
index 000000000..8b2b9a048
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/consensus.go
@@ -0,0 +1,1567 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ cryptoDKG "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg"
+ "github.com/byzantine-lab/dexon-consensus/core/db"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+// Errors for consensus core.
+var (
+ ErrProposerNotInNodeSet = fmt.Errorf(
+ "proposer is not in node set")
+ ErrIncorrectHash = fmt.Errorf(
+ "hash of block is incorrect")
+ ErrIncorrectSignature = fmt.Errorf(
+ "signature of block is incorrect")
+ ErrUnknownBlockProposed = fmt.Errorf(
+ "unknown block is proposed")
+ ErrIncorrectAgreementResultPosition = fmt.Errorf(
+ "incorrect agreement result position")
+ ErrNotEnoughVotes = fmt.Errorf(
+ "not enought votes")
+ ErrCRSNotReady = fmt.Errorf(
+ "CRS not ready")
+ ErrConfigurationNotReady = fmt.Errorf(
+ "Configuration not ready")
+ ErrIncorrectBlockRandomness = fmt.Errorf(
+ "randomness of block is incorrect")
+ ErrCannotVerifyBlockRandomness = fmt.Errorf(
+ "cannot verify block randomness")
+)
+
+type selfAgreementResult types.AgreementResult
+
+// consensusBAReceiver implements agreementReceiver.
+type consensusBAReceiver struct {
+ consensus *Consensus
+ agreementModule *agreement
+ emptyBlockHashMap *sync.Map
+ isNotary bool
+ restartNotary chan types.Position
+ npks *typesDKG.NodePublicKeys
+ psigSigner *dkgShareSecret
+}
+
+func (recv *consensusBAReceiver) emptyBlockHash(pos types.Position) (
+ common.Hash, error) {
+ hashVal, ok := recv.emptyBlockHashMap.Load(pos)
+ if ok {
+ return hashVal.(common.Hash), nil
+ }
+ emptyBlock, err := recv.consensus.bcModule.prepareBlock(
+ pos, time.Time{}, true)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ hash, err := utils.HashBlock(emptyBlock)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ recv.emptyBlockHashMap.Store(pos, hash)
+ return hash, nil
+}
+
+func (recv *consensusBAReceiver) VerifyPartialSignature(vote *types.Vote) (
+ bool, bool) {
+ if vote.Position.Round >= DKGDelayRound && vote.BlockHash != types.SkipBlockHash {
+ if vote.Type == types.VoteCom || vote.Type == types.VoteFastCom {
+ if recv.npks == nil {
+ recv.consensus.logger.Debug(
+ "Unable to verify psig, npks is nil",
+ "vote", vote)
+ return false, false
+ }
+ if vote.Position.Round != recv.npks.Round {
+ recv.consensus.logger.Debug(
+ "Unable to verify psig, round of npks mismatch",
+ "vote", vote,
+ "npksRound", recv.npks.Round)
+ return false, false
+ }
+ pubKey, exist := recv.npks.PublicKeys[vote.ProposerID]
+ if !exist {
+ recv.consensus.logger.Debug(
+ "Unable to verify psig, proposer is not qualified",
+ "vote", vote)
+ return false, true
+ }
+ blockHash := vote.BlockHash
+ if blockHash == types.NullBlockHash {
+ var err error
+ blockHash, err = recv.emptyBlockHash(vote.Position)
+ if err != nil {
+ recv.consensus.logger.Error(
+ "Failed to verify vote for empty block",
+ "position", vote.Position,
+ "error", err)
+ return false, true
+ }
+ }
+ return pubKey.VerifySignature(
+ blockHash, crypto.Signature(vote.PartialSignature)), true
+ }
+ }
+ return len(vote.PartialSignature.Signature) == 0, true
+}
+
+func (recv *consensusBAReceiver) ProposeVote(vote *types.Vote) {
+ if !recv.isNotary {
+ return
+ }
+ if recv.psigSigner != nil &&
+ vote.BlockHash != types.SkipBlockHash {
+ if vote.Type == types.VoteCom || vote.Type == types.VoteFastCom {
+ if vote.BlockHash == types.NullBlockHash {
+ hash, err := recv.emptyBlockHash(vote.Position)
+ if err != nil {
+ recv.consensus.logger.Error(
+ "Failed to propose vote for empty block",
+ "position", vote.Position,
+ "error", err)
+ return
+ }
+ vote.PartialSignature = recv.psigSigner.sign(hash)
+ } else {
+ vote.PartialSignature = recv.psigSigner.sign(vote.BlockHash)
+ }
+ }
+ }
+ if err := recv.agreementModule.prepareVote(vote); err != nil {
+ recv.consensus.logger.Error("Failed to prepare vote", "error", err)
+ return
+ }
+ go func() {
+ if err := recv.agreementModule.processVote(vote); err != nil {
+ recv.consensus.logger.Error("Failed to process self vote",
+ "error", err,
+ "vote", vote)
+ return
+ }
+ recv.consensus.logger.Debug("Calling Network.BroadcastVote",
+ "vote", vote)
+ recv.consensus.network.BroadcastVote(vote)
+ }()
+}
+
+func (recv *consensusBAReceiver) ProposeBlock() common.Hash {
+ if !recv.isNotary {
+ return common.Hash{}
+ }
+ block, err := recv.consensus.proposeBlock(recv.agreementModule.agreementID())
+ if err != nil || block == nil {
+ recv.consensus.logger.Error("Unable to propose block", "error", err)
+ return types.NullBlockHash
+ }
+ go func() {
+ if err := recv.consensus.preProcessBlock(block); err != nil {
+ recv.consensus.logger.Error("Failed to pre-process block", "error", err)
+ return
+ }
+ recv.consensus.logger.Debug("Calling Network.BroadcastBlock",
+ "block", block)
+ recv.consensus.network.BroadcastBlock(block)
+ }()
+ return block.Hash
+}
+
+func (recv *consensusBAReceiver) ConfirmBlock(
+ hash common.Hash, votes map[types.NodeID]*types.Vote) {
+ var (
+ block *types.Block
+ aID = recv.agreementModule.agreementID()
+ )
+
+ isEmptyBlockConfirmed := hash == common.Hash{}
+ if isEmptyBlockConfirmed {
+ recv.consensus.logger.Info("Empty block is confirmed", "position", aID)
+ var err error
+ block, err = recv.consensus.bcModule.addEmptyBlock(aID)
+ if err != nil {
+ recv.consensus.logger.Error("Add position for empty failed",
+ "error", err)
+ return
+ }
+ if block == nil {
+ // The empty block's parent is not found locally, thus we can't
+ // propose it at this moment.
+ //
+ // We can only rely on block pulling upon receiving
+ // types.AgreementResult from the next position.
+ recv.consensus.logger.Warn(
+ "An empty block is confirmed without its parent",
+ "position", aID)
+ return
+ }
+ } else {
+ var exist bool
+ block, exist = recv.agreementModule.findBlockNoLock(hash)
+ if !exist {
+ recv.consensus.logger.Debug("Unknown block confirmed",
+ "hash", hash.String()[:6])
+ ch := make(chan *types.Block)
+ func() {
+ recv.consensus.lock.Lock()
+ defer recv.consensus.lock.Unlock()
+ recv.consensus.baConfirmedBlock[hash] = ch
+ }()
+ go func() {
+ hashes := common.Hashes{hash}
+ PullBlockLoop:
+ for {
+ recv.consensus.logger.Debug("Calling Network.PullBlock for BA block",
+ "hash", hash)
+ recv.consensus.network.PullBlocks(hashes)
+ select {
+ case block = <-ch:
+ break PullBlockLoop
+ case <-time.After(1 * time.Second):
+ }
+ }
+ recv.consensus.logger.Debug("Receive unknown block",
+ "hash", hash.String()[:6],
+ "position", block.Position)
+ recv.agreementModule.addCandidateBlock(block)
+ recv.agreementModule.lock.Lock()
+ defer recv.agreementModule.lock.Unlock()
+ recv.ConfirmBlock(block.Hash, votes)
+ }()
+ return
+ }
+ }
+
+ if len(votes) == 0 && len(block.Randomness) == 0 {
+ recv.consensus.logger.Error("No votes to recover randomness",
+ "block", block)
+ } else if votes != nil {
+ voteList := make([]types.Vote, 0, len(votes))
+ IDs := make(cryptoDKG.IDs, 0, len(votes))
+ psigs := make([]cryptoDKG.PartialSignature, 0, len(votes))
+ for _, vote := range votes {
+ if vote.BlockHash != hash {
+ continue
+ }
+ if block.Position.Round >= DKGDelayRound {
+ ID, exist := recv.npks.IDMap[vote.ProposerID]
+ if !exist {
+ continue
+ }
+ IDs = append(IDs, ID)
+ psigs = append(psigs, vote.PartialSignature)
+ } else {
+ voteList = append(voteList, *vote)
+ }
+ }
+ if block.Position.Round >= DKGDelayRound {
+ rand, err := cryptoDKG.RecoverSignature(psigs, IDs)
+ if err != nil {
+ recv.consensus.logger.Warn("Unable to recover randomness",
+ "block", block,
+ "error", err)
+ } else {
+ block.Randomness = rand.Signature[:]
+ }
+ } else {
+ block.Randomness = NoRand
+ }
+
+ if recv.isNotary {
+ result := &types.AgreementResult{
+ BlockHash: block.Hash,
+ Position: block.Position,
+ Votes: voteList,
+ IsEmptyBlock: isEmptyBlockConfirmed,
+ Randomness: block.Randomness,
+ }
+ // touchAgreementResult does not support concurrent access.
+ go func() {
+ recv.consensus.priorityMsgChan <- (*selfAgreementResult)(result)
+ }()
+ recv.consensus.logger.Debug("Broadcast AgreementResult",
+ "result", result)
+ recv.consensus.network.BroadcastAgreementResult(result)
+ if block.IsEmpty() {
+ recv.consensus.bcModule.addBlockRandomness(
+ block.Position, block.Randomness)
+ }
+ if block.Position.Round >= DKGDelayRound {
+ recv.consensus.logger.Debug(
+ "Broadcast finalized block",
+ "block", block)
+ recv.consensus.network.BroadcastBlock(block)
+ }
+ }
+ }
+
+ if !block.IsGenesis() &&
+ !recv.consensus.bcModule.confirmed(block.Position.Height-1) {
+ go func(hash common.Hash) {
+ parentHash := hash
+ for {
+ recv.consensus.logger.Warn("Parent block not confirmed",
+ "parent-hash", parentHash.String()[:6],
+ "cur-position", block.Position)
+ ch := make(chan *types.Block)
+ if !func() bool {
+ recv.consensus.lock.Lock()
+ defer recv.consensus.lock.Unlock()
+ if _, exist := recv.consensus.baConfirmedBlock[parentHash]; exist {
+ return false
+ }
+ recv.consensus.baConfirmedBlock[parentHash] = ch
+ return true
+ }() {
+ return
+ }
+ var block *types.Block
+ PullBlockLoop:
+ for {
+ recv.consensus.logger.Debug("Calling Network.PullBlock for parent",
+ "hash", parentHash)
+ recv.consensus.network.PullBlocks(common.Hashes{parentHash})
+ select {
+ case block = <-ch:
+ break PullBlockLoop
+ case <-time.After(1 * time.Second):
+ }
+ }
+ recv.consensus.logger.Info("Receive parent block",
+ "parent-hash", block.ParentHash.String()[:6],
+ "cur-position", block.Position)
+ if !block.IsFinalized() {
+ // TODO(jimmy): use a separate message to pull finalized
+ // blocks. Here, we pull it again as a workaround.
+ continue
+ }
+ recv.consensus.processBlockChan <- block
+ parentHash = block.ParentHash
+ if block.IsGenesis() || recv.consensus.bcModule.confirmed(
+ block.Position.Height-1) {
+ return
+ }
+ }
+ }(block.ParentHash)
+ }
+ if !block.IsEmpty() {
+ recv.consensus.processBlockChan <- block
+ }
+ // Drain the restartNotary channel so BA will not get stuck in a deadlock.
+CleanChannelLoop:
+ for {
+ select {
+ case <-recv.restartNotary:
+ default:
+ break CleanChannelLoop
+ }
+ }
+ recv.restartNotary <- block.Position
+}
+
+func (recv *consensusBAReceiver) PullBlocks(hashes common.Hashes) {
+ if !recv.isNotary {
+ return
+ }
+ recv.consensus.logger.Debug("Calling Network.PullBlocks", "hashes", hashes)
+ recv.consensus.network.PullBlocks(hashes)
+}
+
+func (recv *consensusBAReceiver) ReportForkVote(v1, v2 *types.Vote) {
+ recv.consensus.gov.ReportForkVote(v1, v2)
+}
+
+func (recv *consensusBAReceiver) ReportForkBlock(b1, b2 *types.Block) {
+ b1Clone := b1.Clone()
+ b2Clone := b2.Clone()
+ b1Clone.Payload = []byte{}
+ b2Clone.Payload = []byte{}
+ recv.consensus.gov.ReportForkBlock(b1Clone, b2Clone)
+}
+
+// consensusDKGReceiver implements dkgReceiver.
+type consensusDKGReceiver struct {
+ ID types.NodeID
+ gov Governance
+ signer *utils.Signer
+ nodeSetCache *utils.NodeSetCache
+ cfgModule *configurationChain
+ network Network
+ logger common.Logger
+}
+
+// ProposeDKGComplaint proposes a DKGComplaint.
+func (recv *consensusDKGReceiver) ProposeDKGComplaint(
+ complaint *typesDKG.Complaint) {
+ if err := recv.signer.SignDKGComplaint(complaint); err != nil {
+ recv.logger.Error("Failed to sign DKG complaint", "error", err)
+ return
+ }
+	recv.logger.Debug("Calling Governance.AddDKGComplaint",
+ "complaint", complaint)
+ recv.gov.AddDKGComplaint(complaint)
+}
+
+// ProposeDKGMasterPublicKey proposes a DKGMasterPublicKey.
+func (recv *consensusDKGReceiver) ProposeDKGMasterPublicKey(
+ mpk *typesDKG.MasterPublicKey) {
+ if err := recv.signer.SignDKGMasterPublicKey(mpk); err != nil {
+ recv.logger.Error("Failed to sign DKG master public key", "error", err)
+ return
+ }
+ recv.logger.Debug("Calling Governance.AddDKGMasterPublicKey", "key", mpk)
+ recv.gov.AddDKGMasterPublicKey(mpk)
+}
+
+// ProposeDKGPrivateShare proposes a DKGPrivateShare.
+func (recv *consensusDKGReceiver) ProposeDKGPrivateShare(
+ prv *typesDKG.PrivateShare) {
+ if err := recv.signer.SignDKGPrivateShare(prv); err != nil {
+ recv.logger.Error("Failed to sign DKG private share", "error", err)
+ return
+ }
+ receiverPubKey, exists := recv.nodeSetCache.GetPublicKey(prv.ReceiverID)
+ if !exists {
+ recv.logger.Error("Public key for receiver not found",
+ "receiver", prv.ReceiverID.String()[:6])
+ return
+ }
+ if prv.ReceiverID == recv.ID {
+ go func() {
+ if err := recv.cfgModule.processPrivateShare(prv); err != nil {
+ recv.logger.Error("Failed to process self private share", "prvShare", prv)
+ }
+ }()
+ } else {
+ recv.logger.Debug("Calling Network.SendDKGPrivateShare",
+ "receiver", hex.EncodeToString(receiverPubKey.Bytes()))
+ recv.network.SendDKGPrivateShare(receiverPubKey, prv)
+ }
+}
+
+// ProposeDKGAntiNackComplaint proposes a DKGPrivateShare as an anti-nack
+// complaint.
+func (recv *consensusDKGReceiver) ProposeDKGAntiNackComplaint(
+ prv *typesDKG.PrivateShare) {
+ if prv.ProposerID == recv.ID {
+ if err := recv.signer.SignDKGPrivateShare(prv); err != nil {
+			recv.logger.Error("Failed to sign DKG private share", "error", err)
+ return
+ }
+ }
+ recv.logger.Debug("Calling Network.BroadcastDKGPrivateShare", "share", prv)
+ recv.network.BroadcastDKGPrivateShare(prv)
+}
+
+// ProposeDKGMPKReady proposes a DKGMPKReady message.
+func (recv *consensusDKGReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) {
+ if err := recv.signer.SignDKGMPKReady(ready); err != nil {
+ recv.logger.Error("Failed to sign DKG ready", "error", err)
+ return
+ }
+ recv.logger.Debug("Calling Governance.AddDKGMPKReady", "ready", ready)
+ recv.gov.AddDKGMPKReady(ready)
+}
+
+// ProposeDKGFinalize proposes a DKGFinalize message.
+func (recv *consensusDKGReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) {
+ if err := recv.signer.SignDKGFinalize(final); err != nil {
+ recv.logger.Error("Failed to sign DKG finalize", "error", err)
+ return
+ }
+ recv.logger.Debug("Calling Governance.AddDKGFinalize", "final", final)
+ recv.gov.AddDKGFinalize(final)
+}
+
+// ProposeDKGSuccess proposes a DKGSuccess message.
+func (recv *consensusDKGReceiver) ProposeDKGSuccess(success *typesDKG.Success) {
+ if err := recv.signer.SignDKGSuccess(success); err != nil {
+		recv.logger.Error("Failed to sign DKG success", "error", err)
+ return
+ }
+ recv.logger.Debug("Calling Governance.AddDKGSuccess", "success", success)
+ recv.gov.AddDKGSuccess(success)
+}
+
+// Consensus implements DEXON Consensus algorithm.
+type Consensus struct {
+ // Node Info.
+ ID types.NodeID
+ signer *utils.Signer
+
+ // BA.
+ baMgr *agreementMgr
+ baConfirmedBlock map[common.Hash]chan<- *types.Block
+
+ // DKG.
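+	// dkgRunning tracks the DKG routine state: 0 means not started (or reset
+	// for a new DKG period), 1 means running, and 2 means finished; see
+	// runDKG and the DKG preparation handler registered in prepare.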
+ dkgRunning int32
+ dkgReady *sync.Cond
+ cfgModule *configurationChain
+
+ // Interfaces.
+ db db.Database
+ app Application
+ debugApp Debug
+ gov Governance
+ network Network
+
+ // Misc.
+ bcModule *blockChain
+ dMoment time.Time
+ nodeSetCache *utils.NodeSetCache
+ tsigVerifierCache *TSigVerifierCache
+ lock sync.RWMutex
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ event *common.Event
+ roundEvent *utils.RoundEvent
+ logger common.Logger
+ resetDeliveryGuardTicker chan struct{}
+ msgChan chan types.Msg
+ priorityMsgChan chan interface{}
+ waitGroup sync.WaitGroup
+ processBlockChan chan *types.Block
+
+ // Context of Dummy receiver during switching from syncer.
+ dummyCancel context.CancelFunc
+ dummyFinished <-chan struct{}
+ dummyMsgBuffer []types.Msg
+}
+
+// NewConsensus constructs a Consensus instance.
+func NewConsensus(
+ dMoment time.Time,
+ app Application,
+ gov Governance,
+ db db.Database,
+ network Network,
+ prv crypto.PrivateKey,
+ logger common.Logger) *Consensus {
+ return newConsensusForRound(
+ nil, dMoment, app, gov, db, network, prv, logger, true)
+}
+
+// NewConsensusForSimulation creates an instance of Consensus for simulation.
+// The only difference from NewConsensus is that the application is invoked
+// directly instead of through the non-blocking wrapper.
+func NewConsensusForSimulation(
+ dMoment time.Time,
+ app Application,
+ gov Governance,
+ db db.Database,
+ network Network,
+ prv crypto.PrivateKey,
+ logger common.Logger) *Consensus {
+ return newConsensusForRound(
+ nil, dMoment, app, gov, db, network, prv, logger, false)
+}
+
+// NewConsensusFromSyncer constructs a Consensus instance from information
+// provided by the syncer.
+//
+// You need to provide the initial block for this newly created Consensus
+// instance to bootstrap with. A proper choice is the last finalized block you
+// delivered to syncer.
+//
+// NOTE: those confirmed blocks should be organized by chainID and sorted by
+// their positions, in ascending order.
+func NewConsensusFromSyncer(
+ initBlock *types.Block,
+ startWithEmpty bool,
+ dMoment time.Time,
+ app Application,
+ gov Governance,
+ db db.Database,
+ networkModule Network,
+ prv crypto.PrivateKey,
+ confirmedBlocks []*types.Block,
+ cachedMessages []types.Msg,
+ logger common.Logger) (*Consensus, error) {
+ // Setup Consensus instance.
+ con := newConsensusForRound(initBlock, dMoment, app, gov, db,
+ networkModule, prv, logger, true)
+ // Launch a dummy receiver before we start receiving from network module.
+ con.dummyMsgBuffer = cachedMessages
+ con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver(
+ con.ctx, networkModule.ReceiveChan(), func(msg types.Msg) {
+ con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg)
+ })
+	// Dump all BA-confirmed blocks to the consensus instance, making sure
+	// these added blocks form a DAG.
+ refBlock := initBlock
+ for _, b := range confirmedBlocks {
+		// Only when its parent block has already been added can we add this
+		// block. If not, our pulling mechanism would stop at the block we
+		// added and lose its parent block forever.
+ if b.Position.Height != refBlock.Position.Height+1 {
+ break
+ }
+ if err := con.processBlock(b); err != nil {
+ return nil, err
+ }
+ refBlock = b
+ }
+ if startWithEmpty {
+ emptyPos := types.Position{
+ Round: con.bcModule.tipRound(),
+ Height: initBlock.Position.Height + 1,
+ }
+ _, err := con.bcModule.addEmptyBlock(emptyPos)
+ if err != nil {
+ panic(err)
+ }
+ }
+ return con, nil
+}
+
+// newConsensusForRound creates a Consensus instance.
+func newConsensusForRound(
+ initBlock *types.Block,
+ dMoment time.Time,
+ app Application,
+ gov Governance,
+ db db.Database,
+ network Network,
+ prv crypto.PrivateKey,
+ logger common.Logger,
+ usingNonBlocking bool) *Consensus {
+ // TODO(w): load latest blockHeight from DB, and use config at that height.
+ nodeSetCache := utils.NewNodeSetCache(gov)
+ // Setup signer module.
+ signer := utils.NewSigner(prv)
+	// Check if the application implements the Debug interface.
+ var debugApp Debug
+ if a, ok := app.(Debug); ok {
+ debugApp = a
+ }
+ // Get configuration for bootstrap round.
+ initPos := types.Position{
+ Round: 0,
+ Height: types.GenesisHeight,
+ }
+ if initBlock != nil {
+ initPos = initBlock.Position
+ }
+ // Init configuration chain.
+ ID := types.NewNodeID(prv.PublicKey())
+ recv := &consensusDKGReceiver{
+ ID: ID,
+ gov: gov,
+ signer: signer,
+ nodeSetCache: nodeSetCache,
+ network: network,
+ logger: logger,
+ }
+ cfgModule := newConfigurationChain(ID, recv, gov, nodeSetCache, db, logger)
+ recv.cfgModule = cfgModule
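+	// Route BLS signing requests through the configuration chain so that the
+	// threshold signer derived from DKG for the requested round is used.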
+ signer.SetBLSSigner(
+ func(round uint64, hash common.Hash) (crypto.Signature, error) {
+ _, signer, err := cfgModule.getDKGInfo(round, false)
+ if err != nil {
+ return crypto.Signature{}, err
+ }
+ return crypto.Signature(signer.sign(hash)), nil
+ })
+ appModule := app
+ if usingNonBlocking {
+ appModule = newNonBlocking(app, debugApp)
+ }
+ tsigVerifierCache := NewTSigVerifierCache(gov, 7)
+ bcModule := newBlockChain(ID, dMoment, initBlock, appModule,
+ tsigVerifierCache, signer, logger)
+ // Construct Consensus instance.
+ con := &Consensus{
+ ID: ID,
+ app: appModule,
+ debugApp: debugApp,
+ gov: gov,
+ db: db,
+ network: network,
+ baConfirmedBlock: make(map[common.Hash]chan<- *types.Block),
+ dkgReady: sync.NewCond(&sync.Mutex{}),
+ cfgModule: cfgModule,
+ bcModule: bcModule,
+ dMoment: dMoment,
+ nodeSetCache: nodeSetCache,
+ tsigVerifierCache: tsigVerifierCache,
+ signer: signer,
+ event: common.NewEvent(),
+ logger: logger,
+ resetDeliveryGuardTicker: make(chan struct{}),
+ msgChan: make(chan types.Msg, 1024),
+ priorityMsgChan: make(chan interface{}, 1024),
+ processBlockChan: make(chan *types.Block, 1024),
+ }
+ con.ctx, con.ctxCancel = context.WithCancel(context.Background())
+ var err error
+ con.roundEvent, err = utils.NewRoundEvent(con.ctx, gov, logger, initPos,
+ ConfigRoundShift)
+ if err != nil {
+ panic(err)
+ }
+ if con.baMgr, err = newAgreementMgr(con); err != nil {
+ panic(err)
+ }
+ if err = con.prepare(initBlock); err != nil {
+ panic(err)
+ }
+ return con
+}
+
+// prepare the Consensus instance to be ready for blocks after 'initBlock'.
+// 'initBlock' could be either:
+// - nil
+// - the last finalized block
+func (con *Consensus) prepare(initBlock *types.Block) (err error) {
+ // Trigger the round validation method for the next round of the first
+ // round.
+	// The block passed from the full node should already be delivered or
+	// known by the full node, so we don't have to notify it.
+ initRound := uint64(0)
+ if initBlock != nil {
+ initRound = initBlock.Position.Round
+ }
+ if initRound == 0 {
+ if DKGDelayRound == 0 {
+ panic("not implemented yet")
+ }
+ }
+ // Measure time elapse for each handler of round events.
+ elapse := func(what string, lastE utils.RoundEventParam) func() {
+ start := time.Now()
+ con.logger.Info("Handle round event",
+ "what", what,
+ "event", lastE)
+ return func() {
+ con.logger.Info("Finish round event",
+ "what", what,
+ "event", lastE,
+ "elapse", time.Since(start))
+ }
+ }
+	// Register a round event handler to purge the cached node set. To make
+	// sure each module sees the up-to-date node set, this handler must be
+	// registered first.
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ defer elapse("purge-cache", evts[len(evts)-1])()
+ for _, e := range evts {
+ if e.Reset == 0 {
+ continue
+ }
+ con.nodeSetCache.Purge(e.Round + 1)
+ con.tsigVerifierCache.Purge(e.Round + 1)
+ }
+ })
+ // Register round event handler to abort previous running DKG if any.
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ e := evts[len(evts)-1]
+ go func() {
+ defer elapse("abort-DKG", e)()
+ if e.Reset > 0 {
+ aborted := con.cfgModule.abortDKG(con.ctx, e.Round+1, e.Reset-1)
+ con.logger.Info("DKG aborting result",
+ "round", e.Round+1,
+ "reset", e.Reset-1,
+ "aborted", aborted)
+ }
+ }()
+ })
+ // Register round event handler to update BA and BC modules.
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ defer elapse("append-config", evts[len(evts)-1])()
+		// Always update the later modules in the data flow with newer configs
+		// first.
+ if err := con.bcModule.notifyRoundEvents(evts); err != nil {
+ panic(err)
+ }
+ if err := con.baMgr.notifyRoundEvents(evts); err != nil {
+ panic(err)
+ }
+ })
+	// Register round event handler to reset DKG if the DKG set for the next
+	// round failed to set up.
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ e := evts[len(evts)-1]
+ defer elapse("reset-DKG", e)()
+ nextRound := e.Round + 1
+ if nextRound < DKGDelayRound {
+ return
+ }
+ curNotarySet, err := con.nodeSetCache.GetNotarySet(e.Round)
+ if err != nil {
+ con.logger.Error("Error getting notary set when proposing CRS",
+ "round", e.Round,
+ "error", err)
+ return
+ }
+ if _, exist := curNotarySet[con.ID]; !exist {
+ return
+ }
+ con.event.RegisterHeight(e.NextDKGResetHeight(), func(uint64) {
+ if ok, _ := utils.IsDKGValid(
+ con.gov, con.logger, nextRound, e.Reset); ok {
+ return
+ }
+			// Abort all previously running DKG protocol instances, if any.
+ go con.runCRS(e.Round, utils.Rehash(e.CRS, uint(e.Reset+1)), true)
+ })
+ })
+ // Register round event handler to propose new CRS.
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+		// We don't have to propose a new CRS during a DKG reset; the reset of
+		// DKG is handled by the notary set of the previous round.
+ e := evts[len(evts)-1]
+ defer elapse("propose-CRS", e)()
+ if e.Reset != 0 || e.Round < DKGDelayRound {
+ return
+ }
+ if curNotarySet, err := con.nodeSetCache.GetNotarySet(e.Round); err != nil {
+ con.logger.Error("Error getting notary set when proposing CRS",
+ "round", e.Round,
+ "error", err)
+ } else {
+ if _, exist := curNotarySet[con.ID]; !exist {
+ return
+ }
+ con.event.RegisterHeight(e.NextCRSProposingHeight(), func(uint64) {
+ con.logger.Debug(
+ "Calling Governance.CRS to check if already proposed",
+ "round", e.Round+1)
+ if (con.gov.CRS(e.Round+1) != common.Hash{}) {
+ con.logger.Debug("CRS already proposed", "round", e.Round+1)
+ return
+ }
+ go con.runCRS(e.Round, e.CRS, false)
+ })
+ }
+ })
+ // Touch nodeSetCache for next round.
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ e := evts[len(evts)-1]
+ defer elapse("touch-NodeSetCache", e)()
+ con.event.RegisterHeight(e.NextTouchNodeSetCacheHeight(), func(uint64) {
+ if e.Reset == 0 {
+ return
+ }
+ go func() {
+ nextRound := e.Round + 1
+ if err := con.nodeSetCache.Touch(nextRound); err != nil {
+ con.logger.Warn("Failed to update nodeSetCache",
+ "round", nextRound,
+ "error", err)
+ }
+ }()
+ })
+ })
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ e := evts[len(evts)-1]
+ if e.Reset != 0 {
+ return
+ }
+ defer elapse("touch-DKGCache", e)()
+ go func() {
+ if _, err :=
+ con.tsigVerifierCache.Update(e.Round); err != nil {
+ con.logger.Warn("Failed to update tsig cache",
+ "round", e.Round,
+ "error", err)
+ }
+ }()
+ go func() {
+ threshold := utils.GetDKGThreshold(
+ utils.GetConfigWithPanic(con.gov, e.Round, con.logger))
+ // Restore group public key.
+ con.logger.Debug(
+ "Calling Governance.DKGMasterPublicKeys for recoverDKGInfo",
+ "round", e.Round)
+ con.logger.Debug(
+ "Calling Governance.DKGComplaints for recoverDKGInfo",
+ "round", e.Round)
+ _, qualifies, err := typesDKG.CalcQualifyNodes(
+ con.gov.DKGMasterPublicKeys(e.Round),
+ con.gov.DKGComplaints(e.Round),
+ threshold)
+ if err != nil {
+ con.logger.Warn("Failed to calculate dkg set",
+ "round", e.Round,
+ "error", err)
+ return
+ }
+ if _, exist := qualifies[con.ID]; !exist {
+ return
+ }
+ if _, _, err :=
+ con.cfgModule.getDKGInfo(e.Round, true); err != nil {
+ con.logger.Warn("Failed to recover DKG info",
+ "round", e.Round,
+ "error", err)
+ }
+ }()
+ })
+	// checkCRS generates a checker that reports whether the CRS for the given
+	// round is ready.
+ checkCRS := func(round uint64) func() bool {
+ return func() bool {
+ nextCRS := con.gov.CRS(round)
+ if (nextCRS != common.Hash{}) {
+ return true
+ }
+ con.logger.Debug("CRS is not ready yet. Try again later...",
+ "nodeID", con.ID,
+ "round", round)
+ return false
+ }
+ }
+ // Trigger round validation method for next period.
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ e := evts[len(evts)-1]
+ defer elapse("next-round", e)()
+ // Register a routine to trigger round events.
+ con.event.RegisterHeight(e.NextRoundValidationHeight(),
+ utils.RoundEventRetryHandlerGenerator(con.roundEvent, con.event))
+ // Register a routine to register next DKG.
+ con.event.RegisterHeight(e.NextDKGRegisterHeight(), func(uint64) {
+ nextRound := e.Round + 1
+ if nextRound < DKGDelayRound {
+ con.logger.Info("Skip runDKG for round",
+ "round", nextRound,
+ "reset", e.Reset)
+ return
+ }
+ go func() {
+				// Normally, gov.CRS would return a non-empty hash. This check
+				// guards against unexpected network fluctuation and ensures
+				// robustness.
+ if !checkWithCancel(
+ con.ctx, 500*time.Millisecond, checkCRS(nextRound)) {
+ con.logger.Debug("unable to prepare CRS for notary set",
+ "round", nextRound,
+ "reset", e.Reset)
+ return
+ }
+ nextNotarySet, err := con.nodeSetCache.GetNotarySet(nextRound)
+ if err != nil {
+ con.logger.Error("Error getting notary set for next round",
+ "round", nextRound,
+ "reset", e.Reset,
+ "error", err)
+ return
+ }
+ if _, exist := nextNotarySet[con.ID]; !exist {
+ con.logger.Info("Not selected as notary set",
+ "round", nextRound,
+ "reset", e.Reset)
+ return
+ }
+ con.logger.Info("Selected as notary set",
+ "round", nextRound,
+ "reset", e.Reset)
+ nextConfig := utils.GetConfigWithPanic(con.gov, nextRound,
+ con.logger)
+ con.cfgModule.registerDKG(con.ctx, nextRound, e.Reset,
+ utils.GetDKGThreshold(nextConfig))
+ con.event.RegisterHeight(e.NextDKGPreparationHeight(),
+ func(h uint64) {
+ func() {
+ con.dkgReady.L.Lock()
+ defer con.dkgReady.L.Unlock()
+ con.dkgRunning = 0
+ }()
+					// Skip the DKG phases that have already passed when
+					// started late.
+ dkgCurrentHeight := h - e.NextDKGPreparationHeight()
+ con.runDKG(
+ nextRound, e.Reset,
+ e.NextDKGPreparationHeight(), dkgCurrentHeight)
+ })
+ }()
+ })
+ })
+ con.roundEvent.TriggerInitEvent()
+ if initBlock != nil {
+ con.event.NotifyHeight(initBlock.Position.Height)
+ }
+ con.baMgr.prepare()
+ return
+}
+
+// Run starts running DEXON Consensus.
+func (con *Consensus) Run() {
+	// There may be empty blocks in the blockchain added by force sync.
+ blocksWithoutRandomness := con.bcModule.pendingBlocksWithoutRandomness()
+ // Launch BA routines.
+ con.baMgr.run()
+ // Launch network handler.
+ con.logger.Debug("Calling Network.ReceiveChan")
+ con.waitGroup.Add(1)
+ go con.deliverNetworkMsg()
+ con.waitGroup.Add(1)
+ go con.processMsg()
+ go con.processBlockLoop()
+ // Stop dummy receiver if launched.
+ if con.dummyCancel != nil {
+ con.logger.Trace("Stop dummy receiver")
+ con.dummyCancel()
+ <-con.dummyFinished
+ // Replay those cached messages.
+		con.logger.Trace("Dummy receiver stopped, start dumping cached messages",
+ "count", len(con.dummyMsgBuffer))
+ for _, msg := range con.dummyMsgBuffer {
+ loop:
+ for {
+ select {
+ case con.msgChan <- msg:
+ break loop
+ case <-time.After(50 * time.Millisecond):
+ con.logger.Debug(
+ "internal message channel is full when syncing")
+ }
+ }
+ }
+ con.logger.Trace("Finish dumping cached messages")
+ }
+ con.generateBlockRandomness(blocksWithoutRandomness)
+	// Sleep until dMoment comes.
+ time.Sleep(con.dMoment.Sub(time.Now().UTC()))
+ // Take some time to bootstrap.
+ time.Sleep(3 * time.Second)
+ con.waitGroup.Add(1)
+ go con.deliveryGuard()
+ // Block until done.
+ select {
+ case <-con.ctx.Done():
+ }
+}
+
+func (con *Consensus) generateBlockRandomness(blocks []*types.Block) {
+ con.logger.Debug("Start generating block randomness", "blocks", blocks)
+ isNotarySet := make(map[uint64]bool)
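+	// Cache whether this node belongs to the notary set of each round so the
+	// lookup is done at most once per round.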
+ for _, block := range blocks {
+ if block.Position.Round < DKGDelayRound {
+ continue
+ }
+ doRun, exist := isNotarySet[block.Position.Round]
+ if !exist {
+ curNotarySet, err := con.nodeSetCache.GetNotarySet(block.Position.Round)
+ if err != nil {
+				con.logger.Error("Error getting notary set when generating block tsig",
+ "round", block.Position.Round,
+ "error", err)
+ continue
+ }
+ _, exist := curNotarySet[con.ID]
+ isNotarySet[block.Position.Round] = exist
+ doRun = exist
+ }
+ if !doRun {
+ continue
+ }
+ go func(block *types.Block) {
+ psig, err := con.cfgModule.preparePartialSignature(
+ block.Position.Round, block.Hash)
+ if err != nil {
+ con.logger.Error("Failed to prepare partial signature",
+ "block", block,
+ "error", err)
+ } else if err = con.signer.SignDKGPartialSignature(psig); err != nil {
+ con.logger.Error("Failed to sign DKG partial signature",
+ "block", block,
+ "error", err)
+ } else if err = con.cfgModule.processPartialSignature(psig); err != nil {
+ con.logger.Error("Failed to process partial signature",
+ "block", block,
+ "error", err)
+ } else {
+ con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
+ "proposer", psig.ProposerID,
+ "block", block)
+ con.network.BroadcastDKGPartialSignature(psig)
+ sig, err := con.cfgModule.runTSig(
+ block.Position.Round,
+ block.Hash,
+ 60*time.Minute,
+ )
+ if err != nil {
+ con.logger.Error("Failed to run Block Tsig",
+ "block", block,
+ "error", err)
+ return
+ }
+ result := &types.AgreementResult{
+ BlockHash: block.Hash,
+ Position: block.Position,
+ Randomness: sig.Signature[:],
+ }
+ con.bcModule.addBlockRandomness(block.Position, sig.Signature[:])
+ con.logger.Debug("Broadcast BlockRandomness",
+ "block", block,
+ "result", result)
+ con.network.BroadcastAgreementResult(result)
+ if err := con.deliverFinalizedBlocks(); err != nil {
+ con.logger.Error("Failed to deliver finalized block",
+ "error", err)
+ }
+ }
+ }(block)
+ }
+}
+
+// runDKG starts running DKG protocol.
+func (con *Consensus) runDKG(
+ round, reset, dkgBeginHeight, dkgHeight uint64) {
+ con.dkgReady.L.Lock()
+ defer con.dkgReady.L.Unlock()
+ if con.dkgRunning != 0 {
+ return
+ }
+ con.dkgRunning = 1
+ go func() {
+ defer func() {
+ con.dkgReady.L.Lock()
+ defer con.dkgReady.L.Unlock()
+ con.dkgReady.Broadcast()
+ con.dkgRunning = 2
+ }()
+ if err :=
+ con.cfgModule.runDKG(
+ round, reset,
+ con.event, dkgBeginHeight, dkgHeight); err != nil {
+ con.logger.Error("Failed to runDKG", "error", err)
+ }
+ }()
+}
+
+func (con *Consensus) runCRS(round uint64, hash common.Hash, reset bool) {
+ // Start running next round CRS.
+ psig, err := con.cfgModule.preparePartialSignature(round, hash)
+ if err != nil {
+ con.logger.Error("Failed to prepare partial signature", "error", err)
+ } else if err = con.signer.SignDKGPartialSignature(psig); err != nil {
+ con.logger.Error("Failed to sign DKG partial signature", "error", err)
+ } else if err = con.cfgModule.processPartialSignature(psig); err != nil {
+ con.logger.Error("Failed to process partial signature", "error", err)
+ } else {
+ con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
+ "proposer", psig.ProposerID,
+ "round", psig.Round,
+ "hash", psig.Hash)
+ con.network.BroadcastDKGPartialSignature(psig)
+ con.logger.Debug("Calling Governance.CRS", "round", round)
+ crs, err := con.cfgModule.runCRSTSig(round, hash)
+ if err != nil {
+ con.logger.Error("Failed to run CRS Tsig", "error", err)
+ } else {
+ if reset {
+ con.logger.Debug("Calling Governance.ResetDKG",
+ "round", round+1,
+ "crs", hex.EncodeToString(crs))
+ con.gov.ResetDKG(crs)
+ } else {
+ con.logger.Debug("Calling Governance.ProposeCRS",
+ "round", round+1,
+ "crs", hex.EncodeToString(crs))
+ con.gov.ProposeCRS(round+1, crs)
+ }
+ }
+ }
+}
+
+// Stop the Consensus core.
+func (con *Consensus) Stop() {
+ con.ctxCancel()
+ con.baMgr.stop()
+ con.event.Reset()
+ con.waitGroup.Wait()
+ if nbApp, ok := con.app.(*nonBlocking); ok {
+ nbApp.wait()
+ }
+}
+
+func (con *Consensus) deliverNetworkMsg() {
+ defer con.waitGroup.Done()
+ recv := con.network.ReceiveChan()
+ for {
+ select {
+ case <-con.ctx.Done():
+ return
+ default:
+ }
+ select {
+ case msg := <-recv:
+ innerLoop:
+ for {
+ select {
+ case con.msgChan <- msg:
+ break innerLoop
+ case <-time.After(500 * time.Millisecond):
+ con.logger.Debug("internal message channel is full",
+ "pending", msg)
+ }
+ }
+ case <-con.ctx.Done():
+ return
+ }
+ }
+}
+
+func (con *Consensus) processMsg() {
+ defer con.waitGroup.Done()
+MessageLoop:
+ for {
+ select {
+ case <-con.ctx.Done():
+ return
+ default:
+ }
+ var msg, peer interface{}
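+		// Prefer messages from the priority channel (e.g. self-proposed
+		// agreement results) over regular network messages.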
+ select {
+ case msg = <-con.priorityMsgChan:
+ default:
+ }
+ if msg == nil {
+ select {
+ case message := <-con.msgChan:
+ msg, peer = message.Payload, message.PeerID
+ case msg = <-con.priorityMsgChan:
+ case <-con.ctx.Done():
+ return
+ }
+ }
+ switch val := msg.(type) {
+ case *selfAgreementResult:
+ con.baMgr.touchAgreementResult((*types.AgreementResult)(val))
+ case *types.Block:
+ if ch, exist := func() (chan<- *types.Block, bool) {
+ con.lock.RLock()
+ defer con.lock.RUnlock()
+ ch, e := con.baConfirmedBlock[val.Hash]
+ return ch, e
+ }(); exist {
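+				// Empty blocks are verified by recomputing their hash, while
+				// non-empty blocks must be finalized and are checked against
+				// their randomness and proposer signature.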
+ if val.IsEmpty() {
+ hash, err := utils.HashBlock(val)
+ if err != nil {
+ con.logger.Error("Error verifying empty block hash",
+ "block", val,
+							"error", err)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ if hash != val.Hash {
+ con.logger.Error("Incorrect confirmed empty block hash",
+ "block", val,
+ "hash", hash)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ if _, err := con.bcModule.proposeBlock(
+ val.Position, time.Time{}, true); err != nil {
+ con.logger.Error("Error adding empty block",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ } else {
+ if !val.IsFinalized() {
+ con.logger.Warn("Ignore not finalized block",
+ "block", val)
+ continue MessageLoop
+ }
+ ok, err := con.bcModule.verifyRandomness(
+ val.Hash, val.Position.Round, val.Randomness)
+ if err != nil {
+ con.logger.Error("Error verifying confirmed block randomness",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ if !ok {
+ con.logger.Error("Incorrect confirmed block randomness",
+ "block", val)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ if err := utils.VerifyBlockSignature(val); err != nil {
+ con.logger.Error("VerifyBlockSignature failed",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ }
+ func() {
+ con.lock.Lock()
+ defer con.lock.Unlock()
+					// In case the block is delivered multiple times.
+ if _, exist := con.baConfirmedBlock[val.Hash]; !exist {
+ return
+ }
+ delete(con.baConfirmedBlock, val.Hash)
+ ch <- val
+ }()
+ } else if val.IsFinalized() {
+ if err := con.processFinalizedBlock(val); err != nil {
+ con.logger.Error("Failed to process finalized block",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+ } else {
+ if err := con.preProcessBlock(val); err != nil {
+ con.logger.Error("Failed to pre process block",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+ }
+ case *types.Vote:
+ if err := con.ProcessVote(val); err != nil {
+ con.logger.Error("Failed to process vote",
+ "vote", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+ case *types.AgreementResult:
+ if err := con.ProcessAgreementResult(val); err != nil {
+ con.logger.Error("Failed to process agreement result",
+ "result", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+ case *typesDKG.PrivateShare:
+ if err := con.cfgModule.processPrivateShare(val); err != nil {
+ con.logger.Error("Failed to process private share",
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+
+ case *typesDKG.PartialSignature:
+ if err := con.cfgModule.processPartialSignature(val); err != nil {
+ con.logger.Error("Failed to process partial signature",
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+ }
+ }
+}
+
+// ProcessVote is the entry point to submit one vote to a Consensus instance.
+func (con *Consensus) ProcessVote(vote *types.Vote) (err error) {
+ err = con.baMgr.processVote(vote)
+ return
+}
+
+// ProcessAgreementResult processes an agreement result carrying block
+// randomness.
+func (con *Consensus) ProcessAgreementResult(
+ rand *types.AgreementResult) error {
+ if !con.baMgr.touchAgreementResult(rand) {
+ return nil
+ }
+ // Sanity Check.
+ if err := VerifyAgreementResult(rand, con.nodeSetCache); err != nil {
+ con.baMgr.untouchAgreementResult(rand)
+ return err
+ }
+ if err := con.bcModule.processAgreementResult(rand); err != nil {
+ con.baMgr.untouchAgreementResult(rand)
+ if err == ErrSkipButNoError {
+ return nil
+ }
+ return err
+ }
+ // Syncing BA Module.
+ if err := con.baMgr.processAgreementResult(rand); err != nil {
+ con.baMgr.untouchAgreementResult(rand)
+ return err
+ }
+
+ con.logger.Debug("Rebroadcast AgreementResult",
+ "result", rand)
+ con.network.BroadcastAgreementResult(rand)
+
+ return con.deliverFinalizedBlocks()
+}
+
+// preProcessBlock performs Byzantine Agreement on the block.
+func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
+ err = con.baMgr.processBlock(b)
+ if err == nil && con.debugApp != nil {
+ con.debugApp.BlockReceived(b.Hash)
+ }
+ return
+}
+
+func (con *Consensus) processFinalizedBlock(b *types.Block) (err error) {
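+	// Blocks proposed before DKGDelayRound carry the NoRand placeholder
+	// instead of a threshold signature, so they are skipped here.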
+ if b.Position.Round < DKGDelayRound {
+ return
+ }
+ if err = utils.VerifyBlockSignature(b); err != nil {
+ return
+ }
+ verifier, ok, err := con.tsigVerifierCache.UpdateAndGet(b.Position.Round)
+ if err != nil {
+ return
+ }
+ if !ok {
+ err = ErrCannotVerifyBlockRandomness
+ return
+ }
+ if !verifier.VerifySignature(b.Hash, crypto.Signature{
+ Type: "bls",
+ Signature: b.Randomness,
+ }) {
+ err = ErrIncorrectBlockRandomness
+ return
+ }
+ err = con.baMgr.processFinalizedBlock(b)
+ if err == nil && con.debugApp != nil {
+ con.debugApp.BlockReceived(b.Hash)
+ }
+ return
+}
+
+func (con *Consensus) deliveryGuard() {
+ defer con.waitGroup.Done()
+ select {
+ case <-con.ctx.Done():
+ case <-time.After(con.dMoment.Sub(time.Now())):
+ }
+ // Node takes time to start.
+ select {
+ case <-con.ctx.Done():
+ case <-time.After(60 * time.Second):
+ }
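+	// From here on, expect a delivered block (or an explicit reset) at least
+	// once per minute; otherwise assume delivery has stalled and panic.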
+ for {
+ select {
+ case <-con.ctx.Done():
+ return
+ default:
+ }
+ select {
+ case <-con.ctx.Done():
+ return
+ case <-con.resetDeliveryGuardTicker:
+ case <-time.After(60 * time.Second):
+ con.logger.Error("No blocks delivered for too long", "ID", con.ID)
+ panic(fmt.Errorf("No blocks delivered for too long"))
+ }
+ }
+}
+
+// deliverBlock delivers a block to the application layer.
+func (con *Consensus) deliverBlock(b *types.Block) {
+ select {
+ case con.resetDeliveryGuardTicker <- struct{}{}:
+ default:
+ }
+ if err := con.db.PutBlock(*b); err != nil {
+ panic(err)
+ }
+ if err := con.db.PutCompactionChainTipInfo(b.Hash,
+ b.Position.Height); err != nil {
+ panic(err)
+ }
+ con.logger.Debug("Calling Application.BlockDelivered", "block", b)
+ con.app.BlockDelivered(b.Hash, b.Position, common.CopyBytes(b.Randomness))
+ if con.debugApp != nil {
+ con.debugApp.BlockReady(b.Hash)
+ }
+}
+
+// deliverFinalizedBlocks extracts and delivers finalized blocks to the
+// application layer.
+func (con *Consensus) deliverFinalizedBlocks() error {
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ return con.deliverFinalizedBlocksWithoutLock()
+}
+
+func (con *Consensus) deliverFinalizedBlocksWithoutLock() (err error) {
+ deliveredBlocks := con.bcModule.extractBlocks()
+ con.logger.Debug("Last blocks in compaction chain",
+ "delivered", con.bcModule.lastDeliveredBlock(),
+ "pending", con.bcModule.lastPendingBlock())
+ for _, b := range deliveredBlocks {
+ con.deliverBlock(b)
+ con.event.NotifyHeight(b.Position.Height)
+ }
+ return
+}
+
+func (con *Consensus) processBlockLoop() {
+ for {
+ select {
+ case <-con.ctx.Done():
+ return
+ default:
+ }
+ select {
+ case <-con.ctx.Done():
+ return
+ case block := <-con.processBlockChan:
+ if err := con.processBlock(block); err != nil {
+ con.logger.Error("Error processing block",
+ "block", block,
+ "error", err)
+ }
+ }
+ }
+}
+
+// processBlock is the entry point to submit one block to a Consensus instance.
+func (con *Consensus) processBlock(block *types.Block) (err error) {
+	// Blocks processed by blockChain can be out-of-order, but the output from
+	// blockChain (deliveredBlocks) cannot; thus we need to protect the part
+	// below with the writer lock.
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ if err = con.bcModule.addBlock(block); err != nil {
+ return
+ }
+ if err = con.deliverFinalizedBlocksWithoutLock(); err != nil {
+ return
+ }
+ return
+}
+
+// proposeBlock sets up the header fields of a block based on its ProposerID.
+func (con *Consensus) proposeBlock(position types.Position) (
+ *types.Block, error) {
+ b, err := con.bcModule.proposeBlock(position, time.Now().UTC(), false)
+ if err != nil {
+ return nil, err
+ }
+ con.logger.Debug("Calling Governance.CRS", "round", b.Position.Round)
+ crs := con.gov.CRS(b.Position.Round)
+ if crs.Equal(common.Hash{}) {
+ con.logger.Error("CRS for round is not ready, unable to prepare block",
+ "position", &b.Position)
+ return nil, ErrCRSNotReady
+ }
+ if err = con.signer.SignCRS(b, crs); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/constant.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/constant.go
new file mode 100644
index 000000000..51b95a3c0
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/constant.go
@@ -0,0 +1,41 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import "github.com/byzantine-lab/dexon-consensus/core/utils"
+
+// ConfigRoundShift refers to the difference between a block's round and the
+// config round derived from its state.
+//
+// For example, when round shift is 2, a block in round 0 should derive config
+// for round 2.
+const ConfigRoundShift uint64 = 2
+
+// DKGDelayRound refers to the round that first DKG is run.
+//
+// For example, when delay round is 1, new DKG will run at round 1. Round 0 will
+// have neither DKG nor CRS.
+const DKGDelayRound uint64 = 1
+
+// NoRand is the magic placeholder for the randomness field in blocks proposed
+// before DKGDelayRound.
+var NoRand = []byte("norand")
+
+func init() {
+ utils.SetDKGDelayRound(DKGDelayRound)
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/constant.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/constant.go
new file mode 100644
index 000000000..3f6627b92
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/constant.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package dkg
+
+import (
+ "github.com/byzantine-lab/bls/ffi/go/bls"
+)
+
+const (
+ curve = bls.BLS12_381
+)
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/dkg.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/dkg.go
new file mode 100644
index 000000000..b9dd038ce
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/dkg.go
@@ -0,0 +1,637 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package dkg
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+
+ "github.com/byzantine-lab/bls/ffi/go/bls"
+ "github.com/byzantine-lab/go-tangerine/rlp"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+)
+
+var (
+	// ErrDuplicatedShare is reported when adding a private key share of the
+	// same id.
+ ErrDuplicatedShare = fmt.Errorf("invalid share")
+ // ErrNoIDToRecover is reported when no id is provided for recovering private
+ // key.
+ ErrNoIDToRecover = fmt.Errorf("no id to recover private key")
+ // ErrShareNotFound is reported when the private key share of id is not found
+ // when recovering private key.
+ ErrShareNotFound = fmt.Errorf("share not found")
+)
+
+const cryptoType = "bls"
+
+var publicKeyLength int
+
+func init() {
+ if err := bls.Init(curve); err != nil {
+ panic(err)
+ }
+
+ pubKey := &bls.PublicKey{}
+ publicKeyLength = len(pubKey.Serialize())
+}
+
+// PrivateKey represents a private key structure that implements the
+// crypto.PrivateKey interface.
+type PrivateKey struct {
+ privateKey bls.SecretKey
+ publicKey PublicKey
+}
+
+// EncodeRLP implements rlp.Encoder
+func (prv *PrivateKey) EncodeRLP(w io.Writer) error {
+ return rlp.Encode(w, prv.Bytes())
+}
+
+// DecodeRLP implements rlp.Decoder
+func (prv *PrivateKey) DecodeRLP(s *rlp.Stream) error {
+ var b []byte
+ if err := s.Decode(&b); err != nil {
+ return err
+ }
+ return prv.SetBytes(b)
+}
+
+// MarshalJSON implements json.Marshaller.
+func (prv *PrivateKey) MarshalJSON() ([]byte, error) {
+ return json.Marshal(&prv.privateKey)
+}
+
+// UnmarshalJSON implements json.Unmarshaller.
+func (prv *PrivateKey) UnmarshalJSON(data []byte) error {
+ return json.Unmarshal(data, &prv.privateKey)
+}
+
+// ID is the id for DKG protocol.
+type ID = bls.ID
+
+// IDs is an array of ID.
+type IDs []ID
+
+// PublicKey represents a public key structure that implements the
+// crypto.PublicKey interface.
+type PublicKey struct {
+ publicKey bls.PublicKey
+}
+
+// PrivateKeyShares represents the private key shares for the DKG protocol.
+type PrivateKeyShares struct {
+ shares []PrivateKey
+ shareIndex map[ID]int
+ masterPrivateKey []bls.SecretKey
+}
+
+// Equal checks equality between two PrivateKeyShares instances.
+func (prvs *PrivateKeyShares) Equal(other *PrivateKeyShares) bool {
+ // Check shares.
+ if len(prvs.shareIndex) != len(other.shareIndex) {
+ return false
+ }
+ for dID, idx := range prvs.shareIndex {
+ otherIdx, exists := other.shareIndex[dID]
+ if !exists {
+ return false
+ }
+ if !prvs.shares[idx].privateKey.IsEqual(
+ &other.shares[otherIdx].privateKey) {
+ return false
+ }
+ }
+ // Check master private keys.
+ if len(prvs.masterPrivateKey) != len(other.masterPrivateKey) {
+ return false
+ }
+ for idx, m := range prvs.masterPrivateKey {
+ if m.GetHexString() != other.masterPrivateKey[idx].GetHexString() {
+ return false
+ }
+ }
+ return true
+}
+
+// EncodeRLP implements rlp.Encoder
+func (prvs *PrivateKeyShares) EncodeRLP(w io.Writer) error {
+ data := make([][][]byte, 3)
+ shares := make([][]byte, len(prvs.shares))
+ for i, s := range prvs.shares {
+ shares[i] = s.Bytes()
+ }
+ data[0] = shares
+
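+	// The share index map is flattened into an alternating list of
+	// (ID, index) byte slices, since the rlp package does not encode maps.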
+ shareIndex := make([][]byte, 0)
+ for k, v := range prvs.shareIndex {
+ shareIndex = append(shareIndex, k.GetLittleEndian())
+
+ vBytes, err := rlp.EncodeToBytes(uint64(v))
+ if err != nil {
+ return err
+ }
+ shareIndex = append(shareIndex, vBytes)
+ }
+ data[1] = shareIndex
+
+ mpks := make([][]byte, len(prvs.masterPrivateKey))
+ for i, m := range prvs.masterPrivateKey {
+ mpks[i] = m.GetLittleEndian()
+ }
+ data[2] = mpks
+ return rlp.Encode(w, data)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (prvs *PrivateKeyShares) DecodeRLP(s *rlp.Stream) error {
+ *prvs = PrivateKeyShares{}
+ var dec [][][]byte
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ var shares []PrivateKey
+ for _, bs := range dec[0] {
+ var key PrivateKey
+ err := key.SetBytes(bs)
+ if err != nil {
+ return err
+ }
+ shares = append(shares, key)
+ }
+ (*prvs).shares = shares
+
+ sharesIndex := map[ID]int{}
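+	// The share index was encoded as alternating (ID, index) entries, so
+	// decode them two at a time.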
+ for i := 0; i < len(dec[1]); i += 2 {
+ var key ID
+ err := key.SetLittleEndian(dec[1][i])
+ if err != nil {
+ return err
+ }
+
+ var value uint64
+ err = rlp.DecodeBytes(dec[1][i+1], &value)
+ if err != nil {
+ return err
+ }
+
+ sharesIndex[key] = int(value)
+ }
+ (*prvs).shareIndex = sharesIndex
+
+ var mpks []bls.SecretKey
+ for _, bs := range dec[2] {
+ var key bls.SecretKey
+ if err := key.SetLittleEndian(bs); err != nil {
+ return err
+ }
+ mpks = append(mpks, key)
+ }
+ (*prvs).masterPrivateKey = mpks
+
+ return nil
+}
+
+type publicKeySharesCache struct {
+ share []PublicKey
+ index map[ID]int
+}
+
+// PublicKeyShares represents the public key shares for the DKG protocol.
+type PublicKeyShares struct {
+ cache atomic.Value
+ lock sync.Mutex
+ masterPublicKey []bls.PublicKey
+}
+
+// Equal checks equality of two PublicKeyShares instances.
+func (pubs *PublicKeyShares) Equal(other *PublicKeyShares) bool {
+ cache := pubs.cache.Load().(*publicKeySharesCache)
+ cacheOther := other.cache.Load().(*publicKeySharesCache)
+ // Check shares.
+ for dID, idx := range cache.index {
+ otherIdx, exists := cacheOther.index[dID]
+ if !exists {
+ continue
+ }
+ if !cache.share[idx].publicKey.IsEqual(
+ &cacheOther.share[otherIdx].publicKey) {
+ return false
+ }
+ }
+ // Check master public keys.
+ if len(pubs.masterPublicKey) != len(other.masterPublicKey) {
+ return false
+ }
+ for idx, m := range pubs.masterPublicKey {
+ if m.GetHexString() != other.masterPublicKey[idx].GetHexString() {
+ return false
+ }
+ }
+ return true
+}
+
+// EncodeRLP implements rlp.Encoder
+func (pubs *PublicKeyShares) EncodeRLP(w io.Writer) error {
+ mpks := make([][]byte, len(pubs.masterPublicKey))
+ for i, m := range pubs.masterPublicKey {
+ mpks[i] = m.Serialize()
+ }
+ return rlp.Encode(w, mpks)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (pubs *PublicKeyShares) DecodeRLP(s *rlp.Stream) error {
+ var dec [][]byte
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ ps := NewEmptyPublicKeyShares()
+ for _, k := range dec {
+ var key bls.PublicKey
+ if err := key.Deserialize(k); err != nil {
+ return err
+ }
+ ps.masterPublicKey = append(ps.masterPublicKey, key)
+ }
+
+ *pubs = *ps.Move()
+ return nil
+}
+
+// MarshalJSON implements json.Marshaller.
+func (pubs *PublicKeyShares) MarshalJSON() ([]byte, error) {
+ type Alias PublicKeyShares
+ data := &struct {
+ MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"`
+ }{
+ make([]*bls.PublicKey, len(pubs.masterPublicKey)),
+ }
+ for i := range pubs.masterPublicKey {
+ data.MasterPublicKeys[i] = &pubs.masterPublicKey[i]
+ }
+ return json.Marshal(data)
+}
+
+// UnmarshalJSON implements json.Unmarshaller.
+func (pubs *PublicKeyShares) UnmarshalJSON(data []byte) error {
+ type Alias PublicKeyShares
+ aux := &struct {
+ MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"`
+ }{}
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ mpk := make([]bls.PublicKey, len(aux.MasterPublicKeys))
+ for i, pk := range aux.MasterPublicKeys {
+ mpk[i] = *pk
+ }
+ pubs.masterPublicKey = mpk
+ return nil
+}
+
+// Clone clones every field of PublicKeyShares. This method is mainly for
+// testing purposes and thus panics on error.
+func (pubs *PublicKeyShares) Clone() *PublicKeyShares {
+ b, err := rlp.EncodeToBytes(pubs)
+ if err != nil {
+ panic(err)
+ }
+ pubsCopy := NewEmptyPublicKeyShares()
+ if err := rlp.DecodeBytes(b, pubsCopy); err != nil {
+ panic(err)
+ }
+ return pubsCopy
+}
+
+// NewID creates a new ID structure.
+func NewID(id []byte) ID {
+ var blsID bls.ID
+ // #nosec G104
+ blsID.SetLittleEndian(id)
+ return blsID
+}
+
+// BytesID creates a new ID structure. It returns an error if the byte slice
+// is not valid.
+func BytesID(id []byte) (ID, error) {
+ var blsID bls.ID
+ // #nosec G104
+ err := blsID.SetLittleEndian(id)
+ return blsID, err
+}
+
+// NewPrivateKey creates a new PrivateKey structure.
+func NewPrivateKey() *PrivateKey {
+ var key bls.SecretKey
+ key.SetByCSPRNG()
+ return &PrivateKey{
+ privateKey: key,
+ publicKey: *newPublicKey(&key),
+ }
+}
+
+// NewPrivateKeyShares creates DKG private key shares with threshold t.
+func NewPrivateKeyShares(t int) (*PrivateKeyShares, *PublicKeyShares) {
+ var prv bls.SecretKey
+ prv.SetByCSPRNG()
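+	// GetMasterSecretKey(t) returns the t coefficients of the secret sharing
+	// polynomial; individual shares are evaluations of it at participant IDs
+	// (see SetParticipants).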
+ msk := prv.GetMasterSecretKey(t)
+ mpk := bls.GetMasterPublicKey(msk)
+ pubShare := NewEmptyPublicKeyShares()
+ pubShare.masterPublicKey = mpk
+ return &PrivateKeyShares{
+ masterPrivateKey: msk,
+ shareIndex: make(map[ID]int),
+ }, pubShare
+}
+
+// NewEmptyPrivateKeyShares creates an empty private key shares.
+func NewEmptyPrivateKeyShares() *PrivateKeyShares {
+ return &PrivateKeyShares{
+ shareIndex: make(map[ID]int),
+ }
+}
+
+// SetParticipants sets the DKG participants.
+func (prvs *PrivateKeyShares) SetParticipants(IDs IDs) {
+ prvs.shares = make([]PrivateKey, len(IDs))
+ prvs.shareIndex = make(map[ID]int, len(IDs))
+ for idx, ID := range IDs {
+ // #nosec G104
+ prvs.shares[idx].privateKey.Set(prvs.masterPrivateKey, &ID)
+ prvs.shareIndex[ID] = idx
+ }
+}
+
+// AddShare adds a share.
+func (prvs *PrivateKeyShares) AddShare(ID ID, share *PrivateKey) error {
+ if idx, exist := prvs.shareIndex[ID]; exist {
+ if !share.privateKey.IsEqual(&prvs.shares[idx].privateKey) {
+ return ErrDuplicatedShare
+ }
+ return nil
+ }
+ prvs.shareIndex[ID] = len(prvs.shares)
+ prvs.shares = append(prvs.shares, *share)
+ return nil
+}
+
+// RecoverPrivateKey recovers private key from the shares.
+func (prvs *PrivateKeyShares) RecoverPrivateKey(qualifyIDs IDs) (
+ *PrivateKey, error) {
+ var prv PrivateKey
+ if len(qualifyIDs) == 0 {
+ return nil, ErrNoIDToRecover
+ }
+ for i, ID := range qualifyIDs {
+ idx, exist := prvs.shareIndex[ID]
+ if !exist {
+ return nil, ErrShareNotFound
+ }
+ if i == 0 {
+ prv.privateKey = prvs.shares[idx].privateKey
+ continue
+ }
+ prv.privateKey.Add(&prvs.shares[idx].privateKey)
+ }
+ return &prv, nil
+}
+
+// RecoverPublicKey recovers public key from the shares.
+func (prvs *PrivateKeyShares) RecoverPublicKey(qualifyIDs IDs) (
+ *PublicKey, error) {
+ var pub PublicKey
+ if len(qualifyIDs) == 0 {
+ return nil, ErrNoIDToRecover
+ }
+ for i, ID := range qualifyIDs {
+ idx, exist := prvs.shareIndex[ID]
+ if !exist {
+ return nil, ErrShareNotFound
+ }
+ if i == 0 {
+ pub.publicKey = *prvs.shares[idx].privateKey.GetPublicKey()
+ continue
+ }
+ pub.publicKey.Add(prvs.shares[idx].privateKey.GetPublicKey())
+ }
+ return &pub, nil
+}
+
+// Share returns the share for the ID.
+func (prvs *PrivateKeyShares) Share(ID ID) (*PrivateKey, bool) {
+ idx, exist := prvs.shareIndex[ID]
+ if !exist {
+ return nil, false
+ }
+ return &prvs.shares[idx], true
+}
+
+// NewEmptyPublicKeyShares creates an empty public key shares.
+func NewEmptyPublicKeyShares() *PublicKeyShares {
+ pubShares := &PublicKeyShares{}
+ pubShares.cache.Store(&publicKeySharesCache{
+ index: make(map[ID]int),
+ })
+ return pubShares
+}
+
+// Move will invalidate itself. Do not access the original reference.
+func (pubs *PublicKeyShares) Move() *PublicKeyShares {
+ return pubs
+}
+
+// Share returns the share for the ID.
+func (pubs *PublicKeyShares) Share(ID ID) (*PublicKey, error) {
+ cache := pubs.cache.Load().(*publicKeySharesCache)
+ idx, exist := cache.index[ID]
+ if exist {
+ return &cache.share[idx], nil
+ }
+ var pk PublicKey
+ if err := pk.publicKey.Set(pubs.masterPublicKey, &ID); err != nil {
+ return nil, err
+ }
+ if err := pubs.AddShare(ID, &pk); err != nil {
+ return nil, err
+ }
+ return &pk, nil
+}
+
+// AddShare adds a share.
+func (pubs *PublicKeyShares) AddShare(shareID ID, share *PublicKey) error {
+ cache := pubs.cache.Load().(*publicKeySharesCache)
+ if idx, exist := cache.index[shareID]; exist {
+ if !share.publicKey.IsEqual(&cache.share[idx].publicKey) {
+ return ErrDuplicatedShare
+ }
+ return nil
+ }
+ pubs.lock.Lock()
+ defer pubs.lock.Unlock()
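+	// Re-read the cache under the lock since another writer may have replaced
+	// it after the optimistic check above, then publish a copied-and-extended
+	// snapshot so concurrent readers never observe partial updates.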
+ cache = pubs.cache.Load().(*publicKeySharesCache)
+ newCache := &publicKeySharesCache{
+ index: make(map[ID]int, len(cache.index)+1),
+ share: make([]PublicKey, len(cache.share), len(cache.share)+1),
+ }
+ for k, v := range cache.index {
+ newCache.index[k] = v
+ }
+ copy(newCache.share, cache.share)
+ newCache.index[shareID] = len(newCache.share)
+ newCache.share = append(newCache.share, *share)
+ pubs.cache.Store(newCache)
+ return nil
+}
+
+// VerifyPrvShare verifies if the private key share is valid.
+func (pubs *PublicKeyShares) VerifyPrvShare(ID ID, share *PrivateKey) (
+ bool, error) {
+ var pk bls.PublicKey
+ if err := pk.Set(pubs.masterPublicKey, &ID); err != nil {
+ return false, err
+ }
+ return pk.IsEqual(share.privateKey.GetPublicKey()), nil
+}
+
+// VerifyPubShare verifies if the public key share is valid.
+func (pubs *PublicKeyShares) VerifyPubShare(ID ID, share *PublicKey) (
+ bool, error) {
+ var pk bls.PublicKey
+ if err := pk.Set(pubs.masterPublicKey, &ID); err != nil {
+ return false, err
+ }
+ return pk.IsEqual(&share.publicKey), nil
+}
+
+// RecoverPublicKey recovers the public key from the shares.
+func (pubs *PublicKeyShares) RecoverPublicKey(qualifyIDs IDs) (
+ *PublicKey, error) {
+ var pub PublicKey
+ if len(qualifyIDs) == 0 {
+ return nil, ErrNoIDToRecover
+ }
+ for i, ID := range qualifyIDs {
+ pk, err := pubs.Share(ID)
+ if err != nil {
+ return nil, err
+ }
+ if i == 0 {
+ pub.publicKey = pk.publicKey
+ continue
+ }
+ pub.publicKey.Add(&pk.publicKey)
+ }
+ return &pub, nil
+}
+
+// MasterKeyBytes returns []byte representation of master public key.
+func (pubs *PublicKeyShares) MasterKeyBytes() []byte {
+ bytes := make([]byte, 0, len(pubs.masterPublicKey)*publicKeyLength)
+ for _, pk := range pubs.masterPublicKey {
+ bytes = append(bytes, pk.Serialize()...)
+ }
+ return bytes
+}
+
+// newPublicKey creates a new PublicKey structure.
+func newPublicKey(prvKey *bls.SecretKey) *PublicKey {
+ return &PublicKey{
+ publicKey: *prvKey.GetPublicKey(),
+ }
+}
+
+// newPublicKeyFromBytes creates a new PublicKey structure from the bytes
+// representation of bls.PublicKey.
+func newPublicKeyFromBytes(b []byte) (*PublicKey, error) {
+ var pub PublicKey
+ err := pub.publicKey.Deserialize(b)
+ return &pub, err
+}
+
+// PublicKey returns the public key associated with this private key.
+func (prv *PrivateKey) PublicKey() crypto.PublicKey {
+ return prv.publicKey
+}
+
+// Sign calculates a signature.
+func (prv *PrivateKey) Sign(hash common.Hash) (crypto.Signature, error) {
+ msg := string(hash[:])
+ sign := prv.privateKey.Sign(msg)
+ return crypto.Signature{
+ Type: cryptoType,
+ Signature: sign.Serialize(),
+ }, nil
+}
+
+// Bytes returns []byte representation of private key.
+func (prv *PrivateKey) Bytes() []byte {
+ return prv.privateKey.GetLittleEndian()
+}
+
+// SetBytes sets the private key data to []byte.
+func (prv *PrivateKey) SetBytes(bytes []byte) error {
+ var key bls.SecretKey
+ if err := key.SetLittleEndian(bytes); err != nil {
+ return err
+ }
+ prv.privateKey = key
+ prv.publicKey = *newPublicKey(&prv.privateKey)
+ return nil
+}
+
+// String returns the string representation of the private key.
+func (prv *PrivateKey) String() string {
+ return prv.privateKey.GetHexString()
+}
+
+// VerifySignature checks that the given public key created signature over hash.
+func (pub PublicKey) VerifySignature(
+ hash common.Hash, signature crypto.Signature) bool {
+ if len(signature.Signature) == 0 {
+ return false
+ }
+ var sig bls.Sign
+ if err := sig.Deserialize(signature.Signature[:]); err != nil {
+ fmt.Println(err)
+ return false
+ }
+ msg := string(hash[:])
+ return sig.Verify(&pub.publicKey, msg)
+}
+
+// Bytes returns []byte representation of public key.
+func (pub PublicKey) Bytes() []byte {
+ return pub.publicKey.Serialize()
+}
+
+// Serialize returns the bytes representation of the public key.
+func (pub *PublicKey) Serialize() []byte {
+ return pub.publicKey.Serialize()
+}
+
+// Deserialize parses bytes representation of public key.
+func (pub *PublicKey) Deserialize(b []byte) error {
+ return pub.publicKey.Deserialize(b)
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/utils.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/utils.go
new file mode 100644
index 000000000..589480a3b
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/dkg/utils.go
@@ -0,0 +1,92 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package dkg
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math/rand"
+
+ "github.com/byzantine-lab/bls/ffi/go/bls"
+
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+)
+
+// PartialSignature is a partial signature in DKG+TSIG protocol.
+type PartialSignature crypto.Signature
+
+var (
+ // ErrEmptySignature is reported if the signature is empty.
+ ErrEmptySignature = fmt.Errorf("invalid empty signature")
+)
+
+// RecoverSignature recovers TSIG signature.
+func RecoverSignature(sigs []PartialSignature, signerIDs IDs) (
+ crypto.Signature, error) {
+ blsSigs := make([]bls.Sign, len(sigs))
+ for i, sig := range sigs {
+ if len(sig.Signature) == 0 {
+ return crypto.Signature{}, ErrEmptySignature
+ }
+ if err := blsSigs[i].Deserialize([]byte(sig.Signature)); err != nil {
+ return crypto.Signature{}, err
+ }
+ }
+ var recoverSig bls.Sign
+ if err := recoverSig.Recover(blsSigs, []bls.ID(signerIDs)); err != nil {
+ return crypto.Signature{}, err
+ }
+ return crypto.Signature{
+ Type: cryptoType,
+ Signature: recoverSig.Serialize()}, nil
+}
+
+// RecoverGroupPublicKey recovers group public key.
+func RecoverGroupPublicKey(pubShares []*PublicKeyShares) *PublicKey {
+ var pub *PublicKey
+ for _, pubShare := range pubShares {
+ pk0 := pubShare.masterPublicKey[0]
+ if pub == nil {
+ pub = &PublicKey{
+ publicKey: pk0,
+ }
+ } else {
+ pub.publicKey.Add(&pk0)
+ }
+ }
+ return pub
+}
+
+// NewRandomPrivateKeyShares constructs private key shares randomly.
+func NewRandomPrivateKeyShares() *PrivateKeyShares {
+ // Generate IDs.
+ rndIDs := make(IDs, 0, 10)
+ for i := range rndIDs {
+ id := make([]byte, 8)
+ binary.LittleEndian.PutUint64(id, rand.Uint64())
+ rndIDs[i] = NewID(id)
+ }
+ prvShares := NewEmptyPrivateKeyShares()
+ prvShares.SetParticipants(rndIDs)
+ for _, id := range rndIDs {
+ if err := prvShares.AddShare(id, NewPrivateKey()); err != nil {
+ panic(err)
+ }
+ }
+ return prvShares
+}
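// Illustrative sketch (not part of the vendored file): one way the helpers in
// utils.go combine, assuming NewPrivateKeyShares, Share, PrivateKey.Sign and
// PublicKey.VerifySignature behave as they are used in core/dkg-tsig-protocol.go.
// With a single dealer, threshold-many partial signatures over the dealer's
// shares recover the group signature.
func exampleThresholdSign() (bool, error) {
	const threshold, n = 3, 5
	ids := make(IDs, n)
	for i := range ids {
		b := make([]byte, 8)
		binary.LittleEndian.PutUint64(b, uint64(i+1))
		ids[i] = NewID(b)
	}
	prvShares, pubShares := NewPrivateKeyShares(threshold)
	prvShares.SetParticipants(ids)
	hash := crypto.Keccak256Hash([]byte("payload"))
	psigs := make([]PartialSignature, 0, threshold)
	signers := make(IDs, 0, threshold)
	for _, id := range ids[:threshold] {
		share, ok := prvShares.Share(id)
		if !ok {
			return false, fmt.Errorf("share not found")
		}
		sig, err := share.Sign(hash)
		if err != nil {
			return false, err
		}
		psigs = append(psigs, PartialSignature(sig))
		signers = append(signers, id)
	}
	groupSig, err := RecoverSignature(psigs, signers)
	if err != nil {
		return false, err
	}
	// The recovered signature should verify under the recovered group key.
	groupPK := RecoverGroupPublicKey([]*PublicKeyShares{pubShares})
	return groupPK.VerifySignature(hash, groupSig), nil
}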
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/ecdsa/ecdsa.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/ecdsa/ecdsa.go
new file mode 100644
index 000000000..5c3bf96bb
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/ecdsa/ecdsa.go
@@ -0,0 +1,135 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package ecdsa
+
+import (
+ "crypto/ecdsa"
+
+ dexCrypto "github.com/byzantine-lab/go-tangerine/crypto"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+)
+
+const cryptoType = "ecdsa"
+
+func init() {
+ if err := crypto.RegisterSigToPub(cryptoType, SigToPub); err != nil {
+ panic(err)
+ }
+}
+
+// PrivateKey represents a private key structure used in geth and implements
+// the crypto.PrivateKey interface.
+type PrivateKey struct {
+ privateKey *ecdsa.PrivateKey
+}
+
+// PublicKey represents a public key structure used in geth and implements
+// the crypto.PublicKey interface.
+type PublicKey struct {
+ publicKey *ecdsa.PublicKey
+}
+
+// NewPrivateKey creates a new PrivateKey structure.
+func NewPrivateKey() (*PrivateKey, error) {
+ key, err := dexCrypto.GenerateKey()
+ if err != nil {
+ return nil, err
+ }
+ return &PrivateKey{privateKey: key}, nil
+}
+
+// NewPrivateKeyFromECDSA creates a new PrivateKey structure from
+// ecdsa.PrivateKey.
+func NewPrivateKeyFromECDSA(key *ecdsa.PrivateKey) *PrivateKey {
+ return &PrivateKey{privateKey: key}
+}
+
+// NewPublicKeyFromECDSA creates a new PublicKey structure from
+// ecdsa.PublicKey.
+func NewPublicKeyFromECDSA(key *ecdsa.PublicKey) *PublicKey {
+ return &PublicKey{publicKey: key}
+}
+
+// NewPublicKeyFromByteSlice constructs a PublicKey instance from
+// a byte slice.
+func NewPublicKeyFromByteSlice(b []byte) (crypto.PublicKey, error) {
+ pub, err := dexCrypto.UnmarshalPubkey(b)
+ if err != nil {
+ return &PublicKey{}, err
+ }
+ return &PublicKey{publicKey: pub}, nil
+}
+
+// PublicKey returns the public key associated with this private key.
+func (prv *PrivateKey) PublicKey() crypto.PublicKey {
+ return NewPublicKeyFromECDSA(&(prv.privateKey.PublicKey))
+}
+
+// Sign calculates an ECDSA signature.
+//
+// This function is susceptible to chosen plaintext attacks that can leak
+// information about the private key that is used for signing. Callers must
+// be aware that the given hash cannot be chosen by an adversary. A common
+// solution is to hash any input before calculating the signature.
+//
+// The produced signature is in the [R || S || V] format where V is 0 or 1.
+func (prv *PrivateKey) Sign(hash common.Hash) (
+ sig crypto.Signature, err error) {
+ s, err := dexCrypto.Sign(hash[:], prv.privateKey)
+ sig = crypto.Signature{
+ Type: cryptoType,
+ Signature: s,
+ }
+ return
+}
+
+// VerifySignature checks that the given public key created signature over hash.
+// The public key should be in compressed (33 bytes) or uncompressed (65 bytes)
+// format.
+// The signature should have the 64 byte [R || S] format.
+func (pub *PublicKey) VerifySignature(
+ hash common.Hash, signature crypto.Signature) bool {
+ sig := signature.Signature
+ if len(sig) == 65 {
+ // The last byte is for ecrecover.
+ sig = sig[:64]
+ }
+ return dexCrypto.VerifySignature(pub.Bytes(), hash[:], sig)
+}
+
+// Compress encodes a public key to the 33-byte compressed format.
+func (pub *PublicKey) Compress() []byte {
+ return dexCrypto.CompressPubkey(pub.publicKey)
+}
+
+// Bytes returns the []byte representation of uncompressed public key. (65 bytes)
+func (pub *PublicKey) Bytes() []byte {
+ return dexCrypto.FromECDSAPub(pub.publicKey)
+}
+
+// SigToPub returns the PublicKey that created the given signature.
+func SigToPub(
+ hash common.Hash, signature crypto.Signature) (crypto.PublicKey, error) {
+ key, err := dexCrypto.SigToPub(hash[:], signature.Signature[:])
+ if err != nil {
+ return &PublicKey{}, err
+ }
+ return &PublicKey{publicKey: key}, nil
+}
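// Illustrative sketch (not part of the vendored file): the sign/recover/verify
// round trip this file provides, using only the functions defined above.
func exampleECDSARoundTrip() (bool, error) {
	prv, err := NewPrivateKey()
	if err != nil {
		return false, err
	}
	hash := crypto.Keccak256Hash([]byte("payload"))
	sig, err := prv.Sign(hash)
	if err != nil {
		return false, err
	}
	// SigToPub recovers the signer from the 65-byte [R || S || V] signature;
	// VerifySignature trims the V byte internally before verifying.
	recovered, err := SigToPub(hash, sig)
	if err != nil {
		return false, err
	}
	return recovered.VerifySignature(hash, sig) &&
		prv.PublicKey().VerifySignature(hash, sig), nil
}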
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/interfaces.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/interfaces.go
new file mode 100644
index 000000000..9fe47f7dc
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/interfaces.go
@@ -0,0 +1,48 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package crypto
+
+import (
+ "github.com/byzantine-lab/dexon-consensus/common"
+)
+
+// Signature is the basic signature type in DEXON.
+type Signature struct {
+ Type string
+ Signature []byte
+}
+
+// PrivateKey describes the asymmetric cryptography interface that interacts
+// with the private key.
+type PrivateKey interface {
+ // PublicKey returns the public key associated with this private key.
+ PublicKey() PublicKey
+
+ // Sign calculates a signature.
+ Sign(hash common.Hash) (Signature, error)
+}
+
+// PublicKey describes the asymmetric cryptography interface that interacts
+// with the public key.
+type PublicKey interface {
+ // VerifySignature checks that the given public key created signature over hash.
+ VerifySignature(hash common.Hash, signature Signature) bool
+
+ // Bytes returns the []byte representation of public key.
+ Bytes() []byte
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/utils.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/utils.go
new file mode 100644
index 000000000..744be3e5f
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/crypto/utils.go
@@ -0,0 +1,80 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package crypto
+
+import (
+ "encoding/hex"
+ "fmt"
+
+ "github.com/byzantine-lab/go-tangerine/crypto"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+)
+
+var (
+ // ErrSigToPubTypeNotFound is reported if no sigToPub function is registered for the type.
+ ErrSigToPubTypeNotFound = fmt.Errorf("type of sigToPub is not found")
+
+ // ErrSigToPubTypeAlreadyExist is reported if the type is already used.
+ ErrSigToPubTypeAlreadyExist = fmt.Errorf("type of sigToPub is already exist")
+)
+
+// SigToPubFn is a function to recover public key from signature.
+type SigToPubFn func(hash common.Hash, signature Signature) (PublicKey, error)
+
+var sigToPubCB map[string]SigToPubFn
+
+func init() {
+ sigToPubCB = make(map[string]SigToPubFn)
+}
+
+// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
+// converting it to an internal Hash data structure.
+func Keccak256Hash(data ...[]byte) (h common.Hash) {
+ return common.Hash(crypto.Keccak256Hash(data...))
+}
+
+// Clone returns a deep copy of a signature.
+func (sig Signature) Clone() Signature {
+ return Signature{
+ Type: sig.Type,
+ Signature: sig.Signature[:],
+ }
+}
+
+func (sig Signature) String() string {
+ return hex.EncodeToString([]byte(sig.Signature[:]))
+}
+
+// RegisterSigToPub registers a sigToPub function of type.
+func RegisterSigToPub(sigType string, sigToPub SigToPubFn) error {
+ if _, exist := sigToPubCB[sigType]; exist {
+ return ErrSigToPubTypeAlreadyExist
+ }
+ sigToPubCB[sigType] = sigToPub
+ return nil
+}
+
+// SigToPub recovers public key from signature.
+func SigToPub(hash common.Hash, signature Signature) (PublicKey, error) {
+ sigToPub, exist := sigToPubCB[signature.Type]
+ if !exist {
+ return nil, ErrSigToPubTypeNotFound
+ }
+ return sigToPub(hash, signature)
+}
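// Illustrative sketch (not part of the vendored file): a typical use of the
// registry above, checking that sig over hash was produced by the owner of
// pub. It assumes an extra "bytes" import alongside the ones shown.
func exampleCheckSigner(pub PublicKey, hash common.Hash, sig Signature) bool {
	// SigToPub dispatches on sig.Type to whichever scheme registered itself
	// via RegisterSigToPub (e.g. the ecdsa package's init()).
	recovered, err := SigToPub(hash, sig)
	if err != nil {
		return false
	}
	return bytes.Equal(recovered.Bytes(), pub.Bytes()) &&
		pub.VerifySignature(hash, sig)
}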
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/db/interfaces.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/interfaces.go
new file mode 100644
index 000000000..1d15c68a0
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/interfaces.go
@@ -0,0 +1,100 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package db
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+var (
+ // ErrBlockExists is the error when the block exists.
+ ErrBlockExists = errors.New("block exists")
+ // ErrBlockDoesNotExist is the error when the block does not exist.
+ ErrBlockDoesNotExist = errors.New("block does not exist")
+ // ErrIterationFinished is the error to check if the iteration is finished.
+ ErrIterationFinished = errors.New("iteration finished")
+ // ErrEmptyPath is the error when the required path is empty.
+ ErrEmptyPath = fmt.Errorf("empty path")
+ // ErrClosed is the error when using DB after it's closed.
+ ErrClosed = fmt.Errorf("db closed")
+ // ErrNotImplemented is the error that some interface is not implemented.
+ ErrNotImplemented = fmt.Errorf("not implemented")
+ // ErrInvalidCompactionChainTipHeight means the newly updated height of
+ // the tip of compaction chain is invalid, usually means it's smaller than
+ // current cached one.
+ ErrInvalidCompactionChainTipHeight = fmt.Errorf(
+ "invalid compaction chain tip height")
+ // ErrDKGPrivateKeyExists is raised when attempting to save a DKG private key
+ // that was already saved.
+ ErrDKGPrivateKeyExists = errors.New("dkg private key exists")
+ // ErrDKGPrivateKeyDoesNotExist is raised when the DKG private key of the
+ // requested round does not exist.
+ ErrDKGPrivateKeyDoesNotExist = errors.New("dkg private key does not exists")
+ // ErrDKGProtocolExists is raised when attempting to save a DKG protocol
+ // that was already saved.
+ ErrDKGProtocolExists = errors.New("dkg protocol exists")
+ // ErrDKGProtocolDoesNotExist is raised when the DKG protocol of the
+ // requested round does not exist.
+ ErrDKGProtocolDoesNotExist = errors.New("dkg protocol does not exists")
+)
+
+// Database is the interface for a Database.
+type Database interface {
+ Reader
+ Writer
+
+ // Close allows database implementation able to
+ // release resource when finishing.
+ Close() error
+}
+
+// Reader defines the interface for reading blocks into DB.
+type Reader interface {
+ HasBlock(hash common.Hash) bool
+ GetBlock(hash common.Hash) (types.Block, error)
+ GetAllBlocks() (BlockIterator, error)
+
+ // GetCompactionChainTipInfo returns the block hash and finalization height
+ // of the tip block of compaction chain. Empty hash and zero height means
+ // the compaction chain is empty.
+ GetCompactionChainTipInfo() (common.Hash, uint64)
+
+ // DKG Private Key related methods.
+ GetDKGPrivateKey(round, reset uint64) (dkg.PrivateKey, error)
+ GetDKGProtocol() (dkgProtocol DKGProtocolInfo, err error)
+}
+
+// Writer defines the interface for writing blocks into DB.
+type Writer interface {
+ UpdateBlock(block types.Block) error
+ PutBlock(block types.Block) error
+ PutCompactionChainTipInfo(common.Hash, uint64) error
+ PutDKGPrivateKey(round, reset uint64, pk dkg.PrivateKey) error
+ PutOrUpdateDKGProtocol(dkgProtocol DKGProtocolInfo) error
+}
+
+// BlockIterator defines an iterator on blocks hold
+// in a DB.
+type BlockIterator interface {
+ NextBlock() (types.Block, error)
+}
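// Illustrative sketch (not part of the vendored file): draining a
// BlockIterator, treating ErrIterationFinished as the normal termination.
func exampleDumpBlocks(dbInst Database) ([]types.Block, error) {
	iter, err := dbInst.GetAllBlocks()
	if err != nil {
		return nil, err
	}
	var blocks []types.Block
	for {
		b, err := iter.NextBlock()
		if err == ErrIterationFinished {
			return blocks, nil
		}
		if err != nil {
			return nil, err
		}
		blocks = append(blocks, b)
	}
}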
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/db/level-db.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/level-db.go
new file mode 100644
index 000000000..9e3564b50
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/level-db.go
@@ -0,0 +1,573 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package db
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/syndtr/goleveldb/leveldb"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ "github.com/byzantine-lab/go-tangerine/rlp"
+)
+
+var (
+ blockKeyPrefix = []byte("b-")
+ compactionChainTipInfoKey = []byte("cc-tip")
+ dkgPrivateKeyKeyPrefix = []byte("dkg-prvs")
+ dkgProtocolInfoKeyPrefix = []byte("dkg-protocol-info")
+)
+
+type compactionChainTipInfo struct {
+ Height uint64 `json:"height"`
+ Hash common.Hash `json:"hash"`
+}
+
+// DKGProtocolInfo DKG protocol info.
+type DKGProtocolInfo struct {
+ ID types.NodeID
+ Round uint64
+ Threshold uint64
+ IDMap NodeIDToDKGID
+ MpkMap NodeIDToPubShares
+ MasterPrivateShare dkg.PrivateKeyShares
+ IsMasterPrivateShareEmpty bool
+ PrvShares dkg.PrivateKeyShares
+ IsPrvSharesEmpty bool
+ PrvSharesReceived NodeID
+ NodeComplained NodeID
+ AntiComplaintReceived NodeIDToNodeIDs
+ Step uint64
+ Reset uint64
+}
+
+type dkgPrivateKey struct {
+ PK dkg.PrivateKey
+ Reset uint64
+}
+
+// Equal compares with the target DKGProtocolInfo.
+func (info *DKGProtocolInfo) Equal(target *DKGProtocolInfo) bool {
+ if !info.ID.Equal(target.ID) ||
+ info.Round != target.Round ||
+ info.Threshold != target.Threshold ||
+ info.IsMasterPrivateShareEmpty != target.IsMasterPrivateShareEmpty ||
+ info.IsPrvSharesEmpty != target.IsPrvSharesEmpty ||
+ info.Step != target.Step ||
+ info.Reset != target.Reset ||
+ !info.MasterPrivateShare.Equal(&target.MasterPrivateShare) ||
+ !info.PrvShares.Equal(&target.PrvShares) {
+ return false
+ }
+
+ if len(info.IDMap) != len(target.IDMap) {
+ return false
+ }
+ for k, v := range info.IDMap {
+ tV, exist := target.IDMap[k]
+ if !exist {
+ return false
+ }
+
+ if !v.IsEqual(&tV) {
+ return false
+ }
+ }
+
+ if len(info.MpkMap) != len(target.MpkMap) {
+ return false
+ }
+ for k, v := range info.MpkMap {
+ tV, exist := target.MpkMap[k]
+ if !exist {
+ return false
+ }
+
+ if !v.Equal(tV) {
+ return false
+ }
+ }
+
+ if len(info.PrvSharesReceived) != len(target.PrvSharesReceived) {
+ return false
+ }
+ for k := range info.PrvSharesReceived {
+ _, exist := target.PrvSharesReceived[k]
+ if !exist {
+ return false
+ }
+ }
+
+ if len(info.NodeComplained) != len(target.NodeComplained) {
+ return false
+ }
+ for k := range info.NodeComplained {
+ _, exist := target.NodeComplained[k]
+ if !exist {
+ return false
+ }
+ }
+
+ if len(info.AntiComplaintReceived) != len(target.AntiComplaintReceived) {
+ return false
+ }
+ for k, v := range info.AntiComplaintReceived {
+ tV, exist := target.AntiComplaintReceived[k]
+ if !exist {
+ return false
+ }
+
+ if len(v) != len(tV) {
+ return false
+ }
+ for kk := range v {
+ _, exist := tV[kk]
+ if !exist {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// NodeIDToNodeIDs maps a NodeID to a set of NodeIDs.
+type NodeIDToNodeIDs map[types.NodeID]map[types.NodeID]struct{}
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeIDToNodeIDs) EncodeRLP(w io.Writer) error {
+ var allBytes [][][]byte
+ for k, v := range m {
+ kBytes, err := k.MarshalText()
+ if err != nil {
+ return err
+ }
+ allBytes = append(allBytes, [][]byte{kBytes})
+
+ var vBytes [][]byte
+ for subK := range v {
+ bytes, err := subK.MarshalText()
+ if err != nil {
+ return err
+ }
+ vBytes = append(vBytes, bytes)
+ }
+ allBytes = append(allBytes, vBytes)
+ }
+
+ return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeIDToNodeIDs) DecodeRLP(s *rlp.Stream) error {
+ *m = make(NodeIDToNodeIDs)
+ var dec [][][]byte
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(dec); i += 2 {
+ key := types.NodeID{}
+ err := key.UnmarshalText(dec[i][0])
+ if err != nil {
+ return err
+ }
+
+ valueMap := map[types.NodeID]struct{}{}
+ for _, v := range dec[i+1] {
+ value := types.NodeID{}
+ err := value.UnmarshalText(v)
+ if err != nil {
+ return err
+ }
+
+ valueMap[value] = struct{}{}
+ }
+
+ (*m)[key] = valueMap
+ }
+
+ return nil
+}
+
+// NodeID is a set of NodeIDs.
+type NodeID map[types.NodeID]struct{}
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeID) EncodeRLP(w io.Writer) error {
+ var allBytes [][]byte
+ for k := range m {
+ kBytes, err := k.MarshalText()
+ if err != nil {
+ return err
+ }
+ allBytes = append(allBytes, kBytes)
+ }
+
+ return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeID) DecodeRLP(s *rlp.Stream) error {
+ *m = make(NodeID)
+ var dec [][]byte
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(dec); i++ {
+ key := types.NodeID{}
+ err := key.UnmarshalText(dec[i])
+ if err != nil {
+ return err
+ }
+
+ (*m)[key] = struct{}{}
+ }
+
+ return nil
+}
+
+// NodeIDToPubShares maps a NodeID to its PublicKeyShares.
+type NodeIDToPubShares map[types.NodeID]*dkg.PublicKeyShares
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeIDToPubShares) EncodeRLP(w io.Writer) error {
+ var allBytes [][]byte
+ for k, v := range m {
+ kBytes, err := k.MarshalText()
+ if err != nil {
+ return err
+ }
+ allBytes = append(allBytes, kBytes)
+
+ bytes, err := rlp.EncodeToBytes(v)
+ if err != nil {
+ return err
+ }
+ allBytes = append(allBytes, bytes)
+ }
+
+ return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeIDToPubShares) DecodeRLP(s *rlp.Stream) error {
+ *m = make(NodeIDToPubShares)
+ var dec [][]byte
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(dec); i += 2 {
+ key := types.NodeID{}
+ err := key.UnmarshalText(dec[i])
+ if err != nil {
+ return err
+ }
+
+ value := dkg.PublicKeyShares{}
+ err = rlp.DecodeBytes(dec[i+1], &value)
+ if err != nil {
+ return err
+ }
+
+ (*m)[key] = &value
+ }
+
+ return nil
+}
+
+// NodeIDToDKGID maps a NodeID to its DKG ID.
+type NodeIDToDKGID map[types.NodeID]dkg.ID
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeIDToDKGID) EncodeRLP(w io.Writer) error {
+ var allBytes [][]byte
+ for k, v := range m {
+ kBytes, err := k.MarshalText()
+ if err != nil {
+ return err
+ }
+ allBytes = append(allBytes, kBytes)
+ allBytes = append(allBytes, v.GetLittleEndian())
+ }
+
+ return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeIDToDKGID) DecodeRLP(s *rlp.Stream) error {
+ *m = make(NodeIDToDKGID)
+ var dec [][]byte
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(dec); i += 2 {
+ key := types.NodeID{}
+ err := key.UnmarshalText(dec[i])
+ if err != nil {
+ return err
+ }
+
+ value := dkg.ID{}
+ err = value.SetLittleEndian(dec[i+1])
+ if err != nil {
+ return err
+ }
+
+ (*m)[key] = value
+ }
+
+ return nil
+}
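// Note (illustrative, not part of the vendored file): the map types above all
// use the same flattened RLP layout, alternating key entries and value entries
// in one list, so a round trip is simply:
//
//	buf, _ := rlp.EncodeToBytes(original)   // calls the EncodeRLP above
//	var decoded NodeIDToDKGID
//	_ = rlp.DecodeBytes(buf, &decoded)      // calls the DecodeRLP above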
+
+// LevelDBBackedDB is a leveldb backed DB implementation.
+type LevelDBBackedDB struct {
+ db *leveldb.DB
+}
+
+// NewLevelDBBackedDB initialize a leveldb-backed database.
+func NewLevelDBBackedDB(
+ path string) (lvl *LevelDBBackedDB, err error) {
+
+ dbInst, err := leveldb.OpenFile(path, nil)
+ if err != nil {
+ return
+ }
+ lvl = &LevelDBBackedDB{db: dbInst}
+ return
+}
+
+// Close implements the Closer interface, releasing allocated resources.
+func (lvl *LevelDBBackedDB) Close() error {
+ return lvl.db.Close()
+}
+
+// HasBlock implements the Reader.HasBlock method.
+func (lvl *LevelDBBackedDB) HasBlock(hash common.Hash) bool {
+ exists, err := lvl.internalHasBlock(lvl.getBlockKey(hash))
+ if err != nil {
+ panic(err)
+ }
+ return exists
+}
+
+func (lvl *LevelDBBackedDB) internalHasBlock(key []byte) (bool, error) {
+ return lvl.db.Has(key, nil)
+}
+
+// GetBlock implements the Reader.GetBlock method.
+func (lvl *LevelDBBackedDB) GetBlock(
+ hash common.Hash) (block types.Block, err error) {
+ queried, err := lvl.db.Get(lvl.getBlockKey(hash), nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ err = ErrBlockDoesNotExist
+ }
+ return
+ }
+ err = rlp.DecodeBytes(queried, &block)
+ return
+}
+
+// UpdateBlock implements the Writer.UpdateBlock method.
+func (lvl *LevelDBBackedDB) UpdateBlock(block types.Block) (err error) {
+ // NOTE: we don't handle changes of the block hash (and they
+ // should not happen).
+ marshaled, err := rlp.EncodeToBytes(&block)
+ if err != nil {
+ return
+ }
+ blockKey := lvl.getBlockKey(block.Hash)
+ exists, err := lvl.internalHasBlock(blockKey)
+ if err != nil {
+ return
+ }
+ if !exists {
+ err = ErrBlockDoesNotExist
+ return
+ }
+ err = lvl.db.Put(blockKey, marshaled, nil)
+ return
+}
+
+// PutBlock implements the Writer.PutBlock method.
+func (lvl *LevelDBBackedDB) PutBlock(block types.Block) (err error) {
+ marshaled, err := rlp.EncodeToBytes(&block)
+ if err != nil {
+ return
+ }
+ blockKey := lvl.getBlockKey(block.Hash)
+ exists, err := lvl.internalHasBlock(blockKey)
+ if err != nil {
+ return
+ }
+ if exists {
+ err = ErrBlockExists
+ return
+ }
+ err = lvl.db.Put(blockKey, marshaled, nil)
+ return
+}
+
+// GetAllBlocks implements Reader.GetAllBlocks method, which allows callers
+// to retrieve all blocks in DB.
+func (lvl *LevelDBBackedDB) GetAllBlocks() (BlockIterator, error) {
+ return nil, ErrNotImplemented
+}
+
+// PutCompactionChainTipInfo saves tip of compaction chain into the database.
+func (lvl *LevelDBBackedDB) PutCompactionChainTipInfo(
+ blockHash common.Hash, height uint64) error {
+ marshaled, err := rlp.EncodeToBytes(&compactionChainTipInfo{
+ Hash: blockHash,
+ Height: height,
+ })
+ if err != nil {
+ return err
+ }
+ // Check current cached tip info to make sure the one to be updated is
+ // valid.
+ info, err := lvl.internalGetCompactionChainTipInfo()
+ if err != nil {
+ return err
+ }
+ if info.Height+1 != height {
+ return ErrInvalidCompactionChainTipHeight
+ }
+ return lvl.db.Put(compactionChainTipInfoKey, marshaled, nil)
+}
+
+func (lvl *LevelDBBackedDB) internalGetCompactionChainTipInfo() (
+ info compactionChainTipInfo, err error) {
+ queried, err := lvl.db.Get(compactionChainTipInfoKey, nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ err = nil
+ }
+ return
+ }
+ err = rlp.DecodeBytes(queried, &info)
+ return
+}
+
+// GetCompactionChainTipInfo gets the tip info of the compaction chain from the
+// database.
+func (lvl *LevelDBBackedDB) GetCompactionChainTipInfo() (
+ hash common.Hash, height uint64) {
+ info, err := lvl.internalGetCompactionChainTipInfo()
+ if err != nil {
+ panic(err)
+ }
+ hash, height = info.Hash, info.Height
+ return
+}
+
+// GetDKGPrivateKey gets the DKG private key of one round.
+func (lvl *LevelDBBackedDB) GetDKGPrivateKey(round, reset uint64) (
+ prv dkg.PrivateKey, err error) {
+ queried, err := lvl.db.Get(lvl.getDKGPrivateKeyKey(round), nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ err = ErrDKGPrivateKeyDoesNotExist
+ }
+ return
+ }
+ pk := dkgPrivateKey{}
+ if err = rlp.DecodeBytes(queried, &pk); err != nil {
+ return
+ }
+ if pk.Reset != reset {
+ err = ErrDKGPrivateKeyDoesNotExist
+ return
+ }
+ prv = pk.PK
+ return
+}
+
+// PutDKGPrivateKey saves the DKG private key of one round.
+func (lvl *LevelDBBackedDB) PutDKGPrivateKey(
+ round, reset uint64, prv dkg.PrivateKey) error {
+ // Check existence.
+ _, err := lvl.GetDKGPrivateKey(round, reset)
+ if err == nil {
+ return ErrDKGPrivateKeyExists
+ }
+ if err != ErrDKGPrivateKeyDoesNotExist {
+ return err
+ }
+ pk := &dkgPrivateKey{
+ PK: prv,
+ Reset: reset,
+ }
+ marshaled, err := rlp.EncodeToBytes(&pk)
+ if err != nil {
+ return err
+ }
+ return lvl.db.Put(
+ lvl.getDKGPrivateKeyKey(round), marshaled, nil)
+}
+
+// GetDKGProtocol gets the DKG protocol.
+func (lvl *LevelDBBackedDB) GetDKGProtocol() (
+ info DKGProtocolInfo, err error) {
+ queried, err := lvl.db.Get(lvl.getDKGProtocolInfoKey(), nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ err = ErrDKGProtocolDoesNotExist
+ }
+ return
+ }
+
+ err = rlp.DecodeBytes(queried, &info)
+ return
+}
+
+// PutOrUpdateDKGProtocol saves the DKG protocol.
+func (lvl *LevelDBBackedDB) PutOrUpdateDKGProtocol(info DKGProtocolInfo) error {
+ marshaled, err := rlp.EncodeToBytes(&info)
+ if err != nil {
+ return err
+ }
+ return lvl.db.Put(lvl.getDKGProtocolInfoKey(), marshaled, nil)
+}
+
+func (lvl *LevelDBBackedDB) getBlockKey(hash common.Hash) (ret []byte) {
+ ret = make([]byte, len(blockKeyPrefix)+len(hash[:]))
+ copy(ret, blockKeyPrefix)
+ copy(ret[len(blockKeyPrefix):], hash[:])
+ return
+}
+
+func (lvl *LevelDBBackedDB) getDKGPrivateKeyKey(
+ round uint64) (ret []byte) {
+ ret = make([]byte, len(dkgPrivateKeyKeyPrefix)+8)
+ copy(ret, dkgPrivateKeyKeyPrefix)
+ binary.LittleEndian.PutUint64(
+ ret[len(dkgPrivateKeyKeyPrefix):], round)
+ return
+}
+
+func (lvl *LevelDBBackedDB) getDKGProtocolInfoKey() (ret []byte) {
+ ret = make([]byte, len(dkgProtocolInfoKeyPrefix)+8)
+ copy(ret, dkgProtocolInfoKeyPrefix)
+ return
+}
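// Illustrative sketch (not part of the vendored file): typical lifecycle of
// the leveldb-backed DB defined above; the path is a placeholder.
func exampleLevelDBUsage(block types.Block) error {
	dbInst, err := NewLevelDBBackedDB("/tmp/dexcon-db") // placeholder path
	if err != nil {
		return err
	}
	defer dbInst.Close()
	// PutBlock refuses duplicates; UpdateBlock requires prior existence.
	if err := dbInst.PutBlock(block); err != nil && err != ErrBlockExists {
		return err
	}
	stored, err := dbInst.GetBlock(block.Hash)
	if err != nil {
		return err
	}
	_ = stored
	return nil
}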
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/db/memory.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/memory.go
new file mode 100644
index 000000000..2ad5cda9e
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/db/memory.go
@@ -0,0 +1,262 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package db
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "sync"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+type blockSeqIterator struct {
+ idx int
+ db *MemBackedDB
+}
+
+// NextBlock implements the BlockIterator.NextBlock method.
+func (seq *blockSeqIterator) NextBlock() (types.Block, error) {
+ curIdx := seq.idx
+ seq.idx++
+ return seq.db.getBlockByIndex(curIdx)
+}
+
+// MemBackedDB is a memory backed DB implementation.
+type MemBackedDB struct {
+ blocksLock sync.RWMutex
+ blockHashSequence common.Hashes
+ blocksByHash map[common.Hash]*types.Block
+ compactionChainTipLock sync.RWMutex
+ compactionChainTipHash common.Hash
+ compactionChainTipHeight uint64
+ dkgPrivateKeysLock sync.RWMutex
+ dkgPrivateKeys map[uint64]*dkgPrivateKey
+ dkgProtocolLock sync.RWMutex
+ dkgProtocolInfo *DKGProtocolInfo
+ persistantFilePath string
+}
+
+// NewMemBackedDB initialize a memory-backed database.
+func NewMemBackedDB(persistantFilePath ...string) (
+ dbInst *MemBackedDB, err error) {
+ dbInst = &MemBackedDB{
+ blockHashSequence: common.Hashes{},
+ blocksByHash: make(map[common.Hash]*types.Block),
+ dkgPrivateKeys: make(map[uint64]*dkgPrivateKey),
+ }
+ if len(persistantFilePath) == 0 || len(persistantFilePath[0]) == 0 {
+ return
+ }
+ dbInst.persistantFilePath = persistantFilePath[0]
+ buf, err := ioutil.ReadFile(dbInst.persistantFilePath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ // Something unexpected happened.
+ return
+ }
+ // It's expected that the file may not exist; we should not
+ // report an error for it.
+ err = nil
+ return
+ }
+
+ // Initialize this instance from the file content; the anonymous struct is a
+ // temporary way to export those private fields for JSON decoding.
+ toLoad := struct {
+ Sequence common.Hashes
+ ByHash map[common.Hash]*types.Block
+ }{}
+ err = json.Unmarshal(buf, &toLoad)
+ if err != nil {
+ return
+ }
+ dbInst.blockHashSequence = toLoad.Sequence
+ dbInst.blocksByHash = toLoad.ByHash
+ return
+}
+
+// HasBlock returns whether or not the DB has a block identified by the hash.
+func (m *MemBackedDB) HasBlock(hash common.Hash) bool {
+ m.blocksLock.RLock()
+ defer m.blocksLock.RUnlock()
+
+ _, ok := m.blocksByHash[hash]
+ return ok
+}
+
+// GetBlock returns a block given a hash.
+func (m *MemBackedDB) GetBlock(hash common.Hash) (types.Block, error) {
+ m.blocksLock.RLock()
+ defer m.blocksLock.RUnlock()
+
+ return m.internalGetBlock(hash)
+}
+
+func (m *MemBackedDB) internalGetBlock(hash common.Hash) (types.Block, error) {
+ b, ok := m.blocksByHash[hash]
+ if !ok {
+ return types.Block{}, ErrBlockDoesNotExist
+ }
+ return *b, nil
+}
+
+// PutBlock inserts a new block into the database.
+func (m *MemBackedDB) PutBlock(block types.Block) error {
+ if m.HasBlock(block.Hash) {
+ return ErrBlockExists
+ }
+
+ m.blocksLock.Lock()
+ defer m.blocksLock.Unlock()
+
+ m.blockHashSequence = append(m.blockHashSequence, block.Hash)
+ m.blocksByHash[block.Hash] = &block
+ return nil
+}
+
+// UpdateBlock updates a block in the database.
+func (m *MemBackedDB) UpdateBlock(block types.Block) error {
+ if !m.HasBlock(block.Hash) {
+ return ErrBlockDoesNotExist
+ }
+
+ m.blocksLock.Lock()
+ defer m.blocksLock.Unlock()
+
+ m.blocksByHash[block.Hash] = &block
+ return nil
+}
+
+// PutCompactionChainTipInfo saves tip of compaction chain into the database.
+func (m *MemBackedDB) PutCompactionChainTipInfo(
+ blockHash common.Hash, height uint64) error {
+ m.compactionChainTipLock.Lock()
+ defer m.compactionChainTipLock.Unlock()
+ if m.compactionChainTipHeight+1 != height {
+ return ErrInvalidCompactionChainTipHeight
+ }
+ m.compactionChainTipHeight = height
+ m.compactionChainTipHash = blockHash
+ return nil
+}
+
+// GetCompactionChainTipInfo gets the tip info of the compaction chain from the
+// database.
+func (m *MemBackedDB) GetCompactionChainTipInfo() (
+ hash common.Hash, height uint64) {
+ m.compactionChainTipLock.RLock()
+ defer m.compactionChainTipLock.RUnlock()
+ return m.compactionChainTipHash, m.compactionChainTipHeight
+}
+
+// GetDKGPrivateKey gets the DKG private key of one round.
+func (m *MemBackedDB) GetDKGPrivateKey(round, reset uint64) (
+ dkg.PrivateKey, error) {
+ m.dkgPrivateKeysLock.RLock()
+ defer m.dkgPrivateKeysLock.RUnlock()
+ if prv, exists := m.dkgPrivateKeys[round]; exists && prv.Reset == reset {
+ return prv.PK, nil
+ }
+ return dkg.PrivateKey{}, ErrDKGPrivateKeyDoesNotExist
+}
+
+// PutDKGPrivateKey saves the DKG private key of one round.
+func (m *MemBackedDB) PutDKGPrivateKey(
+ round, reset uint64, prv dkg.PrivateKey) error {
+ m.dkgPrivateKeysLock.Lock()
+ defer m.dkgPrivateKeysLock.Unlock()
+ if prv, exists := m.dkgPrivateKeys[round]; exists && prv.Reset == reset {
+ return ErrDKGPrivateKeyExists
+ }
+ m.dkgPrivateKeys[round] = &dkgPrivateKey{
+ PK: prv,
+ Reset: reset,
+ }
+ return nil
+}
+
+// GetDKGProtocol gets the DKG protocol.
+func (m *MemBackedDB) GetDKGProtocol() (
+ DKGProtocolInfo, error) {
+ m.dkgProtocolLock.RLock()
+ defer m.dkgProtocolLock.RUnlock()
+ if m.dkgProtocolInfo == nil {
+ return DKGProtocolInfo{}, ErrDKGProtocolDoesNotExist
+ }
+
+ return *m.dkgProtocolInfo, nil
+}
+
+// PutOrUpdateDKGProtocol saves the DKG protocol.
+func (m *MemBackedDB) PutOrUpdateDKGProtocol(dkgProtocol DKGProtocolInfo) error {
+ m.dkgProtocolLock.Lock()
+ defer m.dkgProtocolLock.Unlock()
+ m.dkgProtocolInfo = &dkgProtocol
+ return nil
+}
+
+// Close implements the Closer interface, releasing allocated resources.
+func (m *MemBackedDB) Close() (err error) {
+ // Save internal state to a JSON file. It's a temporary way
+ // to dump the private fields via JSON encoding.
+ if len(m.persistantFilePath) == 0 {
+ return
+ }
+
+ m.blocksLock.RLock()
+ defer m.blocksLock.RUnlock()
+
+ toDump := struct {
+ Sequence common.Hashes
+ ByHash map[common.Hash]*types.Block
+ }{
+ Sequence: m.blockHashSequence,
+ ByHash: m.blocksByHash,
+ }
+
+ // Dump to JSON.
+ buf, err := json.Marshal(&toDump)
+ if err != nil {
+ return
+ }
+
+ err = ioutil.WriteFile(m.persistantFilePath, buf, 0644)
+ return
+}
+
+func (m *MemBackedDB) getBlockByIndex(idx int) (types.Block, error) {
+ m.blocksLock.RLock()
+ defer m.blocksLock.RUnlock()
+
+ if idx >= len(m.blockHashSequence) {
+ return types.Block{}, ErrIterationFinished
+ }
+
+ hash := m.blockHashSequence[idx]
+ return m.internalGetBlock(hash)
+}
+
+// GetAllBlocks implements the Reader.GetAllBlocks method, which allows callers
+// to retrieve all blocks in the DB.
+func (m *MemBackedDB) GetAllBlocks() (BlockIterator, error) {
+ return &blockSeqIterator{db: m}, nil
+}
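// Illustrative sketch (not part of the vendored file): the memory-backed DB
// optionally persists its block table to a JSON file on Close; the file name
// below is a placeholder.
func exampleMemDBUsage(block types.Block) error {
	dbInst, err := NewMemBackedDB("mem-db.json") // placeholder file
	if err != nil {
		return err
	}
	if err := dbInst.PutBlock(block); err != nil {
		return err
	}
	// Close writes blockHashSequence/blocksByHash back to the file.
	return dbInst.Close()
}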
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/dkg-tsig-protocol.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/dkg-tsig-protocol.go
new file mode 100644
index 000000000..38739da4e
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/dkg-tsig-protocol.go
@@ -0,0 +1,709 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg"
+ "github.com/byzantine-lab/dexon-consensus/core/db"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+// Errors for dkg module.
+var (
+ ErrNotDKGParticipant = fmt.Errorf(
+ "not a DKG participant")
+ ErrNotQualifyDKGParticipant = fmt.Errorf(
+ "not a qualified DKG participant")
+ ErrIDShareNotFound = fmt.Errorf(
+ "private share not found for specific ID")
+ ErrIncorrectPrivateShareSignature = fmt.Errorf(
+ "incorrect private share signature")
+ ErrMismatchPartialSignatureHash = fmt.Errorf(
+ "mismatch partialSignature hash")
+ ErrIncorrectPartialSignatureSignature = fmt.Errorf(
+ "incorrect partialSignature signature")
+ ErrIncorrectPartialSignature = fmt.Errorf(
+ "incorrect partialSignature")
+ ErrNotEnoughtPartialSignatures = fmt.Errorf(
+ "not enough of partial signatures")
+ ErrRoundAlreadyPurged = fmt.Errorf(
+ "cache of round already been purged")
+ ErrTSigNotReady = fmt.Errorf(
+ "tsig not ready")
+ ErrSelfMPKNotRegister = fmt.Errorf(
+ "self mpk not registered")
+ ErrUnableGetSelfPrvShare = fmt.Errorf(
+ "unable to get self DKG PrivateShare")
+ ErrSelfPrvShareMismatch = fmt.Errorf(
+ "self privateShare does not match mpk registered")
+)
+
+// ErrUnexpectedDKGResetCount represents receiving a DKG message with unexpected
+// DKG reset count.
+type ErrUnexpectedDKGResetCount struct {
+ expect, actual uint64
+ proposerID types.NodeID
+}
+
+func (e ErrUnexpectedDKGResetCount) Error() string {
+ return fmt.Sprintf(
+ "unexpected DKG reset count, from:%s expect:%d actual:%d",
+ e.proposerID.String()[:6], e.expect, e.actual)
+}
+
+// ErrUnexpectedRound represents receiving a DKG message with unexpected round.
+type ErrUnexpectedRound struct {
+ expect, actual uint64
+ proposerID types.NodeID
+}
+
+func (e ErrUnexpectedRound) Error() string {
+ return fmt.Sprintf("unexpected round, from:%s expect:%d actual:%d",
+ e.proposerID.String()[:6], e.expect, e.actual)
+}
+
+type dkgReceiver interface {
+ // ProposeDKGComplaint proposes a DKGComplaint.
+ ProposeDKGComplaint(complaint *typesDKG.Complaint)
+
+ // ProposeDKGMasterPublicKey proposes a DKGMasterPublicKey.
+ ProposeDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey)
+
+ // ProposeDKGPrivateShare proposes a DKGPrivateShare.
+ ProposeDKGPrivateShare(prv *typesDKG.PrivateShare)
+
+ // ProposeDKGAntiNackComplaint proposes a DKGPrivateShare as an anti complaint.
+ ProposeDKGAntiNackComplaint(prv *typesDKG.PrivateShare)
+
+ // ProposeDKGMPKReady proposes a DKGMPKReady message.
+ ProposeDKGMPKReady(ready *typesDKG.MPKReady)
+
+ // ProposeDKGFinalize proposes a DKGFinalize message.
+ ProposeDKGFinalize(final *typesDKG.Finalize)
+
+ // ProposeDKGSuccess proposes a DKGSuccess message.
+ ProposeDKGSuccess(final *typesDKG.Success)
+}
+
+type dkgProtocol struct {
+ ID types.NodeID
+ recv dkgReceiver
+ round uint64
+ reset uint64
+ threshold int
+ idMap map[types.NodeID]dkg.ID
+ mpkMap map[types.NodeID]*dkg.PublicKeyShares
+ masterPrivateShare *dkg.PrivateKeyShares
+ prvShares *dkg.PrivateKeyShares
+ prvSharesReceived map[types.NodeID]struct{}
+ nodeComplained map[types.NodeID]struct{}
+ // The anti-complaint for Complaint[from][to] is saved to antiComplaint[from][to].
+ antiComplaintReceived map[types.NodeID]map[types.NodeID]struct{}
+ // The completed step in `runDKG`.
+ step int
+}
+
+func (d *dkgProtocol) convertFromInfo(info db.DKGProtocolInfo) {
+ d.ID = info.ID
+ d.idMap = info.IDMap
+ d.round = info.Round
+ d.threshold = int(info.Threshold)
+ d.idMap = info.IDMap
+ d.mpkMap = info.MpkMap
+ d.prvSharesReceived = info.PrvSharesReceived
+ d.nodeComplained = info.NodeComplained
+ d.antiComplaintReceived = info.AntiComplaintReceived
+ d.step = int(info.Step)
+ d.reset = info.Reset
+ if info.IsMasterPrivateShareEmpty {
+ d.masterPrivateShare = nil
+ } else {
+ d.masterPrivateShare = &info.MasterPrivateShare
+ }
+
+ if info.IsPrvSharesEmpty {
+ d.prvShares = nil
+ } else {
+ d.prvShares = &info.PrvShares
+ }
+}
+
+func (d *dkgProtocol) toDKGProtocolInfo() db.DKGProtocolInfo {
+ info := db.DKGProtocolInfo{
+ ID: d.ID,
+ Round: d.round,
+ Threshold: uint64(d.threshold),
+ IDMap: d.idMap,
+ MpkMap: d.mpkMap,
+ PrvSharesReceived: d.prvSharesReceived,
+ NodeComplained: d.nodeComplained,
+ AntiComplaintReceived: d.antiComplaintReceived,
+ Step: uint64(d.step),
+ Reset: d.reset,
+ }
+
+ if d.masterPrivateShare != nil {
+ info.MasterPrivateShare = *d.masterPrivateShare
+ } else {
+ info.IsMasterPrivateShareEmpty = true
+ }
+
+ if d.prvShares != nil {
+ info.PrvShares = *d.prvShares
+ } else {
+ info.IsPrvSharesEmpty = true
+ }
+
+ return info
+}
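// Note (illustrative, not part of the vendored file): convertFromInfo and
// toDKGProtocolInfo give dkgProtocol a snapshot/restore round trip through
// db.DKGProtocolInfo, e.g.
//
//	info := d.toDKGProtocolInfo()
//	_ = coreDB.PutOrUpdateDKGProtocol(info)   // persisted; see recoverDKGProtocol
//	restored := dkgProtocol{recv: d.recv}
//	restored.convertFromInfo(info)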
+
+type dkgShareSecret struct {
+ privateKey *dkg.PrivateKey
+}
+
+// TSigVerifier is the interface verifying threshold signature.
+type TSigVerifier interface {
+ VerifySignature(hash common.Hash, sig crypto.Signature) bool
+}
+
+// TSigVerifierCacheInterface specifies interface used by TSigVerifierCache.
+type TSigVerifierCacheInterface interface {
+ // Configuration returns the configuration at a given round.
+ // Return the genesis configuration if round == 0.
+ Configuration(round uint64) *types.Config
+
+ // DKGComplaints gets all the DKGComplaints of round.
+ DKGComplaints(round uint64) []*typesDKG.Complaint
+
+ // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round.
+ DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey
+
+ // IsDKGFinal checks if DKG is final.
+ IsDKGFinal(round uint64) bool
+}
+
+// TSigVerifierCache is the cache for TSigVerifier.
+type TSigVerifierCache struct {
+ intf TSigVerifierCacheInterface
+ verifier map[uint64]TSigVerifier
+ minRound uint64
+ cacheSize int
+ lock sync.RWMutex
+}
+
+type tsigProtocol struct {
+ nodePublicKeys *typesDKG.NodePublicKeys
+ hash common.Hash
+ sigs map[dkg.ID]dkg.PartialSignature
+ threshold int
+}
+
+func newDKGProtocol(
+ ID types.NodeID,
+ recv dkgReceiver,
+ round uint64,
+ reset uint64,
+ threshold int) *dkgProtocol {
+
+ prvShare, pubShare := dkg.NewPrivateKeyShares(threshold)
+
+ recv.ProposeDKGMasterPublicKey(&typesDKG.MasterPublicKey{
+ Round: round,
+ Reset: reset,
+ DKGID: typesDKG.NewID(ID),
+ PublicKeyShares: *pubShare.Move(),
+ })
+
+ return &dkgProtocol{
+ ID: ID,
+ recv: recv,
+ round: round,
+ reset: reset,
+ threshold: threshold,
+ idMap: make(map[types.NodeID]dkg.ID),
+ mpkMap: make(map[types.NodeID]*dkg.PublicKeyShares),
+ masterPrivateShare: prvShare,
+ prvShares: dkg.NewEmptyPrivateKeyShares(),
+ prvSharesReceived: make(map[types.NodeID]struct{}),
+ nodeComplained: make(map[types.NodeID]struct{}),
+ antiComplaintReceived: make(map[types.NodeID]map[types.NodeID]struct{}),
+ }
+}
+
+func recoverDKGProtocol(
+ ID types.NodeID,
+ recv dkgReceiver,
+ round uint64,
+ reset uint64,
+ coreDB db.Database) (*dkgProtocol, error) {
+ dkgProtocolInfo, err := coreDB.GetDKGProtocol()
+ if err != nil {
+ if err == db.ErrDKGProtocolDoesNotExist {
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ dkgProtocol := dkgProtocol{
+ recv: recv,
+ }
+ dkgProtocol.convertFromInfo(dkgProtocolInfo)
+
+ if dkgProtocol.ID != ID || dkgProtocol.round != round || dkgProtocol.reset != reset {
+ return nil, nil
+ }
+
+ return &dkgProtocol, nil
+}
+
+func (d *dkgProtocol) processMasterPublicKeys(
+ mpks []*typesDKG.MasterPublicKey) (err error) {
+ d.idMap = make(map[types.NodeID]dkg.ID, len(mpks))
+ d.mpkMap = make(map[types.NodeID]*dkg.PublicKeyShares, len(mpks))
+ d.prvSharesReceived = make(map[types.NodeID]struct{}, len(mpks))
+ ids := make(dkg.IDs, len(mpks))
+ for i := range mpks {
+ if mpks[i].Reset != d.reset {
+ return ErrUnexpectedDKGResetCount{
+ expect: d.reset,
+ actual: mpks[i].Reset,
+ proposerID: mpks[i].ProposerID,
+ }
+ }
+ nID := mpks[i].ProposerID
+ d.idMap[nID] = mpks[i].DKGID
+ d.mpkMap[nID] = &mpks[i].PublicKeyShares
+ ids[i] = mpks[i].DKGID
+ }
+ d.masterPrivateShare.SetParticipants(ids)
+ if err = d.verifySelfPrvShare(); err != nil {
+ return
+ }
+ for _, mpk := range mpks {
+ share, ok := d.masterPrivateShare.Share(mpk.DKGID)
+ if !ok {
+ err = ErrIDShareNotFound
+ continue
+ }
+ d.recv.ProposeDKGPrivateShare(&typesDKG.PrivateShare{
+ ReceiverID: mpk.ProposerID,
+ Round: d.round,
+ Reset: d.reset,
+ PrivateShare: *share,
+ })
+ }
+ return
+}
+
+func (d *dkgProtocol) verifySelfPrvShare() error {
+ selfMPK, exist := d.mpkMap[d.ID]
+ if !exist {
+ return ErrSelfMPKNotRegister
+ }
+ share, ok := d.masterPrivateShare.Share(d.idMap[d.ID])
+ if !ok {
+ return ErrUnableGetSelfPrvShare
+ }
+ ok, err := selfMPK.VerifyPrvShare(
+ d.idMap[d.ID], share)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return ErrSelfPrvShareMismatch
+ }
+ return nil
+}
+
+func (d *dkgProtocol) proposeNackComplaints() {
+ for nID := range d.mpkMap {
+ if _, exist := d.prvSharesReceived[nID]; exist {
+ continue
+ }
+ d.recv.ProposeDKGComplaint(&typesDKG.Complaint{
+ Round: d.round,
+ Reset: d.reset,
+ PrivateShare: typesDKG.PrivateShare{
+ ProposerID: nID,
+ Round: d.round,
+ Reset: d.reset,
+ },
+ })
+ }
+}
+
+func (d *dkgProtocol) processNackComplaints(complaints []*typesDKG.Complaint) (
+ err error) {
+ if err = d.verifySelfPrvShare(); err != nil {
+ return
+ }
+ for _, complaint := range complaints {
+ if !complaint.IsNack() {
+ continue
+ }
+ if complaint.Reset != d.reset {
+ continue
+ }
+ if complaint.PrivateShare.ProposerID != d.ID {
+ continue
+ }
+ id, exist := d.idMap[complaint.ProposerID]
+ if !exist {
+ err = ErrNotDKGParticipant
+ continue
+ }
+ share, ok := d.masterPrivateShare.Share(id)
+ if !ok {
+ err = ErrIDShareNotFound
+ continue
+ }
+ d.recv.ProposeDKGAntiNackComplaint(&typesDKG.PrivateShare{
+ ProposerID: d.ID,
+ ReceiverID: complaint.ProposerID,
+ Round: d.round,
+ Reset: d.reset,
+ PrivateShare: *share,
+ })
+ }
+ return
+}
+
+func (d *dkgProtocol) enforceNackComplaints(complaints []*typesDKG.Complaint) {
+ complained := make(map[types.NodeID]struct{})
+ // Do not propose nack complaint to itself.
+ complained[d.ID] = struct{}{}
+ for _, complaint := range complaints {
+ if d.round != complaint.Round || d.reset != complaint.Reset {
+ continue
+ }
+ if !complaint.IsNack() {
+ continue
+ }
+ if complaint.Reset != d.reset {
+ continue
+ }
+ to := complaint.PrivateShare.ProposerID
+ if _, exist := complained[to]; exist {
+ continue
+ }
+ from := complaint.ProposerID
+ // Nack complaint is already proposed.
+ if from == d.ID {
+ continue
+ }
+ if _, exist :=
+ d.antiComplaintReceived[from][to]; !exist {
+ complained[to] = struct{}{}
+ d.recv.ProposeDKGComplaint(&typesDKG.Complaint{
+ Round: d.round,
+ Reset: d.reset,
+ PrivateShare: typesDKG.PrivateShare{
+ ProposerID: to,
+ Round: d.round,
+ Reset: d.reset,
+ },
+ })
+ }
+ }
+}
+
+func (d *dkgProtocol) sanityCheck(prvShare *typesDKG.PrivateShare) error {
+ if d.round != prvShare.Round {
+ return ErrUnexpectedRound{
+ expect: d.round,
+ actual: prvShare.Round,
+ proposerID: prvShare.ProposerID,
+ }
+ }
+ if d.reset != prvShare.Reset {
+ return ErrUnexpectedDKGResetCount{
+ expect: d.reset,
+ actual: prvShare.Reset,
+ proposerID: prvShare.ProposerID,
+ }
+ }
+ if _, exist := d.idMap[prvShare.ProposerID]; !exist {
+ return ErrNotDKGParticipant
+ }
+ ok, err := utils.VerifyDKGPrivateShareSignature(prvShare)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return ErrIncorrectPrivateShareSignature
+ }
+ return nil
+}
+
+func (d *dkgProtocol) processPrivateShare(
+ prvShare *typesDKG.PrivateShare) error {
+ receiverID, exist := d.idMap[prvShare.ReceiverID]
+ // This node is not a DKG participant, ignore the private share.
+ if !exist {
+ return nil
+ }
+ if prvShare.ReceiverID == d.ID {
+ if _, exist := d.prvSharesReceived[prvShare.ProposerID]; exist {
+ return nil
+ }
+ } else {
+ if _, exist := d.antiComplaintReceived[prvShare.ReceiverID]; exist {
+ if _, exist :=
+ d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID]; exist {
+ return nil
+ }
+ }
+ }
+ if err := d.sanityCheck(prvShare); err != nil {
+ return err
+ }
+ mpk := d.mpkMap[prvShare.ProposerID]
+ ok, err := mpk.VerifyPrvShare(receiverID, &prvShare.PrivateShare)
+ if err != nil {
+ return err
+ }
+ if prvShare.ReceiverID == d.ID {
+ d.prvSharesReceived[prvShare.ProposerID] = struct{}{}
+ }
+ if !ok {
+ if _, exist := d.nodeComplained[prvShare.ProposerID]; exist {
+ return nil
+ }
+ complaint := &typesDKG.Complaint{
+ Round: d.round,
+ Reset: d.reset,
+ PrivateShare: *prvShare,
+ }
+ d.nodeComplained[prvShare.ProposerID] = struct{}{}
+ d.recv.ProposeDKGComplaint(complaint)
+ } else if prvShare.ReceiverID == d.ID {
+ sender := d.idMap[prvShare.ProposerID]
+ if err := d.prvShares.AddShare(sender, &prvShare.PrivateShare); err != nil {
+ return err
+ }
+ } else {
+ // The prvShare is an anti complaint.
+ if _, exist := d.antiComplaintReceived[prvShare.ReceiverID]; !exist {
+ d.antiComplaintReceived[prvShare.ReceiverID] =
+ make(map[types.NodeID]struct{})
+ }
+ if _, exist :=
+ d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID]; !exist {
+ d.recv.ProposeDKGAntiNackComplaint(prvShare)
+ d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID] =
+ struct{}{}
+ }
+ }
+ return nil
+}
+
+func (d *dkgProtocol) proposeMPKReady() {
+ d.recv.ProposeDKGMPKReady(&typesDKG.MPKReady{
+ ProposerID: d.ID,
+ Round: d.round,
+ Reset: d.reset,
+ })
+}
+
+func (d *dkgProtocol) proposeFinalize() {
+ d.recv.ProposeDKGFinalize(&typesDKG.Finalize{
+ ProposerID: d.ID,
+ Round: d.round,
+ Reset: d.reset,
+ })
+}
+
+func (d *dkgProtocol) proposeSuccess() {
+ d.recv.ProposeDKGSuccess(&typesDKG.Success{
+ ProposerID: d.ID,
+ Round: d.round,
+ Reset: d.reset,
+ })
+}
+
+func (d *dkgProtocol) recoverShareSecret(qualifyIDs dkg.IDs) (
+ *dkgShareSecret, error) {
+ if len(qualifyIDs) < d.threshold {
+ return nil, typesDKG.ErrNotReachThreshold
+ }
+ prvKey, err := d.prvShares.RecoverPrivateKey(qualifyIDs)
+ if err != nil {
+ return nil, err
+ }
+ return &dkgShareSecret{
+ privateKey: prvKey,
+ }, nil
+}
+
+func (ss *dkgShareSecret) sign(hash common.Hash) dkg.PartialSignature {
+ // DKG sign always succeeds.
+ sig, _ := ss.privateKey.Sign(hash)
+ return dkg.PartialSignature(sig)
+}
+
+// NewTSigVerifierCache creates a TSigVerifierCache instance.
+func NewTSigVerifierCache(
+ intf TSigVerifierCacheInterface, cacheSize int) *TSigVerifierCache {
+ return &TSigVerifierCache{
+ intf: intf,
+ verifier: make(map[uint64]TSigVerifier),
+ cacheSize: cacheSize,
+ }
+}
+
+// UpdateAndGet calls Update and then Get.
+func (tc *TSigVerifierCache) UpdateAndGet(round uint64) (
+ TSigVerifier, bool, error) {
+ ok, err := tc.Update(round)
+ if err != nil {
+ return nil, false, err
+ }
+ if !ok {
+ return nil, false, nil
+ }
+ v, ok := tc.Get(round)
+ return v, ok, nil
+}
+
+// Purge the cache.
+func (tc *TSigVerifierCache) Purge(round uint64) {
+ tc.lock.Lock()
+ defer tc.lock.Unlock()
+ delete(tc.verifier, round)
+}
+
+// Update updates the cache and reports whether it succeeded.
+func (tc *TSigVerifierCache) Update(round uint64) (bool, error) {
+ tc.lock.Lock()
+ defer tc.lock.Unlock()
+ if round < tc.minRound {
+ return false, ErrRoundAlreadyPurged
+ }
+ if _, exist := tc.verifier[round]; exist {
+ return true, nil
+ }
+ if !tc.intf.IsDKGFinal(round) {
+ return false, nil
+ }
+ gpk, err := typesDKG.NewGroupPublicKey(round,
+ tc.intf.DKGMasterPublicKeys(round),
+ tc.intf.DKGComplaints(round),
+ utils.GetDKGThreshold(utils.GetConfigWithPanic(tc.intf, round, nil)))
+ if err != nil {
+ return false, err
+ }
+ if len(tc.verifier) == 0 {
+ tc.minRound = round
+ }
+ tc.verifier[round] = gpk
+ if len(tc.verifier) > tc.cacheSize {
+ delete(tc.verifier, tc.minRound)
+ }
+ for {
+ if _, exist := tc.verifier[tc.minRound]; !exist {
+ tc.minRound++
+ } else {
+ break
+ }
+ }
+ return true, nil
+}
+
+// Delete deletes the cached verifier of the given round.
+func (tc *TSigVerifierCache) Delete(round uint64) {
+ tc.lock.Lock()
+ defer tc.lock.Unlock()
+ delete(tc.verifier, round)
+}
+
+// Get returns the TSigVerifier of the round and whether it exists.
+func (tc *TSigVerifierCache) Get(round uint64) (TSigVerifier, bool) {
+ tc.lock.RLock()
+ defer tc.lock.RUnlock()
+ verifier, exist := tc.verifier[round]
+ return verifier, exist
+}
+
+func newTSigProtocol(
+ npks *typesDKG.NodePublicKeys,
+ hash common.Hash) *tsigProtocol {
+ return &tsigProtocol{
+ nodePublicKeys: npks,
+ hash: hash,
+ sigs: make(map[dkg.ID]dkg.PartialSignature, npks.Threshold+1),
+ }
+}
+
+func (tsig *tsigProtocol) sanityCheck(psig *typesDKG.PartialSignature) error {
+ _, exist := tsig.nodePublicKeys.PublicKeys[psig.ProposerID]
+ if !exist {
+ return ErrNotQualifyDKGParticipant
+ }
+ ok, err := utils.VerifyDKGPartialSignatureSignature(psig)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return ErrIncorrectPartialSignatureSignature
+ }
+ if psig.Hash != tsig.hash {
+ return ErrMismatchPartialSignatureHash
+ }
+ return nil
+}
+
+func (tsig *tsigProtocol) processPartialSignature(
+ psig *typesDKG.PartialSignature) error {
+ if psig.Round != tsig.nodePublicKeys.Round {
+ return nil
+ }
+ id, exist := tsig.nodePublicKeys.IDMap[psig.ProposerID]
+ if !exist {
+ return ErrNotQualifyDKGParticipant
+ }
+ if err := tsig.sanityCheck(psig); err != nil {
+ return err
+ }
+ pubKey := tsig.nodePublicKeys.PublicKeys[psig.ProposerID]
+ if !pubKey.VerifySignature(
+ tsig.hash, crypto.Signature(psig.PartialSignature)) {
+ return ErrIncorrectPartialSignature
+ }
+ tsig.sigs[id] = psig.PartialSignature
+ return nil
+}
+
+func (tsig *tsigProtocol) signature() (crypto.Signature, error) {
+ if len(tsig.sigs) < tsig.nodePublicKeys.Threshold {
+ return crypto.Signature{}, ErrNotEnoughtPartialSignatures
+ }
+ ids := make(dkg.IDs, 0, len(tsig.sigs))
+ psigs := make([]dkg.PartialSignature, 0, len(tsig.sigs))
+ for id, psig := range tsig.sigs {
+ ids = append(ids, id)
+ psigs = append(psigs, psig)
+ }
+ return dkg.RecoverSignature(psigs, ids)
+}
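// Illustrative sketch (not part of the vendored file): how callers are
// expected to combine TSigVerifierCache with a recovered threshold signature
// for a given round.
func exampleVerifyTSig(cache *TSigVerifierCache, round uint64,
	hash common.Hash, sig crypto.Signature) (bool, error) {
	v, ok, err := cache.UpdateAndGet(round)
	if err != nil {
		return false, err
	}
	if !ok {
		// DKG for this round is not final yet (or the round was purged).
		return false, nil
	}
	return v.VerifySignature(hash, sig), nil
}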
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/interfaces.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/interfaces.go
new file mode 100644
index 000000000..3adcf78c9
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/interfaces.go
@@ -0,0 +1,182 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "time"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+// Application describes the application interface that interacts with DEXON
+// consensus core.
+type Application interface {
+ // PreparePayload is called when consensus core is preparing a block.
+ PreparePayload(position types.Position) ([]byte, error)
+
+ // PrepareWitness will return the witness data no lower than consensusHeight.
+ PrepareWitness(consensusHeight uint64) (types.Witness, error)
+
+ // VerifyBlock verifies if the block is valid.
+ VerifyBlock(block *types.Block) types.BlockVerifyStatus
+
+ // BlockConfirmed is called when a block is confirmed and added to lattice.
+ BlockConfirmed(block types.Block)
+
+ // BlockDelivered is called when a block is added to the compaction chain.
+ BlockDelivered(hash common.Hash, position types.Position, rand []byte)
+}
+
+// Debug describes the application interface that requires more detailed
+// information about consensus execution.
+type Debug interface {
+ // BlockReceived is called when a block is received in agreement.
+ BlockReceived(common.Hash)
+ // BlockReady is called when the block's randomness is ready.
+ BlockReady(common.Hash)
+}
+
+// Network describes the network interface that interacts with DEXON consensus
+// core.
+type Network interface {
+ // PullBlocks tries to pull blocks from the DEXON network.
+ PullBlocks(hashes common.Hashes)
+
+ // PullVotes tries to pull votes from the DEXON network.
+ PullVotes(position types.Position)
+
+ // BroadcastVote broadcasts vote to all nodes in DEXON network.
+ BroadcastVote(vote *types.Vote)
+
+ // BroadcastBlock broadcasts block to all nodes in DEXON network.
+ BroadcastBlock(block *types.Block)
+
+ // BroadcastAgreementResult broadcasts agreement result to DKG set.
+ BroadcastAgreementResult(randRequest *types.AgreementResult)
+
+ // SendDKGPrivateShare sends PrivateShare to a DKG participant.
+ SendDKGPrivateShare(pub crypto.PublicKey, prvShare *typesDKG.PrivateShare)
+
+ // BroadcastDKGPrivateShare broadcasts PrivateShare to all DKG participants.
+ BroadcastDKGPrivateShare(prvShare *typesDKG.PrivateShare)
+
+ // BroadcastDKGPartialSignature broadcasts partialSignature to all
+ // DKG participants.
+ BroadcastDKGPartialSignature(psig *typesDKG.PartialSignature)
+
+ // ReceiveChan returns a channel to receive messages from DEXON network.
+ ReceiveChan() <-chan types.Msg
+
+ // ReportBadPeerChan returns a channel to report bad peer.
+ ReportBadPeerChan() chan<- interface{}
+}
+
+// Governance specifies the interface to control the governance contract.
+// Note that the governance contract has many more methods; this interface
+// only defines those required to run the consensus algorithm.
+type Governance interface {
+ // Configuration returns the configuration at a given round.
+ // Return the genesis configuration if round == 0.
+ Configuration(round uint64) *types.Config
+
+ // CRS returns the CRS for a given round. Return the genesis CRS if
+ // round == 0.
+ //
+ // The CRS returned is the proposed or latest reset one; it may be changed
+ // later if the corresponding DKG set fails to generate the group public
+ // key.
+ CRS(round uint64) common.Hash
+
+ // ProposeCRS proposes a CRS for the given round.
+ ProposeCRS(round uint64, signedCRS []byte)
+
+ // NodeSet returns the node set at a given round.
+ // Return the genesis node set if round == 0.
+ NodeSet(round uint64) []crypto.PublicKey
+
+ // GetRoundHeight returns the height at which the given round begins.
+ GetRoundHeight(round uint64) uint64
+
+ //// DKG-related methods.
+
+ // AddDKGComplaint adds a DKGComplaint.
+ AddDKGComplaint(complaint *typesDKG.Complaint)
+
+ // DKGComplaints gets all the DKGComplaints of a round.
+ DKGComplaints(round uint64) []*typesDKG.Complaint
+
+ // AddDKGMasterPublicKey adds a DKGMasterPublicKey.
+ AddDKGMasterPublicKey(masterPublicKey *typesDKG.MasterPublicKey)
+
+ // DKGMasterPublicKeys gets all the DKGMasterPublicKeys of a round.
+ DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey
+
+ // AddDKGMPKReady adds a DKG ready message.
+ AddDKGMPKReady(ready *typesDKG.MPKReady)
+
+ // IsDKGMPKReady checks if DKG's master public key preparation is ready.
+ IsDKGMPKReady(round uint64) bool
+
+ // AddDKGFinalize adds a DKG finalize message.
+ AddDKGFinalize(final *typesDKG.Finalize)
+
+ // IsDKGFinal checks if DKG is final.
+ IsDKGFinal(round uint64) bool
+
+ // AddDKGSuccess adds a DKG success message.
+ AddDKGSuccess(success *typesDKG.Success)
+
+ // IsDKGSuccess checks if DKG was successful.
+ IsDKGSuccess(round uint64) bool
+
+ // ReportForkVote reports a node for forking votes.
+ ReportForkVote(vote1, vote2 *types.Vote)
+
+ // ReportForkBlock reports a node for forking blocks.
+ ReportForkBlock(block1, block2 *types.Block)
+
+ // ResetDKG resets the latest DKG data and proposes a new CRS.
+ ResetDKG(newSignedCRS []byte)
+
+ // DKGResetCount returns the reset count for the DKG of a given round.
+ DKGResetCount(round uint64) uint64
+}
+
+// Ticker define the capability to tick by interval.
+type Ticker interface {
+ // Tick returns a channel that is triggered on the next tick.
+ Tick() <-chan time.Time
+
+ // Stop the ticker.
+ Stop()
+
+ // Restart the ticker and clear all internal data.
+ Restart()
+}
+
+// Recovery interface for interacting with recovery information.
+type Recovery interface {
+ // ProposeSkipBlock proposes a skip block.
+ ProposeSkipBlock(height uint64) error
+
+ // Votes gets the number of votes of given height.
+ Votes(height uint64) (uint64, error)
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/leader-selector.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/leader-selector.go
new file mode 100644
index 000000000..9e3d406a7
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/leader-selector.go
@@ -0,0 +1,149 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "math/big"
+ "sync"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+type validLeaderFn func(block *types.Block, crs common.Hash) (bool, error)
+
+// Some constant value.
+var (
+ maxHash *big.Int
+ one *big.Rat
+)
+
+func init() {
+ hash := make([]byte, common.HashLength)
+ for i := range hash {
+ hash[i] = 0xff
+ }
+ maxHash = big.NewInt(0).SetBytes(hash)
+ one = big.NewRat(1, 1)
+}
+
+type leaderSelector struct {
+ hashCRS common.Hash
+ numCRS *big.Int
+ minCRSBlock *big.Int
+ minBlockHash common.Hash
+ pendingBlocks map[common.Hash]*types.Block
+ validLeader validLeaderFn
+ lock sync.Mutex
+ logger common.Logger
+}
+
+func newLeaderSelector(
+ validLeader validLeaderFn, logger common.Logger) *leaderSelector {
+ return &leaderSelector{
+ minCRSBlock: maxHash,
+ validLeader: validLeader,
+ logger: logger,
+ }
+}
+
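+// distance returns |Keccak256(sig) - CRS| as a big integer; the block whose
+// CRS signature hashes closest to the CRS wins the leader election.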
+func (l *leaderSelector) distance(sig crypto.Signature) *big.Int {
+ hash := crypto.Keccak256Hash(sig.Signature[:])
+ num := big.NewInt(0)
+ num.SetBytes(hash[:])
+ num.Abs(num.Sub(l.numCRS, num))
+ return num
+}
+
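+// probability maps a CRS signature to a value in (0, 1]: 1 - distance/maxHash,
+// so signatures closer to the CRS get a higher probability.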
+func (l *leaderSelector) probability(sig crypto.Signature) float64 {
+ dis := l.distance(sig)
+ prob := big.NewRat(1, 1).SetFrac(dis, maxHash)
+ p, _ := prob.Sub(one, prob).Float64()
+ return p
+}
+
+func (l *leaderSelector) restart(crs common.Hash) {
+ numCRS := big.NewInt(0)
+ numCRS.SetBytes(crs[:])
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ l.numCRS = numCRS
+ l.hashCRS = crs
+ l.minCRSBlock = maxHash
+ l.minBlockHash = types.NullBlockHash
+ l.pendingBlocks = make(map[common.Hash]*types.Block)
+}
+
+func (l *leaderSelector) leaderBlockHash() common.Hash {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ for _, b := range l.pendingBlocks {
+ ok, dist := l.potentialLeader(b)
+ if !ok {
+ continue
+ }
+ ok, err := l.validLeader(b, l.hashCRS)
+ if err != nil {
+ l.logger.Error("Error checking validLeader", "error", err, "block", b)
+ delete(l.pendingBlocks, b.Hash)
+ continue
+ }
+ if ok {
+ l.updateLeader(b, dist)
+ delete(l.pendingBlocks, b.Hash)
+ }
+ }
+ return l.minBlockHash
+}
+
+func (l *leaderSelector) processBlock(block *types.Block) error {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ ok, dist := l.potentialLeader(block)
+ if !ok {
+ return nil
+ }
+ ok, err := l.validLeader(block, l.hashCRS)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ l.pendingBlocks[block.Hash] = block
+ return nil
+ }
+ l.updateLeader(block, dist)
+ return nil
+}
+
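+// potentialLeader reports whether the block beats the current minimum
+// distance (ties broken by the smaller block hash) and returns its distance.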
+func (l *leaderSelector) potentialLeader(block *types.Block) (bool, *big.Int) {
+ dist := l.distance(block.CRSSignature)
+ cmp := l.minCRSBlock.Cmp(dist)
+ return (cmp > 0 || (cmp == 0 && block.Hash.Less(l.minBlockHash))), dist
+}
+
+func (l *leaderSelector) updateLeader(block *types.Block, dist *big.Int) {
+ l.minCRSBlock = dist
+ l.minBlockHash = block.Hash
+}
+
+func (l *leaderSelector) findPendingBlock(
+ hash common.Hash) (*types.Block, bool) {
+ b, e := l.pendingBlocks[hash]
+ return b, e
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/nonblocking.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/nonblocking.go
new file mode 100644
index 000000000..516138a63
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/nonblocking.go
@@ -0,0 +1,137 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+type blockConfirmedEvent struct {
+ block *types.Block
+}
+
+type blockDeliveredEvent struct {
+ blockHash common.Hash
+ blockPosition types.Position
+ rand []byte
+}
+
+// nonBlocking is a decorator for the following interfaces, making their
+// methods non-blocking:
+// - Application
+// - Debug
+// It also provides non-blocking db updates.
+type nonBlocking struct {
+ app Application
+ debug Debug
+ eventChan chan interface{}
+ events []interface{}
+ eventsChange *sync.Cond
+ running sync.WaitGroup
+}
+
+func newNonBlocking(app Application, debug Debug) *nonBlocking {
+ nonBlockingModule := &nonBlocking{
+ app: app,
+ debug: debug,
+ eventChan: make(chan interface{}, 6),
+ events: make([]interface{}, 0, 100),
+ eventsChange: sync.NewCond(&sync.Mutex{}),
+ }
+ go nonBlockingModule.run()
+ return nonBlockingModule
+}
+
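+// addEvent appends an event to the queue and wakes up the consumer
+// goroutine started in newNonBlocking.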
+func (nb *nonBlocking) addEvent(event interface{}) {
+ nb.eventsChange.L.Lock()
+ defer nb.eventsChange.L.Unlock()
+ nb.events = append(nb.events, event)
+ nb.eventsChange.Broadcast()
+}
+
+func (nb *nonBlocking) run() {
+ // This goroutine consumes the first event from events and calls the
+ // corresponding methods of Application/Debug/db.
+ for {
+ var event interface{}
+ func() {
+ nb.eventsChange.L.Lock()
+ defer nb.eventsChange.L.Unlock()
+ for len(nb.events) == 0 {
+ nb.eventsChange.Wait()
+ }
+ event = nb.events[0]
+ nb.events = nb.events[1:]
+ nb.running.Add(1)
+ }()
+ switch e := event.(type) {
+ case blockConfirmedEvent:
+ nb.app.BlockConfirmed(*e.block)
+ case blockDeliveredEvent:
+ nb.app.BlockDelivered(e.blockHash, e.blockPosition, e.rand)
+ default:
+ fmt.Printf("Unknown event %v.", e)
+ }
+ nb.running.Done()
+ nb.eventsChange.Broadcast()
+ }
+}
+
+// wait blocks until all events in the queue have been processed.
+func (nb *nonBlocking) wait() {
+ nb.eventsChange.L.Lock()
+ defer nb.eventsChange.L.Unlock()
+ for len(nb.events) > 0 {
+ nb.eventsChange.Wait()
+ }
+ nb.running.Wait()
+}
+
+// PreparePayload cannot be non-blocking.
+func (nb *nonBlocking) PreparePayload(position types.Position) ([]byte, error) {
+ return nb.app.PreparePayload(position)
+}
+
+// PrepareWitness cannot be non-blocking.
+func (nb *nonBlocking) PrepareWitness(height uint64) (types.Witness, error) {
+ return nb.app.PrepareWitness(height)
+}
+
+// VerifyBlock cannot be non-blocking.
+func (nb *nonBlocking) VerifyBlock(block *types.Block) types.BlockVerifyStatus {
+ return nb.app.VerifyBlock(block)
+}
+
+// BlockConfirmed is called when a block is confirmed and added to the lattice.
+func (nb *nonBlocking) BlockConfirmed(block types.Block) {
+ nb.addEvent(blockConfirmedEvent{&block})
+}
+
+// BlockDelivered is called when a block is added to the compaction chain.
+func (nb *nonBlocking) BlockDelivered(blockHash common.Hash,
+ blockPosition types.Position, rand []byte) {
+ nb.addEvent(blockDeliveredEvent{
+ blockHash: blockHash,
+ blockPosition: blockPosition,
+ rand: rand,
+ })
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/agreement.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/agreement.go
new file mode 100644
index 000000000..274cbfc79
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/agreement.go
@@ -0,0 +1,301 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package syncer
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+// agreement implements the part of the BA (Byzantine Agreement) protocol
+// needed in the syncer, which only receives agreement results.
+type agreement struct {
+ chainTip uint64
+ cache *utils.NodeSetCache
+ tsigVerifierCache *core.TSigVerifierCache
+ inputChan chan interface{}
+ outputChan chan<- *types.Block
+ pullChan chan<- common.Hash
+ blocks map[types.Position]map[common.Hash]*types.Block
+ agreementResults map[common.Hash][]byte
+ latestCRSRound uint64
+ pendingAgrs map[uint64]map[common.Hash]*types.AgreementResult
+ pendingBlocks map[uint64]map[common.Hash]*types.Block
+ logger common.Logger
+ confirmedBlocks map[common.Hash]struct{}
+ ctx context.Context
+ ctxCancel context.CancelFunc
+}
+
+// newAgreement creates a new agreement instance.
+func newAgreement(chainTip uint64,
+ ch chan<- *types.Block, pullChan chan<- common.Hash,
+ cache *utils.NodeSetCache, verifier *core.TSigVerifierCache,
+ logger common.Logger) *agreement {
+ a := &agreement{
+ chainTip: chainTip,
+ cache: cache,
+ tsigVerifierCache: verifier,
+ inputChan: make(chan interface{}, 1000),
+ outputChan: ch,
+ pullChan: pullChan,
+ blocks: make(map[types.Position]map[common.Hash]*types.Block),
+ agreementResults: make(map[common.Hash][]byte),
+ logger: logger,
+ pendingAgrs: make(
+ map[uint64]map[common.Hash]*types.AgreementResult),
+ pendingBlocks: make(
+ map[uint64]map[common.Hash]*types.Block),
+ confirmedBlocks: make(map[common.Hash]struct{}),
+ }
+ a.ctx, a.ctxCancel = context.WithCancel(context.Background())
+ return a
+}
+
+// run starts the agreement. It does not spawn a new goroutine; the caller
+// must run it in a goroutine explicitly.
+func (a *agreement) run() {
+ defer a.ctxCancel()
+ for {
+ select {
+ case val, ok := <-a.inputChan:
+ if !ok {
+ // InputChan is closed by network when network ends.
+ return
+ }
+ switch v := val.(type) {
+ case *types.Block:
+ if v.Position.Round >= core.DKGDelayRound && v.IsFinalized() {
+ a.processFinalizedBlock(v)
+ } else {
+ a.processBlock(v)
+ }
+ case *types.AgreementResult:
+ a.processAgreementResult(v)
+ case uint64:
+ a.processNewCRS(v)
+ }
+ }
+ }
+}
+
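+// processBlock confirms a block right away when its agreement result has
+// already arrived; otherwise the block is buffered until the result shows up.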
+func (a *agreement) processBlock(b *types.Block) {
+ if _, exist := a.confirmedBlocks[b.Hash]; exist {
+ return
+ }
+ if rand, exist := a.agreementResults[b.Hash]; exist {
+ if len(b.Randomness) == 0 {
+ b.Randomness = rand
+ }
+ a.confirm(b)
+ } else {
+ if _, exist := a.blocks[b.Position]; !exist {
+ a.blocks[b.Position] = make(map[common.Hash]*types.Block)
+ }
+ a.blocks[b.Position][b.Hash] = b
+ }
+}
+
+func (a *agreement) processFinalizedBlock(block *types.Block) {
+ // Cache finalized blocks whose round's CRS is not ready yet.
+ if _, exists := a.confirmedBlocks[block.Hash]; exists {
+ a.logger.Trace("finalized block already confirmed", "block", block)
+ return
+ }
+ if block.Position.Round > a.latestCRSRound {
+ pendingsForRound, exists := a.pendingBlocks[block.Position.Round]
+ if !exists {
+ pendingsForRound = make(map[common.Hash]*types.Block)
+ a.pendingBlocks[block.Position.Round] = pendingsForRound
+ }
+ pendingsForRound[block.Hash] = block
+ a.logger.Trace("finalized block cached", "block", block)
+ return
+ }
+ if err := utils.VerifyBlockSignature(block); err != nil {
+ return
+ }
+ verifier, ok, err := a.tsigVerifierCache.UpdateAndGet(
+ block.Position.Round)
+ if err != nil {
+ a.logger.Error("error verifying block randomness",
+ "block", block,
+ "error", err)
+ return
+ }
+ if !ok {
+ a.logger.Error("cannot verify block randomness", "block", block)
+ return
+ }
+ if !verifier.VerifySignature(block.Hash, crypto.Signature{
+ Type: "bls",
+ Signature: block.Randomness,
+ }) {
+ a.logger.Error("incorrect block randomness", "block", block)
+ return
+ }
+ a.confirm(block)
+}
+
+func (a *agreement) processAgreementResult(r *types.AgreementResult) {
+ // Cache agreement results whose round's CRS is not ready yet.
+ if _, exists := a.confirmedBlocks[r.BlockHash]; exists {
+ a.logger.Trace("Agreement result already confirmed", "result", r)
+ return
+ }
+ if r.Position.Round > a.latestCRSRound {
+ pendingsForRound, exists := a.pendingAgrs[r.Position.Round]
+ if !exists {
+ pendingsForRound = make(map[common.Hash]*types.AgreementResult)
+ a.pendingAgrs[r.Position.Round] = pendingsForRound
+ }
+ pendingsForRound[r.BlockHash] = r
+ a.logger.Trace("Agreement result cached", "result", r)
+ return
+ }
+ if err := core.VerifyAgreementResult(r, a.cache); err != nil {
+ a.logger.Error("Agreement result verification failed",
+ "result", r,
+ "error", err)
+ return
+ }
+ if r.Position.Round >= core.DKGDelayRound {
+ verifier, ok, err := a.tsigVerifierCache.UpdateAndGet(r.Position.Round)
+ if err != nil {
+ a.logger.Error("error verifying agreement result randomness",
+ "result", r,
+ "error", err)
+ return
+ }
+ if !ok {
+ a.logger.Error("cannot verify agreement result randomness", "result", r)
+ return
+ }
+ if !verifier.VerifySignature(r.BlockHash, crypto.Signature{
+ Type: "bls",
+ Signature: r.Randomness,
+ }) {
+ a.logger.Error("incorrect agreement result randomness", "result", r)
+ return
+ }
+ } else {
+ // Special case for rounds before DKGDelayRound.
+ if bytes.Compare(r.Randomness, core.NoRand) != 0 {
+ a.logger.Error("incorrect agreement result randomness", "result", r)
+ return
+ }
+ }
+ if r.IsEmptyBlock {
+ b := &types.Block{
+ Position: r.Position,
+ Randomness: r.Randomness,
+ }
+ // Empty blocks should be confirmed directly; they won't be sent over
+ // the wire.
+ a.confirm(b)
+ return
+ }
+ if bs, exist := a.blocks[r.Position]; exist {
+ if b, exist := bs[r.BlockHash]; exist {
+ b.Randomness = r.Randomness
+ a.confirm(b)
+ return
+ }
+ }
+ a.agreementResults[r.BlockHash] = r.Randomness
+loop:
+ for {
+ select {
+ case a.pullChan <- r.BlockHash:
+ break loop
+ case <-a.ctx.Done():
+ a.logger.Error("Pull request is not sent",
+ "position", &r.Position,
+ "hash", r.BlockHash.String()[:6])
+ return
+ case <-time.After(500 * time.Millisecond):
+ a.logger.Debug("Pull request is unable to send",
+ "position", &r.Position,
+ "hash", r.BlockHash.String()[:6])
+ }
+ }
+}
+
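+// processNewCRS flushes agreement results that were cached while the CRS of
+// their round was not yet available.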
+func (a *agreement) processNewCRS(round uint64) {
+ if round <= a.latestCRSRound {
+ return
+ }
+ prevRound := a.latestCRSRound + 1
+ a.latestCRSRound = round
+ // Verify all pending results.
+ for r := prevRound; r <= a.latestCRSRound; r++ {
+ pendingsForRound := a.pendingAgrs[r]
+ if pendingsForRound == nil {
+ continue
+ }
+ delete(a.pendingAgrs, r)
+ for _, res := range pendingsForRound {
+ if err := core.VerifyAgreementResult(res, a.cache); err != nil {
+ a.logger.Error("Invalid agreement result",
+ "result", res,
+ "error", err)
+ continue
+ }
+ a.logger.Error("Flush agreement result", "result", res)
+ a.processAgreementResult(res)
+ break
+ }
+ }
+}
+
+// confirm notifies consensus of the confirmation of a block in BA.
+func (a *agreement) confirm(b *types.Block) {
+ if !b.IsFinalized() {
+ panic(fmt.Errorf("confirm a block %s without randomness", b))
+ }
+ if _, exist := a.confirmedBlocks[b.Hash]; !exist {
+ delete(a.blocks, b.Position)
+ delete(a.agreementResults, b.Hash)
+ loop:
+ for {
+ select {
+ case a.outputChan <- b:
+ break loop
+ case <-a.ctx.Done():
+ a.logger.Error("Confirmed block is not sent", "block", b)
+ return
+ case <-time.After(500 * time.Millisecond):
+ a.logger.Debug("Agreement output channel is full", "block", b)
+ }
+ }
+ a.confirmedBlocks[b.Hash] = struct{}{}
+ }
+ if b.Position.Height > a.chainTip+1 {
+ if _, exist := a.confirmedBlocks[b.ParentHash]; !exist {
+ a.pullChan <- b.ParentHash
+ }
+ }
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/consensus.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/consensus.go
new file mode 100644
index 000000000..d12dc4863
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/consensus.go
@@ -0,0 +1,543 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package syncer
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/db"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+var (
+ // ErrAlreadySynced is reported when syncer is synced.
+ ErrAlreadySynced = fmt.Errorf("already synced")
+ // ErrNotSynced is reported when syncer is not synced yet.
+ ErrNotSynced = fmt.Errorf("not synced yet")
+ // ErrGenesisBlockReached is reported when genesis block reached.
+ ErrGenesisBlockReached = fmt.Errorf("genesis block reached")
+ // ErrInvalidBlockOrder is reported when SyncBlocks receives unordered
+ // blocks.
+ ErrInvalidBlockOrder = fmt.Errorf("invalid block order")
+ // ErrInvalidSyncingHeight is raised when the blocks to sync do not follow
+ // the compaction chain tip in the database.
+ ErrInvalidSyncingHeight = fmt.Errorf("invalid syncing height")
+)
+
+// Consensus is for syncing consensus module.
+type Consensus struct {
+ db db.Database
+ gov core.Governance
+ dMoment time.Time
+ logger common.Logger
+ app core.Application
+ prv crypto.PrivateKey
+ network core.Network
+ nodeSetCache *utils.NodeSetCache
+ tsigVerifier *core.TSigVerifierCache
+
+ blocks types.BlocksByPosition
+ agreementModule *agreement
+ agreementRoundCut uint64
+ heightEvt *common.Event
+ roundEvt *utils.RoundEvent
+
+ // lock for accessing all fields.
+ lock sync.RWMutex
+ duringBuffering bool
+ latestCRSRound uint64
+ waitGroup sync.WaitGroup
+ agreementWaitGroup sync.WaitGroup
+ pullChan chan common.Hash
+ receiveChan chan *types.Block
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ syncedLastBlock *types.Block
+ syncedConsensus *core.Consensus
+ syncedSkipNext bool
+ dummyCancel context.CancelFunc
+ dummyFinished <-chan struct{}
+ dummyMsgBuffer []types.Msg
+ initChainTipHeight uint64
+}
+
+// NewConsensus creates an instance for Consensus (syncer consensus).
+func NewConsensus(
+ initHeight uint64,
+ dMoment time.Time,
+ app core.Application,
+ gov core.Governance,
+ db db.Database,
+ network core.Network,
+ prv crypto.PrivateKey,
+ logger common.Logger) *Consensus {
+
+ con := &Consensus{
+ dMoment: dMoment,
+ app: app,
+ gov: gov,
+ db: db,
+ network: network,
+ nodeSetCache: utils.NewNodeSetCache(gov),
+ tsigVerifier: core.NewTSigVerifierCache(gov, 7),
+ prv: prv,
+ logger: logger,
+ receiveChan: make(chan *types.Block, 1000),
+ pullChan: make(chan common.Hash, 1000),
+ heightEvt: common.NewEvent(),
+ }
+ con.ctx, con.ctxCancel = context.WithCancel(context.Background())
+ _, con.initChainTipHeight = db.GetCompactionChainTipInfo()
+ con.agreementModule = newAgreement(
+ con.initChainTipHeight,
+ con.receiveChan,
+ con.pullChan,
+ con.nodeSetCache,
+ con.tsigVerifier,
+ con.logger)
+ con.agreementWaitGroup.Add(1)
+ go func() {
+ defer con.agreementWaitGroup.Done()
+ con.agreementModule.run()
+ }()
+ if err := con.deliverPendingBlocks(initHeight); err != nil {
+ panic(err)
+ }
+ return con
+}
+
+func (con *Consensus) deliverPendingBlocks(height uint64) error {
+ if height >= con.initChainTipHeight {
+ return nil
+ }
+ blocks := make([]*types.Block, 0, con.initChainTipHeight-height)
+ hash, _ := con.db.GetCompactionChainTipInfo()
+ for {
+ block, err := con.db.GetBlock(hash)
+ if err != nil {
+ return err
+ }
+ if block.Position.Height == height {
+ break
+ }
+ blocks = append(blocks, &block)
+ hash = block.ParentHash
+ }
+ sort.Sort(types.BlocksByPosition(blocks))
+ for _, b := range blocks {
+ con.logger.Debug("Syncer BlockConfirmed", "block", b)
+ con.app.BlockConfirmed(*b)
+ con.logger.Debug("Syncer BlockDelivered", "block", b)
+ con.app.BlockDelivered(b.Hash, b.Position, b.Randomness)
+ }
+ return nil
+}
+
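+// assureBuffering starts the buffering routines (round events, agreement and
+// network listeners) if they are not already running.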
+func (con *Consensus) assureBuffering() {
+ if func() bool {
+ con.lock.RLock()
+ defer con.lock.RUnlock()
+ return con.duringBuffering
+ }() {
+ return
+ }
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ if con.duringBuffering {
+ return
+ }
+ con.duringBuffering = true
+ // Get latest block to prepare utils.RoundEvent.
+ var (
+ err error
+ blockHash, height = con.db.GetCompactionChainTipInfo()
+ )
+ if height == 0 {
+ con.roundEvt, err = utils.NewRoundEvent(con.ctx, con.gov, con.logger,
+ types.Position{}, core.ConfigRoundShift)
+ } else {
+ var b types.Block
+ if b, err = con.db.GetBlock(blockHash); err == nil {
+ con.roundEvt, err = utils.NewRoundEvent(con.ctx, con.gov,
+ con.logger, b.Position, core.ConfigRoundShift)
+ }
+ }
+ if err != nil {
+ panic(err)
+ }
+ // Make sure con.roundEvt is stopped before stopping con.agreementModule.
+ con.waitGroup.Add(1)
+ // Register a round event handler to reset the node set cache; this handler
+ // should have the highest priority.
+ con.roundEvt.Register(func(evts []utils.RoundEventParam) {
+ for _, e := range evts {
+ if e.Reset == 0 {
+ continue
+ }
+ con.nodeSetCache.Purge(e.Round + 1)
+ con.tsigVerifier.Purge(e.Round + 1)
+ }
+ })
+ // Register a round event handler to notify the agreementModule of new CRS rounds.
+ con.roundEvt.Register(func(evts []utils.RoundEventParam) {
+ con.waitGroup.Add(1)
+ go func() {
+ defer con.waitGroup.Done()
+ for _, e := range evts {
+ select {
+ case <-con.ctx.Done():
+ return
+ default:
+ }
+ for func() bool {
+ select {
+ case <-con.ctx.Done():
+ return false
+ case con.agreementModule.inputChan <- e.Round:
+ return false
+ case <-time.After(500 * time.Millisecond):
+ con.logger.Warn(
+ "Agreement input channel is full when notifying new round",
+ "round", e.Round,
+ )
+ return true
+ }
+ }() {
+ }
+ }
+ }()
+ })
+ // Register a round event handler to validate next round.
+ con.roundEvt.Register(func(evts []utils.RoundEventParam) {
+ con.heightEvt.RegisterHeight(
+ evts[len(evts)-1].NextRoundValidationHeight(),
+ utils.RoundEventRetryHandlerGenerator(con.roundEvt, con.heightEvt),
+ )
+ })
+ con.roundEvt.TriggerInitEvent()
+ con.startAgreement()
+ con.startNetwork()
+}
+
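+// checkIfSynced reports whether the compaction-chain blocks fed by SyncBlocks
+// have caught up with the oldest block confirmed by the agreement module.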
+func (con *Consensus) checkIfSynced(blocks []*types.Block) (synced bool) {
+ con.lock.RLock()
+ defer con.lock.RUnlock()
+ defer func() {
+ con.logger.Debug("Syncer synced status",
+ "last-block", blocks[len(blocks)-1],
+ "synced", synced,
+ )
+ }()
+ if len(con.blocks) == 0 || len(blocks) == 0 {
+ return
+ }
+ synced = !blocks[len(blocks)-1].Position.Older(con.blocks[0].Position)
+ return
+}
+
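+// buildAllEmptyBlocks removes empty blocks at the head of the buffered blocks
+// and fills the contents of the remaining empty blocks from their parents.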
+func (con *Consensus) buildAllEmptyBlocks() {
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ // Clean empty blocks on tips of chains.
+ for len(con.blocks) > 0 && con.isEmptyBlock(con.blocks[0]) {
+ con.blocks = con.blocks[1:]
+ }
+ // Build empty blocks.
+ for i, b := range con.blocks {
+ if con.isEmptyBlock(b) {
+ if con.blocks[i-1].Position.Height+1 == b.Position.Height {
+ con.buildEmptyBlock(b, con.blocks[i-1])
+ }
+ }
+ }
+}
+
+// ForceSync forces syncer to become synced.
+func (con *Consensus) ForceSync(lastPos types.Position, skip bool) {
+ if con.syncedLastBlock != nil {
+ return
+ }
+ hash, height := con.db.GetCompactionChainTipInfo()
+ if height < lastPos.Height {
+ panic(fmt.Errorf("compaction chain not synced height %d, tip %d",
+ lastPos.Height, height))
+ } else if height > lastPos.Height {
+ skip = false
+ }
+ block, err := con.db.GetBlock(hash)
+ if err != nil {
+ panic(err)
+ }
+ con.syncedLastBlock = &block
+ con.stopBuffering()
+ // We might call stopBuffering without calling assureBuffering.
+ if con.dummyCancel == nil {
+ con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver(
+ context.Background(), con.network.ReceiveChan(),
+ func(msg types.Msg) {
+ con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg)
+ })
+ }
+ con.syncedSkipNext = skip
+ con.logger.Info("Force Sync", "block", &block, "skip", skip)
+}
+
+// SyncBlocks syncs blocks from the compaction chain; latest is true if the
+// caller regards the blocks as the latest ones. Notice that latest can be
+// true many times.
+// NOTICE: parameter "blocks" should be consecutive in compaction height.
+// NOTICE: this method is not expected to be called concurrently.
+func (con *Consensus) SyncBlocks(
+ blocks []*types.Block, latest bool) (synced bool, err error) {
+ defer func() {
+ con.logger.Debug("SyncBlocks returned",
+ "synced", synced,
+ "error", err,
+ "last-block", con.syncedLastBlock,
+ )
+ }()
+ if con.syncedLastBlock != nil {
+ synced, err = true, ErrAlreadySynced
+ return
+ }
+ if len(blocks) == 0 {
+ return
+ }
+ // Check if blocks are consecutive.
+ for i := 1; i < len(blocks); i++ {
+ if blocks[i].Position.Height != blocks[i-1].Position.Height+1 {
+ err = ErrInvalidBlockOrder
+ return
+ }
+ }
+ // Make sure the first block is the next block after the current compaction
+ // chain tip in the DB.
+ _, tipHeight := con.db.GetCompactionChainTipInfo()
+ if blocks[0].Position.Height != tipHeight+1 {
+ con.logger.Error("Mismatched block height",
+ "now", blocks[0].Position.Height,
+ "expected", tipHeight+1,
+ )
+ err = ErrInvalidSyncingHeight
+ return
+ }
+ con.logger.Trace("SyncBlocks",
+ "position", &blocks[0].Position,
+ "len", len(blocks),
+ "latest", latest,
+ )
+ for _, b := range blocks {
+ if err = con.db.PutBlock(*b); err != nil {
+ // A block might be put into db when confirmed by BA, but not
+ // finalized yet.
+ if err == db.ErrBlockExists {
+ err = con.db.UpdateBlock(*b)
+ }
+ if err != nil {
+ return
+ }
+ }
+ if err = con.db.PutCompactionChainTipInfo(
+ b.Hash, b.Position.Height); err != nil {
+ return
+ }
+ con.heightEvt.NotifyHeight(b.Position.Height)
+ }
+ if latest {
+ con.assureBuffering()
+ con.buildAllEmptyBlocks()
+ // Check if the compaction chain's and the agreement's blocks overlap. The
+ // overlap of the compaction chain and BA's oldest blocks means the
+ // syncing is done.
+ if con.checkIfSynced(blocks) {
+ con.stopBuffering()
+ con.syncedLastBlock = blocks[len(blocks)-1]
+ synced = true
+ }
+ }
+ return
+}
+
+// GetSyncedConsensus returns the core.Consensus instance after synced.
+func (con *Consensus) GetSyncedConsensus() (*core.Consensus, error) {
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ if con.syncedConsensus != nil {
+ return con.syncedConsensus, nil
+ }
+ if con.syncedLastBlock == nil {
+ return nil, ErrNotSynced
+ }
+ // flush all blocks in con.blocks into core.Consensus, and build
+ // core.Consensus from syncer.
+ con.dummyCancel()
+ <-con.dummyFinished
+ var err error
+ con.syncedConsensus, err = core.NewConsensusFromSyncer(
+ con.syncedLastBlock,
+ con.syncedSkipNext,
+ con.dMoment,
+ con.app,
+ con.gov,
+ con.db,
+ con.network,
+ con.prv,
+ con.blocks,
+ con.dummyMsgBuffer,
+ con.logger)
+ return con.syncedConsensus, err
+}
+
+// stopBuffering stops the syncer buffering routines.
+//
+// This method is mainly for the caller to stop the syncer before it is synced;
+// the syncer calls this method automatically after being synced.
+func (con *Consensus) stopBuffering() {
+ if func() (notBuffering bool) {
+ con.lock.RLock()
+ defer con.lock.RUnlock()
+ notBuffering = !con.duringBuffering
+ return
+ }() {
+ return
+ }
+ if func() (alreadyCanceled bool) {
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ if !con.duringBuffering {
+ alreadyCanceled = true
+ return
+ }
+ con.duringBuffering = false
+ con.logger.Trace("Syncer is about to stop")
+ // Stop network and CRS routines, and wait until they are all stopped.
+ con.ctxCancel()
+ return
+ }() {
+ return
+ }
+ con.logger.Trace("Stop syncer modules")
+ con.roundEvt.Stop()
+ con.waitGroup.Done()
+ // Wait for all routines that depend on con.agreementModule to stop.
+ con.waitGroup.Wait()
+ // Since no one is waiting on the full node's receive channel, we need to
+ // launch a dummy receiver right away.
+ con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver(
+ context.Background(), con.network.ReceiveChan(),
+ func(msg types.Msg) {
+ con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg)
+ })
+ // Stop agreements.
+ con.logger.Trace("Stop syncer agreement modules")
+ con.stopAgreement()
+ con.logger.Trace("Syncer stopped")
+ return
+}
+
+// isEmptyBlock checks if a block is an empty block, i.e. both its hash and
+// parent hash are empty.
+func (con *Consensus) isEmptyBlock(b *types.Block) bool {
+ return b.Hash == common.Hash{} && b.ParentHash == common.Hash{}
+}
+
+// buildEmptyBlock builds an empty block in agreement.
+func (con *Consensus) buildEmptyBlock(b *types.Block, parent *types.Block) {
+ cfg := utils.GetConfigWithPanic(con.gov, b.Position.Round, con.logger)
+ b.Timestamp = parent.Timestamp.Add(cfg.MinBlockInterval)
+ b.Witness.Height = parent.Witness.Height
+ b.Witness.Data = make([]byte, len(parent.Witness.Data))
+ copy(b.Witness.Data, parent.Witness.Data)
+}
+
+// startAgreement starts a routine for handling blocks confirmed by the
+// agreement module and block-pull requests.
+func (con *Consensus) startAgreement() {
+ // Start a routine listening on the receive channel and the pull-block channel.
+ go func() {
+ for {
+ select {
+ case b, ok := <-con.receiveChan:
+ if !ok {
+ return
+ }
+ func() {
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ if len(con.blocks) > 0 &&
+ !b.Position.Newer(con.blocks[0].Position) {
+ return
+ }
+ con.blocks = append(con.blocks, b)
+ sort.Sort(con.blocks)
+ }()
+ case h, ok := <-con.pullChan:
+ if !ok {
+ return
+ }
+ con.network.PullBlocks(common.Hashes{h})
+ }
+ }
+ }()
+}
+
+// startNetwork starts network for receiving blocks and agreement results.
+func (con *Consensus) startNetwork() {
+ con.waitGroup.Add(1)
+ go func() {
+ defer con.waitGroup.Done()
+ loop:
+ for {
+ select {
+ case val := <-con.network.ReceiveChan():
+ switch v := val.Payload.(type) {
+ case *types.Block:
+ case *types.AgreementResult:
+ // Avoid attacks from byzantine nodes broadcasting older
+ // agreement results. Normal nodes might report 'synced'
+ // while still falling behind other nodes.
+ if v.Position.Height <= con.initChainTipHeight {
+ continue loop
+ }
+ default:
+ continue loop
+ }
+ con.agreementModule.inputChan <- val.Payload
+ case <-con.ctx.Done():
+ break loop
+ }
+ }
+ }()
+}
+
+func (con *Consensus) stopAgreement() {
+ if con.agreementModule.inputChan != nil {
+ close(con.agreementModule.inputChan)
+ }
+ con.agreementWaitGroup.Wait()
+ con.agreementModule.inputChan = nil
+ close(con.receiveChan)
+ close(con.pullChan)
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/watch-cat.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/watch-cat.go
new file mode 100644
index 000000000..e5ba911a7
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/syncer/watch-cat.go
@@ -0,0 +1,156 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package syncer
+
+import (
+ "context"
+ "time"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+type configReader interface {
+ Configuration(round uint64) *types.Config
+}
+
+// WatchCat is responsible for signaling whether the syncer object should be terminated.
+type WatchCat struct {
+ recovery core.Recovery
+ timeout time.Duration
+ configReader configReader
+ feed chan types.Position
+ lastPosition types.Position
+ polling time.Duration
+ ctx context.Context
+ cancel context.CancelFunc
+ logger common.Logger
+}
+
+// NewWatchCat creates a new WatchCat 🐱 object.
+func NewWatchCat(
+ recovery core.Recovery,
+ configReader configReader,
+ polling time.Duration,
+ timeout time.Duration,
+ logger common.Logger) *WatchCat {
+ wc := &WatchCat{
+ recovery: recovery,
+ timeout: timeout,
+ configReader: configReader,
+ feed: make(chan types.Position),
+ polling: polling,
+ logger: logger,
+ }
+ return wc
+}
+
+// Feed the WatchCat so it won't produce the termination signal.
+func (wc *WatchCat) Feed(position types.Position) {
+ wc.feed <- position
+}
+
+// Start the WatchCat.
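+// If no position is fed within the timeout, Start proposes a skip block via
+// the Recovery interface and polls until more than half of the notary set has
+// voted; the channel returned by Meow is then closed.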
+func (wc *WatchCat) Start() {
+ wc.Stop()
+ wc.lastPosition = types.Position{}
+ wc.ctx, wc.cancel = context.WithCancel(context.Background())
+ go func() {
+ var lastPos types.Position
+ MonitorLoop:
+ for {
+ select {
+ case <-wc.ctx.Done():
+ return
+ default:
+ }
+ select {
+ case <-wc.ctx.Done():
+ return
+ case pos := <-wc.feed:
+ if !pos.Newer(lastPos) {
+ wc.logger.Warn("Feed with older height",
+ "pos", pos, "lastPos", lastPos)
+ continue
+ }
+ lastPos = pos
+ case <-time.After(wc.timeout):
+ break MonitorLoop
+ }
+ }
+ go func() {
+ for {
+ select {
+ case <-wc.ctx.Done():
+ return
+ case <-wc.feed:
+ }
+ }
+ }()
+ defer wc.cancel()
+ proposed := false
+ threshold := uint64(
+ utils.GetConfigWithPanic(wc.configReader, lastPos.Round, wc.logger).
+ NotarySetSize / 2)
+ wc.logger.Info("Threshold for recovery", "votes", threshold)
+ ResetLoop:
+ for {
+ if !proposed {
+ wc.logger.Info("Calling Recovery.ProposeSkipBlock",
+ "height", lastPos.Height)
+ if err := wc.recovery.ProposeSkipBlock(lastPos.Height); err != nil {
+ wc.logger.Warn("Failed to proposeSkipBlock", "height", lastPos.Height, "error", err)
+ } else {
+ proposed = true
+ }
+ }
+ votes, err := wc.recovery.Votes(lastPos.Height)
+ if err != nil {
+ wc.logger.Error("Failed to get recovery votes", "height", lastPos.Height, "error", err)
+ } else if votes > threshold {
+ wc.logger.Info("Threshold for recovery reached!")
+ wc.lastPosition = lastPos
+ break ResetLoop
+ }
+ select {
+ case <-wc.ctx.Done():
+ return
+ case <-time.After(wc.polling):
+ }
+ }
+ }()
+}
+
+// Stop the WatchCat.
+func (wc *WatchCat) Stop() {
+ if wc.cancel != nil {
+ wc.cancel()
+ }
+}
+
+// Meow returns a channel that is closed when the syncer should be terminated.
+func (wc *WatchCat) Meow() <-chan struct{} {
+ return wc.ctx.Done()
+}
+
+// LastPosition returns the last position for recovery.
+func (wc *WatchCat) LastPosition() types.Position {
+ return wc.lastPosition
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/ticker.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/ticker.go
new file mode 100644
index 000000000..aba56ef9f
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/ticker.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+// TickerType is the type of ticker.
+type TickerType int
+
+// TickerType enum.
+const (
+ TickerBA TickerType = iota
+ TickerDKG
+ TickerCRS
+)
+
+// defaultTicker is a wrapper implementing the Ticker interface based on
+// time.Ticker.
+type defaultTicker struct {
+ ticker *time.Ticker
+ tickerChan chan time.Time
+ duration time.Duration
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ waitGroup sync.WaitGroup
+}
+
+// newDefaultTicker constructs a defaultTicker instance with the given interval.
+func newDefaultTicker(lambda time.Duration) *defaultTicker {
+ ticker := &defaultTicker{duration: lambda}
+ ticker.init()
+ return ticker
+}
+
+// Tick implements Tick method of ticker interface.
+func (t *defaultTicker) Tick() <-chan time.Time {
+ return t.tickerChan
+}
+
+// Stop implements Stop method of ticker interface.
+func (t *defaultTicker) Stop() {
+ t.ticker.Stop()
+ t.ctxCancel()
+ t.waitGroup.Wait()
+ t.ctx = nil
+ t.ctxCancel = nil
+ close(t.tickerChan)
+ t.tickerChan = nil
+}
+
+// Restart implements the Restart method of the Ticker interface.
+func (t *defaultTicker) Restart() {
+ t.Stop()
+ t.init()
+}
+
+func (t *defaultTicker) init() {
+ t.ticker = time.NewTicker(t.duration)
+ t.tickerChan = make(chan time.Time)
+ t.ctx, t.ctxCancel = context.WithCancel(context.Background())
+ t.waitGroup.Add(1)
+ go t.monitor()
+}
+
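+// monitor forwards ticks from the underlying time.Ticker to tickerChan and
+// drops a tick when no receiver is ready, so ticking never blocks shutdown.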
+func (t *defaultTicker) monitor() {
+ defer t.waitGroup.Done()
+loop:
+ for {
+ select {
+ case <-t.ctx.Done():
+ break loop
+ case v := <-t.ticker.C:
+ select {
+ case t.tickerChan <- v:
+ default:
+ }
+ }
+ }
+}
+
+// newTicker is a helper to set up a ticker from a Governance object. If the
+// governance object implements a ticker generator, a ticker from that
+// generator is returned; otherwise a default one is constructed.
+func newTicker(gov Governance, round uint64, tickerType TickerType) (t Ticker) {
+ type tickerGenerator interface {
+ NewTicker(TickerType) Ticker
+ }
+
+ if gen, ok := gov.(tickerGenerator); ok {
+ t = gen.NewTicker(tickerType)
+ }
+ if t == nil {
+ var duration time.Duration
+ switch tickerType {
+ case TickerBA:
+ duration = utils.GetConfigWithPanic(gov, round, nil).LambdaBA
+ case TickerDKG:
+ duration = utils.GetConfigWithPanic(gov, round, nil).LambdaDKG
+ default:
+ panic(fmt.Errorf("unknown ticker type: %d", tickerType))
+ }
+ t = newDefaultTicker(duration)
+ }
+ return
+}
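+
+// A minimal usage sketch (illustrative only; "done" is an assumed shutdown
+// channel, not part of this package):
+//
+//   t := newTicker(gov, round, TickerBA)
+//   defer t.Stop()
+//   for {
+//       select {
+//       case <-t.Tick():
+//           // advance the BA state machine by one tick
+//       case <-done:
+//           return
+//       }
+//   }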
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block-randomness.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block-randomness.go
new file mode 100644
index 000000000..b97188705
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block-randomness.go
@@ -0,0 +1,44 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "encoding/hex"
+ "fmt"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+)
+
+// AgreementResult describes an agreement result.
+type AgreementResult struct {
+ BlockHash common.Hash `json:"block_hash"`
+ Position Position `json:"position"`
+ Votes []Vote `json:"votes"`
+ IsEmptyBlock bool `json:"is_empty_block"`
+ Randomness []byte `json:"randomness"`
+}
+
+func (r *AgreementResult) String() string {
+ if len(r.Randomness) == 0 {
+ return fmt.Sprintf("agreementResult{Block:%s Pos:%s}",
+ r.BlockHash.String()[:6], r.Position)
+ }
+ return fmt.Sprintf("agreementResult{Block:%s Pos:%s Rand:%s}",
+ r.BlockHash.String()[:6], r.Position,
+ hex.EncodeToString(r.Randomness)[:6])
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block.go
new file mode 100644
index 000000000..bc92211b9
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/block.go
@@ -0,0 +1,227 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+// TODO(jimmy-dexon): remove comments of WitnessAck before open source.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/byzantine-lab/go-tangerine/rlp"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+)
+
+// GenesisHeight is the height at which the genesis block should be.
+const GenesisHeight uint64 = 1
+
+// BlockVerifyStatus is the return code for core.Application.VerifyBlock
+type BlockVerifyStatus int
+
+// Enums for return value of core.Application.VerifyBlock.
+const (
+ // VerifyOK: Block is verified.
+ VerifyOK BlockVerifyStatus = iota
+ // VerifyRetryLater: Block is unable to be verified at this moment.
+ // Try again later.
+ VerifyRetryLater
+ // VerifyInvalidBlock: Block is an invalid one.
+ VerifyInvalidBlock
+)
+
+type rlpTimestamp struct {
+ time.Time
+}
+
+func (t *rlpTimestamp) EncodeRLP(w io.Writer) error {
+ return rlp.Encode(w, uint64(t.UTC().UnixNano()))
+}
+
+func (t *rlpTimestamp) DecodeRLP(s *rlp.Stream) error {
+ var nano uint64
+ err := s.Decode(&nano)
+ if err == nil {
+ sec := int64(nano) / 1000000000
+ nsec := int64(nano) % 1000000000
+ t.Time = time.Unix(sec, nsec).UTC()
+ }
+ return err
+}
+
+// Witness represents the consensus information on the compaction chain.
+type Witness struct {
+ Height uint64 `json:"height"`
+ Data []byte `json:"data"`
+}
+
+// Block represents a single event broadcasted on the network.
+type Block struct {
+ ProposerID NodeID `json:"proposer_id"`
+ ParentHash common.Hash `json:"parent_hash"`
+ Hash common.Hash `json:"hash"`
+ Position Position `json:"position"`
+ Timestamp time.Time `json:"timestamp"`
+ Payload []byte `json:"payload"`
+ PayloadHash common.Hash `json:"payload_hash"`
+ Witness Witness `json:"witness"`
+ Randomness []byte `json:"randomness"`
+ Signature crypto.Signature `json:"signature"`
+
+ CRSSignature crypto.Signature `json:"crs_signature"`
+}
+
+type rlpBlock struct {
+ ProposerID NodeID
+ ParentHash common.Hash
+ Hash common.Hash
+ Position Position
+ Timestamp *rlpTimestamp
+ Payload []byte
+ PayloadHash common.Hash
+ Witness *Witness
+ Randomness []byte
+ Signature crypto.Signature
+
+ CRSSignature crypto.Signature
+}
+
+// EncodeRLP implements rlp.Encoder
+func (b *Block) EncodeRLP(w io.Writer) error {
+ return rlp.Encode(w, rlpBlock{
+ ProposerID: b.ProposerID,
+ ParentHash: b.ParentHash,
+ Hash: b.Hash,
+ Position: b.Position,
+ Timestamp: &rlpTimestamp{b.Timestamp},
+ Payload: b.Payload,
+ PayloadHash: b.PayloadHash,
+ Witness: &b.Witness,
+ Randomness: b.Randomness,
+ Signature: b.Signature,
+ CRSSignature: b.CRSSignature,
+ })
+}
+
+// DecodeRLP implements rlp.Decoder
+func (b *Block) DecodeRLP(s *rlp.Stream) error {
+ var dec rlpBlock
+ err := s.Decode(&dec)
+ if err == nil {
+ *b = Block{
+ ProposerID: dec.ProposerID,
+ ParentHash: dec.ParentHash,
+ Hash: dec.Hash,
+ Position: dec.Position,
+ Timestamp: dec.Timestamp.Time,
+ Payload: dec.Payload,
+ PayloadHash: dec.PayloadHash,
+ Witness: *dec.Witness,
+ Randomness: dec.Randomness,
+ Signature: dec.Signature,
+ CRSSignature: dec.CRSSignature,
+ }
+ }
+ return err
+}
+
+func (b *Block) String() string {
+ return fmt.Sprintf("Block{Hash:%v %s}", b.Hash.String()[:6], b.Position)
+}
+
+// Clone returns a deep copy of a block.
+func (b *Block) Clone() (bcopy *Block) {
+ bcopy = &Block{}
+ bcopy.ProposerID = b.ProposerID
+ bcopy.ParentHash = b.ParentHash
+ bcopy.Hash = b.Hash
+ bcopy.Position.Round = b.Position.Round
+ bcopy.Position.Height = b.Position.Height
+ bcopy.Signature = b.Signature.Clone()
+ bcopy.CRSSignature = b.CRSSignature.Clone()
+ bcopy.Witness.Height = b.Witness.Height
+ bcopy.Witness.Data = common.CopyBytes(b.Witness.Data)
+ bcopy.Timestamp = b.Timestamp
+ bcopy.Payload = common.CopyBytes(b.Payload)
+ bcopy.PayloadHash = b.PayloadHash
+ bcopy.Randomness = common.CopyBytes(b.Randomness)
+ return
+}
+
+// IsGenesis checks if the block is a genesis block.
+func (b *Block) IsGenesis() bool {
+ return b.Position.Height == GenesisHeight && b.ParentHash == common.Hash{}
+}
+
+// IsFinalized checks if the block is finalized.
+func (b *Block) IsFinalized() bool {
+ return len(b.Randomness) > 0
+}
+
+// IsEmpty checks if the block is an 'empty block'.
+func (b *Block) IsEmpty() bool {
+ return b.ProposerID.Hash == common.Hash{}
+}
+
+// ByHash is the helper type for sorting a slice of blocks by hash.
+type ByHash []*Block
+
+func (b ByHash) Len() int {
+ return len(b)
+}
+
+func (b ByHash) Less(i int, j int) bool {
+ return bytes.Compare([]byte(b[i].Hash[:]), []byte(b[j].Hash[:])) == -1
+}
+
+func (b ByHash) Swap(i int, j int) {
+ b[i], b[j] = b[j], b[i]
+}
+
+// BlocksByPosition is the helper type for sorting a slice of blocks by position.
+type BlocksByPosition []*Block
+
+// Len implements Len method in sort.Sort interface.
+func (bs BlocksByPosition) Len() int {
+ return len(bs)
+}
+
+// Less implements Less method in sort.Sort interface.
+func (bs BlocksByPosition) Less(i int, j int) bool {
+ return bs[j].Position.Newer(bs[i].Position)
+}
+
+// Swap implements Swap method in sort.Sort interface.
+func (bs BlocksByPosition) Swap(i int, j int) {
+ bs[i], bs[j] = bs[j], bs[i]
+}
+
+// Push implements Push method in heap interface.
+func (bs *BlocksByPosition) Push(x interface{}) {
+ *bs = append(*bs, x.(*Block))
+}
+
+// Pop implements Pop method in heap interface.
+func (bs *BlocksByPosition) Pop() (ret interface{}) {
+ n := len(*bs)
+ *bs, ret = (*bs)[0:n-1], (*bs)[n-1]
+ return
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/config.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/config.go
new file mode 100644
index 000000000..dce38369e
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/config.go
@@ -0,0 +1,75 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "encoding/binary"
+ "time"
+)
+
+// Config stands for Current Configuration Parameters.
+type Config struct {
+ // Lambda related.
+ LambdaBA time.Duration
+ LambdaDKG time.Duration
+
+ // Set related.
+ NotarySetSize uint32
+
+ // Time related.
+ RoundLength uint64
+ MinBlockInterval time.Duration
+}
+
+// Clone return a copied configuration.
+func (c *Config) Clone() *Config {
+ return &Config{
+ LambdaBA: c.LambdaBA,
+ LambdaDKG: c.LambdaDKG,
+ NotarySetSize: c.NotarySetSize,
+ RoundLength: c.RoundLength,
+ MinBlockInterval: c.MinBlockInterval,
+ }
+}
+
+// Bytes returns []byte representation of Config.
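+// The encoding is the little-endian concatenation of LambdaBA (8 bytes),
+// LambdaDKG (8), NotarySetSize (4), RoundLength (8) and MinBlockInterval (8).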
+func (c *Config) Bytes() []byte {
+ binaryLambdaBA := make([]byte, 8)
+ binary.LittleEndian.PutUint64(
+ binaryLambdaBA, uint64(c.LambdaBA.Nanoseconds()))
+ binaryLambdaDKG := make([]byte, 8)
+ binary.LittleEndian.PutUint64(
+ binaryLambdaDKG, uint64(c.LambdaDKG.Nanoseconds()))
+
+ binaryNotarySetSize := make([]byte, 4)
+ binary.LittleEndian.PutUint32(binaryNotarySetSize, c.NotarySetSize)
+
+ binaryRoundLength := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryRoundLength, c.RoundLength)
+ binaryMinBlockInterval := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryMinBlockInterval,
+ uint64(c.MinBlockInterval.Nanoseconds()))
+
+ enc := make([]byte, 0, 40)
+ enc = append(enc, binaryLambdaBA...)
+ enc = append(enc, binaryLambdaDKG...)
+ enc = append(enc, binaryNotarySetSize...)
+ enc = append(enc, binaryRoundLength...)
+ enc = append(enc, binaryMinBlockInterval...)
+ return enc
+}
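+
+// Decoding sketch (editor's illustration, not part of the original source):
+// the encoding above is a fixed little-endian layout of 8+8+4+8+8 = 36 bytes.
+// Given enc := c.Bytes(), the fields can be read back as:
+//
+//	lambdaBA := time.Duration(binary.LittleEndian.Uint64(enc[0:8]))
+//	lambdaDKG := time.Duration(binary.LittleEndian.Uint64(enc[8:16]))
+//	notarySetSize := binary.LittleEndian.Uint32(enc[16:20])
+//	roundLength := binary.LittleEndian.Uint64(enc[20:28])
+//	minBlockInterval := time.Duration(binary.LittleEndian.Uint64(enc[28:36]))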
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/dkg/dkg.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/dkg/dkg.go
new file mode 100644
index 000000000..6c2b777cd
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/dkg/dkg.go
@@ -0,0 +1,485 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package dkg
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/byzantine-lab/go-tangerine/rlp"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ cryptoDKG "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+// Errors for typesDKG package.
+var (
+ ErrNotReachThreshold = fmt.Errorf("threshold not reach")
+ ErrInvalidThreshold = fmt.Errorf("invalid threshold")
+)
+
+// NewID creates a DKGID from NodeID.
+func NewID(ID types.NodeID) cryptoDKG.ID {
+ return cryptoDKG.NewID(ID.Hash[:])
+}
+
+// PrivateShare describes a secret share in the DKG protocol.
+type PrivateShare struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ ReceiverID types.NodeID `json:"receiver_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ PrivateShare cryptoDKG.PrivateKey `json:"private_share"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+// Equal checks equality between two PrivateShare instances.
+func (p *PrivateShare) Equal(other *PrivateShare) bool {
+ return p.ProposerID.Equal(other.ProposerID) &&
+ p.ReceiverID.Equal(other.ReceiverID) &&
+ p.Round == other.Round &&
+ p.Reset == other.Reset &&
+ p.Signature.Type == other.Signature.Type &&
+ bytes.Compare(p.Signature.Signature, other.Signature.Signature) == 0 &&
+ bytes.Compare(
+ p.PrivateShare.Bytes(), other.PrivateShare.Bytes()) == 0
+}
+
+// MasterPublicKey describes a master public key in the DKG protocol.
+type MasterPublicKey struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ DKGID cryptoDKG.ID `json:"dkg_id"`
+ PublicKeyShares cryptoDKG.PublicKeyShares `json:"public_key_shares"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (d *MasterPublicKey) String() string {
+ return fmt.Sprintf("MasterPublicKey{KP:%s Round:%d Reset:%d}",
+ d.ProposerID.String()[:6],
+ d.Round,
+ d.Reset)
+}
+
+// Equal checks equality of two DKG master public keys.
+func (d *MasterPublicKey) Equal(other *MasterPublicKey) bool {
+ return d.ProposerID.Equal(other.ProposerID) &&
+ d.Round == other.Round &&
+ d.Reset == other.Reset &&
+ d.DKGID.GetHexString() == other.DKGID.GetHexString() &&
+ d.PublicKeyShares.Equal(&other.PublicKeyShares) &&
+ d.Signature.Type == other.Signature.Type &&
+ bytes.Compare(d.Signature.Signature, other.Signature.Signature) == 0
+}
+
+type rlpMasterPublicKey struct {
+ ProposerID types.NodeID
+ Round uint64
+ Reset uint64
+ DKGID []byte
+ PublicKeyShares *cryptoDKG.PublicKeyShares
+ Signature crypto.Signature
+}
+
+// EncodeRLP implements rlp.Encoder
+func (d *MasterPublicKey) EncodeRLP(w io.Writer) error {
+ return rlp.Encode(w, rlpMasterPublicKey{
+ ProposerID: d.ProposerID,
+ Round: d.Round,
+ Reset: d.Reset,
+ DKGID: d.DKGID.GetLittleEndian(),
+ PublicKeyShares: &d.PublicKeyShares,
+ Signature: d.Signature,
+ })
+}
+
+// DecodeRLP implements rlp.Decoder
+func (d *MasterPublicKey) DecodeRLP(s *rlp.Stream) error {
+ var dec rlpMasterPublicKey
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ id, err := cryptoDKG.BytesID(dec.DKGID)
+ if err != nil {
+ return err
+ }
+
+ *d = MasterPublicKey{
+ ProposerID: dec.ProposerID,
+ Round: dec.Round,
+ Reset: dec.Reset,
+ DKGID: id,
+ PublicKeyShares: *dec.PublicKeyShares.Move(),
+ Signature: dec.Signature,
+ }
+ return err
+}
+
+// NewMasterPublicKey returns a new MasterPublicKey instance.
+func NewMasterPublicKey() *MasterPublicKey {
+ return &MasterPublicKey{
+ PublicKeyShares: *cryptoDKG.NewEmptyPublicKeyShares(),
+ }
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (d *MasterPublicKey) UnmarshalJSON(data []byte) error {
+ type innertMasterPublicKey MasterPublicKey
+ d.PublicKeyShares = *cryptoDKG.NewEmptyPublicKeyShares()
+ return json.Unmarshal(data, (*innertMasterPublicKey)(d))
+}
+
+// Complaint describes a complaint in the DKG protocol.
+type Complaint struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ PrivateShare PrivateShare `json:"private_share"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (c *Complaint) String() string {
+ if c.IsNack() {
+ return fmt.Sprintf("DKGNackComplaint{CP:%s Round:%d Reset %d PSP:%s}",
+ c.ProposerID.String()[:6], c.Round, c.Reset,
+ c.PrivateShare.ProposerID.String()[:6])
+ }
+ return fmt.Sprintf("DKGComplaint{CP:%s Round:%d Reset %d PrivateShare:%v}",
+ c.ProposerID.String()[:6], c.Round, c.Reset, c.PrivateShare)
+}
+
+// Equal checks equality between two Complaint instances.
+func (c *Complaint) Equal(other *Complaint) bool {
+ return c.ProposerID.Equal(other.ProposerID) &&
+ c.Round == other.Round &&
+ c.Reset == other.Reset &&
+ c.PrivateShare.Equal(&other.PrivateShare) &&
+ c.Signature.Type == other.Signature.Type &&
+ bytes.Compare(c.Signature.Signature, other.Signature.Signature) == 0
+}
+
+type rlpComplaint struct {
+ ProposerID types.NodeID
+ Round uint64
+ Reset uint64
+ IsNack bool
+ PrivateShare []byte
+ Signature crypto.Signature
+}
+
+// EncodeRLP implements rlp.Encoder
+func (c *Complaint) EncodeRLP(w io.Writer) error {
+ if c.IsNack() {
+ return rlp.Encode(w, rlpComplaint{
+ ProposerID: c.ProposerID,
+ Round: c.Round,
+ Reset: c.Reset,
+ IsNack: true,
+ PrivateShare: c.PrivateShare.ProposerID.Hash[:],
+ Signature: c.Signature,
+ })
+ }
+ prvShare, err := rlp.EncodeToBytes(&c.PrivateShare)
+ if err != nil {
+ return err
+ }
+ return rlp.Encode(w, rlpComplaint{
+ ProposerID: c.ProposerID,
+ Round: c.Round,
+ Reset: c.Reset,
+ IsNack: false,
+ PrivateShare: prvShare,
+ Signature: c.Signature,
+ })
+}
+
+// DecodeRLP implements rlp.Decoder
+func (c *Complaint) DecodeRLP(s *rlp.Stream) error {
+ var dec rlpComplaint
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ var prvShare PrivateShare
+ if dec.IsNack {
+ copy(prvShare.ProposerID.Hash[:], dec.PrivateShare)
+ prvShare.Round = dec.Round
+ prvShare.Reset = dec.Reset
+ } else {
+ if err := rlp.DecodeBytes(dec.PrivateShare, &prvShare); err != nil {
+ return err
+ }
+ }
+
+ *c = Complaint{
+ ProposerID: dec.ProposerID,
+ Round: dec.Round,
+ Reset: dec.Reset,
+ PrivateShare: prvShare,
+ Signature: dec.Signature,
+ }
+ return nil
+}
+
+// IsNack returns true if it's a nack complaint in DKG protocol.
+func (c *Complaint) IsNack() bool {
+ return len(c.PrivateShare.Signature.Signature) == 0
+}
+
+// PartialSignature describes a partial signature in the DKG protocol.
+type PartialSignature struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Hash common.Hash `json:"hash"`
+ PartialSignature cryptoDKG.PartialSignature `json:"partial_signature"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+// MPKReady describes an MPK-ready message in the DKG protocol.
+type MPKReady struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (ready *MPKReady) String() string {
+ return fmt.Sprintf("DKGMPKReady{RP:%s Round:%d Reset:%d}",
+ ready.ProposerID.String()[:6],
+ ready.Round,
+ ready.Reset)
+}
+
+// Equal checks equality of two MPKReady instances.
+func (ready *MPKReady) Equal(other *MPKReady) bool {
+ return ready.ProposerID.Equal(other.ProposerID) &&
+ ready.Round == other.Round &&
+ ready.Reset == other.Reset &&
+ ready.Signature.Type == other.Signature.Type &&
+ bytes.Compare(ready.Signature.Signature, other.Signature.Signature) == 0
+}
+
+// Finalize describes a DKG finalize message in the DKG protocol.
+type Finalize struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (final *Finalize) String() string {
+ return fmt.Sprintf("DKGFinal{FP:%s Round:%d Reset:%d}",
+ final.ProposerID.String()[:6],
+ final.Round,
+ final.Reset)
+}
+
+// Equal checks equality of two Finalize instances.
+func (final *Finalize) Equal(other *Finalize) bool {
+ return final.ProposerID.Equal(other.ProposerID) &&
+ final.Round == other.Round &&
+ final.Reset == other.Reset &&
+ final.Signature.Type == other.Signature.Type &&
+ bytes.Compare(final.Signature.Signature, other.Signature.Signature) == 0
+}
+
+// Success describes a DKG success message in the DKG protocol.
+type Success struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (s *Success) String() string {
+ return fmt.Sprintf("DKGSuccess{SP:%s Round:%d Reset:%d}",
+ s.ProposerID.String()[:6],
+ s.Round,
+ s.Reset)
+}
+
+// Equal checks equality of two Success instances.
+func (s *Success) Equal(other *Success) bool {
+ return s.ProposerID.Equal(other.ProposerID) &&
+ s.Round == other.Round &&
+ s.Reset == other.Reset &&
+ s.Signature.Type == other.Signature.Type &&
+ bytes.Compare(s.Signature.Signature, other.Signature.Signature) == 0
+}
+
+// GroupPublicKey is the result of DKG protocol.
+type GroupPublicKey struct {
+ Round uint64
+ QualifyIDs cryptoDKG.IDs
+ QualifyNodeIDs map[types.NodeID]struct{}
+ IDMap map[types.NodeID]cryptoDKG.ID
+ GroupPublicKey *cryptoDKG.PublicKey
+ Threshold int
+}
+
+// VerifySignature verifies if the signature is correct.
+func (gpk *GroupPublicKey) VerifySignature(
+ hash common.Hash, sig crypto.Signature) bool {
+ return gpk.GroupPublicKey.VerifySignature(hash, sig)
+}
+
+// CalcQualifyNodes returns the qualified nodes.
+func CalcQualifyNodes(
+ mpks []*MasterPublicKey, complaints []*Complaint, threshold int) (
+ qualifyIDs cryptoDKG.IDs, qualifyNodeIDs map[types.NodeID]struct{}, err error) {
+ if len(mpks) < threshold {
+ err = ErrInvalidThreshold
+ return
+ }
+
+ // Calculate qualify members.
+ disqualifyIDs := map[types.NodeID]struct{}{}
+ complaintsByID := map[types.NodeID]map[types.NodeID]struct{}{}
+ for _, complaint := range complaints {
+ if complaint.IsNack() {
+ if _, exist := complaintsByID[complaint.PrivateShare.ProposerID]; !exist {
+ complaintsByID[complaint.PrivateShare.ProposerID] =
+ make(map[types.NodeID]struct{})
+ }
+ complaintsByID[complaint.PrivateShare.ProposerID][complaint.ProposerID] =
+ struct{}{}
+ } else {
+ disqualifyIDs[complaint.PrivateShare.ProposerID] = struct{}{}
+ }
+ }
+ for nID, complaints := range complaintsByID {
+ if len(complaints) >= threshold {
+ disqualifyIDs[nID] = struct{}{}
+ }
+ }
+ qualifyIDs = make(cryptoDKG.IDs, 0, len(mpks)-len(disqualifyIDs))
+ if cap(qualifyIDs) < threshold {
+ err = ErrNotReachThreshold
+ return
+ }
+ qualifyNodeIDs = make(map[types.NodeID]struct{})
+ for _, mpk := range mpks {
+ if _, exist := disqualifyIDs[mpk.ProposerID]; exist {
+ continue
+ }
+ qualifyIDs = append(qualifyIDs, mpk.DKGID)
+ qualifyNodeIDs[mpk.ProposerID] = struct{}{}
+ }
+ return
+}
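+
+// Worked example (editor's illustration, not part of the original source):
+// a proposer is disqualified either by a single non-nack complaint (an
+// invalid share was proven) or by nack complaints from at least `threshold`
+// distinct complainers. With threshold = 2 and master public keys for nodes
+// A, B and C (mpkA, mpkB, mpkC and the complaints below are hypothetical):
+//
+//	// two different nodes nack B's shares, so B is disqualified
+//	ids, nodeIDs, err := CalcQualifyNodes(
+//		[]*MasterPublicKey{mpkA, mpkB, mpkC},
+//		[]*Complaint{nackOnBFromA, nackOnBFromC}, 2)
+//	// ids and nodeIDs now contain A and C only.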
+
+// NewGroupPublicKey creates a GroupPublicKey instance.
+func NewGroupPublicKey(
+ round uint64,
+ mpks []*MasterPublicKey, complaints []*Complaint,
+ threshold int) (
+ *GroupPublicKey, error) {
+ qualifyIDs, qualifyNodeIDs, err :=
+ CalcQualifyNodes(mpks, complaints, threshold)
+ if err != nil {
+ return nil, err
+ }
+ mpkMap := make(map[cryptoDKG.ID]*MasterPublicKey, cap(qualifyIDs))
+ idMap := make(map[types.NodeID]cryptoDKG.ID)
+ for _, mpk := range mpks {
+ if _, exist := qualifyNodeIDs[mpk.ProposerID]; !exist {
+ continue
+ }
+ mpkMap[mpk.DKGID] = mpk
+ idMap[mpk.ProposerID] = mpk.DKGID
+ }
+ // Recover Group Public Key.
+ pubShares := make([]*cryptoDKG.PublicKeyShares, 0, len(qualifyIDs))
+ for _, id := range qualifyIDs {
+ pubShares = append(pubShares, &mpkMap[id].PublicKeyShares)
+ }
+ groupPK := cryptoDKG.RecoverGroupPublicKey(pubShares)
+ return &GroupPublicKey{
+ Round: round,
+ QualifyIDs: qualifyIDs,
+ QualifyNodeIDs: qualifyNodeIDs,
+ IDMap: idMap,
+ Threshold: threshold,
+ GroupPublicKey: groupPK,
+ }, nil
+}
+
+// NodePublicKeys is the result of DKG protocol.
+type NodePublicKeys struct {
+ Round uint64
+ QualifyIDs cryptoDKG.IDs
+ QualifyNodeIDs map[types.NodeID]struct{}
+ IDMap map[types.NodeID]cryptoDKG.ID
+ PublicKeys map[types.NodeID]*cryptoDKG.PublicKey
+ Threshold int
+}
+
+// NewNodePublicKeys creates a NodePublicKeys instance.
+func NewNodePublicKeys(
+ round uint64,
+ mpks []*MasterPublicKey, complaints []*Complaint,
+ threshold int) (
+ *NodePublicKeys, error) {
+ qualifyIDs, qualifyNodeIDs, err :=
+ CalcQualifyNodes(mpks, complaints, threshold)
+ if err != nil {
+ return nil, err
+ }
+ mpkMap := make(map[cryptoDKG.ID]*MasterPublicKey, cap(qualifyIDs))
+ idMap := make(map[types.NodeID]cryptoDKG.ID)
+ for _, mpk := range mpks {
+ if _, exist := qualifyNodeIDs[mpk.ProposerID]; !exist {
+ continue
+ }
+ mpkMap[mpk.DKGID] = mpk
+ idMap[mpk.ProposerID] = mpk.DKGID
+ }
+ // Recover qualify members' public key.
+ pubKeys := make(map[types.NodeID]*cryptoDKG.PublicKey, len(qualifyIDs))
+ for _, recvID := range qualifyIDs {
+ pubShares := cryptoDKG.NewEmptyPublicKeyShares()
+ for _, id := range qualifyIDs {
+ pubShare, err := mpkMap[id].PublicKeyShares.Share(recvID)
+ if err != nil {
+ return nil, err
+ }
+ if err := pubShares.AddShare(id, pubShare); err != nil {
+ return nil, err
+ }
+ }
+ pubKey, err := pubShares.RecoverPublicKey(qualifyIDs)
+ if err != nil {
+ return nil, err
+ }
+ pubKeys[mpkMap[recvID].ProposerID] = pubKey
+ }
+ return &NodePublicKeys{
+ Round: round,
+ QualifyIDs: qualifyIDs,
+ QualifyNodeIDs: qualifyNodeIDs,
+ IDMap: idMap,
+ PublicKeys: pubKeys,
+ Threshold: threshold,
+ }, nil
+}
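+
+// Usage sketch (editor's illustration, not part of the original source),
+// assuming mpks, complaints and threshold come from a finished DKG round:
+//
+//	gpk, err := NewGroupPublicKey(round, mpks, complaints, threshold)
+//	// gpk.VerifySignature(hash, tsig) checks a recovered threshold signature.
+//	npks, err := NewNodePublicKeys(round, mpks, complaints, threshold)
+//	// npks.PublicKeys[nodeID] verifies that node's partial signatures.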
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/message.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/message.go
new file mode 100644
index 000000000..0335cfaae
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/message.go
@@ -0,0 +1,24 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+// Msg for the network ReceiveChan.
+type Msg struct {
+ PeerID interface{}
+ Payload interface{}
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/node.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/node.go
new file mode 100644
index 000000000..84b38a3b1
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/node.go
@@ -0,0 +1,61 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "bytes"
+ "encoding/hex"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+)
+
+// NodeID is the ID type for nodes.
+type NodeID struct {
+ common.Hash
+}
+
+// NewNodeID returns a NodeID with Hash set to the hash value of
+// public key.
+func NewNodeID(pubKey crypto.PublicKey) NodeID {
+ return NodeID{Hash: crypto.Keccak256Hash(pubKey.Bytes()[1:])}
+}
+
+// Equal checks if the hash representation is the same NodeID.
+func (v NodeID) Equal(v2 NodeID) bool {
+ return v.Hash == v2.Hash
+}
+
+func (v NodeID) String() string {
+ return hex.EncodeToString(v.Hash[:])[:6]
+}
+
+// NodeIDs implements sort.Interface for NodeID.
+type NodeIDs []NodeID
+
+func (v NodeIDs) Len() int {
+ return len(v)
+}
+
+func (v NodeIDs) Less(i int, j int) bool {
+ return bytes.Compare([]byte(v[i].Hash[:]), []byte(v[j].Hash[:])) == -1
+}
+
+func (v NodeIDs) Swap(i int, j int) {
+ v[i], v[j] = v[j], v[i]
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/nodeset.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/nodeset.go
new file mode 100644
index 000000000..522bcb224
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/nodeset.go
@@ -0,0 +1,162 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "container/heap"
+ "encoding/binary"
+ "math/big"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+)
+
+// NodeSet is the node set structure as defined in DEXON consensus core.
+type NodeSet struct {
+ IDs map[NodeID]struct{}
+}
+
+// SubSetTarget is the subset target for GetSubSet().
+type SubSetTarget struct {
+ data [][]byte
+}
+
+type subSetTargetType byte
+
+const (
+ targetNotarySet subSetTargetType = iota
+ targetNodeLeader
+)
+
+type nodeRank struct {
+ ID NodeID
+ rank *big.Int
+}
+
+// rankHeap is a MaxHeap structure.
+type rankHeap []*nodeRank
+
+func (h rankHeap) Len() int { return len(h) }
+func (h rankHeap) Less(i, j int) bool { return h[i].rank.Cmp(h[j].rank) > 0 }
+func (h rankHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
+func (h *rankHeap) Push(x interface{}) {
+ *h = append(*h, x.(*nodeRank))
+}
+func (h *rankHeap) Pop() interface{} {
+ old := *h
+ n := len(old)
+ x := old[n-1]
+ *h = old[0 : n-1]
+ return x
+}
+
+// NewNodeSet creates a new NodeSet instance.
+func NewNodeSet() *NodeSet {
+ return &NodeSet{
+ IDs: make(map[NodeID]struct{}),
+ }
+}
+
+// NewNodeSetFromMap creates a new NodeSet from NodeID map.
+func NewNodeSetFromMap(nodes map[NodeID]struct{}) *NodeSet {
+ nIDs := make(map[NodeID]struct{}, len(nodes))
+ for nID := range nodes {
+ nIDs[nID] = struct{}{}
+ }
+ return &NodeSet{
+ IDs: nIDs,
+ }
+}
+
+// NewNotarySetTarget is the target for getting Notary Set.
+func NewNotarySetTarget(crs common.Hash) *SubSetTarget {
+ return newTarget(targetNotarySet, crs[:])
+}
+
+// NewNodeLeaderTarget is the target for getting leader of fast BA.
+func NewNodeLeaderTarget(crs common.Hash, height uint64) *SubSetTarget {
+ binaryHeight := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryHeight, height)
+ return newTarget(targetNodeLeader, crs[:], binaryHeight)
+}
+
+// Add a NodeID to the set.
+func (ns *NodeSet) Add(ID NodeID) {
+ ns.IDs[ID] = struct{}{}
+}
+
+// Clone the NodeSet.
+func (ns *NodeSet) Clone() *NodeSet {
+ nsCopy := NewNodeSet()
+ for ID := range ns.IDs {
+ nsCopy.Add(ID)
+ }
+ return nsCopy
+}
+
+// GetSubSet returns the subset of given target.
+func (ns *NodeSet) GetSubSet(
+ size int, target *SubSetTarget) map[NodeID]struct{} {
+ if size == 0 {
+ return make(map[NodeID]struct{})
+ }
+ h := rankHeap{}
+ idx := 0
+ for nID := range ns.IDs {
+ if idx < size {
+ h = append(h, newNodeRank(nID, target))
+ } else if idx == size {
+ heap.Init(&h)
+ }
+ if idx >= size {
+ rank := newNodeRank(nID, target)
+ if rank.rank.Cmp(h[0].rank) < 0 {
+ h[0] = rank
+ heap.Fix(&h, 0)
+ }
+ }
+ idx++
+ }
+
+ nIDs := make(map[NodeID]struct{}, size)
+ for _, rank := range h {
+ nIDs[rank.ID] = struct{}{}
+ }
+
+ return nIDs
+}
+
+func newTarget(targetType subSetTargetType, data ...[]byte) *SubSetTarget {
+ data = append(data, []byte{byte(targetType)})
+ return &SubSetTarget{
+ data: data,
+ }
+}
+
+func newNodeRank(ID NodeID, target *SubSetTarget) *nodeRank {
+ data := make([][]byte, 1, len(target.data)+1)
+ data[0] = make([]byte, len(ID.Hash))
+ copy(data[0], ID.Hash[:])
+ data = append(data, target.data...)
+ h := crypto.Keccak256Hash(data...)
+ num := new(big.Int).SetBytes(h[:])
+ return &nodeRank{
+ ID: ID,
+ rank: num,
+ }
+}
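+
+// Usage sketch (editor's illustration, not part of the original source):
+// GetSubSet keeps the `size` members with the smallest Keccak256 rank over
+// the node ID and the target, using the max-heap above. pubA, pubB, pubC and
+// crs below are hypothetical.
+//
+//	ns := NewNodeSet()
+//	ns.Add(NewNodeID(pubA))
+//	ns.Add(NewNodeID(pubB))
+//	ns.Add(NewNodeID(pubC))
+//	notaries := ns.GetSubSet(2, NewNotarySetTarget(crs))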
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/position.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/position.go
new file mode 100644
index 000000000..81d23c266
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/position.go
@@ -0,0 +1,51 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "fmt"
+)
+
+// Position describes the position in the block lattice of an entity.
+type Position struct {
+ Round uint64 `json:"round"`
+ Height uint64 `json:"height"`
+}
+
+func (pos Position) String() string {
+ return fmt.Sprintf("Position{Round:%d Height:%d}", pos.Round, pos.Height)
+}
+
+// Equal checks if two positions are equal.
+func (pos Position) Equal(other Position) bool {
+ return pos.Round == other.Round && pos.Height == other.Height
+}
+
+// Newer checks if this position is newer than the other one, comparing the
+// round first and then the height.
+func (pos Position) Newer(other Position) bool {
+ return pos.Round > other.Round ||
+ (pos.Round == other.Round && pos.Height > other.Height)
+}
+
+// Older checks if this position is older than the other one, comparing the
+// round first and then the height.
+func (pos Position) Older(other Position) bool {
+ return pos.Round < other.Round ||
+ (pos.Round == other.Round && pos.Height < other.Height)
+}
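+
+// Example (editor's illustration, not part of the original source): ordering
+// compares rounds first and heights second, so
+//
+//	Position{Round: 1, Height: 0}.Newer(Position{Round: 0, Height: 9}) // true
+//	Position{Round: 1, Height: 0}.Older(Position{Round: 1, Height: 2}) // true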
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/types/vote.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/vote.go
new file mode 100644
index 000000000..def09293a
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/types/vote.go
@@ -0,0 +1,100 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "fmt"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ cryptoDKG "github.com/byzantine-lab/dexon-consensus/core/crypto/dkg"
+)
+
+// VoteType is the type of vote.
+type VoteType byte
+
+// VoteType enum.
+const (
+ VoteInit VoteType = iota
+ VotePreCom
+ VoteCom
+ VoteFast
+ VoteFastCom
+ // Do not add any type below MaxVoteType.
+ MaxVoteType
+)
+
+// NullBlockHash is the blockHash for ⊥ value.
+var NullBlockHash common.Hash
+
+// SkipBlockHash is the blockHash for SKIP value.
+var SkipBlockHash common.Hash
+
+func init() {
+ for idx := range SkipBlockHash {
+ SkipBlockHash[idx] = 0xff
+ }
+}
+
+// VoteHeader is the header for vote, which can be used as map keys.
+type VoteHeader struct {
+ ProposerID NodeID `json:"proposer_id"`
+ Type VoteType `json:"type"`
+ BlockHash common.Hash `json:"block_hash"`
+ Period uint64 `json:"period"`
+ Position Position `json:"position"`
+}
+
+// Vote is the vote structure defined in Crypto Shuffle Algorithm.
+type Vote struct {
+ VoteHeader `json:"header"`
+ PartialSignature cryptoDKG.PartialSignature `json:"partial_signature"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (v *Vote) String() string {
+ return fmt.Sprintf("Vote{VP:%s %s Period:%d Type:%d Hash:%s}",
+ v.ProposerID.String()[:6],
+ v.Position, v.Period, v.Type, v.BlockHash.String()[:6])
+}
+
+// NewVote constructs a Vote instance with header fields.
+func NewVote(t VoteType, hash common.Hash, period uint64) *Vote {
+ return &Vote{
+ VoteHeader: VoteHeader{
+ Type: t,
+ BlockHash: hash,
+ Period: period,
+ }}
+}
+
+// Clone returns a deep copy of a vote.
+func (v *Vote) Clone() *Vote {
+ return &Vote{
+ VoteHeader: VoteHeader{
+ ProposerID: v.ProposerID,
+ Type: v.Type,
+ BlockHash: v.BlockHash,
+ Period: v.Period,
+ Position: v.Position,
+ },
+ PartialSignature: cryptoDKG.PartialSignature(
+ crypto.Signature(v.PartialSignature).Clone()),
+ Signature: v.Signature.Clone(),
+ }
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils.go
new file mode 100644
index 000000000..4cb3bf18a
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils.go
@@ -0,0 +1,255 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "time"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ "github.com/byzantine-lab/dexon-consensus/core/utils"
+)
+
+// Errors for utils.
+var (
+ ErrIncorrectVoteBlockHash = fmt.Errorf(
+ "incorrect vote block hash")
+ ErrIncorrectVoteType = fmt.Errorf(
+ "incorrect vote type")
+ ErrIncorrectVotePosition = fmt.Errorf(
+ "incorrect vote position")
+ ErrIncorrectVoteProposer = fmt.Errorf(
+ "incorrect vote proposer")
+ ErrIncorrectVotePeriod = fmt.Errorf(
+ "incorrect vote period")
+)
+
+// NodeSetCache is a type alias kept to avoid a fullnode compile error after
+// moving it to the core/utils package.
+type NodeSetCache = utils.NodeSetCache
+
+// NewNodeSetCache is a function alias kept to avoid a fullnode compile error
+// after moving it to the core/utils package.
+var NewNodeSetCache = utils.NewNodeSetCache
+
+var (
+ debug = false
+ // ErrEmptyTimestamps would be reported if Block.timestamps is empty.
+ ErrEmptyTimestamps = errors.New("timestamp vector should not be empty")
+)
+
+func init() {
+ if os.Getenv("DEBUG") != "" {
+ debug = true
+ }
+}
+
+// Debugf is like fmt.Printf, but only outputs when we are in debug mode.
+func Debugf(format string, args ...interface{}) {
+ if debug {
+ fmt.Printf(format, args...)
+ }
+}
+
+// Debugln is like fmt.Println, but only outputs when we are in debug mode.
+func Debugln(args ...interface{}) {
+ if debug {
+ fmt.Println(args...)
+ }
+}
+
+func interpoTime(t1 time.Time, t2 time.Time, sep int) []time.Time {
+ if sep == 0 {
+ return []time.Time{}
+ }
+ if t1.After(t2) {
+ return interpoTime(t2, t1, sep)
+ }
+ timestamps := make([]time.Time, sep)
+ duration := t2.Sub(t1)
+ period := time.Duration(
+ (duration.Nanoseconds() / int64(sep+1))) * time.Nanosecond
+ prevTime := t1
+ for idx := range timestamps {
+ prevTime = prevTime.Add(period)
+ timestamps[idx] = prevTime
+ }
+ return timestamps
+}
+
+func getMedianTime(timestamps []time.Time) (t time.Time, err error) {
+ if len(timestamps) == 0 {
+ err = ErrEmptyTimestamps
+ return
+ }
+ tscopy := make([]time.Time, 0, len(timestamps))
+ for _, ts := range timestamps {
+ tscopy = append(tscopy, ts)
+ }
+ sort.Sort(common.ByTime(tscopy))
+ if len(tscopy)%2 == 0 {
+ t1 := tscopy[len(tscopy)/2-1]
+ t2 := tscopy[len(tscopy)/2]
+ t = interpoTime(t1, t2, 1)[0]
+ } else {
+ t = tscopy[len(tscopy)/2]
+ }
+ return
+}
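+
+// Note (editor's illustration, not part of the original source): with an even
+// number of timestamps the median is interpolated halfway between the two
+// middle values, e.g. for {t, t+2s, t+4s, t+10s} getMedianTime returns t+3s.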
+
+func removeFromSortedUint32Slice(xs []uint32, x uint32) []uint32 {
+ indexToRemove := sort.Search(len(xs), func(idx int) bool {
+ return xs[idx] >= x
+ })
+ if indexToRemove == len(xs) || xs[indexToRemove] != x {
+ // This value is not found.
+ return xs
+ }
+ return append(xs[:indexToRemove], xs[indexToRemove+1:]...)
+}
+
+// HashConfigurationBlock returns the hash value of a configuration block.
+func HashConfigurationBlock(
+ notarySet map[types.NodeID]struct{},
+ config *types.Config,
+ snapshotHash common.Hash,
+ prevHash common.Hash,
+) common.Hash {
+ notaryIDs := make(types.NodeIDs, 0, len(notarySet))
+ for nID := range notarySet {
+ notaryIDs = append(notaryIDs, nID)
+ }
+ sort.Sort(notaryIDs)
+ notarySetBytes := make([]byte, 0, len(notarySet)*len(common.Hash{}))
+ for _, nID := range notaryIDs {
+ notarySetBytes = append(notarySetBytes, nID.Hash[:]...)
+ }
+ configBytes := config.Bytes()
+
+ return crypto.Keccak256Hash(
+ notarySetBytes[:],
+ configBytes[:],
+ snapshotHash[:],
+ prevHash[:],
+ )
+}
+
+// VerifyAgreementResult performs sanity checks against a types.AgreementResult
+// instance.
+func VerifyAgreementResult(
+ res *types.AgreementResult, cache *NodeSetCache) error {
+ if res.Position.Round >= DKGDelayRound {
+ if len(res.Randomness) == 0 {
+ return ErrMissingRandomness
+ }
+ return nil
+ }
+ notarySet, err := cache.GetNotarySet(res.Position.Round)
+ if err != nil {
+ return err
+ }
+ if len(res.Votes) < len(notarySet)*2/3+1 {
+ return ErrNotEnoughVotes
+ }
+ voted := make(map[types.NodeID]struct{}, len(notarySet))
+ voteType := res.Votes[0].Type
+ votePeriod := res.Votes[0].Period
+ if voteType != types.VoteFastCom && voteType != types.VoteCom {
+ return ErrIncorrectVoteType
+ }
+ for _, vote := range res.Votes {
+ if vote.Period != votePeriod {
+ return ErrIncorrectVotePeriod
+ }
+ if res.IsEmptyBlock {
+ if (vote.BlockHash != common.Hash{}) {
+ return ErrIncorrectVoteBlockHash
+ }
+ } else {
+ if vote.BlockHash != res.BlockHash {
+ return ErrIncorrectVoteBlockHash
+ }
+ }
+ if vote.Type != voteType {
+ return ErrIncorrectVoteType
+ }
+ if vote.Position != res.Position {
+ return ErrIncorrectVotePosition
+ }
+ if _, exist := notarySet[vote.ProposerID]; !exist {
+ return ErrIncorrectVoteProposer
+ }
+ ok, err := utils.VerifyVoteSignature(&vote)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return ErrIncorrectVoteSignature
+ }
+ voted[vote.ProposerID] = struct{}{}
+ }
+ if len(voted) < len(notarySet)*2/3+1 {
+ return ErrNotEnoughVotes
+ }
+ return nil
+}
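+
+// Note (editor's illustration, not part of the original source): before
+// DKGDelayRound the quorum above is len(notarySet)*2/3+1 distinct, matching
+// votes, e.g. a notary set of 7 nodes needs at least 5 valid votes; from
+// DKGDelayRound on, only a non-empty Randomness field is required.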
+
+// DiffUint64 calculates difference between two uint64.
+func DiffUint64(a, b uint64) uint64 {
+ if a > b {
+ return a - b
+ }
+ return b - a
+}
+
+func isCI() bool {
+ return os.Getenv("CI") != ""
+}
+
+func isCircleCI() bool {
+ return isCI() && os.Getenv("CIRCLECI") == "true"
+}
+
+func isTravisCI() bool {
+ return isCI() && os.Getenv("TRAVIS") == "true"
+}
+
+// checkWithCancel is a helper to perform periodic checking with cancel.
+func checkWithCancel(parentCtx context.Context, interval time.Duration,
+ checker func() bool) (ret bool) {
+ ctx, cancel := context.WithCancel(parentCtx)
+ defer cancel()
+Loop:
+ for {
+ if ret = checker(); ret {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ break Loop
+ case <-time.After(interval):
+ }
+ }
+ return
+}
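+
+// Usage sketch (editor's illustration, not part of the original source):
+// polling a condition every 100ms until it holds or the parent context is
+// cancelled. conditionHolds is a hypothetical predicate.
+//
+//	ok := checkWithCancel(ctx, 100*time.Millisecond, func() bool {
+//		return conditionHolds()
+//	})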
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/crypto.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/crypto.go
new file mode 100644
index 000000000..161c1d495
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/crypto.go
@@ -0,0 +1,376 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+func hashWitness(witness *types.Witness) (common.Hash, error) {
+ binaryHeight := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryHeight, witness.Height)
+ return crypto.Keccak256Hash(
+ binaryHeight,
+ witness.Data), nil
+}
+
+// HashBlock generates hash of a types.Block.
+func HashBlock(block *types.Block) (common.Hash, error) {
+ hashPosition := HashPosition(block.Position)
+ binaryTimestamp, err := block.Timestamp.UTC().MarshalBinary()
+ if err != nil {
+ return common.Hash{}, err
+ }
+ binaryWitness, err := hashWitness(&block.Witness)
+ if err != nil {
+ return common.Hash{}, err
+ }
+
+ hash := crypto.Keccak256Hash(
+ block.ProposerID.Hash[:],
+ block.ParentHash[:],
+ hashPosition[:],
+ binaryTimestamp[:],
+ block.PayloadHash[:],
+ binaryWitness[:])
+ return hash, nil
+}
+
+// VerifyBlockSignature verifies the signature of types.Block.
+func VerifyBlockSignature(b *types.Block) (err error) {
+ payloadHash := crypto.Keccak256Hash(b.Payload)
+ if payloadHash != b.PayloadHash {
+ err = ErrIncorrectHash
+ return
+ }
+ return VerifyBlockSignatureWithoutPayload(b)
+}
+
+// VerifyBlockSignatureWithoutPayload verifies the signature of types.Block but
+// does not check if PayloadHash is correct.
+func VerifyBlockSignatureWithoutPayload(b *types.Block) (err error) {
+ hash, err := HashBlock(b)
+ if err != nil {
+ return
+ }
+ if hash != b.Hash {
+ err = ErrIncorrectHash
+ return
+ }
+ pubKey, err := crypto.SigToPub(b.Hash, b.Signature)
+ if err != nil {
+ return
+ }
+ if !b.ProposerID.Equal(types.NewNodeID(pubKey)) {
+ err = ErrIncorrectSignature
+ return
+ }
+ return
+
+}
+
+// HashVote generates hash of a types.Vote.
+func HashVote(vote *types.Vote) common.Hash {
+ binaryPeriod := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryPeriod, vote.Period)
+
+ hashPosition := HashPosition(vote.Position)
+
+ hash := crypto.Keccak256Hash(
+ vote.ProposerID.Hash[:],
+ vote.BlockHash[:],
+ binaryPeriod,
+ hashPosition[:],
+ vote.PartialSignature.Signature[:],
+ []byte{byte(vote.Type)},
+ )
+ return hash
+}
+
+// VerifyVoteSignature verifies the signature of types.Vote.
+func VerifyVoteSignature(vote *types.Vote) (bool, error) {
+ hash := HashVote(vote)
+ pubKey, err := crypto.SigToPub(hash, vote.Signature)
+ if err != nil {
+ return false, err
+ }
+ if vote.ProposerID != types.NewNodeID(pubKey) {
+ return false, nil
+ }
+ return true, nil
+}
+
+func hashCRS(block *types.Block, crs common.Hash) common.Hash {
+ hashPos := HashPosition(block.Position)
+ if block.Position.Round < dkgDelayRound {
+ return crypto.Keccak256Hash(crs[:], hashPos[:], block.ProposerID.Hash[:])
+ }
+ return crypto.Keccak256Hash(crs[:], hashPos[:])
+}
+
+// VerifyCRSSignature verifies the CRS signature of types.Block.
+func VerifyCRSSignature(
+ block *types.Block, crs common.Hash, npks *typesDKG.NodePublicKeys) bool {
+ hash := hashCRS(block, crs)
+ if block.Position.Round < dkgDelayRound {
+ return bytes.Compare(block.CRSSignature.Signature[:], hash[:]) == 0
+ }
+ if npks == nil {
+ return false
+ }
+ pubKey, exist := npks.PublicKeys[block.ProposerID]
+ if !exist {
+ return false
+ }
+ return pubKey.VerifySignature(hash, block.CRSSignature)
+}
+
+// HashPosition generates hash of a types.Position.
+func HashPosition(position types.Position) common.Hash {
+ binaryRound := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryRound, position.Round)
+
+ binaryHeight := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryHeight, position.Height)
+
+ return crypto.Keccak256Hash(
+ binaryRound,
+ binaryHeight,
+ )
+}
+
+func hashDKGPrivateShare(prvShare *typesDKG.PrivateShare) common.Hash {
+ binaryRound := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryRound, prvShare.Round)
+ binaryReset := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryReset, prvShare.Reset)
+
+ return crypto.Keccak256Hash(
+ prvShare.ProposerID.Hash[:],
+ prvShare.ReceiverID.Hash[:],
+ binaryRound,
+ binaryReset,
+ prvShare.PrivateShare.Bytes(),
+ )
+}
+
+// VerifyDKGPrivateShareSignature verifies the signature of
+// typesDKG.PrivateShare.
+func VerifyDKGPrivateShareSignature(
+ prvShare *typesDKG.PrivateShare) (bool, error) {
+ hash := hashDKGPrivateShare(prvShare)
+ pubKey, err := crypto.SigToPub(hash, prvShare.Signature)
+ if err != nil {
+ return false, err
+ }
+ if prvShare.ProposerID != types.NewNodeID(pubKey) {
+ return false, nil
+ }
+ return true, nil
+}
+
+func hashDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey) common.Hash {
+ binaryRound := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryRound, mpk.Round)
+ binaryReset := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryReset, mpk.Reset)
+
+ return crypto.Keccak256Hash(
+ mpk.ProposerID.Hash[:],
+ mpk.DKGID.GetLittleEndian(),
+ mpk.PublicKeyShares.MasterKeyBytes(),
+ binaryRound,
+ binaryReset,
+ )
+}
+
+// VerifyDKGMasterPublicKeySignature verifies DKGMasterPublicKey signature.
+func VerifyDKGMasterPublicKeySignature(
+ mpk *typesDKG.MasterPublicKey) (bool, error) {
+ hash := hashDKGMasterPublicKey(mpk)
+ pubKey, err := crypto.SigToPub(hash, mpk.Signature)
+ if err != nil {
+ return false, err
+ }
+ if mpk.ProposerID != types.NewNodeID(pubKey) {
+ return false, nil
+ }
+ return true, nil
+}
+
+func hashDKGComplaint(complaint *typesDKG.Complaint) common.Hash {
+ binaryRound := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryRound, complaint.Round)
+ binaryReset := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryReset, complaint.Reset)
+
+ hashPrvShare := hashDKGPrivateShare(&complaint.PrivateShare)
+
+ return crypto.Keccak256Hash(
+ complaint.ProposerID.Hash[:],
+ binaryRound,
+ binaryReset,
+ hashPrvShare[:],
+ )
+}
+
+// VerifyDKGComplaintSignature verifies DKGComplaint signature.
+func VerifyDKGComplaintSignature(
+ complaint *typesDKG.Complaint) (bool, error) {
+ if complaint.Round != complaint.PrivateShare.Round {
+ return false, nil
+ }
+ if complaint.Reset != complaint.PrivateShare.Reset {
+ return false, nil
+ }
+ hash := hashDKGComplaint(complaint)
+ pubKey, err := crypto.SigToPub(hash, complaint.Signature)
+ if err != nil {
+ return false, err
+ }
+ if complaint.ProposerID != types.NewNodeID(pubKey) {
+ return false, nil
+ }
+ if !complaint.IsNack() {
+ return VerifyDKGPrivateShareSignature(&complaint.PrivateShare)
+ }
+ return true, nil
+}
+
+func hashDKGPartialSignature(psig *typesDKG.PartialSignature) common.Hash {
+ binaryRound := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryRound, psig.Round)
+
+ return crypto.Keccak256Hash(
+ psig.ProposerID.Hash[:],
+ binaryRound,
+ psig.Hash[:],
+ psig.PartialSignature.Signature[:],
+ )
+}
+
+// VerifyDKGPartialSignatureSignature verifies the signature of
+// typesDKG.PartialSignature.
+func VerifyDKGPartialSignatureSignature(
+ psig *typesDKG.PartialSignature) (bool, error) {
+ hash := hashDKGPartialSignature(psig)
+ pubKey, err := crypto.SigToPub(hash, psig.Signature)
+ if err != nil {
+ return false, err
+ }
+ if psig.ProposerID != types.NewNodeID(pubKey) {
+ return false, nil
+ }
+ return true, nil
+}
+
+func hashDKGMPKReady(ready *typesDKG.MPKReady) common.Hash {
+ binaryRound := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryRound, ready.Round)
+ binaryReset := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryReset, ready.Reset)
+
+ return crypto.Keccak256Hash(
+ ready.ProposerID.Hash[:],
+ binaryRound,
+ binaryReset,
+ )
+}
+
+// VerifyDKGMPKReadySignature verifies DKGMPKReady signature.
+func VerifyDKGMPKReadySignature(
+ ready *typesDKG.MPKReady) (bool, error) {
+ hash := hashDKGMPKReady(ready)
+ pubKey, err := crypto.SigToPub(hash, ready.Signature)
+ if err != nil {
+ return false, err
+ }
+ if ready.ProposerID != types.NewNodeID(pubKey) {
+ return false, nil
+ }
+ return true, nil
+}
+
+func hashDKGFinalize(final *typesDKG.Finalize) common.Hash {
+ binaryRound := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryRound, final.Round)
+ binaryReset := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryReset, final.Reset)
+
+ return crypto.Keccak256Hash(
+ final.ProposerID.Hash[:],
+ binaryRound,
+ binaryReset,
+ )
+}
+
+func hashDKGSuccess(success *typesDKG.Success) common.Hash {
+ binaryRound := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryRound, success.Round)
+ binaryReset := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryReset, success.Reset)
+
+ return crypto.Keccak256Hash(
+ success.ProposerID.Hash[:],
+ binaryRound,
+ binaryReset,
+ )
+}
+
+// VerifyDKGFinalizeSignature verifies DKGFinalize signature.
+func VerifyDKGFinalizeSignature(
+ final *typesDKG.Finalize) (bool, error) {
+ hash := hashDKGFinalize(final)
+ pubKey, err := crypto.SigToPub(hash, final.Signature)
+ if err != nil {
+ return false, err
+ }
+ if final.ProposerID != types.NewNodeID(pubKey) {
+ return false, nil
+ }
+ return true, nil
+}
+
+// VerifyDKGSuccessSignature verifies DKGSuccess signature.
+func VerifyDKGSuccessSignature(
+ success *typesDKG.Success) (bool, error) {
+ hash := hashDKGSuccess(success)
+ pubKey, err := crypto.SigToPub(hash, success.Signature)
+ if err != nil {
+ return false, err
+ }
+ if success.ProposerID != types.NewNodeID(pubKey) {
+ return false, nil
+ }
+ return true, nil
+}
+
+// Rehash applies Keccak256 to the hash repeatedly, count times.
+func Rehash(hash common.Hash, count uint) common.Hash {
+ result := hash
+ for i := uint(0); i < count; i++ {
+ result = crypto.Keccak256Hash(result[:])
+ }
+ return result
+}
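+
+// Usage sketch (editor's illustration, not part of the original source):
+// deriving a chain of repeated hashes from a seed. The seed value below is
+// hypothetical.
+//
+//	h0 := crypto.Keccak256Hash([]byte("seed"))
+//	h3 := Rehash(h0, 3) // Keccak256 applied three times to h0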
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/nodeset-cache.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/nodeset-cache.go
new file mode 100644
index 000000000..028690e18
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/nodeset-cache.go
@@ -0,0 +1,245 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+var (
+ // ErrNodeSetNotReady means we got a nil node set.
+ ErrNodeSetNotReady = errors.New("node set is not ready")
+ // ErrCRSNotReady means we got an empty CRS.
+ ErrCRSNotReady = errors.New("crs is not ready")
+ // ErrConfigurationNotReady means we got a nil configuration.
+ ErrConfigurationNotReady = errors.New("configuration is not ready")
+)
+
+type sets struct {
+ crs common.Hash
+ nodeSet *types.NodeSet
+ notarySet map[types.NodeID]struct{}
+}
+
+// NodeSetCacheInterface specifies the interface used by NodeSetCache.
+type NodeSetCacheInterface interface {
+ // Configuration returns the configuration at a given round.
+ // Return the genesis configuration if round == 0.
+ Configuration(round uint64) *types.Config
+
+ // CRS returns the CRS for a given round.
+ // Return the genesis CRS if round == 0.
+ CRS(round uint64) common.Hash
+
+ // NodeSet returns the node set at a given round.
+ // Return the genesis node set if round == 0.
+ NodeSet(round uint64) []crypto.PublicKey
+}
+
+// NodeSetCache caches node set information.
+//
+// NOTE: this module doesn't handle DKG resetting and can only be used along
+// with utils.RoundEvent.
+type NodeSetCache struct {
+ lock sync.RWMutex
+ nsIntf NodeSetCacheInterface
+ rounds map[uint64]*sets
+ keyPool map[types.NodeID]*struct {
+ pubKey crypto.PublicKey
+ refCnt int
+ }
+}
+
+// NewNodeSetCache constructs an NodeSetCache instance.
+func NewNodeSetCache(nsIntf NodeSetCacheInterface) *NodeSetCache {
+ return &NodeSetCache{
+ nsIntf: nsIntf,
+ rounds: make(map[uint64]*sets),
+ keyPool: make(map[types.NodeID]*struct {
+ pubKey crypto.PublicKey
+ refCnt int
+ }),
+ }
+}
+
+// Exists checks if a node is in the node set of that round.
+func (cache *NodeSetCache) Exists(
+ round uint64, nodeID types.NodeID) (exists bool, err error) {
+
+ nIDs, exists := cache.get(round)
+ if !exists {
+ if nIDs, err = cache.update(round); err != nil {
+ return
+ }
+ }
+ _, exists = nIDs.nodeSet.IDs[nodeID]
+ return
+}
+
+// GetPublicKey returns the public key of the given node.
+func (cache *NodeSetCache) GetPublicKey(
+ nodeID types.NodeID) (key crypto.PublicKey, exists bool) {
+
+ cache.lock.RLock()
+ defer cache.lock.RUnlock()
+
+ rec, exists := cache.keyPool[nodeID]
+ if exists {
+ key = rec.pubKey
+ }
+ return
+}
+
+// GetNodeSet returns a copy of the node set of the given round.
+func (cache *NodeSetCache) GetNodeSet(round uint64) (*types.NodeSet, error) {
+ IDs, exists := cache.get(round)
+ if !exists {
+ var err error
+ if IDs, err = cache.update(round); err != nil {
+ return nil, err
+ }
+ }
+ return IDs.nodeSet.Clone(), nil
+}
+
+// GetNotarySet returns the notary set of the given round.
+func (cache *NodeSetCache) GetNotarySet(
+ round uint64) (map[types.NodeID]struct{}, error) {
+ IDs, err := cache.getOrUpdate(round)
+ if err != nil {
+ return nil, err
+ }
+ return cache.cloneMap(IDs.notarySet), nil
+}
+
+// Purge a specific round.
+func (cache *NodeSetCache) Purge(rID uint64) {
+ cache.lock.Lock()
+ defer cache.lock.Unlock()
+ nIDs, exist := cache.rounds[rID]
+ if !exist {
+ return
+ }
+ for nID := range nIDs.nodeSet.IDs {
+ rec := cache.keyPool[nID]
+ if rec.refCnt--; rec.refCnt == 0 {
+ delete(cache.keyPool, nID)
+ }
+ }
+ delete(cache.rounds, rID)
+}
+
+// Touch updates the internal cache for the given round.
+func (cache *NodeSetCache) Touch(round uint64) (err error) {
+ _, err = cache.update(round)
+ return
+}
+
+func (cache *NodeSetCache) cloneMap(
+ nIDs map[types.NodeID]struct{}) map[types.NodeID]struct{} {
+ nIDsCopy := make(map[types.NodeID]struct{}, len(nIDs))
+ for k := range nIDs {
+ nIDsCopy[k] = struct{}{}
+ }
+ return nIDsCopy
+}
+
+func (cache *NodeSetCache) getOrUpdate(round uint64) (nIDs *sets, err error) {
+ s, exists := cache.get(round)
+ if !exists {
+ if s, err = cache.update(round); err != nil {
+ return
+ }
+ }
+ nIDs = s
+ return
+}
+
+// update the cached node set for that round.
+//
+// The cache keeps the updated round and the 5 rounds before it, and purges
+// any older rounds.
+func (cache *NodeSetCache) update(round uint64) (nIDs *sets, err error) {
+ cache.lock.Lock()
+ defer cache.lock.Unlock()
+ // Get information for the requested round.
+ keySet := cache.nsIntf.NodeSet(round)
+ if keySet == nil {
+ err = ErrNodeSetNotReady
+ return
+ }
+ crs := cache.nsIntf.CRS(round)
+ if (crs == common.Hash{}) {
+ err = ErrCRSNotReady
+ return
+ }
+ // Cache new round.
+ nodeSet := types.NewNodeSet()
+ for _, key := range keySet {
+ nID := types.NewNodeID(key)
+ nodeSet.Add(nID)
+ if rec, exists := cache.keyPool[nID]; exists {
+ rec.refCnt++
+ } else {
+ cache.keyPool[nID] = &struct {
+ pubKey crypto.PublicKey
+ refCnt int
+ }{key, 1}
+ }
+ }
+ cfg := cache.nsIntf.Configuration(round)
+ if cfg == nil {
+ err = ErrConfigurationNotReady
+ return
+ }
+ nIDs = &sets{
+ crs: crs,
+ nodeSet: nodeSet,
+ notarySet: make(map[types.NodeID]struct{}),
+ }
+ nIDs.notarySet = nodeSet.GetSubSet(
+ int(cfg.NotarySetSize), types.NewNotarySetTarget(crs))
+ cache.rounds[round] = nIDs
+ // Purge older rounds.
+ for rID, nIDs := range cache.rounds {
+ nodeSet := nIDs.nodeSet
+ if round-rID <= 5 {
+ continue
+ }
+ for nID := range nodeSet.IDs {
+ rec := cache.keyPool[nID]
+ if rec.refCnt--; rec.refCnt == 0 {
+ delete(cache.keyPool, nID)
+ }
+ }
+ delete(cache.rounds, rID)
+ }
+ return
+}
+
+func (cache *NodeSetCache) get(round uint64) (nIDs *sets, exists bool) {
+ cache.lock.RLock()
+ defer cache.lock.RUnlock()
+ nIDs, exists = cache.rounds[round]
+ return
+}
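+
+// Usage sketch (editor's illustration, not part of the original source):
+// gov below is a hypothetical value implementing NodeSetCacheInterface.
+//
+//	cache := NewNodeSetCache(gov)
+//	notaries, err := cache.GetNotarySet(round)
+//	// err is non-nil while the node set, CRS or config is not ready yet.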
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/penalty-helper.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/penalty-helper.go
new file mode 100644
index 000000000..658fe79a9
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/penalty-helper.go
@@ -0,0 +1,131 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "errors"
+
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+var (
+ // ErrInvalidDKGMasterPublicKey means the DKG MasterPublicKey is invalid.
+ ErrInvalidDKGMasterPublicKey = errors.New("invalid DKG master public key")
+ // ErrPayloadNotEmpty means the payload of block is not empty.
+ ErrPayloadNotEmpty = errors.New("payload not empty")
+)
+
+// NeedPenaltyDKGPrivateShare checks if the proposer of a DKG private share
+// should be penalized.
+func NeedPenaltyDKGPrivateShare(
+ complaint *typesDKG.Complaint, mpk *typesDKG.MasterPublicKey) (bool, error) {
+ if complaint.IsNack() {
+ return false, nil
+ }
+ if mpk.ProposerID != complaint.PrivateShare.ProposerID {
+ return false, nil
+ }
+ ok, err := VerifyDKGMasterPublicKeySignature(mpk)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, ErrInvalidDKGMasterPublicKey
+ }
+ ok, err = VerifyDKGComplaintSignature(complaint)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+ ok, err = mpk.PublicKeyShares.VerifyPrvShare(
+ typesDKG.NewID(complaint.PrivateShare.ReceiverID),
+ &complaint.PrivateShare.PrivateShare)
+ if err != nil {
+ return false, err
+ }
+ return !ok, nil
+}
+
+// NeedPenaltyForkVote checks if two votes constitute a fork vote.
+func NeedPenaltyForkVote(vote1, vote2 *types.Vote) (bool, error) {
+ if vote1.ProposerID != vote2.ProposerID ||
+ vote1.Type != vote2.Type ||
+ vote1.Period != vote2.Period ||
+ vote1.Position != vote2.Position ||
+ vote1.BlockHash == vote2.BlockHash {
+ return false, nil
+ }
+ ok, err := VerifyVoteSignature(vote1)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+ ok, err = VerifyVoteSignature(vote2)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+ return true, nil
+}
+
+// NeedPenaltyForkBlock checks if two blocks constitute a fork block.
+func NeedPenaltyForkBlock(block1, block2 *types.Block) (bool, error) {
+ if block1.ProposerID != block2.ProposerID ||
+ block1.Position != block2.Position ||
+ block1.Hash == block2.Hash {
+ return false, nil
+ }
+ if len(block1.Payload) != 0 || len(block2.Payload) != 0 {
+ return false, ErrPayloadNotEmpty
+ }
+ verifyBlock := func(block *types.Block) (bool, error) {
+ err := VerifyBlockSignatureWithoutPayload(block)
+ switch err {
+ case nil:
+ return true, nil
+ case ErrIncorrectSignature:
+ return false, nil
+ case ErrIncorrectHash:
+ return false, nil
+ default:
+ return false, err
+ }
+ }
+ ok, err := verifyBlock(block1)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+ ok, err = verifyBlock(block2)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+ return true, nil
+}
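+
+// Usage sketch (editor's illustration, not part of the original source):
+// two signed votes from the same proposer with identical type, period and
+// position but different block hashes form a punishable fork vote. vote1 and
+// vote2 below are hypothetical *types.Vote values.
+//
+//	guilty, err := NeedPenaltyForkVote(vote1, vote2)
+//	// guilty is true only if both signatures also verify.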
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-based-config.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-based-config.go
new file mode 100644
index 000000000..88842cacf
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-based-config.go
@@ -0,0 +1,112 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "fmt"
+
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+// RoundBasedConfig is a base config for rounds and provides boundary checking
+// for round heights.
+type RoundBasedConfig struct {
+ roundID uint64
+ roundBeginHeight uint64
+ roundEndHeight uint64
+ roundLength uint64
+}
+
+// SetupRoundBasedFields sets up the round-based fields, including the round ID
+// and the round length.
+func (c *RoundBasedConfig) SetupRoundBasedFields(
+ roundID uint64, cfg *types.Config) {
+ if c.roundLength > 0 {
+ panic(fmt.Errorf("duplicated set round based fields: %d",
+ c.roundLength))
+ }
+ c.roundID = roundID
+ c.roundLength = cfg.RoundLength
+}
+
+// SetRoundBeginHeight sets the beginning height of the initial round provided
+// at construction time.
+func (c *RoundBasedConfig) SetRoundBeginHeight(begin uint64) {
+ if c.roundBeginHeight != 0 {
+ panic(fmt.Errorf("duplicated set round begin height: %d",
+ c.roundBeginHeight))
+ }
+ c.roundBeginHeight = begin
+ c.roundEndHeight = begin + c.roundLength
+}
+
+// IsLastBlock checks if a block is the last block of this round.
+func (c *RoundBasedConfig) IsLastBlock(b *types.Block) bool {
+ if b.Position.Round != c.roundID {
+ panic(fmt.Errorf("attempt to compare by different round: %s, %d",
+ b, c.roundID))
+ }
+ return b.Position.Height+1 == c.roundEndHeight
+}
+
+// ExtendLength extends the round ending height by the current round length.
+func (c *RoundBasedConfig) ExtendLength() {
+ c.roundEndHeight += c.roundLength
+}
+
+// Contains checks if a block height is in this round.
+func (c *RoundBasedConfig) Contains(h uint64) bool {
+ return c.roundBeginHeight <= h && c.roundEndHeight > h
+}
+
+// RoundID returns the round ID of this config.
+func (c *RoundBasedConfig) RoundID() uint64 {
+ if c.roundLength == 0 {
+ panic(fmt.Errorf("config is not initialized: %d", c.roundID))
+ }
+ return c.roundID
+}
+
+// RoundEndHeight returns the next checkpoint to verify if this round has ended.
+func (c *RoundBasedConfig) RoundEndHeight() uint64 {
+ if c.roundLength == 0 {
+ panic(fmt.Errorf("config is not initialized: %d", c.roundID))
+ }
+ return c.roundEndHeight
+}
+
+// AppendTo chains this config right after a config from the previous round.
+func (c *RoundBasedConfig) AppendTo(other RoundBasedConfig) {
+ if c.roundID != other.roundID+1 {
+ panic(fmt.Errorf("round IDs of configs not continuous: %d %d",
+ c.roundID, other.roundID))
+ }
+ c.SetRoundBeginHeight(other.roundEndHeight)
+}
+
+// LastPeriodBeginHeight returns the begin height of the last period. For
+// example, if a round is extended twice, the return from this method is:
+//
+//    begin + 3 * roundLength - roundLength
+//
+func (c *RoundBasedConfig) LastPeriodBeginHeight() uint64 {
+ if c.roundLength == 0 {
+ panic(fmt.Errorf("config is not initialized: %d", c.roundID))
+ }
+ return c.roundEndHeight - c.roundLength
+}
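To make the boundary arithmetic above concrete, here is a small sketch (values and the types.Config literal are illustrative; imports of "fmt" and the core/types and core/utils packages are assumed):

	cfg := &types.Config{RoundLength: 100}
	rbc := utils.RoundBasedConfig{}
	rbc.SetupRoundBasedFields(3, cfg) // round 3, length 100
	rbc.SetRoundBeginHeight(1000)     // the round initially spans [1000, 1100)

	fmt.Println(rbc.Contains(1099), rbc.Contains(1100)) // true false
	fmt.Println(rbc.RoundEndHeight())                   // 1100

	// Two DKG resets extend the same round by two more periods.
	rbc.ExtendLength()
	rbc.ExtendLength()
	fmt.Println(rbc.RoundEndHeight())        // 1300
	fmt.Println(rbc.LastPeriodBeginHeight()) // 1200 = end height - roundLength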
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-event.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-event.go
new file mode 100644
index 000000000..4f4b04542
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/round-event.go
@@ -0,0 +1,358 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+// ErrUnmatchedBlockHeightWithConfig reports invalid parameters passed to NewRoundEvent.
+type ErrUnmatchedBlockHeightWithConfig struct {
+ round uint64
+ reset uint64
+ blockHeight uint64
+}
+
+func (e ErrUnmatchedBlockHeightWithConfig) Error() string {
+ return fmt.Sprintf("unsynced block height and cfg: round:%d reset:%d h:%d",
+ e.round, e.reset, e.blockHeight)
+}
+
+// RoundEventParam defines the parameters passed to event handlers of
+// RoundEvent.
+type RoundEventParam struct {
+ // 'Round' of the next checkpoint; it might be identical to the previous one.
+ Round uint64
+ // The count of DKG resets for 'Round+1'.
+ Reset uint64
+ // The begin block height of this event; the end block height would be
+ // BeginHeight + Config.RoundLength.
+ BeginHeight uint64
+ // The configuration for 'Round'.
+ Config *types.Config
+ // The CRS for 'Round'.
+ CRS common.Hash
+}
+
+// NextRoundValidationHeight returns the height to check if the next round is
+// ready.
+func (e RoundEventParam) NextRoundValidationHeight() uint64 {
+ return e.BeginHeight + e.Config.RoundLength*9/10
+}
+
+// NextCRSProposingHeight returns the height to propose CRS for next round.
+func (e RoundEventParam) NextCRSProposingHeight() uint64 {
+ return e.BeginHeight + e.Config.RoundLength/2
+}
+
+// NextDKGPreparationHeight returns the height to prepare DKG set for next
+// round.
+func (e RoundEventParam) NextDKGPreparationHeight() uint64 {
+ return e.BeginHeight + e.Config.RoundLength*2/3
+}
+
+// NextRoundHeight returns the height of the beginning of next round.
+func (e RoundEventParam) NextRoundHeight() uint64 {
+ return e.BeginHeight + e.Config.RoundLength
+}
+
+// NextTouchNodeSetCacheHeight returns the height to touch the node set cache.
+func (e RoundEventParam) NextTouchNodeSetCacheHeight() uint64 {
+ return e.BeginHeight + e.Config.RoundLength/2
+}
+
+// NextDKGResetHeight returns the height to reset DKG for next period.
+func (e RoundEventParam) NextDKGResetHeight() uint64 {
+ return e.BeginHeight + e.Config.RoundLength*85/100
+}
+
+// NextDKGRegisterHeight returns the height to register DKG.
+func (e RoundEventParam) NextDKGRegisterHeight() uint64 {
+ return e.BeginHeight + e.Config.RoundLength/2
+}
+
+// RoundEndHeight returns the round ending height of this round event.
+func (e RoundEventParam) RoundEndHeight() uint64 {
+ return e.BeginHeight + e.Config.RoundLength
+}
+
+func (e RoundEventParam) String() string {
+ return fmt.Sprintf("roundEvtParam{Round:%d Reset:%d Height:%d}",
+ e.Round,
+ e.Reset,
+ e.BeginHeight)
+}
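For a concrete feel of the checkpoint spacing encoded above, the following sketch prints the schedule for an assumed RoundLength of 1000 and BeginHeight of 2000 (purely illustrative values; "fmt" and the types/utils imports are assumed):

	e := utils.RoundEventParam{
		Round:       5,
		BeginHeight: 2000,
		Config:      &types.Config{RoundLength: 1000},
	}
	fmt.Println(e.NextTouchNodeSetCacheHeight()) // 2500 (BeginHeight + RoundLength/2)
	fmt.Println(e.NextCRSProposingHeight())      // 2500
	fmt.Println(e.NextDKGRegisterHeight())       // 2500
	fmt.Println(e.NextDKGPreparationHeight())    // 2666 (BeginHeight + RoundLength*2/3)
	fmt.Println(e.NextDKGResetHeight())          // 2850 (BeginHeight + RoundLength*85/100)
	fmt.Println(e.NextRoundValidationHeight())   // 2900 (BeginHeight + RoundLength*9/10)
	fmt.Println(e.NextRoundHeight())             // 3000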
+
+// roundEventFn defines the signature of round event handlers.
+type roundEventFn func([]RoundEventParam)
+
+// governanceAccessor is a subset of core.Governance to break the dependency
+// between the core and utils packages.
+type governanceAccessor interface {
+ // Configuration returns the configuration at a given round.
+ // Return the genesis configuration if round == 0.
+ Configuration(round uint64) *types.Config
+
+ // CRS returns the CRS for a given round.
+ // Return the genesis CRS if round == 0.
+ CRS(round uint64) common.Hash
+
+ // DKGComplaints gets all the DKGComplaints of round.
+ DKGComplaints(round uint64) []*typesDKG.Complaint
+
+ // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round.
+ DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey
+
+ // IsDKGFinal checks if DKG is final.
+ IsDKGFinal(round uint64) bool
+
+ // IsDKGSuccess checks if DKG succeeded.
+ IsDKGSuccess(round uint64) bool
+
+ // DKGResetCount returns the reset count for DKG of given round.
+ DKGResetCount(round uint64) uint64
+
+ // GetRoundHeight returns the begin height of a round.
+ GetRoundHeight(round uint64) uint64
+}
+
+// RoundEventRetryHandlerGenerator generates a handler for common.Event that
+// re-registers itself to retry the next-round validation whenever no round
+// event is triggered.
+func RoundEventRetryHandlerGenerator(
+ rEvt *RoundEvent, hEvt *common.Event) func(uint64) {
+ var hEvtHandler func(uint64)
+ hEvtHandler = func(h uint64) {
+ if rEvt.ValidateNextRound(h) == 0 {
+ // Retry until at least one round event is triggered.
+ hEvt.RegisterHeight(h+1, hEvtHandler)
+ }
+ }
+ return hEvtHandler
+}
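A hedged wiring sketch for the generator above; rEvt (*RoundEvent), hEvt (*common.Event), and startHeight are assumed to come from the surrounding setup:

	retry := utils.RoundEventRetryHandlerGenerator(rEvt, hEvt)
	// If ValidateNextRound(startHeight) triggers nothing, the handler registers
	// itself again at startHeight+1 and keeps retrying until an event fires.
	hEvt.RegisterHeight(startHeight, retry)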
+
+// RoundEvent is triggered when either:
+// - the next DKG set setup is ready, or
+// - the next DKG set setup failed and the previous DKG set has already reset
+// the CRS.
+type RoundEvent struct {
+ gov governanceAccessor
+ logger common.Logger
+ lock sync.Mutex
+ handlers []roundEventFn
+ config RoundBasedConfig
+ lastTriggeredRound uint64
+ lastTriggeredResetCount uint64
+ roundShift uint64
+ gpkInvalid bool
+ ctx context.Context
+ ctxCancel context.CancelFunc
+}
+
+// NewRoundEvent creates a RoundEvent instance.
+func NewRoundEvent(parentCtx context.Context, gov governanceAccessor,
+ logger common.Logger, initPos types.Position, roundShift uint64) (
+ *RoundEvent, error) {
+ // We need to generate a valid ending block height for this round (taking
+ // the DKG reset count into consideration).
+ logger.Info("new RoundEvent", "position", initPos, "shift", roundShift)
+ initConfig := GetConfigWithPanic(gov, initPos.Round, logger)
+ e := &RoundEvent{
+ gov: gov,
+ logger: logger,
+ lastTriggeredRound: initPos.Round,
+ roundShift: roundShift,
+ }
+ e.ctx, e.ctxCancel = context.WithCancel(parentCtx)
+ e.config = RoundBasedConfig{}
+ e.config.SetupRoundBasedFields(initPos.Round, initConfig)
+ e.config.SetRoundBeginHeight(GetRoundHeight(gov, initPos.Round))
+ // Make sure the DKG reset count in current governance can cover the initial
+ // block height.
+ if initPos.Height >= types.GenesisHeight {
+ resetCount := gov.DKGResetCount(initPos.Round + 1)
+ remains := resetCount
+ for ; remains > 0 && !e.config.Contains(initPos.Height); remains-- {
+ e.config.ExtendLength()
+ }
+ if !e.config.Contains(initPos.Height) {
+ return nil, ErrUnmatchedBlockHeightWithConfig{
+ round: initPos.Round,
+ reset: resetCount,
+ blockHeight: initPos.Height,
+ }
+ }
+ e.lastTriggeredResetCount = resetCount - remains
+ }
+ return e, nil
+}
+
+// Register registers a handler to be called when a new round is confirmed or
+// a new DKG reset is detected.
+//
+// Handlers registered earlier have higher priority.
+func (e *RoundEvent) Register(h roundEventFn) {
+ e.lock.Lock()
+ defer e.lock.Unlock()
+ e.handlers = append(e.handlers, h)
+}
+
+// TriggerInitEvent triggers an event from the initial settings.
+func (e *RoundEvent) TriggerInitEvent() {
+ e.lock.Lock()
+ defer e.lock.Unlock()
+ events := []RoundEventParam{{
+ Round: e.lastTriggeredRound,
+ Reset: e.lastTriggeredResetCount,
+ BeginHeight: e.config.LastPeriodBeginHeight(),
+ CRS: GetCRSWithPanic(e.gov, e.lastTriggeredRound, e.logger),
+ Config: GetConfigWithPanic(e.gov, e.lastTriggeredRound, e.logger),
+ }}
+ for _, h := range e.handlers {
+ h(events)
+ }
+}
+
+// ValidateNextRound validates whether the DKG set for the next round is ready
+// or has failed to set up; all registered handlers are called once a decision
+// is made on chain.
+//
+// The count of triggered events is returned.
+func (e *RoundEvent) ValidateNextRound(blockHeight uint64) (count uint) {
+ // To keep triggers continuous and sequential, the next validation must wait
+ // for the previous one to finish; that is why a mutex is used here directly.
+ var events []RoundEventParam
+ e.lock.Lock()
+ defer e.lock.Unlock()
+ e.logger.Trace("ValidateNextRound",
+ "height", blockHeight,
+ "round", e.lastTriggeredRound,
+ "count", e.lastTriggeredResetCount)
+ defer func() {
+ count = uint(len(events))
+ if count == 0 {
+ return
+ }
+ for _, h := range e.handlers {
+ // To make sure all handlers receive triggers sequentially, we can't
+ // raise go routines here.
+ h(events)
+ }
+ }()
+ var (
+ triggered bool
+ param RoundEventParam
+ beginHeight = blockHeight
+ startRound = e.lastTriggeredRound
+ )
+ for {
+ param, triggered = e.check(beginHeight, startRound)
+ if !triggered {
+ break
+ }
+ events = append(events, param)
+ beginHeight = param.BeginHeight
+ }
+ return
+}
+
+func (e *RoundEvent) check(blockHeight, startRound uint64) (
+ param RoundEventParam, triggered bool) {
+ defer func() {
+ if !triggered {
+ return
+ }
+ // A simple assertion to make sure we didn't pick the wrong round.
+ if e.config.RoundID() != e.lastTriggeredRound {
+ panic(fmt.Errorf("Triggered round not matched: %d, %d",
+ e.config.RoundID(), e.lastTriggeredRound))
+ }
+ param.Round = e.lastTriggeredRound
+ param.Reset = e.lastTriggeredResetCount
+ param.BeginHeight = e.config.LastPeriodBeginHeight()
+ param.CRS = GetCRSWithPanic(e.gov, e.lastTriggeredRound, e.logger)
+ param.Config = GetConfigWithPanic(e.gov, e.lastTriggeredRound, e.logger)
+ e.logger.Info("New RoundEvent triggered",
+ "round", e.lastTriggeredRound,
+ "reset", e.lastTriggeredResetCount,
+ "begin-height", e.config.LastPeriodBeginHeight(),
+ "crs", param.CRS.String()[:6],
+ )
+ }()
+ nextRound := e.lastTriggeredRound + 1
+ if nextRound >= startRound+e.roundShift {
+ // Avoid accessing a configuration more than 'roundShift' rounds newer than
+ // the last confirmed one. The fullnode might crash if we access it before
+ // it knows about it.
+ return
+ }
+ nextCfg := GetConfigWithPanic(e.gov, nextRound, e.logger)
+ resetCount := e.gov.DKGResetCount(nextRound)
+ if resetCount > e.lastTriggeredResetCount {
+ e.lastTriggeredResetCount++
+ e.config.ExtendLength()
+ e.gpkInvalid = false
+ triggered = true
+ return
+ }
+ if e.gpkInvalid {
+ // We already know that DKG failed; wait for the DKG set from the previous
+ // round to reset DKG, so we don't have to reconstruct the group public key
+ // again.
+ return
+ }
+ if nextRound >= dkgDelayRound {
+ var ok bool
+ ok, e.gpkInvalid = IsDKGValid(
+ e.gov, e.logger, nextRound, e.lastTriggeredResetCount)
+ if !ok {
+ return
+ }
+ }
+ // The DKG set for next round is well prepared.
+ e.lastTriggeredRound = nextRound
+ e.lastTriggeredResetCount = 0
+ e.gpkInvalid = false
+ rCfg := RoundBasedConfig{}
+ rCfg.SetupRoundBasedFields(nextRound, nextCfg)
+ rCfg.AppendTo(e.config)
+ e.config = rCfg
+ triggered = true
+ return
+}
+
+// Stop the event source and block until last trigger returns.
+func (e *RoundEvent) Stop() {
+ e.ctxCancel()
+}
+
+// LastPeriod returns block-height info of the last period: its begin height
+// and its length.
+func (e *RoundEvent) LastPeriod() (begin uint64, length uint64) {
+ e.lock.Lock()
+ defer e.lock.Unlock()
+ begin = e.config.LastPeriodBeginHeight()
+ length = e.config.RoundEndHeight() - e.config.LastPeriodBeginHeight()
+ return
+}
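Putting this file together, a consumer would be wired roughly as follows. This is a sketch under assumptions: ctx, gov, logger, initPos, and roundShift come from the caller, and error handling is trimmed.

	rEvt, err := utils.NewRoundEvent(ctx, gov, logger, initPos, roundShift)
	if err != nil {
		return err
	}
	rEvt.Register(func(evts []utils.RoundEventParam) {
		for _, e := range evts {
			logger.Info("round event", "round", e.Round, "reset", e.Reset,
				"begin-height", e.BeginHeight)
		}
	})
	rEvt.TriggerInitEvent()
	// Afterwards, confirmed block heights are fed into ValidateNextRound, for
	// example through RoundEventRetryHandlerGenerator registered on a
	// height-based common.Event.
	rEvt.ValidateNextRound(initPos.Height)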
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/signer.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/signer.go
new file mode 100644
index 000000000..9128e264c
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/signer.go
@@ -0,0 +1,154 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "errors"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/crypto"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+// Errors for signer.
+var (
+ ErrInvalidProposerID = errors.New("invalid proposer id")
+ ErrIncorrectHash = errors.New("hash of block is incorrect")
+ ErrIncorrectSignature = errors.New("signature of block is incorrect")
+ ErrNoBLSSigner = errors.New("bls signer not set")
+)
+
+type blsSigner func(round uint64, hash common.Hash) (crypto.Signature, error)
+
+// Signer signs a segment of data.
+type Signer struct {
+ prvKey crypto.PrivateKey
+ pubKey crypto.PublicKey
+ proposerID types.NodeID
+ blsSign blsSigner
+}
+
+// NewSigner constructs a Signer instance.
+func NewSigner(prvKey crypto.PrivateKey) (s *Signer) {
+ s = &Signer{
+ prvKey: prvKey,
+ pubKey: prvKey.PublicKey(),
+ }
+ s.proposerID = types.NewNodeID(s.pubKey)
+ return
+}
+
+// SetBLSSigner sets the BLS signer used for signing CRSSignature.
+func (s *Signer) SetBLSSigner(signer blsSigner) {
+ s.blsSign = signer
+}
+
+// SignBlock signs a types.Block.
+func (s *Signer) SignBlock(b *types.Block) (err error) {
+ b.ProposerID = s.proposerID
+ b.PayloadHash = crypto.Keccak256Hash(b.Payload)
+ if b.Hash, err = HashBlock(b); err != nil {
+ return
+ }
+ if b.Signature, err = s.prvKey.Sign(b.Hash); err != nil {
+ return
+ }
+ return
+}
+
+// SignVote signs a types.Vote.
+func (s *Signer) SignVote(v *types.Vote) (err error) {
+ v.ProposerID = s.proposerID
+ v.Signature, err = s.prvKey.Sign(HashVote(v))
+ return
+}
+
+// SignCRS signs the CRS signature of a types.Block.
+func (s *Signer) SignCRS(b *types.Block, crs common.Hash) (err error) {
+ if b.ProposerID != s.proposerID {
+ err = ErrInvalidProposerID
+ return
+ }
+ if b.Position.Round < dkgDelayRound {
+ hash := hashCRS(b, crs)
+ b.CRSSignature = crypto.Signature{
+ Type: "bls",
+ Signature: hash[:],
+ }
+ return
+ }
+ if s.blsSign == nil {
+ err = ErrNoBLSSigner
+ return
+ }
+ b.CRSSignature, err = s.blsSign(b.Position.Round, hashCRS(b, crs))
+ return
+}
+
+// SignDKGComplaint signs a DKG complaint.
+func (s *Signer) SignDKGComplaint(complaint *typesDKG.Complaint) (err error) {
+ complaint.ProposerID = s.proposerID
+ complaint.Signature, err = s.prvKey.Sign(hashDKGComplaint(complaint))
+ return
+}
+
+// SignDKGMasterPublicKey signs a DKG master public key.
+func (s *Signer) SignDKGMasterPublicKey(
+ mpk *typesDKG.MasterPublicKey) (err error) {
+ mpk.ProposerID = s.proposerID
+ mpk.Signature, err = s.prvKey.Sign(hashDKGMasterPublicKey(mpk))
+ return
+}
+
+// SignDKGPrivateShare signs a DKG private share.
+func (s *Signer) SignDKGPrivateShare(
+ prvShare *typesDKG.PrivateShare) (err error) {
+ prvShare.ProposerID = s.proposerID
+ prvShare.Signature, err = s.prvKey.Sign(hashDKGPrivateShare(prvShare))
+ return
+}
+
+// SignDKGPartialSignature signs a DKG partial signature.
+func (s *Signer) SignDKGPartialSignature(
+ pSig *typesDKG.PartialSignature) (err error) {
+ pSig.ProposerID = s.proposerID
+ pSig.Signature, err = s.prvKey.Sign(hashDKGPartialSignature(pSig))
+ return
+}
+
+// SignDKGMPKReady signs a DKG ready message.
+func (s *Signer) SignDKGMPKReady(ready *typesDKG.MPKReady) (err error) {
+ ready.ProposerID = s.proposerID
+ ready.Signature, err = s.prvKey.Sign(hashDKGMPKReady(ready))
+ return
+}
+
+// SignDKGFinalize signs a DKG finalize message.
+func (s *Signer) SignDKGFinalize(final *typesDKG.Finalize) (err error) {
+ final.ProposerID = s.proposerID
+ final.Signature, err = s.prvKey.Sign(hashDKGFinalize(final))
+ return
+}
+
+// SignDKGSuccess signs a DKG success message.
+func (s *Signer) SignDKGSuccess(success *typesDKG.Success) (err error) {
+ success.ProposerID = s.proposerID
+ success.Signature, err = s.prvKey.Sign(hashDKGSuccess(success))
+ return
+}
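A brief usage sketch for the Signer above; prvKey is assumed to be a crypto.PrivateKey (for instance from the ecdsa package), and the empty Vote and Block literals stand in for values normally filled by the consensus core:

	signer := utils.NewSigner(prvKey)

	v := &types.Vote{} // fields normally filled by the agreement module
	if err := signer.SignVote(v); err != nil {
		return err
	}
	ok, err := utils.VerifyVoteSignature(v) // true as long as v is not mutated afterwards
	if err != nil || !ok {
		return fmt.Errorf("vote signature rejected: %v", err)
	}

	b := &types.Block{Payload: []byte("illustrative payload")}
	if err := signer.SignBlock(b); err != nil { // sets ProposerID, PayloadHash, Hash, Signature
		return err
	}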
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/utils.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/utils.go
new file mode 100644
index 000000000..6ff5bb62f
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/utils.go
@@ -0,0 +1,207 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/byzantine-lab/dexon-consensus/common"
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+ typesDKG "github.com/byzantine-lab/dexon-consensus/core/types/dkg"
+)
+
+var dkgDelayRound uint64
+
+// SetDKGDelayRound sets the variable.
+func SetDKGDelayRound(delay uint64) {
+ dkgDelayRound = delay
+}
+
+type configAccessor interface {
+ Configuration(round uint64) *types.Config
+}
+
+// GetConfigWithPanic is a helper to access configs, and panic when config for
+// that round is not ready yet.
+func GetConfigWithPanic(accessor configAccessor, round uint64,
+ logger common.Logger) *types.Config {
+ if logger != nil {
+ logger.Debug("Calling Governance.Configuration", "round", round)
+ }
+ c := accessor.Configuration(round)
+ if c == nil {
+ panic(fmt.Errorf("configuration is not ready %v", round))
+ }
+ return c
+}
+
+type crsAccessor interface {
+ CRS(round uint64) common.Hash
+}
+
+// GetCRSWithPanic is a helper to access CRS, and panic when CRS for that
+// round is not ready yet.
+func GetCRSWithPanic(accessor crsAccessor, round uint64,
+ logger common.Logger) common.Hash {
+ if logger != nil {
+ logger.Debug("Calling Governance.CRS", "round", round)
+ }
+ crs := accessor.CRS(round)
+ if (crs == common.Hash{}) {
+ panic(fmt.Errorf("CRS is not ready %v", round))
+ }
+ return crs
+}
+
+// VerifyDKGComplaint verifies whether a DKG complaint is valid.
+func VerifyDKGComplaint(
+ complaint *typesDKG.Complaint, mpk *typesDKG.MasterPublicKey) (bool, error) {
+ ok, err := VerifyDKGComplaintSignature(complaint)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+ if complaint.IsNack() {
+ return true, nil
+ }
+ if complaint.Round != mpk.Round {
+ return false, nil
+ }
+ ok, err = VerifyDKGMasterPublicKeySignature(mpk)
+ if err != nil {
+ return false, err
+ }
+ if !ok {
+ return false, nil
+ }
+ ok, err = mpk.PublicKeyShares.VerifyPrvShare(
+ typesDKG.NewID(complaint.PrivateShare.ReceiverID),
+ &complaint.PrivateShare.PrivateShare)
+ if err != nil {
+ return false, err
+ }
+ return !ok, nil
+}
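A hedged sketch of running the check above over governance data; gov is assumed to provide the governanceAccessor-style methods used elsewhere in this package, handleComplaint is hypothetical, and nack complaints skip the master-public-key lookup entirely:

	mpks := make(map[types.NodeID]*typesDKG.MasterPublicKey)
	for _, mpk := range gov.DKGMasterPublicKeys(round) {
		mpks[mpk.ProposerID] = mpk
	}
	for _, comp := range gov.DKGComplaints(round) {
		mpk, exist := mpks[comp.PrivateShare.ProposerID]
		if !exist && !comp.IsNack() {
			continue // no key to verify a non-nack complaint against
		}
		ok, err := utils.VerifyDKGComplaint(comp, mpk)
		if err != nil || !ok {
			continue // unverifiable or invalid complaint
		}
		// Valid complaint: either a properly signed nack, or a private share
		// that fails verification against the accused proposer's key shares.
		handleComplaint(comp)
	}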
+
+// LaunchDummyReceiver launches a goroutine to receive from the receive
+// channel of a network module. A context is required to stop the goroutine
+// automatically. An optional message handler can be provided.
+func LaunchDummyReceiver(
+ ctx context.Context, recv <-chan types.Msg, handler func(types.Msg)) (
+ context.CancelFunc, <-chan struct{}) {
+ var (
+ dummyCtx, dummyCancel = context.WithCancel(ctx)
+ finishedChan = make(chan struct{}, 1)
+ )
+ go func() {
+ defer func() {
+ finishedChan <- struct{}{}
+ }()
+ loop:
+ for {
+ select {
+ case <-dummyCtx.Done():
+ break loop
+ case v, ok := <-recv:
+ if !ok {
+ panic(fmt.Errorf(
+ "receive channel is closed before dummy receiver"))
+ }
+ if handler != nil {
+ handler(v)
+ }
+ }
+ }
+ }()
+ return dummyCancel, finishedChan
+}
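A usage sketch for the dummy receiver above; network.ReceiveChan() is a stand-in for whatever <-chan types.Msg the caller needs drained:

	cancel, finished := utils.LaunchDummyReceiver(ctx, network.ReceiveChan(), nil)
	// ... run whatever requires the channel to keep being consumed ...
	cancel()   // stop the dummy receiver
	<-finished // wait until its goroutine has actually returned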
+
+// GetDKGThreshold returns the expected threshold for a given DKG set size.
+func GetDKGThreshold(config *types.Config) int {
+ return int(config.NotarySetSize*2/3) + 1
+}
+
+// GetDKGValidThreshold returns the threshold for a DKG set to be considered valid.
+func GetDKGValidThreshold(config *types.Config) int {
+ return int(config.NotarySetSize * 5 / 6)
+}
+
+// GetBAThreshold returns the threshold for BA votes.
+func GetBAThreshold(config *types.Config) int {
+ return int(config.NotarySetSize*2/3 + 1)
+}
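Worked numbers for the three thresholds above, under an assumed NotarySetSize of 30 (the config literal is illustrative):

	cfg := &types.Config{NotarySetSize: 30}
	fmt.Println(utils.GetDKGThreshold(cfg))      // 21 = 30*2/3 + 1
	fmt.Println(utils.GetDKGValidThreshold(cfg)) // 25 = 30*5/6
	fmt.Println(utils.GetBAThreshold(cfg))       // 21 = 30*2/3 + 1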
+
+// GetNextRoundValidationHeight returns the block height to check if the next
+// round is ready.
+func GetNextRoundValidationHeight(begin, length uint64) uint64 {
+ return begin + length*9/10
+}
+
+// GetRoundHeight wraps the workaround for the round height logic in fullnode.
+func GetRoundHeight(accessor interface{}, round uint64) uint64 {
+ type roundHeightAccessor interface {
+ GetRoundHeight(round uint64) uint64
+ }
+ accessorInst := accessor.(roundHeightAccessor)
+ height := accessorInst.GetRoundHeight(round)
+ if round == 0 && height < types.GenesisHeight {
+ return types.GenesisHeight
+ }
+ return height
+}
+
+// IsDKGValid checks if DKG is correctly prepared.
+func IsDKGValid(
+ gov governanceAccessor, logger common.Logger, round, reset uint64) (
+ valid bool, gpkInvalid bool) {
+ if !gov.IsDKGFinal(round) {
+ logger.Debug("DKG is not final", "round", round, "reset", reset)
+ return
+ }
+ if !gov.IsDKGSuccess(round) {
+ logger.Debug("DKG is not successful", "round", round, "reset", reset)
+ return
+ }
+ cfg := GetConfigWithPanic(gov, round, logger)
+ gpk, err := typesDKG.NewGroupPublicKey(
+ round,
+ gov.DKGMasterPublicKeys(round),
+ gov.DKGComplaints(round),
+ GetDKGThreshold(cfg))
+ if err != nil {
+ logger.Debug("Group public key setup failed",
+ "round", round,
+ "reset", reset,
+ "error", err)
+ gpkInvalid = true
+ return
+ }
+ if len(gpk.QualifyNodeIDs) < GetDKGValidThreshold(cfg) {
+ logger.Debug("Group public key threshold not reach",
+ "round", round,
+ "reset", reset,
+ "qualified", len(gpk.QualifyNodeIDs))
+ gpkInvalid = true
+ return
+ }
+ valid = true
+ return
+}
diff --git a/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/vote-filter.go b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/vote-filter.go
new file mode 100644
index 000000000..556c2489a
--- /dev/null
+++ b/vendor/github.com/byzantine-lab/dexon-consensus/core/utils/vote-filter.go
@@ -0,0 +1,72 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "github.com/byzantine-lab/dexon-consensus/core/types"
+)
+
+// VoteFilter filters votes that are useless for now.
+// To maximize performance, this structure is not thread-safe and will never be.
+type VoteFilter struct {
+ Voted map[types.VoteHeader]struct{}
+ Position types.Position
+ LockIter uint64
+ Period uint64
+ Confirm bool
+}
+
+// NewVoteFilter creates a new vote filter instance.
+func NewVoteFilter() *VoteFilter {
+ return &VoteFilter{
+ Voted: make(map[types.VoteHeader]struct{}),
+ }
+}
+
+// Filter checks if the vote should be filtered out.
+func (vf *VoteFilter) Filter(vote *types.Vote) bool {
+ if vote.Type == types.VoteInit {
+ return true
+ }
+ if vote.Position.Older(vf.Position) {
+ return true
+ } else if vote.Position.Newer(vf.Position) {
+ // It's impossible to check votes for a newer position.
+ return false
+ }
+ if vf.Confirm {
+ return true
+ }
+ if vote.Type == types.VotePreCom && vote.Period < vf.LockIter {
+ return true
+ }
+ if vote.Type == types.VoteCom &&
+ vote.Period < vf.Period &&
+ vote.BlockHash == types.SkipBlockHash {
+ return true
+ }
+ if _, exist := vf.Voted[vote.VoteHeader]; exist {
+ return true
+ }
+ return false
+}
+
+// AddVote adds a vote to the filter so the same vote will be filtered later.
+func (vf *VoteFilter) AddVote(vote *types.Vote) {
+ vf.Voted[vote.VoteHeader] = struct{}{}
+}
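Finally, a sketch of how this filter sits in front of vote processing; it must stay on a single goroutine since the structure is intentionally not thread-safe, and currentPosition, pendingVotes, and processVote are hypothetical:

	vf := utils.NewVoteFilter()
	vf.Position = currentPosition // advanced as the agreement moves forward

	for _, vote := range pendingVotes {
		if vf.Filter(vote) {
			continue // init vote, stale vote, or already-seen vote
		}
		processVote(vote)
		vf.AddVote(vote) // remember it so replays are filtered next time
	}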