From 6f2a06197463ad2aefb2b053b2d784b9b3ff184f Mon Sep 17 00:00:00 2001
From: Wei-Ning Huang
Date: Fri, 22 Mar 2019 17:16:17 +0800
Subject: vendor: sync to latest core (#295)

---
 .../dexon-consensus/core/consensus.go         | 101 +++++++++++----------
 .../dexon-consensus/core/crypto/dkg/dkg.go    |  62 ++++++++++++-
 .../dexon-consensus/core/syncer/consensus.go  |  14 +--
 .../dexon-consensus/core/utils/round-event.go |  68 +++++++-------
 vendor/vendor.json                            |  48 +++++-----
 5 files changed, 175 insertions(+), 118 deletions(-)

(limited to 'vendor')

diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
index 8f8002b67..83727ec58 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
@@ -658,8 +658,10 @@ func (con *Consensus) prepare(
     // Register round event handler to abort previous running DKG if any.
     con.roundEvent.Register(func(evts []utils.RoundEventParam) {
         e := evts[len(evts)-1]
-        defer elapse("abort DKG", e)()
-        con.cfgModule.abortDKG(e.Round+1, e.Reset)
+        go func() {
+            defer elapse("abort DKG", e)()
+            con.cfgModule.abortDKG(e.Round+1, e.Reset)
+        }()
     })
     // Register round event handler to update BA and BC modules.
     con.roundEvent.Register(func(evts []utils.RoundEventParam) {
@@ -721,8 +723,10 @@ func (con *Consensus) prepare(
                 return
             }
             // Aborting all previous running DKG protocol instance if any.
-            con.cfgModule.abortDKG(nextRound, e.Reset)
-            con.runCRS(e.Round, utils.Rehash(e.CRS, uint(e.Reset+1)), true)
+            go func() {
+                con.cfgModule.abortDKG(nextRound, e.Reset)
+                con.runCRS(e.Round, utils.Rehash(e.CRS, uint(e.Reset+1)), true)
+            }()
         })
     })
     // Register round event handler to propose new CRS.
@@ -750,7 +754,7 @@ func (con *Consensus) prepare(
                 con.logger.Debug("CRS already proposed", "round", e.Round+1)
                 return
             }
-            con.runCRS(e.Round, e.CRS, false)
+            go con.runCRS(e.Round, e.CRS, false)
         })
     }
 })
@@ -788,10 +792,8 @@ func (con *Consensus) prepare(
     e := evts[len(evts)-1]
     defer elapse("next round", e)()
     // Register a routine to trigger round events.
-    con.event.RegisterHeight(e.NextRoundValidationHeight(), func(
-        blockHeight uint64) {
-        con.roundEvent.ValidateNextRound(blockHeight)
-    })
+    con.event.RegisterHeight(e.NextRoundValidationHeight(),
+        utils.RoundEventRetryHandlerGenerator(con.roundEvent, con.event))
     // Register a routine to register next DKG.
     con.event.RegisterHeight(e.NextDKGRegisterHeight(), func(uint64) {
         nextRound := e.Round + 1
@@ -801,48 +803,53 @@ func (con *Consensus) prepare(
                 "reset", e.Reset)
             return
         }
-        // Normally, gov.CRS would return non-nil. Use this for in case of
-        // unexpected network fluctuation and ensure the robustness.
-        if !checkWithCancel(
-            con.ctx, 500*time.Millisecond, checkCRS(nextRound)) {
-            con.logger.Debug("unable to prepare CRS for DKG set",
-                "round", nextRound,
-                "reset", e.Reset)
-            return
-        }
-        nextDkgSet, err := con.nodeSetCache.GetDKGSet(nextRound)
-        if err != nil {
-            con.logger.Error("Error getting DKG set for next round",
-                "round", nextRound,
-                "reset", e.Reset,
-                "error", err)
-            return
-        }
-        if _, exist := nextDkgSet[con.ID]; !exist {
-            con.logger.Info("Not selected as DKG set",
+        go func() {
+            // Normally, gov.CRS would return non-nil. Use this for in case
+            // of unexpected network fluctuation and ensure the robustness.
+            if !checkWithCancel(
+                con.ctx, 500*time.Millisecond, checkCRS(nextRound)) {
+                con.logger.Debug("unable to prepare CRS for DKG set",
+                    "round", nextRound,
+                    "reset", e.Reset)
+                return
+            }
+            nextDkgSet, err := con.nodeSetCache.GetDKGSet(nextRound)
+            if err != nil {
+                con.logger.Error("Error getting DKG set for next round",
+                    "round", nextRound,
+                    "reset", e.Reset,
+                    "error", err)
+                return
+            }
+            if _, exist := nextDkgSet[con.ID]; !exist {
+                con.logger.Info("Not selected as DKG set",
+                    "round", nextRound,
+                    "reset", e.Reset)
+                return
+            }
+            con.logger.Info("Selected as DKG set",
                 "round", nextRound,
                 "reset", e.Reset)
-            return
-        }
-        con.logger.Info("Selected as DKG set",
-            "round", nextRound,
-            "reset", e.Reset)
-        nextConfig := utils.GetConfigWithPanic(con.gov, nextRound,
-            con.logger)
-        con.cfgModule.registerDKG(nextRound, e.Reset, utils.GetDKGThreshold(
-            nextConfig))
-        con.event.RegisterHeight(e.NextDKGPreparationHeight(),
-            func(uint64) {
-                func() {
-                    con.dkgReady.L.Lock()
-                    defer con.dkgReady.L.Unlock()
-                    con.dkgRunning = 0
-                }()
-                con.runDKG(nextRound, e.Reset, nextConfig)
-            })
+            nextConfig := utils.GetConfigWithPanic(con.gov, nextRound,
+                con.logger)
+            con.cfgModule.registerDKG(nextRound, e.Reset,
+                utils.GetDKGThreshold(nextConfig))
+            con.event.RegisterHeight(e.NextDKGPreparationHeight(),
+                func(uint64) {
+                    func() {
+                        con.dkgReady.L.Lock()
+                        defer con.dkgReady.L.Unlock()
+                        con.dkgRunning = 0
+                    }()
+                    con.runDKG(nextRound, e.Reset, nextConfig)
+                })
+        }()
     })
 })
 con.roundEvent.TriggerInitEvent()
+if initBlock != nil {
+    con.event.NotifyHeight(initBlock.Finalization.Height)
+}
 return
}
@@ -1289,7 +1296,7 @@ func (con *Consensus) deliverFinalizedBlocksWithoutLock() (err error) {
         "pending", con.bcModule.lastPendingBlock())
     for _, b := range deliveredBlocks {
         con.deliverBlock(b)
-        go con.event.NotifyHeight(b.Finalization.Height)
+        con.event.NotifyHeight(b.Finalization.Height)
     }
     return
 }
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/dkg.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/dkg.go
index 425d96b95..796609dc9 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/dkg.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/crypto/dkg/dkg.go
@@ -133,27 +133,79 @@ func (prvs *PrivateKeyShares) Equal(other *PrivateKeyShares) bool {

 // EncodeRLP implements rlp.Encoder
 func (prvs *PrivateKeyShares) EncodeRLP(w io.Writer) error {
+    data := make([][][]byte, 3)
+    shares := make([][]byte, len(prvs.shares))
+    for i, s := range prvs.shares {
+        shares[i] = s.Bytes()
+    }
+    data[0] = shares
+
+    shareIndex := make([][]byte, 0)
+    for k, v := range prvs.shareIndex {
+        shareIndex = append(shareIndex, k.GetLittleEndian())
+
+        vBytes, err := rlp.EncodeToBytes(uint64(v))
+        if err != nil {
+            return err
+        }
+        shareIndex = append(shareIndex, vBytes)
+    }
+    data[1] = shareIndex
+
     mpks := make([][]byte, len(prvs.masterPrivateKey))
     for i, m := range prvs.masterPrivateKey {
         mpks[i] = m.GetLittleEndian()
     }
-    return rlp.Encode(w, mpks)
+    data[2] = mpks
+    return rlp.Encode(w, data)
 }

 // DecodeRLP implements rlp.Decoder
 func (prvs *PrivateKeyShares) DecodeRLP(s *rlp.Stream) error {
-    var dec [][]byte
+    *prvs = PrivateKeyShares{}
+    var dec [][][]byte
     if err := s.Decode(&dec); err != nil {
         return err
     }
-    for _, k := range dec {
+    var shares []PrivateKey
+    for _, bs := range dec[0] {
+        var key PrivateKey
+        err := key.SetBytes(bs)
+        if err != nil {
+            return err
+        }
+        shares = append(shares, key)
+    }
+    (*prvs).shares = shares
+
+    sharesIndex := map[ID]int{}
+    for i := 0; i < len(dec[1]); i += 2 {
+        var key ID
+        err := key.SetLittleEndian(dec[1][i])
+        if err != nil {
+            return err
+        }
+
+        var value uint64
+        err = rlp.DecodeBytes(dec[1][i+1], &value)
+        if err != nil {
+            return err
+        }
+
+        sharesIndex[key] = int(value)
+    }
+    (*prvs).shareIndex = sharesIndex
+
+    var mpks []bls.SecretKey
+    for _, bs := range dec[2] {
         var key bls.SecretKey
-        if err := key.SetLittleEndian(k); err != nil {
+        if err := key.SetLittleEndian(bs); err != nil {
             return err
         }
-        prvs.masterPrivateKey = append(prvs.masterPrivateKey, key)
+        mpks = append(mpks, key)
     }
+    (*prvs).masterPrivateKey = mpks
     return nil
 }
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
index 2eeee9d07..4fc24b407 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
@@ -209,16 +209,10 @@ func (con *Consensus) assureBuffering() {
         })
     // Register a round event handler to validate next round.
     con.roundEvt.Register(func(evts []utils.RoundEventParam) {
-        e := evts[len(evts)-1]
-        con.heightEvt.RegisterHeight(e.NextRoundValidationHeight(), func(
-            blockHeight uint64) {
-            select {
-            case <-con.ctx.Done():
-                return
-            default:
-            }
-            con.roundEvt.ValidateNextRound(blockHeight)
-        })
+        con.heightEvt.RegisterHeight(
+            evts[len(evts)-1].NextRoundValidationHeight(),
+            utils.RoundEventRetryHandlerGenerator(con.roundEvt, con.heightEvt),
+        )
     })
     con.roundEvt.TriggerInitEvent()
     con.startAgreement()
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go
index 3536a27b3..ff1d91e3d 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go
@@ -21,7 +21,6 @@ import (
     "context"
     "fmt"
     "sync"
-    "time"

     "github.com/dexon-foundation/dexon-consensus/common"
     "github.com/dexon-foundation/dexon-consensus/core/types"
@@ -127,6 +126,21 @@ type governanceAccessor interface {
     DKGResetCount(round uint64) uint64
 }

+// RoundEventRetryHandlerGenerator generates a handler to common.Event, which
+// would register itself to retry next round validation if round event is not
+// triggered.
+func RoundEventRetryHandlerGenerator(
+    rEvt *RoundEvent, hEvt *common.Event) func(uint64) {
+    var hEvtHandler func(uint64)
+    hEvtHandler = func(h uint64) {
+        if rEvt.ValidateNextRound(h) == 0 {
+            // Retry until at least one round event is triggered.
+            hEvt.RegisterHeight(h+1, hEvtHandler)
+        }
+    }
+    return hEvtHandler
+}
+
 // RoundEvent would be triggered when either:
 // - the next DKG set setup is ready.
 // - the next DKG set setup is failed, and previous DKG set already reset the
@@ -140,9 +154,9 @@ type RoundEvent struct {
     lastTriggeredRound      uint64
     lastTriggeredResetCount uint64
     roundShift              uint64
+    dkgFailed               bool
     ctx                     context.Context
     ctxCancel               context.CancelFunc
-    retryInterval           time.Duration
 }

 // NewRoundEvent creates an RoundEvent instance.
@@ -158,7 +172,6 @@ func NewRoundEvent(parentCtx context.Context, gov governanceAccessor,
         logger:             logger,
         lastTriggeredRound: initRound,
         roundShift:         roundShift,
-        retryInterval:      initConfig.LambdaBA,
     }
     e.ctx, e.ctxCancel = context.WithCancel(parentCtx)
     e.config = RoundBasedConfig{}
@@ -212,20 +225,20 @@ func (e *RoundEvent) TriggerInitEvent() {
 // failed to setup, all registered handlers would be called once some decision
 // is made on chain.
 //
-// This method would block until at least one event is triggered. Multiple
-// trigger in one call is possible.
-func (e *RoundEvent) ValidateNextRound(blockHeight uint64) {
+// The count of triggered events would be returned.
+func (e *RoundEvent) ValidateNextRound(blockHeight uint64) (count uint) {
     // To make triggers continuous and sequential, the next validation should
     // wait for previous one finishing. That's why I use mutex here directly.
     var events []RoundEventParam
     e.lock.Lock()
     defer e.lock.Unlock()
-    e.logger.Info("ValidateNextRound",
+    e.logger.Trace("ValidateNextRound",
         "height", blockHeight,
         "round", e.lastTriggeredRound,
         "count", e.lastTriggeredResetCount)
     defer func() {
-        if len(events) == 0 {
+        count = uint(len(events))
+        if count == 0 {
             return
         }
         for _, h := range e.handlers {
@@ -235,34 +248,24 @@ func (e *RoundEvent) ValidateNextRound(blockHeight uint64) {
         }
     }()
     var (
-        dkgFailed, triggered bool
-        param                RoundEventParam
-        beginHeight          = blockHeight
-        startRound           = e.lastTriggeredRound
+        triggered   bool
+        param       RoundEventParam
+        beginHeight = blockHeight
+        startRound  = e.lastTriggeredRound
     )
     for {
-        for {
-            param, dkgFailed, triggered = e.check(beginHeight, startRound,
-                dkgFailed)
-            if !triggered {
-                break
-            }
-            events = append(events, param)
-            beginHeight = param.BeginHeight
-        }
-        if len(events) > 0 {
+        param, triggered = e.check(beginHeight, startRound)
+        if !triggered {
             break
         }
-        select {
-        case <-e.ctx.Done():
-            return
-        case <-time.After(e.retryInterval):
-        }
+        events = append(events, param)
+        beginHeight = param.BeginHeight
     }
+    return
 }

-func (e *RoundEvent) check(blockHeight, startRound uint64, lastDKGCheck bool) (
-    param RoundEventParam, dkgFailed bool, triggered bool) {
+func (e *RoundEvent) check(blockHeight, startRound uint64) (
+    param RoundEventParam, triggered bool) {
     defer func() {
         if !triggered {
             return
@@ -296,14 +299,14 @@ func (e *RoundEvent) check(blockHeight, startRound uint64, lastDKGCheck bool) (
     if resetCount > e.lastTriggeredResetCount {
         e.lastTriggeredResetCount++
         e.config.ExtendLength()
+        e.dkgFailed = false
         triggered = true
         return
     }
-    if lastDKGCheck {
+    if e.dkgFailed {
         // We know that DKG already failed, now wait for the DKG set from
         // previous round to reset DKG and don't have to reconstruct the
         // group public key again.
-        dkgFailed = true
         return
     }
     if nextRound >= dkgDelayRound {
@@ -322,13 +325,14 @@ func (e *RoundEvent) check(blockHeight, startRound uint64, lastDKGCheck bool) (
             "group public key setup failed, waiting for DKG reset",
             "round", nextRound,
             "reset", e.lastTriggeredResetCount)
-        dkgFailed = true
+        e.dkgFailed = true
         return
     }
     }
     // The DKG set for next round is well prepared.
     e.lastTriggeredRound = nextRound
     e.lastTriggeredResetCount = 0
+    e.dkgFailed = false
     rCfg := RoundBasedConfig{}
     rCfg.SetupRoundBasedFields(nextRound, nextCfg)
     rCfg.AppendTo(e.config)
diff --git a/vendor/vendor.json b/vendor/vendor.json
index e602bcf83..6590c303d 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -141,16 +141,16 @@
 		{
 			"checksumSHA1": "8EuKVkP1v/w5fRuuvUaXX5k/F+I=",
 			"path": "github.com/dexon-foundation/dexon-consensus/common",
-			"revision": "672d245243b6b85040f96e6638628b86975e9a1b",
-			"revisionTime": "2019-03-20T15:03:36Z",
+			"revision": "99723721b79e7f1cf5d1009d9117705d84df5eaa",
+			"revisionTime": "2019-03-22T07:32:15Z",
 			"version": "single-chain",
 			"versionExact": "single-chain"
 		},
 		{
-			"checksumSHA1": "0DGA7q0IqImUaB6ooQKS8UWrzAM=",
+			"checksumSHA1": "9oiyKaDV9gfeP3RVJ9BQpZsNqQQ=",
 			"path": "github.com/dexon-foundation/dexon-consensus/core",
-			"revision": "672d245243b6b85040f96e6638628b86975e9a1b",
-			"revisionTime": "2019-03-20T15:03:36Z",
+			"revision": "99723721b79e7f1cf5d1009d9117705d84df5eaa",
+			"revisionTime": "2019-03-22T07:32:15Z",
 			"version": "single-chain",
 			"versionExact": "single-chain"
 		},
@@ -165,64 +165,64 @@
 		{
 			"checksumSHA1": "tQSbYCu5P00lUhKsx3IbBZCuSLY=",
 			"path": "github.com/dexon-foundation/dexon-consensus/core/crypto",
-			"revision": "672d245243b6b85040f96e6638628b86975e9a1b",
-			"revisionTime": "2019-03-20T15:03:36Z",
+			"revision": "99723721b79e7f1cf5d1009d9117705d84df5eaa",
+			"revisionTime": "2019-03-22T07:32:15Z",
 			"version": "single-chain",
 			"versionExact": "single-chain"
 		},
 		{
-			"checksumSHA1": "kC/Tu4is9+jABI/EdvEv7VxwvEo=",
+			"checksumSHA1": "mMdctxTa/jNwAwZjjYoyEZdLoF8=",
 			"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg",
-			"revision": "672d245243b6b85040f96e6638628b86975e9a1b",
-			"revisionTime": "2019-03-20T15:03:36Z",
+			"revision": "99723721b79e7f1cf5d1009d9117705d84df5eaa",
+			"revisionTime": "2019-03-22T07:32:15Z",
 			"version": "single-chain",
 			"versionExact": "single-chain"
 		},
 		{
 			"checksumSHA1": "BhLKK8RveoLaeXc9UyUKMwQqchU=",
 			"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa",
-			"revision": "672d245243b6b85040f96e6638628b86975e9a1b",
-			"revisionTime": "2019-03-20T15:03:36Z",
+			"revision": "99723721b79e7f1cf5d1009d9117705d84df5eaa",
+			"revisionTime": "2019-03-22T07:32:15Z",
 			"version": "single-chain",
 			"versionExact": "single-chain"
 		},
 		{
 			"checksumSHA1": "b99zZvbWvBimv1NiPGGF1yQ4dKY=",
 			"path": "github.com/dexon-foundation/dexon-consensus/core/db",
-			"revision": "672d245243b6b85040f96e6638628b86975e9a1b",
-			"revisionTime": "2019-03-20T15:03:36Z",
+			"revision": "99723721b79e7f1cf5d1009d9117705d84df5eaa",
+			"revisionTime": "2019-03-22T07:32:15Z",
 			"version": "single-chain",
 			"versionExact": "single-chain"
 		},
 		{
-			"checksumSHA1": "6gVpBAk9bPqgUo+HkIp2zFz9aF4=",
+			"checksumSHA1": "4Cj093M4dpIAVBNkRyGb8U2+4rU=",
 			"path": "github.com/dexon-foundation/dexon-consensus/core/syncer",
-			"revision": "672d245243b6b85040f96e6638628b86975e9a1b",
-			"revisionTime": "2019-03-20T15:03:36Z",
+			"revision": "99723721b79e7f1cf5d1009d9117705d84df5eaa",
+			"revisionTime": "2019-03-22T07:32:15Z",
 			"version": "single-chain",
 			"versionExact": "single-chain"
 		},
 		{
 			"checksumSHA1": "id8imcgp3SqYhIx0k3Chd0VZrUQ=",
 			"path": "github.com/dexon-foundation/dexon-consensus/core/types",
-			"revision": "672d245243b6b85040f96e6638628b86975e9a1b",
-			"revisionTime": "2019-03-20T15:03:36Z",
+			"revision": "99723721b79e7f1cf5d1009d9117705d84df5eaa",
+			"revisionTime": "2019-03-22T07:32:15Z",
 			"version": "single-chain",
 			"versionExact": "single-chain"
 		},
"yoVRmvJDCp/1jSfY7wMt2LBQ9e8=", "path": "github.com/dexon-foundation/dexon-consensus/core/types/dkg", - "revision": "672d245243b6b85040f96e6638628b86975e9a1b", - "revisionTime": "2019-03-20T15:03:36Z", + "revision": "99723721b79e7f1cf5d1009d9117705d84df5eaa", + "revisionTime": "2019-03-22T07:32:15Z", "version": "single-chain", "versionExact": "single-chain" }, { - "checksumSHA1": "GGbVDVOkB+cxRyRTHRdLfU8+gnk=", + "checksumSHA1": "7lK4WuSLR+Cu58XfPtA5YrKk2qA=", "path": "github.com/dexon-foundation/dexon-consensus/core/utils", - "revision": "672d245243b6b85040f96e6638628b86975e9a1b", - "revisionTime": "2019-03-20T15:03:36Z", + "revision": "99723721b79e7f1cf5d1009d9117705d84df5eaa", + "revisionTime": "2019-03-22T07:32:15Z", "version": "single-chain", "versionExact": "single-chain" }, -- cgit v1.2.3