 core/blockchain.go               |   4
 core/blockchain_test.go          |   2
 core/configuration-chain.go      | 195
 core/configuration-chain_test.go |  20
 core/consensus.go                |  24
 core/consensus_test.go           |   4
 core/dkg-tsig-protocol.go        |   7
 core/dkg-tsig-protocol_test.go   |   3
 core/syncer/consensus.go         |   1
 core/utils/round-event.go        |   8
 10 files changed, 152 insertions(+), 116 deletions(-)
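
The heart of this patch: configurationChain used to signal DKG aborts through abortDKGCh (a channel of channels); that is replaced with a context.Context held in dkgCtx/dkgCtxCancel plus a dkgRunning flag that abortDKGNoLock polls until the running protocol has actually stopped. A minimal, runnable sketch of that pattern, with illustrative names rather than the real dexon-consensus types:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// worker distills the new configurationChain shape: a mutex-guarded
// protocol with a cancel function and a running flag, instead of an
// abort channel of channels.
type worker struct {
	lock    sync.Mutex
	cancel  context.CancelFunc
	running bool
}

// run executes "phases" until its context is cancelled, then clears the
// running flag so aborters can observe completion.
func (w *worker) run(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	w.lock.Lock()
	w.cancel, w.running = cancel, true
	w.lock.Unlock()
	defer func() {
		w.lock.Lock()
		w.running = false
		w.lock.Unlock()
	}()
	for {
		select {
		case <-ctx.Done():
			return // aborted, cf. ErrDKGAborted
		case <-time.After(50 * time.Millisecond):
			// One phase of work per tick.
		}
	}
}

// abort cancels the worker, then polls the running flag every 100ms until
// the worker has really stopped -- the same loop abortDKGNoLock now uses.
// It gives up (returns false) if its own context is cancelled first.
func (w *worker) abort(ctx context.Context) bool {
	w.lock.Lock()
	if w.cancel != nil {
		w.cancel()
	}
	w.lock.Unlock()
	for {
		w.lock.Lock()
		stopped := !w.running
		w.lock.Unlock()
		if stopped {
			return true
		}
		select {
		case <-ctx.Done():
			return false
		case <-time.After(100 * time.Millisecond):
		}
	}
}

func main() {
	w := &worker{}
	go w.run(context.Background())
	time.Sleep(200 * time.Millisecond)
	fmt.Println("aborted:", w.abort(context.Background()))
}
```

Compared with the channel handshake, cancellation is idempotent and composes with a parent context, which is why registerDKG and abortDKG now take a parent context (con.ctx) from the consensus module.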
diff --git a/core/blockchain.go b/core/blockchain.go
index c5a22b6..610ab28 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -122,6 +122,7 @@ func newBlockChainConfig(prev blockChainConfig, config *types.Config) (
type tsigVerifierGetter interface {
UpdateAndGet(uint64) (TSigVerifier, bool, error)
+ Purge(uint64)
}
type blockChain struct {
@@ -196,6 +197,9 @@ func (bc *blockChain) notifyRoundEvents(evts []utils.RoundEventParam) error {
}
bc.configs = append(bc.configs, c)
}
+ if e.Reset != 0 {
+ bc.vGetter.Purge(e.Round + 1)
+ }
return nil
}
for _, e := range evts {
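
blockchain.go now purges the tsig verifier for Round+1 whenever a round event carries a non-zero Reset, since that verifier was derived from the DKG that was just reset. The test double in the next hunk stubs the new Purge method out; a map-backed sketch of what the tsigVerifierGetter contract expects (stand-in types, not the real cache):

```go
package main

import (
	"fmt"
	"sync"
)

// TSigVerifier is a stand-in for core.TSigVerifier in this sketch.
type TSigVerifier interface{}

// mapVerifierGetter is a toy tsigVerifierGetter: round -> verifier under
// a mutex. Purge drops the stale entry so the next UpdateAndGet has to
// rebuild it from post-reset DKG results.
type mapVerifierGetter struct {
	lock      sync.Mutex
	verifiers map[uint64]TSigVerifier
}

func (g *mapVerifierGetter) UpdateAndGet(round uint64) (TSigVerifier, bool, error) {
	g.lock.Lock()
	defer g.lock.Unlock()
	v, ok := g.verifiers[round]
	return v, ok, nil
}

func (g *mapVerifierGetter) Purge(round uint64) {
	g.lock.Lock()
	defer g.lock.Unlock()
	delete(g.verifiers, round)
}

func main() {
	g := &mapVerifierGetter{verifiers: map[uint64]TSigVerifier{6: struct{}{}}}
	g.Purge(6) // a reset observed at round 5 invalidates round 6's verifier
	_, ok, _ := g.UpdateAndGet(6)
	fmt.Println("still cached:", ok) // false
}
```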
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 3fe7697..87b8eac 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -44,6 +44,8 @@ func (t *testTSigVerifierGetter) UpdateAndGet(round uint64) (
return &testTSigVerifier{}, true, nil
}
+func (t *testTSigVerifierGetter) Purge(_ uint64) {}
+
type BlockChainTestSuite struct {
suite.Suite
diff --git a/core/configuration-chain.go b/core/configuration-chain.go
index 084f9d0..2c16ac3 100644
--- a/core/configuration-chain.go
+++ b/core/configuration-chain.go
@@ -18,6 +18,7 @@
package core
import (
+ "context"
"fmt"
"sync"
"time"
@@ -57,7 +58,7 @@ func (e ErrMismatchDKG) Error() string {
e.expectRound, e.expectReset, e.actualRound, e.actualReset)
}
-type dkgStepFn func(round uint64, reset uint64) (chan<- struct{}, error)
+type dkgStepFn func(round uint64, reset uint64) error
type configurationChain struct {
ID types.NodeID
@@ -70,7 +71,6 @@ type configurationChain struct {
dkgSigner map[uint64]*dkgShareSecret
npks map[uint64]*typesDKG.NodePublicKeys
dkgResult sync.RWMutex
- abortDKGCh chan chan<- struct{}
tsig map[common.Hash]*tsigProtocol
tsigTouched map[common.Hash]struct{}
tsigReady *sync.Cond
@@ -80,8 +80,11 @@ type configurationChain struct {
mpkReady bool
pendingPrvShare map[types.NodeID]*typesDKG.PrivateShare
// TODO(jimmy-dexon): add timeout to pending psig.
- pendingPsig map[common.Hash][]*typesDKG.PartialSignature
- prevHash common.Hash
+ pendingPsig map[common.Hash][]*typesDKG.PartialSignature
+ prevHash common.Hash
+ dkgCtx context.Context
+ dkgCtxCancel context.CancelFunc
+ dkgRunning bool
}
func newConfigurationChain(
@@ -98,7 +101,6 @@ func newConfigurationChain(
logger: logger,
dkgSigner: make(map[uint64]*dkgShareSecret),
npks: make(map[uint64]*typesDKG.NodePublicKeys),
- abortDKGCh: make(chan chan<- struct{}, 1),
tsig: make(map[common.Hash]*tsigProtocol),
tsigTouched: make(map[common.Hash]struct{}),
tsigReady: sync.NewCond(&sync.Mutex{}),
@@ -110,15 +112,19 @@ func newConfigurationChain(
return configurationChain
}
-func (cc *configurationChain) abortDKG(round, reset uint64) {
+func (cc *configurationChain) abortDKG(
+ parentCtx context.Context,
+ round, reset uint64) {
cc.dkgLock.Lock()
defer cc.dkgLock.Unlock()
if cc.dkg != nil {
- cc.abortDKGNoLock(round, reset)
+ cc.abortDKGNoLock(parentCtx, round, reset)
}
}
-func (cc *configurationChain) abortDKGNoLock(round, reset uint64) bool {
+func (cc *configurationChain) abortDKGNoLock(
+ ctx context.Context,
+ round, reset uint64) bool {
if cc.dkg.round > round ||
(cc.dkg.round == round && cc.dkg.reset >= reset) {
cc.logger.Error("newer DKG already is registered",
@@ -132,30 +138,50 @@ func (cc *configurationChain) abortDKGNoLock(round, reset uint64) bool {
"previous-round", cc.dkg.round,
"previous-reset", cc.dkg.reset)
// Abort DKG routine in previous round.
- aborted := make(chan struct{}, 1)
cc.logger.Error("Aborting DKG in previous round",
"round", round,
"previous-round", cc.dkg.round)
- cc.dkgLock.Unlock()
// Notify the currently running DKG protocol to abort.
- cc.abortDKGCh <- aborted
+ if cc.dkgCtxCancel != nil {
+ cc.dkgCtxCancel()
+ }
+ cc.dkgLock.Unlock()
// Wait for the currently running DKG protocol to abort.
- <-aborted
+ for {
+ cc.dkgLock.Lock()
+ if !cc.dkgRunning {
+ cc.dkg = nil
+ break
+ }
+ select {
+ case <-ctx.Done():
+ return false
+ case <-time.After(100 * time.Millisecond):
+ }
+ cc.dkgLock.Unlock()
+ }
cc.logger.Error("Previous DKG aborted",
"round", round,
"reset", reset)
- cc.dkgLock.Lock()
return true
}
-func (cc *configurationChain) registerDKG(round, reset uint64, threshold int) {
+func (cc *configurationChain) registerDKG(
+ parentCtx context.Context,
+ round, reset uint64,
+ threshold int) {
cc.dkgLock.Lock()
defer cc.dkgLock.Unlock()
if cc.dkg != nil {
// Make sure we only proceed when cc.dkg is nil.
- if !cc.abortDKGNoLock(round, reset) {
+ if !cc.abortDKGNoLock(parentCtx, round, reset) {
return
}
+ select {
+ case <-parentCtx.Done():
+ return
+ default:
+ }
if cc.dkg != nil {
// This panic would only be raised when there are multiple attempts
// to register a DKG protocol at the same time.
@@ -176,6 +202,7 @@ func (cc *configurationChain) registerDKG(round, reset uint64, threshold int) {
cc.pendingPrvShare = make(map[types.NodeID]*typesDKG.PrivateShare)
cc.mpkReady = false
cc.dkg, err = recoverDKGProtocol(cc.ID, cc.recv, round, reset, cc.db)
+ cc.dkgCtx, cc.dkgCtxCancel = context.WithCancel(parentCtx)
if err != nil {
panic(err)
}
@@ -207,45 +234,39 @@ func (cc *configurationChain) registerDKG(round, reset uint64, threshold int) {
}()
}
-func (cc *configurationChain) runDKGPhaseOne(round uint64, reset uint64) (
- abortCh chan<- struct{}, err error) {
+func (cc *configurationChain) runDKGPhaseOne(round uint64, reset uint64) error {
if cc.dkg.round < round ||
(cc.dkg.round == round && cc.dkg.reset < reset) {
- err = ErrDKGNotRegistered
- return
+ return ErrDKGNotRegistered
}
if cc.dkg.round != round || cc.dkg.reset != reset {
cc.logger.Warn("DKG canceled", "round", round, "reset", reset)
- err = ErrSkipButNoError
- return
+ return ErrSkipButNoError
}
cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
if cc.gov.IsDKGFinal(round) {
cc.logger.Warn("DKG already final", "round", round)
- err = ErrSkipButNoError
- return
+ return ErrSkipButNoError
}
cc.logger.Debug("Calling Governance.IsDKGMPKReady", "round", round)
- for abortCh == nil && !cc.gov.IsDKGMPKReady(round) {
+ var err error
+ for err == nil && !cc.gov.IsDKGMPKReady(round) {
cc.dkgLock.Unlock()
cc.logger.Debug("DKG MPKs are not ready yet. Try again later...",
"nodeID", cc.ID,
"round", round)
select {
- case abortCh = <-cc.abortDKGCh:
+ case <-cc.dkgCtx.Done():
err = ErrDKGAborted
case <-time.After(500 * time.Millisecond):
}
cc.dkgLock.Lock()
}
- if abortCh != nil {
- return
- }
- return
+ return err
}
func (cc *configurationChain) runDKGPhaseTwoAndThree(
- round uint64, reset uint64) (chan<- struct{}, error) {
+ round uint64, reset uint64) error {
// Check if this node has successfully joined the protocol.
cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
mpks := cc.gov.DKGMasterPublicKeys(round)
@@ -260,7 +281,7 @@ func (cc *configurationChain) runDKGPhaseTwoAndThree(
cc.logger.Warn("Failed to join DKG protocol",
"round", round,
"reset", reset)
- return nil, ErrSkipButNoError
+ return ErrSkipButNoError
}
// Phase 2(T = 0): Exchange DKG secret key share.
if err := cc.dkg.processMasterPublicKeys(mpks); err != nil {
@@ -273,8 +294,8 @@ func (cc *configurationChain) runDKGPhaseTwoAndThree(
// Processing private shares might take a long time; check for abort
// before entering that loop.
select {
- case abortCh := <-cc.abortDKGCh:
- return abortCh, ErrDKGAborted
+ case <-cc.dkgCtx.Done():
+ return ErrDKGAborted
default:
}
for _, prvShare := range cc.pendingPrvShare {
@@ -288,7 +309,7 @@ func (cc *configurationChain) runDKGPhaseTwoAndThree(
// Phase 3(T = 0~λ): Propose complaint.
// Propose complaint is done in `processMasterPublicKeys`.
- return nil, nil
+ return nil
}
func (cc *configurationChain) runDKGPhaseFour() {
@@ -322,27 +343,27 @@ func (cc *configurationChain) runDKGPhaseEight() {
cc.dkg.proposeFinalize()
}
-func (cc *configurationChain) runDKGPhaseNine(round uint64, reset uint64) (
- abortCh chan<- struct{}, err error) {
+func (cc *configurationChain) runDKGPhaseNine(round uint64, reset uint64) error {
// Phase 9(T = 6λ): DKG is ready.
// Normally, IsDKGFinal would return true here. This loop guards against
// unexpected network fluctuation and ensures the robustness of the DKG protocol.
cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
- for abortCh == nil && !cc.gov.IsDKGFinal(round) {
+ var err error
+ for err == nil && !cc.gov.IsDKGFinal(round) {
cc.dkgLock.Unlock()
cc.logger.Debug("DKG is not ready yet. Try again later...",
"nodeID", cc.ID.String()[:6],
"round", round,
"reset", reset)
select {
- case abortCh = <-cc.abortDKGCh:
+ case <-cc.dkgCtx.Done():
err = ErrDKGAborted
case <-time.After(500 * time.Millisecond):
}
cc.dkgLock.Lock()
}
- if abortCh != nil {
- return
+ if err != nil {
+ return err
}
cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
@@ -351,7 +372,7 @@ func (cc *configurationChain) runDKGPhaseNine(round uint64, reset uint64) (
cc.gov.DKGComplaints(round),
cc.dkg.threshold)
if err != nil {
- return
+ return err
}
qualifies := ""
for nID := range npks.QualifyNodeIDs {
@@ -367,28 +388,29 @@ func (cc *configurationChain) runDKGPhaseNine(round uint64, reset uint64) (
cc.logger.Warn("Self is not in Qualify Nodes",
"round", round,
"reset", reset)
- return
+ return nil
}
signer, err := cc.dkg.recoverShareSecret(npks.QualifyIDs)
if err != nil {
- return
+ return err
}
// Save private shares to DB.
if err = cc.db.PutDKGPrivateKey(round, *signer.privateKey); err != nil {
- return
+ return err
}
cc.dkgResult.Lock()
defer cc.dkgResult.Unlock()
cc.dkgSigner[round] = signer
cc.npks[round] = npks
- return
+ return nil
}
-func (cc *configurationChain) runTick(ticker Ticker) (abortCh chan<- struct{}) {
+func (cc *configurationChain) runTick(ticker Ticker) (aborted bool) {
cc.dkgLock.Unlock()
defer cc.dkgLock.Lock()
select {
- case abortCh = <-cc.abortDKGCh:
+ case <-cc.dkgCtx.Done():
+ aborted = true
case <-ticker.Tick():
}
return
@@ -396,69 +418,42 @@ func (cc *configurationChain) runTick(ticker Ticker) (abortCh chan<- struct{}) {
func (cc *configurationChain) initDKGPhasesFunc() {
cc.dkgRunPhases = []dkgStepFn{
- func(round uint64, reset uint64) (chan<- struct{}, error) {
+ func(round uint64, reset uint64) error {
return cc.runDKGPhaseOne(round, reset)
},
- func(round uint64, reset uint64) (chan<- struct{}, error) {
+ func(round uint64, reset uint64) error {
return cc.runDKGPhaseTwoAndThree(round, reset)
},
- func(round uint64, reset uint64) (chan<- struct{}, error) {
+ func(round uint64, reset uint64) error {
cc.runDKGPhaseFour()
- return nil, nil
+ return nil
},
- func(round uint64, reset uint64) (chan<- struct{}, error) {
+ func(round uint64, reset uint64) error {
cc.runDKGPhaseFiveAndSix(round, reset)
- return nil, nil
+ return nil
},
- func(round uint64, reset uint64) (chan<- struct{}, error) {
+ func(round uint64, reset uint64) error {
complaints := cc.gov.DKGComplaints(round)
cc.runDKGPhaseSeven(complaints)
- return nil, nil
+ return nil
},
- func(round uint64, reset uint64) (chan<- struct{}, error) {
+ func(round uint64, reset uint64) error {
cc.runDKGPhaseEight()
- return nil, nil
+ return nil
},
- func(round uint64, reset uint64) (chan<- struct{}, error) {
+ func(round uint64, reset uint64) error {
return cc.runDKGPhaseNine(round, reset)
},
}
}
func (cc *configurationChain) runDKG(round uint64, reset uint64) (err error) {
- // Check if corresponding DKG signer is ready.
- if _, _, err = cc.getDKGInfo(round); err == nil {
- return ErrSkipButNoError
- }
cc.dkgLock.Lock()
defer cc.dkgLock.Unlock()
- var (
- ticker Ticker
- abortCh chan<- struct{}
- )
- defer func() {
- if ticker != nil {
- ticker.Stop()
- }
- // Here we should hold the cc.dkgLock, reset cc.dkg to nil when done.
- cc.dkg = nil
- if abortCh == nil {
- select {
- case abortCh = <-cc.abortDKGCh:
- // The previous DKG finishes its job, don't overwrite its error
- // with "aborted" here.
- default:
- }
- }
- if abortCh != nil {
- abortCh <- struct{}{}
- }
- }()
- tickStartAt := 1
-
if cc.dkg == nil {
return ErrDKGNotRegistered
}
+ // Make sure the existing dkgProtocol is the expected one.
if cc.dkg.round != round || cc.dkg.reset != reset {
return ErrMismatchDKG{
expectRound: round,
@@ -467,19 +462,37 @@ func (cc *configurationChain) runDKG(round uint64, reset uint64) (err error) {
actualReset: cc.dkg.reset,
}
}
+ if cc.dkgRunning {
+ panic(fmt.Errorf("duplicated call to runDKG: %d %d", round, reset))
+ }
+ cc.dkgRunning = true
+ var ticker Ticker
+ defer func() {
+ if ticker != nil {
+ ticker.Stop()
+ }
+ // Here we should hold the cc.dkgLock, reset cc.dkg to nil when done.
+ if cc.dkg != nil {
+ cc.dkg = nil
+ }
+ cc.dkgRunning = false
+ }()
+ // Check if corresponding DKG signer is ready.
+ if _, _, err = cc.getDKGInfo(round); err == nil {
+ return ErrSkipButNoError
+ }
+ tickStartAt := 1
for i := cc.dkg.step; i < len(cc.dkgRunPhases); i++ {
if i >= tickStartAt && ticker == nil {
ticker = newTicker(cc.gov, round, TickerDKG)
}
- if ticker != nil {
- if abortCh = cc.runTick(ticker); abortCh != nil {
- return
- }
+ if ticker != nil && cc.runTick(ticker) {
+ return
}
- switch abortCh, err = cc.dkgRunPhases[i](round, reset); err {
+ switch err = cc.dkgRunPhases[i](round, reset); err {
case ErrSkipButNoError, nil:
cc.dkg.step = i + 1
err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo())
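
With the phase functions returning a plain error, runDKG becomes a resumable step machine: cc.dkg.step is advanced and persisted through db.PutOrUpdateDKGProtocol after each successful phase, and cancellation is observed either in runTick or inside a blocking phase via cc.dkgCtx.Done(). A stripped-down sketch of that control flow (hypothetical helper, not the actual implementation):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// runSteps distills runDKG's new control flow: resume from a persisted
// step, check for cancellation between steps, and persist progress after
// each successful phase.
func runSteps(ctx context.Context, start int,
	steps []func() error, persist func(next int) error) error {
	for i := start; i < len(steps); i++ {
		select {
		case <-ctx.Done():
			return errors.New("dkg aborted") // cf. ErrDKGAborted
		default:
		}
		if err := steps[i](); err != nil {
			return err
		}
		// Mirrors cc.db.PutOrUpdateDKGProtocol: a restarted node picks
		// up from the recorded step instead of rerunning earlier phases.
		if err := persist(i + 1); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	step := 0
	steps := []func() error{
		func() error { fmt.Println("phase 1: wait for MPKs"); return nil },
		func() error { fmt.Println("phase 2+3: exchange shares"); return nil },
	}
	err := runSteps(context.Background(), step, steps,
		func(next int) error { step = next; return nil })
	fmt.Println("err:", err, "resume-at:", step)
}
```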
diff --git a/core/configuration-chain_test.go b/core/configuration-chain_test.go
index 4de0c68..0efafd2 100644
--- a/core/configuration-chain_test.go
+++ b/core/configuration-chain_test.go
@@ -19,6 +19,7 @@ package core
import (
"bytes"
+ "context"
"errors"
"sync"
"testing"
@@ -217,7 +218,7 @@ func (s *ConfigurationChainTestSuite) runDKG(
}
for _, cc := range cfgChains {
- cc.registerDKG(round, reset, k)
+ cc.registerDKG(context.Background(), round, reset, k)
}
for _, gov := range recv.govs {
@@ -347,10 +348,10 @@ func (s *ConfigurationChainTestSuite) TestDKGMasterPublicKeyDelayAdd() {
if nID == delayNode {
continue
}
- cc.registerDKG(round, reset, k)
+ cc.registerDKG(context.Background(), round, reset, k)
}
time.Sleep(lambdaDKG)
- cfgChains[delayNode].registerDKG(round, reset, k)
+ cfgChains[delayNode].registerDKG(context.Background(), round, reset, k)
for _, gov := range recv.govs {
s.Require().Len(gov.DKGMasterPublicKeys(round), n-1)
@@ -410,7 +411,7 @@ func (s *ConfigurationChainTestSuite) TestDKGComplaintDelayAdd() {
}
for _, cc := range cfgChains {
- cc.registerDKG(round, reset, k)
+ cc.registerDKG(context.Background(), round, reset, k)
}
for _, gov := range recv.govs {
@@ -617,25 +618,30 @@ func (s *ConfigurationChainTestSuite) TestDKGAbort() {
recv.nodes[nID] = cc
recv.govs[nID] = gov
// The first register should not be blocked.
- cc.registerDKG(round, reset, k)
+ cc.registerDKG(context.Background(), round, reset, k)
// We should be blocked because DKGReady is not enough.
errs := make(chan error, 1)
+ called := make(chan struct{}, 1)
go func() {
+ called <- struct{}{}
errs <- cc.runDKG(round, reset)
}()
// The second register shouldn't be blocked, either.
randHash := common.NewRandomHash()
gov.ResetDKG(randHash[:])
- cc.registerDKG(round, reset+1, k)
+ <-called
+ cc.registerDKG(context.Background(), round, reset+1, k)
err = <-errs
s.Require().EqualError(ErrDKGAborted, err.Error())
go func() {
+ called <- struct{}{}
errs <- cc.runDKG(round, reset+1)
}()
// The third register shouldn't be blocked, either.
randHash = common.NewRandomHash()
gov.ProposeCRS(round+1, randHash[:])
- cc.registerDKG(round+1, reset, k)
+ <-called
+ cc.registerDKG(context.Background(), round+1, reset, k)
err = <-errs
s.Require().EqualError(ErrDKGAborted, err.Error())
}
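
The added called channel closes a race in TestDKGAbort: without it, registerDKG for the next reset could run before the runDKG goroutine was even scheduled, leaving nothing to abort, so the expected ErrDKGAborted would never surface. The idiom in isolation (a sketch; blockingWork stands in for cc.runDKG):

```go
package main

import (
	"errors"
	"fmt"
)

// blockingWork stands in for cc.runDKG in the test.
func blockingWork() error { return errors.New("dkg aborted") }

func main() {
	errs := make(chan error, 1)
	called := make(chan struct{}, 1)
	go func() {
		called <- struct{}{} // announce the goroutine has been scheduled
		errs <- blockingWork()
	}()
	<-called // only now is it safe to trigger the abort path
	fmt.Println(<-errs)
}
```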
diff --git a/core/consensus.go b/core/consensus.go
index 83727ec..d32238d 100644
--- a/core/consensus.go
+++ b/core/consensus.go
@@ -633,11 +633,11 @@ func (con *Consensus) prepare(
// Measure time elapsed in each handler of round events.
elapse := func(what string, lastE utils.RoundEventParam) func() {
start := time.Now()
- con.logger.Info("handle round event",
+ con.logger.Info("Handle round event",
"what", what,
"event", lastE)
return func() {
- con.logger.Info("finish round event",
+ con.logger.Info("Finish round event",
"what", what,
"event", lastE,
"elapse", time.Since(start))
@@ -647,7 +647,7 @@ func (con *Consensus) prepare(
// modules see the up-to-date node set, we need to make sure this action
// is taken before any other.
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
- defer elapse("purge node set", evts[len(evts)-1])()
+ defer elapse("purge-node-set", evts[len(evts)-1])()
for _, e := range evts {
if e.Reset == 0 {
continue
@@ -659,13 +659,13 @@ func (con *Consensus) prepare(
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
e := evts[len(evts)-1]
go func() {
- defer elapse("abort DKG", e)()
- con.cfgModule.abortDKG(e.Round+1, e.Reset)
+ defer elapse("abort-DKG", e)()
+ con.cfgModule.abortDKG(con.ctx, e.Round+1, e.Reset)
}()
})
// Register round event handler to update BA and BC modules.
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
- defer elapse("append config", evts[len(evts)-1])()
+ defer elapse("append-config", evts[len(evts)-1])()
// Always push newer configs to the later modules in the flow first.
if err := con.bcModule.notifyRoundEvents(evts); err != nil {
panic(err)
@@ -681,7 +681,7 @@ func (con *Consensus) prepare(
// failed to setup.
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
e := evts[len(evts)-1]
- defer elapse("reset DKG", e)()
+ defer elapse("reset-DKG", e)()
nextRound := e.Round + 1
if nextRound < DKGDelayRound {
return
@@ -724,7 +724,7 @@ func (con *Consensus) prepare(
}
// Abort any previously running DKG protocol instance.
go func() {
- con.cfgModule.abortDKG(nextRound, e.Reset)
+ con.cfgModule.abortDKG(con.ctx, nextRound, e.Reset)
con.runCRS(e.Round, utils.Rehash(e.CRS, uint(e.Reset+1)), true)
}()
})
@@ -734,7 +734,7 @@ func (con *Consensus) prepare(
// We don't have to propose a new CRS during a DKG reset; the reset is
// handled by the DKG set of the previous round.
e := evts[len(evts)-1]
- defer elapse("propose CRS", e)()
+ defer elapse("propose-CRS", e)()
if e.Reset != 0 || e.Round < DKGDelayRound {
return
}
@@ -761,7 +761,7 @@ func (con *Consensus) prepare(
// Touch nodeSetCache for next round.
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
e := evts[len(evts)-1]
- defer elapse("touch node set cache", e)()
+ defer elapse("touch-NodeSetCache", e)()
if e.Reset != 0 {
return
}
@@ -790,7 +790,7 @@ func (con *Consensus) prepare(
// Trigger round validation method for next period.
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
e := evts[len(evts)-1]
- defer elapse("next round", e)()
+ defer elapse("next-round", e)()
// Register a routine to trigger round events.
con.event.RegisterHeight(e.NextRoundValidationHeight(),
utils.RoundEventRetryHandlerGenerator(con.roundEvent, con.event))
@@ -832,7 +832,7 @@ func (con *Consensus) prepare(
"reset", e.Reset)
nextConfig := utils.GetConfigWithPanic(con.gov, nextRound,
con.logger)
- con.cfgModule.registerDKG(nextRound, e.Reset,
+ con.cfgModule.registerDKG(con.ctx, nextRound, e.Reset,
utils.GetDKGThreshold(nextConfig))
con.event.RegisterHeight(e.NextDKGPreparationHeight(),
func(uint64) {
diff --git a/core/consensus_test.go b/core/consensus_test.go
index 2a5cc54..230921c 100644
--- a/core/consensus_test.go
+++ b/core/consensus_test.go
@@ -235,11 +235,11 @@ func (s *ConsensusTestSuite) TestRegisteredDKGRecover() {
s.Require().Nil(con.cfgModule.dkg)
- con.cfgModule.registerDKG(0, 0, 10)
+ con.cfgModule.registerDKG(con.ctx, 0, 0, 10)
_, newCon := s.prepareConsensusWithDB(dMoment, gov, prvKeys[0], conn, dbInst)
- newCon.cfgModule.registerDKG(0, 0, 10)
+ newCon.cfgModule.registerDKG(newCon.ctx, 0, 0, 10)
s.Require().NotNil(newCon.cfgModule.dkg)
s.Require().True(newCon.cfgModule.dkg.prvShares.Equal(con.cfgModule.dkg.prvShares))
diff --git a/core/dkg-tsig-protocol.go b/core/dkg-tsig-protocol.go
index 50c3a0b..9a71bea 100644
--- a/core/dkg-tsig-protocol.go
+++ b/core/dkg-tsig-protocol.go
@@ -555,6 +555,13 @@ func (tc *TSigVerifierCache) UpdateAndGet(round uint64) (
return v, ok, nil
}
+// Purge removes the cached verifier of the given round.
+func (tc *TSigVerifierCache) Purge(round uint64) {
+ tc.lock.Lock()
+ defer tc.lock.Unlock()
+ delete(tc.verifier, round)
+}
+
// Update the cache and returns whether the update succeeds.
func (tc *TSigVerifierCache) Update(round uint64) (bool, error) {
tc.lock.Lock()
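
Purge only evicts the cached verifier; as the test in the next hunk asserts, minRound is left untouched, so a purged round is rebuilt on the next UpdateAndGet rather than treated as expired. The intended call pattern, mirroring the sites added in blockchain.go and syncer/consensus.go (stand-in types for the sketch):

```go
package main

import "fmt"

// purger matches the Purge method this patch adds to TSigVerifierCache.
type purger interface{ Purge(round uint64) }

// roundEvent mirrors the two fields of utils.RoundEventParam used here.
type roundEvent struct{ Round, Reset uint64 }

// purgeOnReset reproduces the pattern added to both
// blockChain.notifyRoundEvents and syncer.Consensus.assureBuffering: a
// non-zero Reset invalidates the verifier cached for the next round.
func purgeOnReset(c purger, evts []roundEvent) {
	for _, e := range evts {
		if e.Reset != 0 {
			c.Purge(e.Round + 1)
		}
	}
}

type logPurger struct{}

func (logPurger) Purge(round uint64) { fmt.Println("purged verifier for round", round) }

func main() {
	purgeOnReset(logPurger{}, []roundEvent{{Round: 5, Reset: 1}, {Round: 6, Reset: 0}})
}
```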
diff --git a/core/dkg-tsig-protocol_test.go b/core/dkg-tsig-protocol_test.go
index 9de2027..584b3bb 100644
--- a/core/dkg-tsig-protocol_test.go
+++ b/core/dkg-tsig-protocol_test.go
@@ -955,6 +955,9 @@ func (s *DKGTSIGProtocolTestSuite) TestTSigVerifierCache() {
s.Require().True(ok)
s.Equal(uint64(5), cache.minRound)
+ cache.Purge(5)
+ s.Require().Len(cache.verifier, 0)
+ s.Require().Equal(uint64(5), cache.minRound)
}
func (s *DKGTSIGProtocolTestSuite) TestUnexpectedDKGResetCount() {
diff --git a/core/syncer/consensus.go b/core/syncer/consensus.go
index 4fc24b4..f5099e4 100644
--- a/core/syncer/consensus.go
+++ b/core/syncer/consensus.go
@@ -176,6 +176,7 @@ func (con *Consensus) assureBuffering() {
continue
}
con.nodeSetCache.Purge(e.Round + 1)
+ con.tsigVerifier.Purge(e.Round + 1)
}
})
// Register a round event handler to notify CRS to agreementModule.
diff --git a/core/utils/round-event.go b/core/utils/round-event.go
index ff1d91e..7dace84 100644
--- a/core/utils/round-event.go
+++ b/core/utils/round-event.go
@@ -181,7 +181,7 @@ func NewRoundEvent(parentCtx context.Context, gov governanceAccessor,
// block height.
resetCount := gov.DKGResetCount(initRound + 1)
remains := resetCount
- for ; resetCount > 0 && !e.config.Contains(initBlockHeight); remains-- {
+ for ; remains > 0 && !e.config.Contains(initBlockHeight); remains-- {
e.config.ExtendLength()
}
if !e.config.Contains(initBlockHeight) {
@@ -272,7 +272,7 @@ func (e *RoundEvent) check(blockHeight, startRound uint64) (
}
// A simple assertion to make sure we didn't pick the wrong round.
if e.config.RoundID() != e.lastTriggeredRound {
- panic(fmt.Errorf("triggered round not matched: %d, %d",
+ panic(fmt.Errorf("Triggered round not matched: %d, %d",
e.config.RoundID(), e.lastTriggeredRound))
}
param.Round = e.lastTriggeredRound
@@ -280,7 +280,7 @@ func (e *RoundEvent) check(blockHeight, startRound uint64) (
param.BeginHeight = e.config.LastPeriodBeginHeight()
param.CRS = GetCRSWithPanic(e.gov, e.lastTriggeredRound, e.logger)
param.Config = GetConfigWithPanic(e.gov, e.lastTriggeredRound, e.logger)
- e.logger.Info("new RoundEvent triggered",
+ e.logger.Info("New RoundEvent triggered",
"round", e.lastTriggeredRound,
"reset", e.lastTriggeredResetCount,
"begin-height", e.config.LastPeriodBeginHeight(),
@@ -322,7 +322,7 @@ func (e *RoundEvent) check(blockHeight, startRound uint64) (
e.gov.DKGComplaints(nextRound),
GetDKGThreshold(nextCfg)); err != nil {
e.logger.Debug(
- "group public key setup failed, waiting for DKG reset",
+ "Group public key setup failed, waiting for DKG reset",
"round", nextRound,
"reset", e.lastTriggeredResetCount)
e.dkgFailed = true
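
The one-line change in NewRoundEvent fixes a loop-bound bug: the loop decremented remains but tested resetCount, a constant, so the bound never tightened and the config kept being extended until Contains flipped true. Testing remains caps the number of extensions at the reset count, and the following Contains check catches any remaining mismatch. A compact illustration of the fixed form (toy values):

```go
package main

import "fmt"

func main() {
	// contains stands in for e.config.Contains(initBlockHeight): here it
	// would only become true after 3 extensions.
	contains := func(extended int) bool { return extended >= 3 }
	resetCount, extended := uint64(2), 0
	// Fixed form: the loop tests the same counter it decrements, so the
	// number of ExtendLength calls is capped by the reset count.
	for remains := resetCount; remains > 0 && !contains(extended); remains-- {
		extended++
	}
	fmt.Println("extensions:", extended)        // 2, capped by resetCount
	fmt.Println("contains:", contains(extended)) // false; caller reports the gap
}
```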