author      Jimmy Hu <jimmy.hu@dexon.org>    2018-12-19 17:16:40 +0800
committer   GitHub <noreply@github.com>      2018-12-19 17:16:40 +0800
commit      c7b4045802450df361216d9e7da3ec318e67cc34 (patch)
tree        c4060817a54e5cf455e830b21e6a91b9fc11004f /core
parent      7bafefa5c70a26a28636123cb2b6598eea3ed380 (diff)
core: Add a `MPKReady` so `MasterPublicKey` cannot be added afterwards (#375)
* Add type DKGReady
* Add DKGReady to interface and state
* DKG will wait for MPKs to be ready before running (see the sketch after these notes)
* Modify test
* Check that the node's own MPK is registered
* Add a test for delayed MPK addition
* Rename Ready to MPKReady
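
For orientation, here is a minimal, self-contained Go sketch of the flow this commit introduces. It is an illustration, not the real implementation: the `MPKReady` shape and the `AddDKGMPKReady`/`IsDKGMPKReady` calls mirror the diff below, while the `governance` struct, string node IDs, byte-slice signatures, threshold value, and poll interval are simplified stand-ins.

// Sketch only: simplified stand-ins for the real core/types/dkg and Governance types.
package main

import (
    "fmt"
    "sync"
    "time"
)

// MPKReady mirrors the new DKG message: a signed statement that the proposer's
// MasterPublicKey for a round has been registered.
type MPKReady struct {
    ProposerID string // simplified; the real type is types.NodeID
    Round      uint64
    Signature  []byte // simplified; the real type is crypto.Signature
}

// governance mimics the two methods added to the Governance interface.
type governance struct {
    mu        sync.Mutex
    readys    map[uint64]map[string]*MPKReady
    threshold int // number of readys that must be exceeded
}

func (g *governance) AddDKGMPKReady(round uint64, r *MPKReady) {
    g.mu.Lock()
    defer g.mu.Unlock()
    if g.readys[round] == nil {
        g.readys[round] = map[string]*MPKReady{}
    }
    g.readys[round][r.ProposerID] = r
}

func (g *governance) IsDKGMPKReady(round uint64) bool {
    g.mu.Lock()
    defer g.mu.Unlock()
    return len(g.readys[round]) > g.threshold
}

// runDKG shows the new gating behaviour: the protocol polls until enough
// MPKReady messages arrive, so no MasterPublicKey can be added afterwards.
func runDKG(g *governance, round uint64) {
    for !g.IsDKGMPKReady(round) {
        time.Sleep(10 * time.Millisecond) // 500 ms in the real code
    }
    fmt.Println("MPKs are frozen; proceed to exchange DKG secret key shares")
}

func main() {
    g := &governance{readys: map[uint64]map[string]*MPKReady{}, threshold: 1}
    go func() {
        for _, id := range []string{"node-a", "node-b"} {
            g.AddDKGMPKReady(0, &MPKReady{ProposerID: id, Round: 0})
            time.Sleep(20 * time.Millisecond)
        }
    }()
    runDKG(g, 0)
}

In the actual change the readiness threshold is two thirds of the DKG set size and the poll interval is 500 ms; see core/test/governance.go and core/configuration-chain.go in the diff below.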
Diffstat (limited to 'core')
-rw-r--r--   core/authenticator.go              |  8
-rw-r--r--   core/configuration-chain.go        | 34
-rw-r--r--   core/configuration-chain_test.go   | 95
-rw-r--r--   core/consensus.go                  | 12
-rw-r--r--   core/consensus_test.go             |  1
-rw-r--r--   core/crypto.go                     |  9
-rw-r--r--   core/dkg-tsig-protocol.go          | 10
-rw-r--r--   core/dkg-tsig-protocol_test.go     | 29
-rw-r--r--   core/interfaces.go                 |  6
-rw-r--r--   core/test/governance.go            | 29
-rw-r--r--   core/test/state-change-request.go  |  6
-rw-r--r--   core/test/state.go                 | 73
-rw-r--r--   core/test/state_test.go            | 39
-rw-r--r--   core/test/utils.go                 | 13
-rw-r--r--   core/types/dkg/dkg.go              | 21
-rw-r--r--   core/types/dkg/dkg_test.go         | 31
16 files changed, 406 insertions, 10 deletions
diff --git a/core/authenticator.go b/core/authenticator.go
index 5d176cf..8e57f71 100644
--- a/core/authenticator.go
+++ b/core/authenticator.go
@@ -103,6 +103,14 @@ func (au *Authenticator) SignDKGPartialSignature(
     return
 }
 
+// SignDKGMPKReady signs a DKG ready message.
+func (au *Authenticator) SignDKGMPKReady(
+    ready *typesDKG.MPKReady) (err error) {
+    ready.ProposerID = au.proposerID
+    ready.Signature, err = au.prvKey.Sign(hashDKGMPKReady(ready))
+    return
+}
+
 // SignDKGFinalize signs a DKG finalize message.
 func (au *Authenticator) SignDKGFinalize(
     final *typesDKG.Finalize) (err error) {
diff --git a/core/configuration-chain.go b/core/configuration-chain.go
index 3a43042..d341cb5 100644
--- a/core/configuration-chain.go
+++ b/core/configuration-chain.go
@@ -105,6 +105,13 @@ func (cc *configurationChain) registerDKG(round uint64, threshold int) {
         cc.recv,
         round,
         threshold)
+    go func() {
+        ticker := newTicker(cc.gov, round, TickerDKG)
+        <-ticker.Tick()
+        cc.dkgLock.Lock()
+        defer cc.dkgLock.Unlock()
+        cc.dkg.proposeMPKReady()
+    }()
 }
 
 func (cc *configurationChain) runDKG(round uint64) error {
@@ -126,13 +133,34 @@ func (cc *configurationChain) runDKG(round uint64) error {
         cc.logger.Warn("DKG already final", "round", round)
         return nil
     }
+    cc.logger.Debug("Calling Governance.IsDKGMPKReady", "round", round)
+    for !cc.gov.IsDKGMPKReady(round) {
+        cc.logger.Info("DKG MPKs are not ready yet. Try again later...",
+            "nodeID", cc.ID)
+        cc.dkgLock.Unlock()
+        time.Sleep(500 * time.Millisecond)
+        cc.dkgLock.Lock()
+    }
     ticker := newTicker(cc.gov, round, TickerDKG)
     cc.dkgLock.Unlock()
     <-ticker.Tick()
     cc.dkgLock.Lock()
-    // Phase 2(T = 0): Exchange DKG secret key share.
+    // Check if this node successfully join the protocol.
     cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
-    cc.dkg.processMasterPublicKeys(cc.gov.DKGMasterPublicKeys(round))
+    mpks := cc.gov.DKGMasterPublicKeys(round)
+    inProtocol := false
+    for _, mpk := range mpks {
+        if mpk.ProposerID == cc.ID {
+            inProtocol = true
+            break
+        }
+    }
+    if !inProtocol {
+        cc.logger.Warn("Failed to join DKG protocol", "round", round)
+        return nil
+    }
+    // Phase 2(T = 0): Exchange DKG secret key share.
+    cc.dkg.processMasterPublicKeys(mpks)
     cc.mpkReady = true
     for _, prvShare := range cc.pendingPrvShare {
         if err := cc.dkg.processPrivateShare(prvShare); err != nil {
@@ -219,7 +247,7 @@ func (cc *configurationChain) runDKG(round uint64) error {
     return nil
 }
 
-func (cc *configurationChain) isDKGReady(round uint64) bool {
+func (cc *configurationChain) isDKGFinal(round uint64) bool {
     if !cc.gov.IsDKGFinal(round) {
         return false
     }
diff --git a/core/configuration-chain_test.go b/core/configuration-chain_test.go
index 8214d6a..8becd25 100644
--- a/core/configuration-chain_test.go
+++ b/core/configuration-chain_test.go
@@ -164,6 +164,30 @@ func (r *testCCReceiver) ProposeDKGAntiNackComplaint(
     }()
 }
 
+func (r *testCCReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) {
+    prvKey, exist := r.s.prvKeys[ready.ProposerID]
+    if !exist {
+        panic(errors.New("should exist"))
+    }
+    var err error
+    ready.Signature, err = prvKey.Sign(hashDKGMPKReady(ready))
+    if err != nil {
+        panic(err)
+    }
+    for _, gov := range r.govs {
+        // Use Marshal/Unmarshal to do deep copy.
+        data, err := json.Marshal(ready)
+        if err != nil {
+            panic(err)
+        }
+        readyCopy := &typesDKG.MPKReady{}
+        if err := json.Unmarshal(data, readyCopy); err != nil {
+            panic(err)
+        }
+        gov.AddDKGMPKReady(readyCopy.Round, readyCopy)
+    }
+}
+
 func (r *testCCReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) {
     prvKey, exist := r.s.prvKeys[final.ProposerID]
     if !exist {
@@ -327,6 +351,77 @@ func (s *ConfigurationChainTestSuite) TestConfigurationChain() {
     }
 }
 
+func (s *ConfigurationChainTestSuite) TestDKGMasterPublicKeyDelayAdd() {
+    k := 4
+    n := 10
+    round := uint64(0)
+    lambdaDKG := 1000 * time.Millisecond
+    s.setupNodes(n)
+
+    cfgChains := make(map[types.NodeID]*configurationChain)
+    recv := newTestCCReceiver(s)
+
+    pks := make([]crypto.PublicKey, 0, len(s.prvKeys))
+    for _, prv := range s.prvKeys {
+        pks = append(pks, prv.PublicKey())
+    }
+
+    delayNode := s.nIDs[0]
+
+    for _, nID := range s.nIDs {
+        state := test.NewState(
+            pks, 100*time.Millisecond, &common.NullLogger{}, true)
+        gov, err := test.NewGovernance(state, ConfigRoundShift)
+        s.Require().NoError(err)
+        s.Require().NoError(state.RequestChange(
+            test.StateChangeLambdaDKG, lambdaDKG))
+        cache := utils.NewNodeSetCache(gov)
+        dbInst, err := db.NewMemBackedDB()
+        s.Require().NoError(err)
+        cfgChains[nID] = newConfigurationChain(
+            nID, recv, gov, cache, dbInst, &common.NullLogger{})
+        recv.nodes[nID] = cfgChains[nID]
+        recv.govs[nID] = gov
+    }
+
+    for nID, cc := range cfgChains {
+        if nID == delayNode {
+            continue
+        }
+        cc.registerDKG(round, k)
+    }
+    time.Sleep(lambdaDKG)
+    cfgChains[delayNode].registerDKG(round, k)
+
+    for _, gov := range recv.govs {
+        s.Require().Len(gov.DKGMasterPublicKeys(round), n-1)
+    }
+
+    errs := make(chan error, n)
+    wg := sync.WaitGroup{}
+    wg.Add(n)
+    for _, cc := range cfgChains {
+        go func(cc *configurationChain) {
+            defer wg.Done()
+            errs <- cc.runDKG(round)
+        }(cc)
+    }
+    wg.Wait()
+    for range cfgChains {
+        s.Require().NoError(<-errs)
+    }
+    for nID, cc := range cfgChains {
+        shouldExist := nID != delayNode
+        _, exist := cc.gpk[round]
+        s.Equal(shouldExist, exist)
+        if !exist {
+            continue
+        }
+        _, exist = cc.gpk[round].qualifyNodeIDs[nID]
+        s.Equal(shouldExist, exist)
+    }
+}
+
 func (s *ConfigurationChainTestSuite) TestDKGComplaintDelayAdd() {
     k := 4
     n := 10
diff --git a/core/consensus.go b/core/consensus.go
index 741330e..bf49a72 100644
--- a/core/consensus.go
+++ b/core/consensus.go
@@ -329,6 +329,16 @@ func (recv *consensusDKGReceiver) ProposeDKGAntiNackComplaint(
     recv.network.BroadcastDKGPrivateShare(prv)
 }
 
+// ProposeDKGMPKReady propose a DKGMPKReady message.
+func (recv *consensusDKGReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) {
+    if err := recv.authModule.SignDKGMPKReady(ready); err != nil {
+        recv.logger.Error("Failed to sign DKG ready", "error", err)
+        return
+    }
+    recv.logger.Debug("Calling Governance.AddDKGFinalize", "ready", ready)
+    recv.gov.AddDKGMPKReady(ready.Round, ready)
+}
+
 // ProposeDKGFinalize propose a DKGFinalize message.
 func (recv *consensusDKGReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) {
     if err := recv.authModule.SignDKGFinalize(final); err != nil {
@@ -631,7 +641,7 @@ func (con *Consensus) runCRS(round uint64) {
         }
         con.logger.Debug("Calling Governance.IsDKGFinal to check if ready to run CRS",
             "round", round)
-        if con.cfgModule.isDKGReady(round) {
+        if con.cfgModule.isDKGFinal(round) {
             break
         }
         con.logger.Debug("DKG is not ready for running CRS. Retry later...",
diff --git a/core/consensus_test.go b/core/consensus_test.go
index 84df645..7a87d12 100644
--- a/core/consensus_test.go
+++ b/core/consensus_test.go
@@ -551,7 +551,6 @@ func (s *ConsensusTestSuite) TestDKGCRS() {
         _, con := s.prepareConsensus(dMoment, gov, key, conn)
         nID := types.NewNodeID(key.PublicKey())
         cons[nID] = con
-        con.cfgModule.registerDKG(uint64(0), n/3+1)
     }
     for _, con := range cons {
         go con.runDKG(0, gov.Configuration(0))
diff --git a/core/crypto.go b/core/crypto.go
index 914ca08..96ea54f 100644
--- a/core/crypto.go
+++ b/core/crypto.go
@@ -243,6 +243,15 @@ func verifyDKGPartialSignatureSignature(
     return true, nil
 }
 
+func hashDKGMPKReady(ready *typesDKG.MPKReady) common.Hash {
+    binaryRound := make([]byte, 8)
+    binary.LittleEndian.PutUint64(binaryRound, ready.Round)
+
+    return crypto.Keccak256Hash(
+        ready.ProposerID.Hash[:],
+        binaryRound,
+    )
+}
 func hashDKGFinalize(final *typesDKG.Finalize) common.Hash {
     binaryRound := make([]byte, 8)
     binary.LittleEndian.PutUint64(binaryRound, final.Round)
diff --git a/core/dkg-tsig-protocol.go b/core/dkg-tsig-protocol.go
index b120f81..ef12cf9 100644
--- a/core/dkg-tsig-protocol.go
+++ b/core/dkg-tsig-protocol.go
@@ -68,6 +68,9 @@ type dkgReceiver interface {
     // ProposeDKGAntiNackComplaint propose a DKGPrivateShare as an anti complaint.
     ProposeDKGAntiNackComplaint(prv *typesDKG.PrivateShare)
 
+    // ProposeDKGMPKReady propose a DKGMPKReady message.
+    ProposeDKGMPKReady(ready *typesDKG.MPKReady)
+
     // ProposeDKGFinalize propose a DKGFinalize message.
     ProposeDKGFinalize(final *typesDKG.Finalize)
 }
@@ -339,6 +342,13 @@ func (d *dkgProtocol) processPrivateShare(
     return nil
 }
 
+func (d *dkgProtocol) proposeMPKReady() {
+    d.recv.ProposeDKGMPKReady(&typesDKG.MPKReady{
+        ProposerID: d.ID,
+        Round:      d.round,
+    })
+}
+
 func (d *dkgProtocol) proposeFinalize() {
     d.recv.ProposeDKGFinalize(&typesDKG.Finalize{
         ProposerID: d.ID,
diff --git a/core/dkg-tsig-protocol_test.go b/core/dkg-tsig-protocol_test.go
index 2dad0e8..1f0ddca 100644
--- a/core/dkg-tsig-protocol_test.go
+++ b/core/dkg-tsig-protocol_test.go
@@ -47,6 +47,7 @@ type testDKGReceiver struct {
     mpk            *typesDKG.MasterPublicKey
     prvShare       map[types.NodeID]*typesDKG.PrivateShare
     antiComplaints map[types.NodeID]*typesDKG.PrivateShare
+    ready          []*typesDKG.MPKReady
     final          []*typesDKG.Finalize
 }
 
@@ -92,6 +93,10 @@ func (r *testDKGReceiver) ProposeDKGAntiNackComplaint(
     r.antiComplaints[prv.ReceiverID] = prv
 }
 
+func (r *testDKGReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) {
+    r.ready = append(r.ready, ready)
+}
+
 func (r *testDKGReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) {
     r.final = append(r.final, final)
 }
@@ -685,6 +690,21 @@ func (s *DKGTSIGProtocolTestSuite) TestPartialSignature() {
     s.True(gpk.VerifySignature(msgHash, sig))
 }
 
+func (s *DKGTSIGProtocolTestSuite) TestProposeReady() {
+    prvKey, err := ecdsa.NewPrivateKey()
+    s.Require().NoError(err)
+    recv := newTestDKGReceiver(s, prvKey)
+    nID := types.NewNodeID(prvKey.PublicKey())
+    protocol := newDKGProtocol(nID, recv, 1, 2)
+    protocol.proposeMPKReady()
+    s.Require().Len(recv.ready, 1)
+    ready := recv.ready[0]
+    s.Equal(&typesDKG.MPKReady{
+        ProposerID: nID,
+        Round:      1,
+    }, ready)
+}
+
 func (s *DKGTSIGProtocolTestSuite) TestProposeFinalize() {
     prvKey, err := ecdsa.NewPrivateKey()
     s.Require().NoError(err)
@@ -718,6 +738,15 @@ func (s *DKGTSIGProtocolTestSuite) TestTSigVerifierCache() {
     }
 
     for _, protocol := range protocols {
+        protocol.proposeMPKReady()
+    }
+    for _, recv := range receivers {
+        s.Require().Len(recv.ready, 1)
+        gov.AddDKGMPKReady(recv.ready[0].Round, recv.ready[0])
+    }
+    s.Require().True(gov.IsDKGMPKReady(round))
+
+    for _, protocol := range protocols {
         protocol.proposeFinalize()
     }
 
diff --git a/core/interfaces.go b/core/interfaces.go
index 2ebfe86..fc3bf09 100644
--- a/core/interfaces.go
+++ b/core/interfaces.go
@@ -130,6 +130,12 @@ type Governance interface {
     // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round.
     DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey
 
+    // AddDKGMPKReady adds a DKG ready message.
+    AddDKGMPKReady(round uint64, ready *typesDKG.MPKReady)
+
+    // IsDKGFinal checks if DKG is ready.
+    IsDKGMPKReady(round uint64) bool
+
     // AddDKGFinalize adds a DKG finalize message.
     AddDKGFinalize(round uint64, final *typesDKG.Finalize)
 
diff --git a/core/test/governance.go b/core/test/governance.go
index 9bb042e..769934b 100644
--- a/core/test/governance.go
+++ b/core/test/governance.go
@@ -163,6 +163,9 @@ func (g *Governance) AddDKGMasterPublicKey(
     if round != masterPublicKey.Round {
         return
     }
+    if g.IsDKGMPKReady(masterPublicKey.Round) {
+        return
+    }
     if err := g.stateModule.RequestChange(
         StateAddDKGMasterPublicKey, masterPublicKey); err != nil {
         panic(err)
@@ -176,6 +179,32 @@ func (g *Governance) DKGMasterPublicKeys(
     return g.stateModule.DKGMasterPublicKeys(round)
 }
 
+// AddDKGMPKReady adds a DKG ready message.
+func (g *Governance) AddDKGMPKReady(round uint64, ready *typesDKG.MPKReady) {
+    if round != ready.Round {
+        return
+    }
+    if err := g.stateModule.RequestChange(StateAddDKGMPKReady, ready); err != nil {
+        panic(err)
+    }
+    g.broadcastPendingStateChanges()
+}
+
+// IsDKGMPKReady checks if DKG is ready.
+func (g *Governance) IsDKGMPKReady(round uint64) bool {
+    if round == 0 || round == 1 {
+        // Round 0, 1 are genesis round, their configs should be created
+        // by default.
+        g.CatchUpWithRound(round)
+    }
+    g.lock.RLock()
+    defer g.lock.RUnlock()
+    if round >= uint64(len(g.configs)) {
+        return false
+    }
+    return g.stateModule.IsDKGMPKReady(round, int(g.configs[round].DKGSetSize)/3*2)
+}
+
 // AddDKGFinalize adds a DKG finalize message.
 func (g *Governance) AddDKGFinalize(round uint64, final *typesDKG.Finalize) {
     if round != final.Round {
diff --git a/core/test/state-change-request.go b/core/test/state-change-request.go
index 1515fd2..83119b5 100644
--- a/core/test/state-change-request.go
+++ b/core/test/state-change-request.go
@@ -39,6 +39,7 @@ const (
     StateAddCRS
     StateAddDKGComplaint
     StateAddDKGMasterPublicKey
+    StateAddDKGMPKReady
    StateAddDKGFinal
     // Configuration related.
     StateChangeNumChains
@@ -115,6 +116,8 @@ func (req *StateChangeRequest) Clone() (copied *StateChangeRequest) {
             Round: crsReq.Round,
             CRS:   crsReq.CRS,
         }
+    case StateAddDKGMPKReady:
+        copied.Payload = cloneDKGMPKReady(req.Payload.(*typesDKG.MPKReady))
     case StateAddDKGFinal:
         copied.Payload = cloneDKGFinalize(req.Payload.(*typesDKG.Finalize))
     case StateAddDKGMasterPublicKey:
@@ -154,6 +157,9 @@ func (req *StateChangeRequest) String() (ret string) {
         ret += fmt.Sprintf(
             "{Type:AddDKGMasterPublicKey %s",
             req.Payload.(*typesDKG.MasterPublicKey))
+    case StateAddDKGMPKReady:
+        ret += fmt.Sprintf(
+            "{Type:AddDKGMPKReady %s", req.Payload.(*typesDKG.MPKReady))
     case StateAddDKGFinal:
         ret += fmt.Sprintf(
             "{Type:AddDKGFinal %s", req.Payload.(*typesDKG.Finalize))
diff --git a/core/test/state.go b/core/test/state.go
index 30ed8af..3c24fb6 100644
--- a/core/test/state.go
+++ b/core/test/state.go
@@ -43,6 +43,8 @@ var (
     ErrMissingPreviousCRS = errors.New("missing previous CRS")
     // ErrUnknownStateChangeType means a StateChangeType is not recognized.
     ErrUnknownStateChangeType = errors.New("unknown state change type")
+    // ErrProposerMPKIsReady means a proposer of one mpk is ready.
+    ErrProposerMPKIsReady = errors.New("proposer mpk is ready")
     // ErrProposerIsFinal means a proposer of one complaint is finalized.
     ErrProposerIsFinal = errors.New("proposer is final")
     // ErrStateConfigNotEqual means configuration part of two states is not
@@ -59,6 +61,9 @@ var (
     // states are not equal.
     ErrStateDKGMasterPublicKeysNotEqual = errors.New(
         "dkg master public keys not equal")
+    // ErrStateDKGMPKReadysNotEqual means DKG readys of two states are not
+    // equal.
+    ErrStateDKGMPKReadysNotEqual = errors.New("dkg readys not equal")
     // ErrStateDKGFinalsNotEqual means DKG finalizations of two states are not
     // equal.
     ErrStateDKGFinalsNotEqual = errors.New("dkg finalizations not equal")
@@ -95,6 +100,7 @@ type State struct {
     // DKG & CRS
     dkgComplaints       map[uint64]map[types.NodeID][]*typesDKG.Complaint
     dkgMasterPublicKeys map[uint64]map[types.NodeID]*typesDKG.MasterPublicKey
+    dkgReadys           map[uint64]map[types.NodeID]*typesDKG.MPKReady
     dkgFinals           map[uint64]map[types.NodeID]*typesDKG.Finalize
     crs                 []common.Hash
     // Other stuffs
@@ -136,6 +142,8 @@ func NewState(
         dkgSetSize:     uint32(len(nodes)),
         ownRequests:    make(map[common.Hash]*StateChangeRequest),
         globalRequests: make(map[common.Hash]*StateChangeRequest),
+        dkgReadys: make(
+            map[uint64]map[types.NodeID]*typesDKG.MPKReady),
         dkgFinals: make(
             map[uint64]map[types.NodeID]*typesDKG.Finalize),
         dkgComplaints: make(
@@ -194,6 +202,9 @@ func (s *State) unpackPayload(
     case StateAddDKGMasterPublicKey:
         v = &typesDKG.MasterPublicKey{}
         err = rlp.DecodeBytes(raw.Payload, v)
+    case StateAddDKGMPKReady:
+        v = &typesDKG.MPKReady{}
+        err = rlp.DecodeBytes(raw.Payload, v)
     case StateAddDKGFinal:
         v = &typesDKG.Finalize{}
         err = rlp.DecodeBytes(raw.Payload, v)
@@ -351,6 +362,28 @@ func (s *State) Equal(other *State) error {
             }
         }
     }
+    // Check DKG readys.
+    if len(s.dkgReadys) != len(other.dkgReadys) {
+        return ErrStateDKGMPKReadysNotEqual
+    }
+    for round, readysForRound := range s.dkgReadys {
+        otherReadysForRound, exists := other.dkgReadys[round]
+        if !exists {
+            return ErrStateDKGMPKReadysNotEqual
+        }
+        if len(readysForRound) != len(otherReadysForRound) {
+            return ErrStateDKGMPKReadysNotEqual
+        }
+        for nID, ready := range readysForRound {
+            otherReady, exists := otherReadysForRound[nID]
+            if !exists {
+                return ErrStateDKGMPKReadysNotEqual
+            }
+            if !ready.Equal(otherReady) {
+                return ErrStateDKGMPKReadysNotEqual
+            }
+        }
+    }
     // Check DKG finals.
     if len(s.dkgFinals) != len(other.dkgFinals) {
         return ErrStateDKGFinalsNotEqual
@@ -428,6 +461,7 @@ func (s *State) Clone() (copied *State) {
             map[uint64]map[types.NodeID][]*typesDKG.Complaint),
         dkgMasterPublicKeys: make(
             map[uint64]map[types.NodeID]*typesDKG.MasterPublicKey),
+        dkgReadys: make(map[uint64]map[types.NodeID]*typesDKG.MPKReady),
         dkgFinals: make(map[uint64]map[types.NodeID]*typesDKG.Finalize),
         appliedRequests: make(map[common.Hash]struct{}),
     }
@@ -455,6 +489,12 @@ func (s *State) Clone() (copied *State) {
                 cloneDKGMasterPublicKey(mKey)
         }
     }
+    for round, readysForRound := range s.dkgReadys {
+        copied.dkgReadys[round] = make(map[types.NodeID]*typesDKG.MPKReady)
+        for nID, ready := range readysForRound {
+            copied.dkgReadys[round][nID] = cloneDKGMPKReady(ready)
+        }
+    }
     for round, finalsForRound := range s.dkgFinals {
         copied.dkgFinals[round] = make(map[types.NodeID]*typesDKG.Finalize)
         for nID, final := range finalsForRound {
@@ -587,6 +627,23 @@ func (s *State) isValidRequest(req *StateChangeRequest) (err error) {
     // NOTE: there would be no lock in this helper, callers should be
     // responsible for acquiring appropriate lock.
     switch req.Type {
+    case StateAddDKGMasterPublicKey:
+        mpk := req.Payload.(*typesDKG.MasterPublicKey)
+        // If we've received identical MPK, ignore it.
+        mpkForRound, exists := s.dkgMasterPublicKeys[mpk.Round]
+        if exists {
+            if oldMpk, exists := mpkForRound[mpk.ProposerID]; exists {
+                if !oldMpk.Equal(mpk) {
+                    err = ErrDuplicatedChange
+                }
+                return
+            }
+        }
+        // If we've received MPK from that proposer, we would ignore
+        // its mpk.
+        if _, exists := s.dkgReadys[mpk.Round][mpk.ProposerID]; exists {
+            return ErrProposerMPKIsReady
+        }
     case StateAddDKGComplaint:
         comp := req.Payload.(*typesDKG.Complaint)
         // If we've received DKG final from that proposer, we would ignore
@@ -656,6 +713,12 @@ func (s *State) applyRequest(req *StateChangeRequest) error {
                 map[types.NodeID]*typesDKG.MasterPublicKey)
         }
         s.dkgMasterPublicKeys[mKey.Round][mKey.ProposerID] = mKey
+    case StateAddDKGMPKReady:
+        ready := req.Payload.(*typesDKG.MPKReady)
+        if _, exists := s.dkgReadys[ready.Round]; !exists {
+            s.dkgReadys[ready.Round] = make(map[types.NodeID]*typesDKG.MPKReady)
+        }
+        s.dkgReadys[ready.Round][ready.ProposerID] = ready
     case StateAddDKGFinal:
         final := req.Payload.(*typesDKG.Finalize)
         if _, exists := s.dkgFinals[final.Round]; !exists {
@@ -714,6 +777,8 @@ func (s *State) RequestChange(
     // These cases for for type assertion, make sure callers pass expected types.
     case StateAddCRS:
         payload = payload.(*crsAdditionRequest)
+    case StateAddDKGMPKReady:
+        payload = payload.(*typesDKG.MPKReady)
     case StateAddDKGFinal:
         payload = payload.(*typesDKG.Finalize)
     case StateAddDKGMasterPublicKey:
@@ -783,6 +848,14 @@ func (s *State) DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey {
     return mpks
 }
 
+// IsDKGMPKReady checks if current received dkg readys exceeds threshold.
+// This information won't be snapshot, thus can't be cached in test.Governance.
+func (s *State) IsDKGMPKReady(round uint64, threshold int) bool {
+    s.lock.RLock()
+    defer s.lock.RUnlock()
+    return len(s.dkgReadys[round]) > threshold
+}
+
 // IsDKGFinal checks if current received dkg finals exceeds threshold.
 // This information won't be snapshot, thus can't be cached in test.Governance.
 func (s *State) IsDKGFinal(round uint64, threshold int) bool {
diff --git a/core/test/state_test.go b/core/test/state_test.go
index 9daee3a..c05d41b 100644
--- a/core/test/state_test.go
+++ b/core/test/state_test.go
@@ -70,6 +70,17 @@ func (s *StateTestSuite) newDKGComplaint(round uint64) *typesDKG.Complaint {
     }
 }
 
+func (s *StateTestSuite) newDKGMPKReady(round uint64) *typesDKG.MPKReady {
+    prvKey, err := ecdsa.NewPrivateKey()
+    s.Require().NoError(err)
+    pubKey := prvKey.PublicKey()
+    nodeID := types.NewNodeID(pubKey)
+    // TODO(mission): sign it.
+    return &typesDKG.MPKReady{
+        ProposerID: nodeID,
+        Round:      round,
+    }
+}
 func (s *StateTestSuite) newDKGFinal(round uint64) *typesDKG.Finalize {
     prvKey, err := ecdsa.NewPrivateKey()
     s.Require().NoError(err)
@@ -119,9 +130,11 @@ func (s *StateTestSuite) findNode(
 func (s *StateTestSuite) makeDKGChanges(
     st *State,
     masterPubKey *typesDKG.MasterPublicKey,
+    ready *typesDKG.MPKReady,
     complaint *typesDKG.Complaint,
     final *typesDKG.Finalize) {
     st.RequestChange(StateAddDKGMasterPublicKey, masterPubKey)
+    st.RequestChange(StateAddDKGMPKReady, ready)
     st.RequestChange(StateAddDKGComplaint, complaint)
     st.RequestChange(StateAddDKGFinal, final)
 }
@@ -175,9 +188,10 @@ func (s *StateTestSuite) TestEqual() {
     crs := common.NewRandomHash()
     req.NoError(st.ProposeCRS(1, crs))
     masterPubKey := s.newDKGMasterPublicKey(2)
+    ready := s.newDKGMPKReady(2)
     comp := s.newDKGComplaint(2)
     final := s.newDKGFinal(2)
-    s.makeDKGChanges(st, masterPubKey, comp, final)
+    s.makeDKGChanges(st, masterPubKey, ready, comp, final)
     // Remove dkg complaints from cloned one to check if equal.
     st3 := st.Clone()
     req.NoError(st.Equal(st3))
@@ -188,6 +202,11 @@ func (s *StateTestSuite) TestEqual() {
     req.NoError(st.Equal(st4))
     delete(st4.dkgMasterPublicKeys, uint64(2))
     req.Equal(st.Equal(st4), ErrStateDKGMasterPublicKeysNotEqual)
+    // Remove dkg ready from cloned one to check if equal.
+    st4a := st.Clone()
+    req.NoError(st.Equal(st4a))
+    delete(st4a.dkgReadys, uint64(2))
+    req.Equal(st.Equal(st4a), ErrStateDKGMPKReadysNotEqual)
     // Remove dkg finalize from cloned one to check if equal.
     st5 := st.Clone()
     req.NoError(st.Equal(st5))
@@ -222,9 +241,10 @@ func (s *StateTestSuite) TestPendingChangesEqual() {
     crs := common.NewRandomHash()
     req.NoError(st.ProposeCRS(1, crs))
     masterPubKey := s.newDKGMasterPublicKey(2)
+    ready := s.newDKGMPKReady(2)
     comp := s.newDKGComplaint(2)
     final := s.newDKGFinal(2)
-    s.makeDKGChanges(st, masterPubKey, comp, final)
+    s.makeDKGChanges(st, masterPubKey, ready, comp, final)
 }
 
 func (s *StateTestSuite) TestLocalMode() {
@@ -266,17 +286,21 @@ func (s *StateTestSuite) TestLocalMode() {
     // Test adding node set, DKG complaints, final, master public key.
     // Make sure everything is empty before changed.
     req.Empty(st.DKGMasterPublicKeys(2))
+    req.False(st.IsDKGMPKReady(2, 0))
     req.Empty(st.DKGComplaints(2))
     req.False(st.IsDKGFinal(2, 0))
     // Add DKG stuffs.
     masterPubKey := s.newDKGMasterPublicKey(2)
+    ready := s.newDKGMPKReady(2)
     comp := s.newDKGComplaint(2)
     final := s.newDKGFinal(2)
-    s.makeDKGChanges(st, masterPubKey, comp, final)
+    s.makeDKGChanges(st, masterPubKey, ready, comp, final)
     // Check DKGMasterPublicKeys.
     masterKeyForRound := st.DKGMasterPublicKeys(2)
     req.Len(masterKeyForRound, 1)
     req.True(masterKeyForRound[0].Equal(masterPubKey))
+    // Check IsDKGMPKReady.
+    req.True(st.IsDKGMPKReady(2, 0))
     // Check DKGComplaints.
     compForRound := st.DKGComplaints(2)
     req.Len(compForRound, 1)
@@ -307,11 +331,13 @@ func (s *StateTestSuite) TestPacking() {
     st.RequestChange(StateAddNode, pubKey)
     // Add DKG stuffs.
     masterPubKey := s.newDKGMasterPublicKey(2)
+    ready := s.newDKGMPKReady(2)
     comp := s.newDKGComplaint(2)
     final := s.newDKGFinal(2)
-    s.makeDKGChanges(st, masterPubKey, comp, final)
+    s.makeDKGChanges(st, masterPubKey, ready, comp, final)
     // Make sure everything is empty before changed.
     req.Empty(st.DKGMasterPublicKeys(2))
+    req.False(st.IsDKGMPKReady(2, 0))
     req.Empty(st.DKGComplaints(2))
     req.False(st.IsDKGFinal(2, 0))
     // In remote mode, we need to manually convert own requests to global ones.
@@ -338,6 +364,8 @@ func (s *StateTestSuite) TestPacking() {
     compForRound := st.DKGComplaints(2)
     req.Len(compForRound, 1)
     req.True(compForRound[0].Equal(comp))
+    // Check IsDKGMPKReady.
+    req.True(st.IsDKGMPKReady(2, 0))
     // Check IsDKGFinal.
     req.True(st.IsDKGFinal(2, 0))
 }
@@ -371,9 +399,10 @@ func (s *StateTestSuite) TestRequestBroadcastAndPack() {
     st.RequestChange(StateAddNode, pubKey)
     // Add DKG stuffs.
     masterPubKey := s.newDKGMasterPublicKey(2)
+    ready := s.newDKGMPKReady(2)
     comp := s.newDKGComplaint(2)
     final := s.newDKGFinal(2)
-    s.makeDKGChanges(st, masterPubKey, comp, final)
+    s.makeDKGChanges(st, masterPubKey, ready, comp, final)
     // Pack those changes into a byte stream, and pass it to other State
     // instance.
     packed, err := st.PackOwnRequests()
diff --git a/core/test/utils.go b/core/test/utils.go
index 6abd0b5..c6c08fc 100644
--- a/core/test/utils.go
+++ b/core/test/utils.go
@@ -147,6 +147,19 @@ func cloneDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey) (
     return
 }
 
+func cloneDKGMPKReady(ready *typesDKG.MPKReady) (
+    copied *typesDKG.MPKReady) {
+    b, err := rlp.EncodeToBytes(ready)
+    if err != nil {
+        panic(err)
+    }
+    copied = &typesDKG.MPKReady{}
+    if err = rlp.DecodeBytes(b, copied); err != nil {
+        panic(err)
+    }
+    return
+}
+
 func cloneDKGFinalize(final *typesDKG.Finalize) (
     copied *typesDKG.Finalize) {
     b, err := rlp.EncodeToBytes(final)
diff --git a/core/types/dkg/dkg.go b/core/types/dkg/dkg.go
index cecc4f1..f021d1b 100644
--- a/core/types/dkg/dkg.go
+++ b/core/types/dkg/dkg.go
@@ -167,6 +167,27 @@ type PartialSignature struct {
     Signature crypto.Signature `json:"signature"`
 }
 
+// MPKReady describe a dig ready message in DKG protocol.
+type MPKReady struct {
+    ProposerID types.NodeID     `json:"proposer_id"`
+    Round      uint64           `json:"round"`
+    Signature  crypto.Signature `json:"signature"`
+}
+
+func (ready *MPKReady) String() string {
+    return fmt.Sprintf("DKGMPKReady{RP:%s Round:%d}",
+        ready.ProposerID.String()[:6],
+        ready.Round)
+}
+
+// Equal check equality of two MPKReady instances.
+func (ready *MPKReady) Equal(other *MPKReady) bool {
+    return ready.ProposerID.Equal(other.ProposerID) &&
+        ready.Round == other.Round &&
+        ready.Signature.Type == other.Signature.Type &&
+        bytes.Compare(ready.Signature.Signature, other.Signature.Signature) == 0
+}
+
 // Finalize describe a dig finalize message in DKG protocol.
 type Finalize struct {
     ProposerID types.NodeID `json:"proposer_id"`
diff --git a/core/types/dkg/dkg_test.go b/core/types/dkg/dkg_test.go
index ea46621..4eb17ac 100644
--- a/core/types/dkg/dkg_test.go
+++ b/core/types/dkg/dkg_test.go
@@ -212,6 +212,37 @@ func (s *DKGTestSuite) TestComplaintEquality() {
     req.True(comp1.Equal(comp2))
 }
 
+func (s *DKGTestSuite) TestMPKReadyEquality() {
+    var req = s.Require()
+    ready1 := &MPKReady{
+        ProposerID: types.NodeID{Hash: common.NewRandomHash()},
+        Round:      1,
+        Signature: crypto.Signature{
+            Signature: s.genRandomBytes(),
+        },
+    }
+    // Make a copy
+    ready2 := &MPKReady{}
+    s.clone(ready1, ready2)
+    req.True(ready1.Equal(ready2))
+    // Change proposer ID.
+    ready2.ProposerID = types.NodeID{Hash: common.NewRandomHash()}
+    req.False(ready1.Equal(ready2))
+    ready2.ProposerID = ready1.ProposerID
+    // Change round.
+    ready2.Round = ready1.Round + 1
+    req.False(ready1.Equal(ready2))
+    ready2.Round = ready1.Round
+    // Change signature.
+    ready2.Signature = crypto.Signature{
+        Signature: s.genRandomBytes(),
+    }
+    req.False(ready1.Equal(ready2))
+    ready2.Signature = ready1.Signature
+    // After changing every field back, they should be equal.
+    req.True(ready1.Equal(ready2))
+}
+
 func (s *DKGTestSuite) TestFinalizeEquality() {
     var req = s.Require()
     final1 := &Finalize{
         ProposerID: types.NodeID