author     Jimmy Hu <jimmy.hu@dexon.org>        2019-03-27 20:47:32 +0800
committer  Wei-Ning Huang <w@byzantine-lab.io>  2019-06-15 22:09:55 +0800
commit     8732c918f7b5d3dd9ac45b6e00995f5d74bc8a47
tree       ab7aed4bb7d580011469dda917483e785c234a32
parent     c52c9e04a916fac3550b0a3c3d8cdf979ab70bb8
core: merge notarySet and DKGSet (#265)
* vendor: sync to latest core
* core: merge notarySet and dkgSet
* dex: optimize network traffic for finalized block
-rw-r--r--  core/governance.go | 20
-rw-r--r--  core/vm/oracle_contract_abi.go | 14
-rw-r--r--  core/vm/oracle_contracts.go | 67
-rw-r--r--  core/vm/oracle_contracts_test.go | 22
-rw-r--r--  dex/cache.go | 88
-rw-r--r--  dex/cache_test.go | 165
-rw-r--r--  dex/handler.go | 120
-rw-r--r--  dex/network.go | 23
-rw-r--r--  dex/peer.go | 101
-rw-r--r--  dex/peer_test.go | 39
-rw-r--r--  dex/protocol.go | 3
-rw-r--r--  dex/protocol_test.go | 105
-rw-r--r--  params/config.go | 8
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go | 59
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go | 22
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go | 159
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/blockchain.go | 139
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go | 32
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go | 533
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go | 6
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go | 2
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/agreement.go | 146
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go | 90
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/types/block-randomness.go | 31
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/types/config.go | 5
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go | 6
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go | 8
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/utils.go | 8
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go | 10
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go | 15
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/utils/utils.go | 2
-rw-r--r--  vendor/vendor.json | 48
32 files changed, 1064 insertions, 1032 deletions
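The heart of this patch is that every DKG quorum check that used to read dkgSetSize now derives its 2f+1 threshold from notarySetSize, since the two sets are merged. A minimal standalone Go sketch of that rule (illustrative only; the function names here are assumptions, the real checks are in core/governance.go and core/vm/oracle_contracts.go below):

package main

import "fmt"

// dkgThreshold mirrors the "2*size/3 + 1" rule this patch applies to the merged
// notary set, e.g. in Governance.IsDKGMPKReady and Governance.IsDKGFinal.
func dkgThreshold(notarySetSize uint64) uint64 {
	return 2*notarySetSize/3 + 1
}

func main() {
	for _, size := range []uint64{4, 7, 10} {
		fmt.Printf("notary set size %2d -> 2f+1 threshold %d\n", size, dkgThreshold(size))
	}
}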
diff --git a/core/governance.go b/core/governance.go
index bb49efb53..28f0a41fe 100644
--- a/core/governance.go
+++ b/core/governance.go
@@ -146,7 +146,6 @@ func (g *Governance) Configuration(round uint64) *coreTypes.Config {
LambdaBA: time.Duration(c.LambdaBA) * time.Millisecond,
LambdaDKG: time.Duration(c.LambdaDKG) * time.Millisecond,
NotarySetSize: uint32(configHelper.NotarySetSize().Uint64()),
- DKGSetSize: c.DKGSetSize,
RoundLength: c.RoundLength,
MinBlockInterval: time.Duration(c.MinBlockInterval) * time.Millisecond,
}
@@ -199,21 +198,6 @@ func (d *Governance) NotarySetNodeKeyAddresses(round uint64) (map[common.Address
return r, nil
}
-func (d *Governance) DKGSet(round uint64) (map[string]struct{}, error) {
- dkgSet, err := d.nodeSetCache.GetDKGSet(round)
- if err != nil {
- return nil, err
- }
-
- r := make(map[string]struct{}, len(dkgSet))
- for id := range dkgSet {
- if key, exists := d.nodeSetCache.GetPublicKey(id); exists {
- r[hex.EncodeToString(key.Bytes())] = struct{}{}
- }
- }
- return r, nil
-}
-
func (g *Governance) DKGComplaints(round uint64) []*dkgTypes.Complaint {
s := g.GetStateForDKGAtRound(round)
if s == nil {
@@ -245,7 +229,7 @@ func (g *Governance) IsDKGMPKReady(round uint64) bool {
return false
}
config := g.Configuration(round)
- threshold := 2*uint64(config.DKGSetSize)/3 + 1
+ threshold := 2*uint64(config.NotarySetSize)/3 + 1
count := s.DKGMPKReadysCount().Uint64()
return count >= threshold
}
@@ -256,7 +240,7 @@ func (g *Governance) IsDKGFinal(round uint64) bool {
return false
}
config := g.Configuration(round)
- threshold := 2*uint64(config.DKGSetSize)/3 + 1
+ threshold := 2*uint64(config.NotarySetSize)/3 + 1
count := s.DKGFinalizedsCount().Uint64()
return count >= threshold
}
diff --git a/core/vm/oracle_contract_abi.go b/core/vm/oracle_contract_abi.go
index e42e9266f..3345a8f4c 100644
--- a/core/vm/oracle_contract_abi.go
+++ b/core/vm/oracle_contract_abi.go
@@ -58,20 +58,6 @@ const GovernanceABIJSON = `
{
"constant": true,
"inputs": [],
- "name": "dkgSetSize",
- "outputs": [
- {
- "name": "",
- "type": "uint256"
- }
- ],
- "payable": false,
- "stateMutability": "view",
- "type": "function"
- },
- {
- "constant": true,
- "inputs": [],
"name": "totalSupply",
"outputs": [
{
diff --git a/core/vm/oracle_contracts.go b/core/vm/oracle_contracts.go
index 12ad4b150..d7c8ffae2 100644
--- a/core/vm/oracle_contracts.go
+++ b/core/vm/oracle_contracts.go
@@ -88,7 +88,6 @@ const (
notarySetSizeLoc
notaryParamAlphaLoc
notaryParamBetaLoc
- dkgSetSizeLoc
roundLengthLoc
minBlockIntervalLoc
fineValuesLoc
@@ -704,8 +703,8 @@ func (s *GovernanceState) PutDKGMPKReady(addr common.Address, ready bool) {
}
s.setStateBigInt(mapLoc, res)
}
-func (s *GovernanceState) ClearDKGMPKReadys(dkgSet map[coreTypes.NodeID]struct{}) {
- for id := range dkgSet {
+func (s *GovernanceState) ClearDKGMPKReadys(notarySet map[coreTypes.NodeID]struct{}) {
+ for id := range notarySet {
s.PutDKGMPKReady(IdToAddress(id), false)
}
}
@@ -853,22 +852,6 @@ func (s *GovernanceState) NotaryParamBeta() *big.Int {
return s.getStateBigInt(big.NewInt(notaryParamBetaLoc))
}
-// uint256 public dkgSetSize;
-func (s *GovernanceState) DKGSetSize() *big.Int {
- return s.getStateBigInt(big.NewInt(dkgSetSizeLoc))
-}
-func (s *GovernanceState) CalDKGSetSize() {
- nodeSetSize := float64(len(s.QualifiedNodes()))
- setSize := math.Ceil((nodeSetSize*0.6-1)/3)*3 + 1
-
- if nodeSetSize >= 100 {
- alpha := float64(s.NotaryParamAlpha().Uint64()) / decimalMultiplier
- beta := float64(s.NotaryParamBeta().Uint64()) / decimalMultiplier
- setSize = math.Ceil(alpha*math.Log(nodeSetSize) - beta)
- }
- s.setStateBigInt(big.NewInt(dkgSetSizeLoc), big.NewInt(int64(setSize)))
-}
-
// uint256 public roundLength;
func (s *GovernanceState) RoundLength() *big.Int {
return s.getStateBigInt(big.NewInt(roundLengthLoc))
@@ -1038,7 +1021,6 @@ func (s *GovernanceState) UpdateConfiguration(cfg *params.DexconConfig) {
// Calculate set size.
s.CalNotarySetSize()
- s.CalDKGSetSize()
}
type rawConfigStruct struct {
@@ -1070,7 +1052,6 @@ func (s *GovernanceState) UpdateConfigurationRaw(cfg *rawConfigStruct) {
s.SetFineValues(cfg.FineValues)
s.CalNotarySetSize()
- s.CalDKGSetSize()
}
// event ConfigurationChanged();
@@ -1283,15 +1264,15 @@ func (g *GovernanceContract) useGas(gas uint64) ([]byte, error) {
return nil, nil
}
-func (g *GovernanceContract) configDKGSetSize(round *big.Int) *big.Int {
+func (g *GovernanceContract) configNotarySetSize(round *big.Int) *big.Int {
s, err := getConfigState(g.evm, round)
if err != nil {
return big.NewInt(0)
}
- return s.DKGSetSize()
+ return s.NotarySetSize()
}
-func (g *GovernanceContract) getDKGSet(round *big.Int) map[coreTypes.NodeID]struct{} {
+func (g *GovernanceContract) getNotarySet(round *big.Int) map[coreTypes.NodeID]struct{} {
crsRound := g.state.CRSRound()
var crs common.Hash
cmp := round.Cmp(crsRound)
@@ -1316,7 +1297,7 @@ func (g *GovernanceContract) getDKGSet(round *big.Int) map[coreTypes.NodeID]stru
crs = state.CRS()
}
- target := coreTypes.NewDKGSetTarget(coreCommon.Hash(crs))
+ target := coreTypes.NewNotarySetTarget(coreCommon.Hash(crs))
ns := coreTypes.NewNodeSet()
state, err := getConfigState(g.evm, round)
@@ -1330,17 +1311,17 @@ func (g *GovernanceContract) getDKGSet(round *big.Int) map[coreTypes.NodeID]stru
}
ns.Add(coreTypes.NewNodeID(mpk))
}
- return ns.GetSubSet(int(g.configDKGSetSize(round).Uint64()), target)
+ return ns.GetSubSet(int(g.configNotarySetSize(round).Uint64()), target)
}
-func (g *GovernanceContract) inDKGSet(round *big.Int, nodeID coreTypes.NodeID) bool {
- dkgSet := g.getDKGSet(round)
+func (g *GovernanceContract) inNotarySet(round *big.Int, nodeID coreTypes.NodeID) bool {
+ dkgSet := g.getNotarySet(round)
_, ok := dkgSet[nodeID]
return ok
}
func (g *GovernanceContract) clearDKG() {
- dkgSet := g.getDKGSet(g.evm.Round)
+ dkgSet := g.getNotarySet(g.evm.Round)
g.state.ClearDKGMasterPublicKeyProposed()
g.state.ClearDKGMasterPublicKeys()
g.state.ClearDKGComplaintProposed()
@@ -1368,7 +1349,7 @@ func (g *GovernanceContract) addDKGComplaint(comp []byte) ([]byte, error) {
// Calculate 2f
threshold := new(big.Int).Mul(
big.NewInt(2),
- new(big.Int).Div(g.state.DKGSetSize(), big.NewInt(3)))
+ new(big.Int).Div(g.state.NotarySetSize(), big.NewInt(3)))
// If 2f + 1 of DKG set is finalized, one can not propose complaint anymore.
if g.state.DKGFinalizedsCount().Cmp(threshold) > 0 {
@@ -1393,7 +1374,7 @@ func (g *GovernanceContract) addDKGComplaint(comp []byte) ([]byte, error) {
}
// DKGComplaint must belongs to someone in DKG set.
- if !g.inDKGSet(round, dkgComplaint.ProposerID) {
+ if !g.inNotarySet(round, dkgComplaint.ProposerID) {
return nil, errExecutionReverted
}
@@ -1471,7 +1452,7 @@ func (g *GovernanceContract) addDKGMasterPublicKey(mpk []byte) ([]byte, error) {
// Calculate 2f
threshold := new(big.Int).Mul(
big.NewInt(2),
- new(big.Int).Div(g.state.DKGSetSize(), big.NewInt(3)))
+ new(big.Int).Div(g.state.NotarySetSize(), big.NewInt(3)))
// If 2f + 1 of DKG set is mpk ready, one can not propose mpk anymore.
if g.state.DKGMPKReadysCount().Cmp(threshold) > 0 {
@@ -1483,7 +1464,7 @@ func (g *GovernanceContract) addDKGMasterPublicKey(mpk []byte) ([]byte, error) {
}
// DKGMasterPublicKey must belongs to someone in DKG set.
- if !g.inDKGSet(round, dkgMasterPK.ProposerID) {
+ if !g.inNotarySet(round, dkgMasterPK.ProposerID) {
return nil, errExecutionReverted
}
@@ -1514,7 +1495,7 @@ func (g *GovernanceContract) addDKGMPKReady(ready []byte) ([]byte, error) {
}
// DKGFInalize must belongs to someone in DKG set.
- if !g.inDKGSet(round, dkgReady.ProposerID) {
+ if !g.inNotarySet(round, dkgReady.ProposerID) {
return nil, errExecutionReverted
}
@@ -1548,7 +1529,7 @@ func (g *GovernanceContract) addDKGFinalize(finalize []byte) ([]byte, error) {
}
// DKGFInalize must belongs to someone in DKG set.
- if !g.inDKGSet(round, dkgFinalize.ProposerID) {
+ if !g.inNotarySet(round, dkgFinalize.ProposerID) {
return nil, errExecutionReverted
}
@@ -1630,7 +1611,6 @@ func (g *GovernanceContract) register(
g.state.emitStaked(caller, value)
g.state.CalNotarySetSize()
- g.state.CalDKGSetSize()
}
return g.useGas(GovernanceActionGasCost)
}
@@ -1660,7 +1640,6 @@ func (g *GovernanceContract) stake() ([]byte, error) {
g.state.emitStaked(caller, value)
g.state.CalNotarySetSize()
- g.state.CalDKGSetSize()
return g.useGas(GovernanceActionGasCost)
}
@@ -1697,7 +1676,6 @@ func (g *GovernanceContract) unstake(amount *big.Int) ([]byte, error) {
g.state.emitUnstaked(caller, amount)
g.state.CalNotarySetSize()
- g.state.CalDKGSetSize()
return g.useGas(GovernanceActionGasCost)
}
@@ -1778,7 +1756,6 @@ func (g *GovernanceContract) payFine(nodeAddr common.Address) ([]byte, error) {
g.state.emitFinePaid(nodeAddr, g.contract.Value())
g.state.CalNotarySetSize()
- g.state.CalDKGSetSize()
return g.useGas(GovernanceActionGasCost)
}
@@ -1799,7 +1776,7 @@ func (g *GovernanceContract) proposeCRS(nextRound *big.Int, signedCRS []byte) ([
}
threshold := coreUtils.GetDKGThreshold(&coreTypes.Config{
- DKGSetSize: uint32(g.state.DKGSetSize().Uint64())})
+ NotarySetSize: uint32(g.state.NotarySetSize().Uint64())})
dkgGPK, err := g.coreDKGUtils.NewGroupPublicKey(&g.state, nextRound, threshold)
if err != nil {
return nil, errExecutionReverted
@@ -1950,9 +1927,9 @@ func (g *GovernanceContract) resetDKG(newSignedCRS []byte) ([]byte, error) {
// Calculate 2f
threshold := new(big.Int).Mul(
big.NewInt(2),
- new(big.Int).Div(g.state.DKGSetSize(), big.NewInt(3)))
+ new(big.Int).Div(g.state.NotarySetSize(), big.NewInt(3)))
tsigThreshold := coreUtils.GetDKGThreshold(&coreTypes.Config{
- DKGSetSize: uint32(g.state.DKGSetSize().Uint64())})
+ NotarySetSize: uint32(g.state.NotarySetSize().Uint64())})
// If 2f + 1 of DKG set is finalized, check if DKG succeeded.
if g.state.DKGFinalizedsCount().Cmp(threshold) > 0 {
@@ -2248,12 +2225,6 @@ func (g *GovernanceContract) Run(evm *EVM, input []byte, contract *Contract) (re
return nil, errExecutionReverted
}
return res, nil
- case "dkgSetSize":
- res, err := method.Outputs.Pack(g.state.DKGSetSize())
- if err != nil {
- return nil, errExecutionReverted
- }
- return res, nil
case "finedRecords":
record := Bytes32{}
if err := method.Inputs.Unpack(&record, arguments); err != nil {
diff --git a/core/vm/oracle_contracts_test.go b/core/vm/oracle_contracts_test.go
index 80bc9a73f..98cc01505 100644
--- a/core/vm/oracle_contracts_test.go
+++ b/core/vm/oracle_contracts_test.go
@@ -189,7 +189,6 @@ func (g *OracleContractsTestSuite) SetupTest() {
config.NextHalvingSupply = new(big.Int).Mul(big.NewInt(1e18), big.NewInt(2.5e9))
config.LastHalvedAmount = new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1.5e9))
config.MiningVelocity = 0.1875
- config.DKGSetSize = 7
g.config = config
@@ -658,15 +657,6 @@ func (g *OracleContractsTestSuite) TestConfigurationReading() {
res, err = g.call(GovernanceContractAddress, addr, input, big.NewInt(0))
g.Require().NoError(err)
- // DKGSetSize.
- input, err = GovernanceABI.ABI.Pack("dkgSetSize")
- g.Require().NoError(err)
- res, err = g.call(GovernanceContractAddress, addr, input, big.NewInt(0))
- g.Require().NoError(err)
- err = GovernanceABI.ABI.Unpack(&value, "dkgSetSize", res)
- g.Require().NoError(err)
- g.Require().True(uint32(value.Uint64()) > 0)
-
// RoundLength.
input, err = GovernanceABI.ABI.Pack("roundLength")
g.Require().NoError(err)
@@ -934,18 +924,18 @@ func (v *testTSigVerifierMock) VerifySignature(coreCommon.Hash, coreCrypto.Signa
}
func (g *OracleContractsTestSuite) TestResetDKG() {
- for i := uint32(0); i < g.config.DKGSetSize; i++ {
+ for i := 0; i < 7; i++ {
privKey, addr := newPrefundAccount(g.stateDB)
pk := crypto.FromECDSAPub(&privKey.PublicKey)
// Stake.
amount := new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1e6))
- input, err := GovernanceABI.ABI.Pack("register", pk, "Test1", "test1@dexon.org", "Taipei", "https://dexon.org")
+ input, err := GovernanceABI.ABI.Pack("register", pk, "Test", "test1@dexon.org", "Taipei", "https://dexon.org")
g.Require().NoError(err)
_, err = g.call(GovernanceContractAddress, addr, input, amount)
g.Require().NoError(err)
}
- g.Require().Len(g.s.QualifiedNodes(), int(g.config.DKGSetSize))
+ g.Require().Len(g.s.QualifiedNodes(), int(g.s.NotarySetSize().Uint64()))
addrs := make(map[int][]common.Address)
dkgSets := make(map[int]map[coreTypes.NodeID]struct{})
@@ -972,7 +962,7 @@ func (g *OracleContractsTestSuite) TestResetDKG() {
}
addrs[round] = []common.Address{}
- target := coreTypes.NewDKGSetTarget(coreCommon.Hash(g.s.CRS()))
+ target := coreTypes.NewNotarySetTarget(coreCommon.Hash(g.s.CRS()))
ns := coreTypes.NewNodeSet()
for _, x := range g.s.QualifiedNodes() {
@@ -982,8 +972,8 @@ func (g *OracleContractsTestSuite) TestResetDKG() {
}
ns.Add(coreTypes.NewNodeID(mpk))
}
- dkgSet := ns.GetSubSet(int(g.s.DKGSetSize().Uint64()), target)
- g.Require().Len(dkgSet, int(g.config.DKGSetSize))
+ dkgSet := ns.GetSubSet(int(g.s.NotarySetSize().Uint64()), target)
+ g.Require().Len(dkgSet, int(g.s.NotarySetSize().Uint64()))
dkgSets[round] = dkgSet
for id := range dkgSet {
diff --git a/dex/cache.go b/dex/cache.go
index 5d4d20dd0..04030eaaf 100644
--- a/dex/cache.go
+++ b/dex/cache.go
@@ -44,23 +44,23 @@ func voteToKey(vote *coreTypes.Vote) voteKey {
}
type cache struct {
- lock sync.RWMutex
- blockCache map[coreCommon.Hash]*coreTypes.Block
- voteCache map[coreTypes.Position]map[voteKey]*coreTypes.Vote
- randomnessCache map[coreCommon.Hash]*coreTypes.BlockRandomnessResult
- votePosition []coreTypes.Position
- db coreDb.Database
- voteSize int
- size int
+ lock sync.RWMutex
+ blockCache map[coreCommon.Hash]*coreTypes.Block
+ finalizedBlockCache map[coreTypes.Position]*coreTypes.Block
+ voteCache map[coreTypes.Position]map[voteKey]*coreTypes.Vote
+ votePosition []coreTypes.Position
+ db coreDb.Database
+ voteSize int
+ size int
}
func newCache(size int, db coreDb.Database) *cache {
return &cache{
- blockCache: make(map[coreCommon.Hash]*coreTypes.Block),
- voteCache: make(map[coreTypes.Position]map[voteKey]*coreTypes.Vote),
- randomnessCache: make(map[coreCommon.Hash]*coreTypes.BlockRandomnessResult),
- db: db,
- size: size,
+ blockCache: make(map[coreCommon.Hash]*coreTypes.Block),
+ finalizedBlockCache: make(map[coreTypes.Position]*coreTypes.Block),
+ voteCache: make(map[coreTypes.Position]map[voteKey]*coreTypes.Vote),
+ db: db,
+ size: size,
}
}
@@ -110,6 +110,28 @@ func (c *cache) addBlock(block *coreTypes.Block) {
c.blockCache[block.Hash] = block
}
+func (c *cache) addFinalizedBlock(block *coreTypes.Block) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ block = block.Clone()
+ if len(c.blockCache) >= c.size {
+ // Randomly delete one entry.
+ for k := range c.blockCache {
+ delete(c.blockCache, k)
+ break
+ }
+ }
+ if len(c.finalizedBlockCache) >= c.size {
+ // Randomly delete one entry.
+ for k := range c.finalizedBlockCache {
+ delete(c.finalizedBlockCache, k)
+ break
+ }
+ }
+ c.blockCache[block.Hash] = block
+ c.finalizedBlockCache[block.Position] = block
+}
+
func (c *cache) blocks(hashes coreCommon.Hashes) []*coreTypes.Block {
c.lock.RLock()
defer c.lock.RUnlock()
@@ -122,48 +144,18 @@ func (c *cache) blocks(hashes coreCommon.Hashes) []*coreTypes.Block {
if err != nil {
continue
}
- // Blocks request from the cache do not need the finalization info.
- block.Finalization = coreTypes.FinalizationResult{}
cacheBlocks = append(cacheBlocks, &block)
}
}
return cacheBlocks
}
-func (c *cache) addRandomness(rand *coreTypes.BlockRandomnessResult) {
- c.lock.Lock()
- defer c.lock.Unlock()
- if len(c.randomnessCache) >= c.size {
- // Randomly delete one entry.
- for k := range c.randomnessCache {
- delete(c.randomnessCache, k)
- break
- }
- }
- c.randomnessCache[rand.BlockHash] = rand
-}
-
-func (c *cache) randomness(hashes coreCommon.Hashes) []*coreTypes.BlockRandomnessResult {
+func (c *cache) finalizedBlock(pos coreTypes.Position) *coreTypes.Block {
c.lock.RLock()
defer c.lock.RUnlock()
- cacheRandomnesss := make([]*coreTypes.BlockRandomnessResult, 0, len(hashes))
- for _, hash := range hashes {
- if block, exist := c.randomnessCache[hash]; exist {
- cacheRandomnesss = append(cacheRandomnesss, block)
- } else {
- block, err := c.db.GetBlock(hash)
- if err != nil {
- continue
- }
- if len(block.Finalization.Randomness) == 0 {
- continue
- }
- cacheRandomnesss = append(cacheRandomnesss, &coreTypes.BlockRandomnessResult{
- BlockHash: block.Hash,
- Position: block.Position,
- Randomness: block.Finalization.Randomness,
- })
- }
+ if block, exist := c.finalizedBlockCache[pos]; exist {
+ return block
}
- return cacheRandomnesss
+ // TODO(jimmy): get finalized block from db
+ return nil
}
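With the randomness cache removed, dex/cache.go now also indexes finalized blocks by position. A simplified, self-contained model of that index (names here are assumed; the real implementation above uses coreTypes.Block, a mutex, and a DB fallback):

package main

import "fmt"

type position struct{ Round, Height uint64 }

type block struct {
	Hash       string
	Position   position
	Randomness []byte
}

// finalizedCache keeps up to `size` finalized blocks keyed by position,
// evicting an arbitrary entry when full, like addFinalizedBlock above.
type finalizedCache struct {
	size  int
	byPos map[position]*block
}

func newFinalizedCache(size int) *finalizedCache {
	return &finalizedCache{size: size, byPos: make(map[position]*block)}
}

func (c *finalizedCache) add(b *block) {
	if len(c.byPos) >= c.size {
		for k := range c.byPos { // random eviction
			delete(c.byPos, k)
			break
		}
	}
	c.byPos[b.Position] = b
}

func (c *finalizedCache) get(pos position) *block { return c.byPos[pos] }

func main() {
	c := newFinalizedCache(3)
	c.add(&block{Hash: "0xabc", Position: position{Round: 1, Height: 42}, Randomness: []byte{7}})
	if b := c.get(position{Round: 1, Height: 42}); b != nil {
		fmt.Println("finalized block hit:", b.Hash)
	}
}

A PullVotesMsg for an already-finalized position can then be answered with the cached block itself instead of a full vote set, as dex/handler.go does below.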
diff --git a/dex/cache_test.go b/dex/cache_test.go
index 536e015f0..22b1b9b26 100644
--- a/dex/cache_test.go
+++ b/dex/cache_test.go
@@ -19,6 +19,7 @@ package dex
import (
"math/rand"
+ "reflect"
"sort"
"strings"
"testing"
@@ -205,91 +206,125 @@ func TestCacheBlock(t *testing.T) {
}
}
-func randomBytes() []byte {
- bytes := make([]byte, 32)
- for i := range bytes {
- bytes[i] = byte(rand.Int() % 256)
- }
- return bytes
-}
-
-func TestCacheRandomness(t *testing.T) {
+func TestCacheFinalizedBlock(t *testing.T) {
db, err := coreDb.NewMemBackedDB()
if err != nil {
panic(err)
}
cache := newCache(3, db)
- rand1 := &coreTypes.BlockRandomnessResult{
- BlockHash: coreCommon.NewRandomHash(),
- Randomness: randomBytes(),
+ block1 := &coreTypes.Block{
+ Position: coreTypes.Position{
+ Height: 1,
+ },
+ Hash: coreCommon.NewRandomHash(),
+ Finalization: coreTypes.FinalizationResult{
+ Randomness: randomBytes(),
+ },
}
- rand2 := &coreTypes.BlockRandomnessResult{
- BlockHash: coreCommon.NewRandomHash(),
- Randomness: randomBytes(),
+ block2 := &coreTypes.Block{
+ Position: coreTypes.Position{
+ Height: 2,
+ },
+ Hash: coreCommon.NewRandomHash(),
+ Finalization: coreTypes.FinalizationResult{
+ Randomness: randomBytes(),
+ },
}
- rand3 := &coreTypes.BlockRandomnessResult{
- BlockHash: coreCommon.NewRandomHash(),
- Randomness: randomBytes(),
+ block3 := &coreTypes.Block{
+ Position: coreTypes.Position{
+ Height: 3,
+ },
+ Hash: coreCommon.NewRandomHash(),
+ Finalization: coreTypes.FinalizationResult{
+ Randomness: randomBytes(),
+ },
}
- rand4 := &coreTypes.BlockRandomnessResult{
- BlockHash: coreCommon.NewRandomHash(),
- Randomness: randomBytes(),
+ block4 := &coreTypes.Block{
+ Position: coreTypes.Position{
+ Height: 4,
+ },
+ Hash: coreCommon.NewRandomHash(),
+ Finalization: coreTypes.FinalizationResult{
+ Randomness: randomBytes(),
+ },
}
- cache.addRandomness(rand1)
- cache.addRandomness(rand2)
- cache.addRandomness(rand3)
+ cache.addFinalizedBlock(block1)
+ cache.addFinalizedBlock(block2)
+ cache.addFinalizedBlock(block3)
- hashes := coreCommon.Hashes{rand1.BlockHash, rand2.BlockHash, rand3.BlockHash, rand4.BlockHash}
- hashMap := map[coreCommon.Hash]struct{}{
- rand1.BlockHash: {},
- rand2.BlockHash: {},
- rand3.BlockHash: {},
- }
- rands := cache.randomness(hashes)
- if len(rands) != 3 {
- t.Errorf("fail to get rands: have %d, want 3", len(rands))
- }
- for _, rand := range rands {
- if _, exist := hashMap[rand.BlockHash]; !exist {
- t.Errorf("get wrong rand: have %s, want %v", rand, hashMap)
+ hashes := coreCommon.Hashes{block1.Hash, block2.Hash, block3.Hash, block4.Hash}
+ for i := 0; i < 3; i++ {
+ pos := coreTypes.Position{
+ Height: uint64(i + 1),
+ }
+ block := cache.finalizedBlock(pos)
+ if block.Hash != hashes[i] {
+ t.Errorf("failed to get block: have %s, want %s", block, hashes[i])
}
}
- cache.addRandomness(rand4)
-
- rands = cache.randomness(hashes)
- hashMap[rand4.BlockHash] = struct{}{}
- if len(rands) != 3 {
- t.Errorf("fail to get rands: have %d, want 3", len(rands))
- }
- hasNewRandomness := false
- for _, rand := range rands {
- if _, exist := hashMap[rand.BlockHash]; !exist {
- t.Errorf("get wrong rand: have %s, want %v", rand, hashMap)
- }
- if rand.BlockHash.Equal(rand4.BlockHash) {
- hasNewRandomness = true
- }
+ cache.addFinalizedBlock(block4)
+ block := cache.finalizedBlock(block4.Position)
+ if block == nil {
+ t.Errorf("should have block %s in cache", block4)
}
- if !hasNewRandomness {
- t.Errorf("expect rand %s in cache, have %v", rand4, rands)
+ if block.Hash != block4.Hash {
+ t.Errorf("failed to get block: have %s, want %s", block, block4)
}
- block := &coreTypes.Block{
- Hash: coreCommon.NewRandomHash(),
- Finalization: coreTypes.FinalizationResult{
- Randomness: randomBytes(),
+ block5 := &coreTypes.Block{
+ Position: coreTypes.Position{
+ Height: 5,
},
+ Hash: coreCommon.NewRandomHash(),
}
- if err := db.PutBlock(*block); err != nil {
- panic(err)
+ cache.addBlock(block5)
+ if block := cache.finalizedBlock(block5.Position); block != nil {
+ t.Errorf("unexpected block %s in cache", block)
+ }
+ blocks := cache.blocks(coreCommon.Hashes{block5.Hash})
+ if len(blocks) != 1 {
+ t.Errorf("fail to get blocks: have %d, want 1", len(blocks))
+ } else {
+ if !blocks[0].Hash.Equal(block5.Hash) {
+ t.Errorf("get wrong block: have %s, want %s", blocks[0], block5)
+ }
+ }
+ finalizedBlock5 := block5.Clone()
+ finalizedBlock5.Finalization.Randomness = randomBytes()
+ cache.addFinalizedBlock(finalizedBlock5)
+ block = cache.finalizedBlock(block5.Position)
+ if block == nil {
+ t.Errorf("expecting block %s in cache", finalizedBlock5)
}
- rands = cache.randomness(coreCommon.Hashes{block.Hash})
- if len(rands) != 1 {
- t.Errorf("fail to get rands: have %d, want 1", len(rands))
+ if !reflect.DeepEqual(
+ block.Finalization.Randomness,
+ finalizedBlock5.Finalization.Randomness) {
+ t.Errorf("mismatch randomness, have %s, want %s",
+ block.Finalization.Randomness,
+ finalizedBlock5.Finalization.Randomness)
+ }
+ blocks = cache.blocks(coreCommon.Hashes{block5.Hash})
+ if len(blocks) != 1 {
+ t.Errorf("fail to get blocks: have %d, want 1", len(blocks))
} else {
- if !rands[0].BlockHash.Equal(block.Hash) {
- t.Errorf("get wrong rand: have %s, want %s", rands[0], block)
+ if !blocks[0].Hash.Equal(finalizedBlock5.Hash) {
+ t.Errorf("get wrong block: have %s, want %s", blocks[0], block5)
+ }
+ if !reflect.DeepEqual(
+ blocks[0].Finalization.Randomness,
+ finalizedBlock5.Finalization.Randomness) {
+ t.Errorf("mismatch randomness, have %s, want %s",
+ blocks[0].Finalization.Randomness,
+ finalizedBlock5.Finalization.Randomness)
}
}
}
+
+func randomBytes() []byte {
+ bytes := make([]byte, 32)
+ for i := range bytes {
+ bytes[i] = byte(rand.Int() % 256)
+ }
+ return bytes
+}
diff --git a/dex/handler.go b/dex/handler.go
index 20df41709..8971ad500 100644
--- a/dex/handler.go
+++ b/dex/handler.go
@@ -87,6 +87,9 @@ const (
maxPullVotePeers = 1
pullVoteRateLimit = 10 * time.Second
+
+ maxAgreementResultBroadcast = 3
+ maxFinalizedBlockBroadcast = 3
)
// errIncompatibleConfig is returned if the requested protocols and configs are
@@ -888,19 +891,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
p.MarkAgreement(rlpHash(agreement))
pm.receiveCh <- &agreement
- case msg.Code == RandomnessMsg:
- if atomic.LoadInt32(&pm.receiveCoreMessage) == 0 {
- break
- }
- // Broadcast this to all peer
- var randomnesses []*coreTypes.BlockRandomnessResult
- if err := msg.Decode(&randomnesses); err != nil {
- return errResp(ErrDecode, "msg %v: %v", msg, err)
- }
- for _, randomness := range randomnesses {
- p.MarkRandomness(rlpHash(randomness))
- pm.receiveCh <- randomness
- }
case msg.Code == DKGPrivateShareMsg:
if atomic.LoadInt32(&pm.receiveCoreMessage) == 0 {
break
@@ -949,20 +939,13 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if err := msg.Decode(&pos); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
+ if block := pm.cache.finalizedBlock(pos); block != nil {
+ log.Debug("Push finalized block as votes", "block", block)
+ return p.SendCoreBlocks([]*coreTypes.Block{block})
+ }
votes := pm.cache.votes(pos)
log.Debug("Push votes", "votes", votes)
return p.SendVotes(votes)
- case msg.Code == PullRandomnessMsg:
- if atomic.LoadInt32(&pm.receiveCoreMessage) == 0 {
- break
- }
- var hashes coreCommon.Hashes
- if err := msg.Decode(&hashes); err != nil {
- return errResp(ErrDecode, "msg %v: %v", msg, err)
- }
- randomnesses := pm.cache.randomness(hashes)
- log.Debug("Push randomness", "randomness", randomnesses)
- return p.SendRandomnesses(randomnesses)
case msg.Code == GetGovStateMsg:
var hash common.Hash
if err := msg.Decode(&hash); err != nil {
@@ -1098,6 +1081,31 @@ func (pm *ProtocolManager) BroadcastRecords(records []*enr.Record) {
}
}
+// BroadcastFinalizedBlock broadcasts the finalized core block to some of its peers.
+func (pm *ProtocolManager) BroadcastFinalizedBlock(block *coreTypes.Block) {
+ if len(block.Finalization.Randomness) == 0 {
+ log.Warn("Ignore broadcast finalized block without randomness", "block", block)
+ return
+ }
+ pm.cache.addFinalizedBlock(block)
+
+ // send to notary nodes first (direct)
+ label := peerLabel{
+ set: notaryset,
+ round: block.Position.Round,
+ }
+ peers := pm.peers.PeersWithLabel(label)
+ count := maxFinalizedBlockBroadcast
+ for _, peer := range peers {
+ if count <= 0 {
+ break
+ } else {
+ count--
+ peer.AsyncSendCoreBlocks([]*coreTypes.Block{block})
+ }
+ }
+}
+
// BroadcastCoreBlock broadcasts the core block to all its peers.
func (pm *ProtocolManager) BroadcastCoreBlock(block *coreTypes.Block) {
pm.cache.addBlock(block)
@@ -1122,39 +1130,32 @@ func (pm *ProtocolManager) BroadcastVote(vote *coreTypes.Vote) {
func (pm *ProtocolManager) BroadcastAgreementResult(
agreement *coreTypes.AgreementResult) {
- // send to dkg nodes first (direct)
- label := peerLabel{
- set: dkgset,
- round: agreement.Position.Round,
- }
- for _, peer := range pm.peers.PeersWithLabel(label) {
- if !peer.knownAgreements.Contains(rlpHash(agreement)) {
- peer.AsyncSendAgreement(agreement)
- }
+ block := pm.cache.blocks(coreCommon.Hashes{agreement.BlockHash})
+ if len(block) != 0 {
+ block[0].Finalization.Height = agreement.FinalizationHeight
+ block[0].Finalization.Randomness = agreement.Randomness
+ pm.cache.addFinalizedBlock(block[0])
}
- for _, peer := range pm.peers.PeersWithoutAgreement(rlpHash(agreement)) {
- peer.AsyncSendAgreement(agreement)
- }
-}
-
-func (pm *ProtocolManager) BroadcastRandomnessResult(
- randomness *coreTypes.BlockRandomnessResult) {
- pm.cache.addRandomness(randomness)
// send to notary nodes first (direct)
label := peerLabel{
set: notaryset,
- round: randomness.Position.Round,
+ round: agreement.Position.Round,
}
- randomnesses := []*coreTypes.BlockRandomnessResult{randomness}
- for _, peer := range pm.peers.PeersWithLabel(label) {
- if !peer.knownRandomnesses.Contains(rlpHash(randomness)) {
- peer.AsyncSendRandomnesses(randomnesses)
+ peers := pm.peers.PeersWithLabel(label)
+ count := maxAgreementResultBroadcast
+ agrHash := rlpHash(agreement)
+ for _, peer := range peers {
+ if count <= 0 {
+ peer.MarkAgreement(agrHash)
+ } else if !peer.knownAgreements.Contains(agrHash) {
+ count--
+ peer.AsyncSendAgreement(agreement)
}
}
- for _, peer := range pm.peers.PeersWithoutRandomness(rlpHash(randomness)) {
- peer.AsyncSendRandomnesses(randomnesses)
+ for _, peer := range pm.peers.PeersWithoutAgreement(rlpHash(agreement)) {
+ peer.AsyncSendAgreement(agreement)
}
}
@@ -1177,7 +1178,7 @@ func (pm *ProtocolManager) SendDKGPrivateShare(
func (pm *ProtocolManager) BroadcastDKGPrivateShare(
privateShare *dkgTypes.PrivateShare) {
- label := peerLabel{set: dkgset, round: privateShare.Round}
+ label := peerLabel{set: notaryset, round: privateShare.Round}
for _, peer := range pm.peers.PeersWithLabel(label) {
if !peer.knownDKGPrivateShares.Contains(rlpHash(privateShare)) {
peer.AsyncSendDKGPrivateShare(privateShare)
@@ -1187,7 +1188,7 @@ func (pm *ProtocolManager) BroadcastDKGPrivateShare(
func (pm *ProtocolManager) BroadcastDKGPartialSignature(
psig *dkgTypes.PartialSignature) {
- label := peerLabel{set: dkgset, round: psig.Round}
+ label := peerLabel{set: notaryset, round: psig.Round}
for _, peer := range pm.peers.PeersWithLabel(label) {
peer.AsyncSendDKGPartialSignature(psig)
}
@@ -1218,17 +1219,6 @@ func (pm *ProtocolManager) BroadcastPullVotes(
}
}
-func (pm *ProtocolManager) BroadcastPullRandomness(
- hashes coreCommon.Hashes) {
- // TODO(jimmy-dexon): pull from dkg set only.
- for idx, peer := range pm.peers.Peers() {
- if idx >= maxPullPeers {
- break
- }
- peer.AsyncSendPullRandomness(hashes)
- }
-}
-
func (pm *ProtocolManager) txBroadcastLoop() {
queueSizeMax := common.StorageSize(100 * 1024) // 100 KB
currentSize := common.StorageSize(0)
@@ -1321,9 +1311,15 @@ func (pm *ProtocolManager) peerSetLoop() {
for i := round; i <= dexCore.DKGDelayRound; i++ {
pm.peers.BuildConnection(i)
}
+ round = dexCore.DKGDelayRound
} else {
pm.peers.BuildConnection(round)
}
+ CRSRound := pm.gov.CRSRound()
+ if CRSRound > round {
+ pm.peers.BuildConnection(CRSRound)
+ round = CRSRound
+ }
for {
select {
@@ -1340,7 +1336,7 @@ func (pm *ProtocolManager) peerSetLoop() {
}
log.Debug("ProtocolManager: new round", "round", newRound)
- if newRound == round {
+ if newRound <= round {
break
}
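The traffic optimization in dex/handler.go caps direct pushes: at most maxAgreementResultBroadcast / maxFinalizedBlockBroadcast (both 3) notary peers receive the payload directly, while the remaining peers either mark it as known or pull it later. A toy sketch of that cap (the peer type here is an assumption, not the real dex.peer):

package main

import "fmt"

const maxFinalizedBlockBroadcast = 3

type peer struct{ id string }

func (p *peer) asyncSend(payload string) { fmt.Printf("send %s to %s\n", payload, p.id) }

// broadcastCapped pushes to at most maxFinalizedBlockBroadcast peers, mirroring
// the counting loops in BroadcastFinalizedBlock and BroadcastAgreementResult above.
func broadcastCapped(notaryPeers []*peer, payload string) {
	count := maxFinalizedBlockBroadcast
	for _, p := range notaryPeers {
		if count <= 0 {
			break
		}
		count--
		p.asyncSend(payload)
	}
}

func main() {
	peers := []*peer{{"n1"}, {"n2"}, {"n3"}, {"n4"}, {"n5"}}
	broadcastCapped(peers, "finalized-block@round7")
}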
diff --git a/dex/network.go b/dex/network.go
index f36850e59..0e2d338c1 100644
--- a/dex/network.go
+++ b/dex/network.go
@@ -45,14 +45,6 @@ func (n *DexconNetwork) PullVotes(pos types.Position) {
n.pm.BroadcastPullVotes(pos)
}
-// PullRandomness tries to pull randomness result from the DEXON network.
-func (n *DexconNetwork) PullRandomness(hashes coreCommon.Hashes) {
- if len(hashes) == 0 {
- return
- }
- n.pm.BroadcastPullRandomness(hashes)
-}
-
// BroadcastVote broadcasts vote to all nodes in DEXON network.
func (n *DexconNetwork) BroadcastVote(vote *types.Vote) {
n.pm.BroadcastVote(vote)
@@ -60,7 +52,11 @@ func (n *DexconNetwork) BroadcastVote(vote *types.Vote) {
// BroadcastBlock broadcasts block to all nodes in DEXON network.
func (n *DexconNetwork) BroadcastBlock(block *types.Block) {
- n.pm.BroadcastCoreBlock(block)
+ if block.IsFinalized() {
+ n.pm.BroadcastFinalizedBlock(block)
+ } else {
+ n.pm.BroadcastCoreBlock(block)
+ }
}
// SendDKGPrivateShare sends PrivateShare to a DKG participant.
@@ -83,13 +79,8 @@ func (n *DexconNetwork) BroadcastDKGPartialSignature(
}
// BroadcastAgreementResult broadcasts rand request to DKG set.
-func (n *DexconNetwork) BroadcastAgreementResult(randRequest *types.AgreementResult) {
- n.pm.BroadcastAgreementResult(randRequest)
-}
-
-// BroadcastRandomnessResult broadcasts rand request to Notary set.
-func (n *DexconNetwork) BroadcastRandomnessResult(randResult *types.BlockRandomnessResult) {
- n.pm.BroadcastRandomnessResult(randResult)
+func (n *DexconNetwork) BroadcastAgreementResult(result *types.AgreementResult) {
+ n.pm.BroadcastAgreementResult(result)
}
// ReceiveChan returns a channel to receive messages from DEXON network.
diff --git a/dex/peer.go b/dex/peer.go
index 0fa1ac61d..0d23e630f 100644
--- a/dex/peer.go
+++ b/dex/peer.go
@@ -67,7 +67,6 @@ const (
maxKnownBlocks = 1024 // Maximum block hashes to keep in the known list (prevent DOS)
maxKnownAgreements = 10240
- maxKnownRandomnesses = 10240
maxKnownDKGPrivateShares = 1024 // this related to DKG Size
// maxQueuedTxs is the maximum number of transaction lists to queue up before
@@ -90,7 +89,6 @@ const (
maxQueuedCoreBlocks = 16
maxQueuedVotes = 128
maxQueuedAgreements = 16
- maxQueuedRandomnesses = 16
maxQueuedDKGPrivateShare = 16
maxQueuedDKGParitialSignature = 16
maxQueuedPullBlocks = 128
@@ -114,8 +112,7 @@ type PeerInfo struct {
type setType uint32
const (
- dkgset = iota
- notaryset
+ notaryset = iota
)
type peerLabel struct {
@@ -126,8 +123,6 @@ type peerLabel struct {
func (p peerLabel) String() string {
var t string
switch p.set {
- case dkgset:
- t = fmt.Sprintf("DKGSet round: %d", p.round)
case notaryset:
t = fmt.Sprintf("NotarySet round: %d", p.round)
}
@@ -150,7 +145,6 @@ type peer struct {
knownRecords mapset.Set // Set of node record known to be known by this peer
knownBlocks mapset.Set // Set of block hashes known to be known by this peer
knownAgreements mapset.Set
- knownRandomnesses mapset.Set
knownDKGPrivateShares mapset.Set
queuedTxs chan []*types.Transaction // Queue of transactions to broadcast to the peer
queuedRecords chan []*enr.Record // Queue of node records to broadcast to the peer
@@ -159,7 +153,6 @@ type peer struct {
queuedCoreBlocks chan []*coreTypes.Block
queuedVotes chan []*coreTypes.Vote
queuedAgreements chan *coreTypes.AgreementResult
- queuedRandomnesses chan []*coreTypes.BlockRandomnessResult
queuedDKGPrivateShares chan *dkgTypes.PrivateShare
queuedDKGPartialSignatures chan *dkgTypes.PartialSignature
queuedPullBlocks chan coreCommon.Hashes
@@ -178,7 +171,6 @@ func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
knownRecords: mapset.NewSet(),
knownBlocks: mapset.NewSet(),
knownAgreements: mapset.NewSet(),
- knownRandomnesses: mapset.NewSet(),
knownDKGPrivateShares: mapset.NewSet(),
queuedTxs: make(chan []*types.Transaction, maxQueuedTxs),
queuedRecords: make(chan []*enr.Record, maxQueuedRecords),
@@ -187,7 +179,6 @@ func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
queuedCoreBlocks: make(chan []*coreTypes.Block, maxQueuedCoreBlocks),
queuedVotes: make(chan []*coreTypes.Vote, maxQueuedVotes),
queuedAgreements: make(chan *coreTypes.AgreementResult, maxQueuedAgreements),
- queuedRandomnesses: make(chan []*coreTypes.BlockRandomnessResult, maxQueuedRandomnesses),
queuedDKGPrivateShares: make(chan *dkgTypes.PrivateShare, maxQueuedDKGPrivateShare),
queuedDKGPartialSignatures: make(chan *dkgTypes.PartialSignature, maxQueuedDKGParitialSignature),
queuedPullBlocks: make(chan coreCommon.Hashes, maxQueuedPullBlocks),
@@ -252,11 +243,6 @@ func (p *peer) broadcast() {
return
}
p.Log().Trace("Broadcast agreement")
- case randomnesses := <-p.queuedRandomnesses:
- if err := p.SendRandomnesses(randomnesses); err != nil {
- return
- }
- p.Log().Trace("Broadcast randomnesses", "count", len(randomnesses))
case privateShare := <-p.queuedDKGPrivateShares:
if err := p.SendDKGPrivateShare(privateShare); err != nil {
return
@@ -277,11 +263,6 @@ func (p *peer) broadcast() {
return
}
p.Log().Trace("Pulling Votes", "position", pos)
- case hashes := <-p.queuedPullRandomness:
- if err := p.SendPullRandomness(hashes); err != nil {
- return
- }
- p.Log().Trace("Pulling Randomnesses", "hashes", hashes)
case <-p.term:
return
case <-time.After(100 * time.Millisecond):
@@ -366,13 +347,6 @@ func (p *peer) MarkAgreement(hash common.Hash) {
p.knownAgreements.Add(hash)
}
-func (p *peer) MarkRandomness(hash common.Hash) {
- for p.knownRandomnesses.Cardinality() >= maxKnownRandomnesses {
- p.knownRandomnesses.Pop()
- }
- p.knownRandomnesses.Add(hash)
-}
-
func (p *peer) MarkDKGPrivateShares(hash common.Hash) {
for p.knownDKGPrivateShares.Cardinality() >= maxKnownDKGPrivateShares {
p.knownDKGPrivateShares.Pop()
@@ -513,24 +487,6 @@ func (p *peer) AsyncSendAgreement(agreement *coreTypes.AgreementResult) {
}
}
-func (p *peer) SendRandomnesses(randomnesses []*coreTypes.BlockRandomnessResult) error {
- for _, randomness := range randomnesses {
- p.knownRandomnesses.Add(rlpHash(randomness))
- }
- return p.logSend(p2p.Send(p.rw, RandomnessMsg, randomnesses), RandomnessMsg)
-}
-
-func (p *peer) AsyncSendRandomnesses(randomnesses []*coreTypes.BlockRandomnessResult) {
- select {
- case p.queuedRandomnesses <- randomnesses:
- for _, randomness := range randomnesses {
- p.knownRandomnesses.Add(rlpHash(randomness))
- }
- default:
- p.Log().Debug("Dropping randomness result")
- }
-}
-
func (p *peer) SendDKGPrivateShare(privateShare *dkgTypes.PrivateShare) error {
p.knownDKGPrivateShares.Add(rlpHash(privateShare))
return p.logSend(p2p.Send(p.rw, DKGPrivateShareMsg, privateShare), DKGPrivateShareMsg)
@@ -581,18 +537,6 @@ func (p *peer) AsyncSendPullVotes(pos coreTypes.Position) {
}
}
-func (p *peer) SendPullRandomness(hashes coreCommon.Hashes) error {
- return p.logSend(p2p.Send(p.rw, PullRandomnessMsg, hashes), PullRandomnessMsg)
-}
-
-func (p *peer) AsyncSendPullRandomness(hashes coreCommon.Hashes) {
- select {
- case p.queuedPullRandomness <- hashes:
- default:
- p.Log().Debug("Dropping Pull Randomness")
- }
-}
-
// SendBlockHeaders sends a batch of block headers to the remote peer.
func (p *peer) SendBlockHeaders(flag uint8, headers []*types.HeaderWithGovState) error {
return p.logSend(p2p.Send(p.rw, BlockHeadersMsg, headersData{Flag: flag, Headers: headers}), BlockHeadersMsg)
@@ -871,38 +815,43 @@ func (ps *peerSet) PeersWithLabel(label peerLabel) []*peer {
return list
}
-// PeersWithoutNodeRecord retrieves a list of peers that do not have a
-// given record in their set of known hashes.
-func (ps *peerSet) PeersWithoutNodeRecord(hash common.Hash) []*peer {
+func (ps *peerSet) PeersWithoutLabel(label peerLabel) []*peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
- list := make([]*peer, 0, len(ps.peers))
- for _, p := range ps.peers {
- if !p.knownRecords.Contains(hash) {
+ length := len(ps.peers) - len(ps.label2Nodes[label])
+ if length <= 0 {
+ return []*peer{}
+ }
+ list := make([]*peer, 0, len(ps.peers)-len(ps.label2Nodes[label]))
+ peersWithLabel := ps.label2Nodes[label]
+ for id, p := range ps.peers {
+ if _, exist := peersWithLabel[id]; !exist {
list = append(list, p)
}
}
return list
}
-func (ps *peerSet) PeersWithoutAgreement(hash common.Hash) []*peer {
+// PeersWithoutNodeRecord retrieves a list of peers that do not have a
+// given record in their set of known hashes.
+func (ps *peerSet) PeersWithoutNodeRecord(hash common.Hash) []*peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
list := make([]*peer, 0, len(ps.peers))
for _, p := range ps.peers {
- if !p.knownAgreements.Contains(hash) {
+ if !p.knownRecords.Contains(hash) {
list = append(list, p)
}
}
return list
}
-func (ps *peerSet) PeersWithoutRandomness(hash common.Hash) []*peer {
+func (ps *peerSet) PeersWithoutAgreement(hash common.Hash) []*peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
list := make([]*peer, 0, len(ps.peers))
for _, p := range ps.peers {
- if !p.knownRandomnesses.Contains(hash) {
+ if !p.knownAgreements.Contains(hash) {
list = append(list, p)
}
}
@@ -956,23 +905,6 @@ func (ps *peerSet) BuildConnection(round uint64) {
log.Info("Build connection", "round", round)
- dkgLabel := peerLabel{set: dkgset, round: round}
- if _, ok := ps.label2Nodes[dkgLabel]; !ok {
- dkgPKs, err := ps.gov.DKGSet(round)
- if err != nil {
- log.Error("get DKG set fail", "round", round, "err", err)
- }
-
- nodes := ps.pksToNodes(dkgPKs)
- ps.label2Nodes[dkgLabel] = nodes
-
- if _, exists := nodes[ps.srvr.Self().ID().String()]; exists {
- ps.buildDirectConn(dkgLabel)
- } else {
- ps.buildGroupConn(dkgLabel)
- }
- }
-
notaryLabel := peerLabel{set: notaryset, round: round}
if _, ok := ps.label2Nodes[notaryLabel]; !ok {
notaryPKs, err := ps.gov.NotarySet(round)
@@ -990,6 +922,7 @@ func (ps *peerSet) BuildConnection(round uint64) {
ps.buildGroupConn(notaryLabel)
}
}
+
}
func (ps *peerSet) ForgetConnection(round uint64) {
diff --git a/dex/peer_test.go b/dex/peer_test.go
index 76a28b1ef..d6bc7e24c 100644
--- a/dex/peer_test.go
+++ b/dex/peer_test.go
@@ -62,31 +62,16 @@ func TestPeerSetBuildAndForgetConn(t *testing.T) {
nodes[1].ID().String(): nodes[1],
nodes[2].ID().String(): nodes[2],
},
- {set: dkgset, round: 10}: {
- self.ID().String(): self,
- nodes[1].ID().String(): nodes[1],
- nodes[3].ID().String(): nodes[3],
- },
{set: notaryset, round: 11}: {
self.ID().String(): self,
nodes[1].ID().String(): nodes[1],
nodes[5].ID().String(): nodes[5],
},
- {set: dkgset, round: 11}: {
- nodes[1].ID().String(): nodes[1],
- nodes[2].ID().String(): nodes[2],
- nodes[5].ID().String(): nodes[5],
- },
{set: notaryset, round: 12}: {
self.ID().String(): self,
nodes[3].ID().String(): nodes[3],
nodes[5].ID().String(): nodes[5],
},
- {set: dkgset, round: 12}: {
- self.ID().String(): self,
- nodes[3].ID().String(): nodes[3],
- nodes[5].ID().String(): nodes[5],
- },
}
if !reflect.DeepEqual(ps.label2Nodes, expectedlabel2Nodes) {
@@ -97,28 +82,12 @@ func TestPeerSetBuildAndForgetConn(t *testing.T) {
{set: notaryset, round: 10}: {},
{set: notaryset, round: 11}: {},
{set: notaryset, round: 12}: {},
- {set: dkgset, round: 10}: {},
- {set: dkgset, round: 12}: {},
}
if !reflect.DeepEqual(ps.directConn, expectedDirectConn) {
t.Errorf("direct conn not match")
}
- expectedGroupConn := []peerLabel{
- {set: dkgset, round: 11},
- }
-
- if len(ps.groupConnPeers) != len(expectedGroupConn) {
- t.Errorf("group conn peers not match")
- }
-
- for _, l := range expectedGroupConn {
- if len(ps.groupConnPeers[l]) == 0 {
- t.Errorf("group conn peers is 0")
- }
- }
-
expectedAllDirect := make(map[string]map[peerLabel]struct{})
for l := range ps.directConn {
@@ -152,11 +121,6 @@ func TestPeerSetBuildAndForgetConn(t *testing.T) {
nodes[3].ID().String(): nodes[3],
nodes[5].ID().String(): nodes[5],
},
- {set: dkgset, round: 12}: {
- self.ID().String(): self,
- nodes[3].ID().String(): nodes[3],
- nodes[5].ID().String(): nodes[5],
- },
}
if !reflect.DeepEqual(ps.label2Nodes, expectedlabel2Nodes) {
@@ -165,14 +129,13 @@ func TestPeerSetBuildAndForgetConn(t *testing.T) {
expectedDirectConn = map[peerLabel]struct{}{
{set: notaryset, round: 12}: {},
- {set: dkgset, round: 12}: {},
}
if !reflect.DeepEqual(ps.directConn, expectedDirectConn) {
t.Error("direct conn not match")
}
- expectedGroupConn = []peerLabel{}
+ expectedGroupConn := []peerLabel{}
if len(ps.groupConnPeers) != len(expectedGroupConn) {
t.Errorf("group conn peers not match")
diff --git a/dex/protocol.go b/dex/protocol.go
index 287bf0883..2bcb57506 100644
--- a/dex/protocol.go
+++ b/dex/protocol.go
@@ -92,7 +92,6 @@ const (
DKGPartialSignatureMsg = 0x25
PullBlocksMsg = 0x26
PullVotesMsg = 0x27
- PullRandomnessMsg = 0x28
GetGovStateMsg = 0x29
GovStateMsg = 0x2a
@@ -157,8 +156,6 @@ type governance interface {
CRSRound() uint64
NotarySet(uint64) (map[string]struct{}, error)
-
- DKGSet(uint64) (map[string]struct{}, error)
}
type dexconApp interface {
diff --git a/dex/protocol_test.go b/dex/protocol_test.go
index 517df97d9..7e0e1a9a4 100644
--- a/dex/protocol_test.go
+++ b/dex/protocol_test.go
@@ -438,6 +438,10 @@ func TestRecvVotes(t *testing.T) {
Height: 13,
},
},
+ PartialSignature: dkg.PartialSignature{
+ Type: "456",
+ Signature: []byte("psig"),
+ },
Signature: coreCrypto.Signature{
Type: "123",
Signature: []byte("sig"),
@@ -453,7 +457,7 @@ func TestRecvVotes(t *testing.T) {
select {
case msg := <-ch:
rvote := msg.(*coreTypes.Vote)
- if rlpHash(rvote) != rlpHash(vote) {
+ if !reflect.DeepEqual(rvote, &vote) {
t.Errorf("vote mismatch")
}
case <-time.After(1 * time.Second):
@@ -474,6 +478,10 @@ func TestSendVotes(t *testing.T) {
Height: 13,
},
},
+ PartialSignature: dkg.PartialSignature{
+ Type: "456",
+ Signature: []byte("psig"),
+ },
Signature: coreCrypto.Signature{
Type: "123",
Signature: []byte("sig"),
@@ -531,10 +539,6 @@ func TestSendVotes(t *testing.T) {
label: &peerLabel{set: notaryset, round: 11},
isReceiver: false,
},
- {
- label: &peerLabel{set: dkgset, round: 10},
- isReceiver: false,
- },
}
pm.peers.label2Nodes = make(map[peerLabel]map[string]*enode.Node)
@@ -669,6 +673,10 @@ func TestRecvAgreement(t *testing.T) {
Height: 13,
},
},
+ PartialSignature: dkg.PartialSignature{
+ Type: "456",
+ Signature: []byte("psig"),
+ },
Signature: coreCrypto.Signature{
Type: "123",
Signature: []byte("sig"),
@@ -676,9 +684,10 @@ func TestRecvAgreement(t *testing.T) {
}
agreement := coreTypes.AgreementResult{
- BlockHash: coreCommon.Hash{9, 9, 9},
- Position: vote.Position,
- Votes: []coreTypes.Vote{vote},
+ BlockHash: coreCommon.Hash{9, 9, 9},
+ Position: vote.Position,
+ Votes: []coreTypes.Vote{vote},
+ Randomness: []byte{9, 4, 8, 7},
}
if err := p2p.Send(p.app, AgreementMsg, &agreement); err != nil {
@@ -714,6 +723,10 @@ func TestSendAgreement(t *testing.T) {
Height: 13,
},
},
+ PartialSignature: dkg.PartialSignature{
+ Type: "456",
+ Signature: []byte("psig"),
+ },
Signature: coreCrypto.Signature{
Type: "123",
Signature: []byte("sig"),
@@ -721,9 +734,10 @@ func TestSendAgreement(t *testing.T) {
}
agreement := coreTypes.AgreementResult{
- BlockHash: coreCommon.Hash{9, 9, 9},
- Position: vote.Position,
- Votes: []coreTypes.Vote{vote},
+ BlockHash: coreCommon.Hash{9, 9, 9},
+ Position: vote.Position,
+ Votes: []coreTypes.Vote{vote},
+ Randomness: []byte{9, 4, 8, 7},
}
waitForRegister(pm, 1)
@@ -745,75 +759,6 @@ func TestSendAgreement(t *testing.T) {
}
}
-func TestRecvRandomnesses(t *testing.T) {
- pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
- pm.SetReceiveCoreMessage(true)
-
- p, _ := newTestPeer("peer", dex64, pm, true)
- defer pm.Stop()
- defer p.close()
-
- randomness := coreTypes.BlockRandomnessResult{
- BlockHash: coreCommon.Hash{8, 8, 8},
- Position: coreTypes.Position{
- Round: 10,
- Height: 13,
- },
- Randomness: []byte{7, 7, 7, 7},
- }
-
- if err := p2p.Send(p.app, RandomnessMsg, []*coreTypes.BlockRandomnessResult{&randomness}); err != nil {
- t.Fatalf("send error: %v", err)
- }
-
- ch := pm.ReceiveChan()
- select {
- case msg := <-ch:
- r := msg.(*coreTypes.BlockRandomnessResult)
- if !reflect.DeepEqual(r, &randomness) {
- t.Errorf("randomness mismatch")
- }
- case <-time.After(1 * time.Second):
- t.Errorf("no randomness received within 1 seconds")
- }
-}
-
-func TestSendRandomnesses(t *testing.T) {
- pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
- pm.SetReceiveCoreMessage(true)
-
- p, _ := newTestPeer("peer", dex64, pm, true)
- defer pm.Stop()
- defer p.close()
-
- randomness := coreTypes.BlockRandomnessResult{
- BlockHash: coreCommon.Hash{8, 8, 8},
- Position: coreTypes.Position{
- Round: 10,
- Height: 13,
- },
- Randomness: []byte{7, 7, 7, 7},
- }
-
- waitForRegister(pm, 1)
- pm.BroadcastRandomnessResult(&randomness)
- msg, err := p.app.ReadMsg()
- if err != nil {
- t.Errorf("%v: read error: %v", p.Peer, err)
- } else if msg.Code != RandomnessMsg {
- t.Errorf("%v: got code %d, want %d", p.Peer, msg.Code, RandomnessMsg)
- }
-
- var rs []*coreTypes.BlockRandomnessResult
- if err := msg.Decode(&rs); err != nil {
- t.Errorf("%v: %v", p.Peer, err)
- }
-
- if !reflect.DeepEqual(rs, []*coreTypes.BlockRandomnessResult{&randomness}) {
- t.Errorf("randomness mismatch")
- }
-}
-
func waitForRegister(pm *ProtocolManager, num int) {
for {
if pm.peers.Len() >= num {
diff --git a/params/config.go b/params/config.go
index 1eb016019..f88e69069 100644
--- a/params/config.go
+++ b/params/config.go
@@ -26,10 +26,10 @@ import (
// Genesis hashes to enforce below configs on.
var (
- MainnetGenesisHash = common.HexToHash("0xa48b24e2e500e3a7f222673c240dcef6c4c4fd720e6c4653349adc6acae96fb8")
- TestnetGenesisHash = common.HexToHash("0xf67217d7715cea0b2e8acada9b6a8e538fc3df0129dab32f8c1f6baff7a50034")
- TaipeiGenesisHash = common.HexToHash("0xdcf32f39178f33cea762dd0e87e2be1eb9327997cb9cf20ef0645030d3ece6be")
- YilanGenesisHash = common.HexToHash("0xbb48abd6cc576af3d0f84173e3476045def2f57c2e891e75a8835036d7012b82")
+ MainnetGenesisHash = common.HexToHash("0x8d5edbabff11387ccff24278e9251b56e18a2604b9913695b2bbd5072533dfb2")
+ TestnetGenesisHash = common.HexToHash("0xc70b04d189f2a1391e843552489fa6e19eb6b29021be2e7e6ceed6a2ccc855ff")
+ TaipeiGenesisHash = common.HexToHash("0xcc805f44f6917b04be770a70fb1cdff089f7197fb68bc6839ef46e62c8011e2c")
+ YilanGenesisHash = common.HexToHash("0x86fec7f128b5e6525f4177debddc2a375439593ebbd9d8c19a58e289d8621ce8")
)
// TrustedCheckpoints associates each known checkpoint with the genesis hash of
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
index d29863df5..979045223 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
@@ -106,7 +106,6 @@ type agreementMgr struct {
signer *utils.Signer
bcModule *blockChain
ctx context.Context
- initRound uint64
configs []agreementMgrConfig
baModule *agreement
recv *consensusBAReceiver
@@ -117,7 +116,7 @@ type agreementMgr struct {
lock sync.RWMutex
}
-func newAgreementMgr(con *Consensus, initRound uint64,
+func newAgreementMgr(con *Consensus,
initConfig agreementMgrConfig) (mgr *agreementMgr, err error) {
mgr = &agreementMgr{
con: con,
@@ -130,7 +129,6 @@ func newAgreementMgr(con *Consensus, initRound uint64,
signer: con.signer,
bcModule: con.bcModule,
ctx: con.ctx,
- initRound: initRound,
processedBAResult: make(map[types.Position]struct{}, maxResultCache),
configs: []agreementMgrConfig{initConfig},
voteFilter: utils.NewVoteFilter(),
@@ -141,21 +139,26 @@ func newAgreementMgr(con *Consensus, initRound uint64,
roundValue: &atomic.Value{},
changeNotaryHeightValue: &atomic.Value{},
}
- mgr.recv.roundValue.Store(uint64(0))
+ mgr.recv.updateRound(uint64(0))
mgr.recv.changeNotaryHeightValue.Store(uint64(0))
+ return mgr, nil
+}
+
+func (mgr *agreementMgr) prepare() {
+ round := mgr.bcModule.tipRound()
agr := newAgreement(
mgr.ID,
mgr.recv,
newLeaderSelector(genValidLeader(mgr), mgr.logger),
mgr.signer,
mgr.logger)
- // Hacky way to initialize first notarySet.
- nodes, err := mgr.cache.GetNodeSet(initRound)
+ nodes, err := mgr.cache.GetNodeSet(round)
if err != nil {
return
}
agr.notarySet = nodes.GetSubSet(
- int(initConfig.notarySetSize), types.NewNotarySetTarget(initConfig.crs))
+ int(mgr.config(round).notarySetSize),
+ types.NewNotarySetTarget(mgr.config(round).crs))
// Hacky way to make agreement module self contained.
mgr.recv.agreementModule = agr
mgr.baModule = agr
@@ -172,17 +175,17 @@ func (mgr *agreementMgr) run() {
mgr.waitGroup.Add(1)
go func() {
defer mgr.waitGroup.Done()
- mgr.runBA(mgr.initRound)
+ mgr.runBA(mgr.bcModule.tipRound())
}()
}
func (mgr *agreementMgr) config(round uint64) *agreementMgrConfig {
mgr.lock.RLock()
defer mgr.lock.RUnlock()
- if round < mgr.initRound {
+ if round < mgr.configs[0].RoundID() {
panic(ErrRoundOutOfRange)
}
- roundIndex := round - mgr.initRound
+ roundIndex := round - mgr.configs[0].RoundID()
if roundIndex >= uint64(len(mgr.configs)) {
return nil
}
@@ -274,6 +277,9 @@ func (mgr *agreementMgr) processAgreementResult(
}
if result.Position == aID && !mgr.baModule.confirmed() {
mgr.logger.Info("Syncing BA", "position", result.Position)
+ if result.Position.Round >= DKGDelayRound {
+ return mgr.baModule.processAgreementResult(result)
+ }
for key := range result.Votes {
if err := mgr.baModule.processVote(&result.Votes[key]); err != nil {
return err
@@ -285,22 +291,36 @@ func (mgr *agreementMgr) processAgreementResult(
if err != nil {
return err
}
- mgr.logger.Debug("Calling Network.PullBlocks for fast syncing BA",
- "hash", result.BlockHash)
- mgr.network.PullBlocks(common.Hashes{result.BlockHash})
- mgr.logger.Debug("Calling Governance.CRS", "round", result.Position.Round)
- crs := utils.GetCRSWithPanic(mgr.gov, result.Position.Round, mgr.logger)
- for key := range result.Votes {
- if err := mgr.baModule.processVote(&result.Votes[key]); err != nil {
- return err
+ if result.Position.Round < DKGDelayRound {
+ mgr.logger.Debug("Calling Network.PullBlocks for fast syncing BA",
+ "hash", result.BlockHash)
+ mgr.network.PullBlocks(common.Hashes{result.BlockHash})
+ for key := range result.Votes {
+ if err := mgr.baModule.processVote(&result.Votes[key]); err != nil {
+ return err
+ }
}
}
+ mgr.logger.Debug("Calling Governance.CRS", "round", result.Position.Round)
+ crs := utils.GetCRSWithPanic(mgr.gov, result.Position.Round, mgr.logger)
leader, err := mgr.cache.GetLeaderNode(result.Position)
if err != nil {
return err
}
mgr.baModule.restart(nIDs, result.Position, leader, crs)
+ if result.Position.Round >= DKGDelayRound {
+ return mgr.baModule.processAgreementResult(result)
+ }
+ }
+ return nil
+}
+
+func (mgr *agreementMgr) processFinalizedBlock(block *types.Block) error {
+ aID := mgr.baModule.agreementID()
+ if block.Position.Older(aID) {
+ return nil
}
+ mgr.baModule.processFinalizedBlock(block)
return nil
}
@@ -377,13 +397,14 @@ Loop:
}
mgr.recv.isNotary = checkRound()
// Run BA for this round.
- mgr.recv.roundValue.Store(currentRound)
+ mgr.recv.updateRound(currentRound)
mgr.recv.changeNotaryHeightValue.Store(curConfig.RoundEndHeight())
mgr.recv.restartNotary <- types.Position{
Round: mgr.recv.round(),
Height: math.MaxUint64,
}
mgr.voteFilter = utils.NewVoteFilter()
+ mgr.recv.emptyBlockHashMap = &sync.Map{}
if err := mgr.baRoutineForOneRound(&setting); err != nil {
mgr.logger.Error("BA routine failed",
"error", err,
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go
index 73d7b7ada..0d1ae58bc 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-state.go
@@ -132,11 +132,14 @@ func (s *preCommitState) clocks() int { return 2 }
func (s *preCommitState) nextState() (agreementState, error) {
s.a.lock.RLock()
defer s.a.lock.RUnlock()
- hash := s.a.lockValue
- if hash == types.NullBlockHash {
- hash = s.a.leader.leaderBlockHash()
+ if s.a.lockValue == types.SkipBlockHash ||
+ s.a.lockValue == types.NullBlockHash {
+ hash := s.a.leader.leaderBlockHash()
+ s.a.recv.ProposeVote(types.NewVote(types.VotePreCom, hash, s.a.period))
+ } else {
+ s.a.recv.ProposeVote(types.NewVote(
+ types.VotePreCom, s.a.lockValue, s.a.period))
}
- s.a.recv.ProposeVote(types.NewVote(types.VotePreCom, hash, s.a.period))
return newCommitState(s.a), nil
}
@@ -154,16 +157,7 @@ func (s *commitState) clocks() int { return 2 }
func (s *commitState) nextState() (agreementState, error) {
s.a.lock.Lock()
defer s.a.lock.Unlock()
- hash, ok := s.a.countVoteNoLock(s.a.period, types.VotePreCom)
- if ok && hash != types.SkipBlockHash {
- if s.a.period > s.a.lockIter {
- s.a.lockValue = hash
- s.a.lockIter = s.a.period
- }
- } else {
- hash = types.SkipBlockHash
- }
- s.a.recv.ProposeVote(types.NewVote(types.VoteCom, hash, s.a.period))
+ s.a.recv.ProposeVote(types.NewVote(types.VoteCom, s.a.lockValue, s.a.period))
return newForwardState(s.a), nil
}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
index 16f36bccd..d4f1bbd0c 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
@@ -38,9 +38,11 @@ func init() {
// Errors for agreement module.
var (
- ErrInvalidVote = fmt.Errorf("invalid vote")
- ErrNotInNotarySet = fmt.Errorf("not in notary set")
- ErrIncorrectVoteSignature = fmt.Errorf("incorrect vote signature")
+ ErrInvalidVote = fmt.Errorf("invalid vote")
+ ErrNotInNotarySet = fmt.Errorf("not in notary set")
+ ErrIncorrectVoteSignature = fmt.Errorf("incorrect vote signature")
+ ErrIncorrectVotePartialSignature = fmt.Errorf("incorrect vote psig")
+ ErrMismatchBlockPosition = fmt.Errorf("mismatch block position")
)
// ErrFork for fork error in agreement.
@@ -83,6 +85,7 @@ type agreementReceiver interface {
PullBlocks(common.Hashes)
ReportForkVote(v1, v2 *types.Vote)
ReportForkBlock(b1, b2 *types.Block)
+ VerifyPartialSignature(vote *types.Vote) bool
}
type pendingBlock struct {
@@ -114,20 +117,21 @@ type agreementData struct {
// agreement is the agreement protocol described in the Crypto Shuffle Algorithm.
type agreement struct {
- state agreementState
- data *agreementData
- aID *atomic.Value
- doneChan chan struct{}
- notarySet map[types.NodeID]struct{}
- hasVoteFast bool
- hasOutput bool
- lock sync.RWMutex
- pendingBlock []pendingBlock
- pendingVote []pendingVote
- candidateBlock map[common.Hash]*types.Block
- fastForward chan uint64
- signer *utils.Signer
- logger common.Logger
+ state agreementState
+ data *agreementData
+ aID *atomic.Value
+ doneChan chan struct{}
+ notarySet map[types.NodeID]struct{}
+ hasVoteFast bool
+ hasOutput bool
+ lock sync.RWMutex
+ pendingBlock []pendingBlock
+ pendingVote []pendingVote
+ pendingAgreementResult map[types.Position]*types.AgreementResult
+ candidateBlock map[common.Hash]*types.Block
+ fastForward chan uint64
+ signer *utils.Signer
+ logger common.Logger
}
// newAgreement creates an agreement instance.
@@ -143,11 +147,12 @@ func newAgreement(
ID: ID,
leader: leader,
},
- aID: &atomic.Value{},
- candidateBlock: make(map[common.Hash]*types.Block),
- fastForward: make(chan uint64, 1),
- signer: signer,
- logger: logger,
+ aID: &atomic.Value{},
+ pendingAgreementResult: make(map[types.Position]*types.AgreementResult),
+ candidateBlock: make(map[common.Hash]*types.Block),
+ fastForward: make(chan uint64, 1),
+ signer: signer,
+ logger: logger,
}
agreement.stop()
return agreement
@@ -176,7 +181,7 @@ func (a *agreement) restart(
a.data.blocks = make(map[types.NodeID]*types.Block)
a.data.requiredVote = len(notarySet)*2/3 + 1
a.data.leader.restart(crs)
- a.data.lockValue = types.NullBlockHash
+ a.data.lockValue = types.SkipBlockHash
a.data.lockIter = 0
a.data.isLeader = a.data.ID == leader
if a.doneChan != nil {
@@ -202,6 +207,22 @@ func (a *agreement) restart(
return
}
+ var result *types.AgreementResult
+ func() {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ newPendingAgreementResult := make(
+ map[types.Position]*types.AgreementResult)
+ for pos, agr := range a.pendingAgreementResult {
+ if pos.Newer(aID) {
+ newPendingAgreementResult[pos] = agr
+ } else if pos == aID {
+ result = agr
+ }
+ }
+ a.pendingAgreementResult = newPendingAgreementResult
+ }()
+
expireTime := time.Now().Add(-10 * time.Second)
replayBlock := make([]*types.Block, 0)
func() {
@@ -212,7 +233,11 @@ func (a *agreement) restart(
if aID.Newer(pending.block.Position) {
continue
} else if pending.block.Position == aID {
- replayBlock = append(replayBlock, pending.block)
+ if result == nil ||
+ result.Position.Round < DKGDelayRound ||
+ result.BlockHash == pending.block.Hash {
+ replayBlock = append(replayBlock, pending.block)
+ }
} else if pending.receivedTime.After(expireTime) {
newPendingBlock = append(newPendingBlock, pending)
}
@@ -229,7 +254,9 @@ func (a *agreement) restart(
if aID.Newer(pending.vote.Position) {
continue
} else if pending.vote.Position == aID {
- replayVote = append(replayVote, pending.vote)
+ if result == nil || result.Position.Round < DKGDelayRound {
+ replayVote = append(replayVote, pending.vote)
+ }
} else if pending.receivedTime.After(expireTime) {
newPendingVote = append(newPendingVote, pending)
}
@@ -244,6 +271,13 @@ func (a *agreement) restart(
}
}
+ if result != nil {
+ if err := a.processAgreementResult(result); err != nil {
+			a.logger.Error("Failed to process agreement result when restarting",
+				"result", result, "error", err)
+ }
+ }
+
for _, vote := range replayVote {
if err := a.processVote(vote); err != nil {
a.logger.Error("Failed to process vote when restarting agreement",
@@ -332,6 +366,9 @@ func (a *agreement) sanityCheck(vote *types.Vote) error {
if !ok {
return ErrIncorrectVoteSignature
}
+ if !a.data.recv.VerifyPartialSignature(vote) {
+ return ErrIncorrectVotePartialSignature
+ }
return nil
}
@@ -424,11 +461,17 @@ func (a *agreement) processVote(vote *types.Vote) error {
hash != types.SkipBlockHash {
if vote.Type == types.VoteFast {
if !a.hasVoteFast {
- a.data.recv.ProposeVote(
- types.NewVote(types.VoteFastCom, hash, vote.Period))
- a.data.lockValue = hash
- a.data.lockIter = 1
- a.hasVoteFast = true
+ if a.state.state() == stateFast ||
+ a.state.state() == stateFastVote {
+ a.data.recv.ProposeVote(
+ types.NewVote(types.VoteFastCom, hash, vote.Period))
+ a.hasVoteFast = true
+
+ }
+ if a.data.lockIter == 0 {
+ a.data.lockValue = hash
+ a.data.lockIter = 1
+ }
}
} else {
a.hasOutput = true
@@ -457,18 +500,12 @@ func (a *agreement) processVote(vote *types.Vote) error {
if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok &&
hash != types.SkipBlockHash {
// Condition 1.
- if a.data.period >= vote.Period && vote.Period > a.data.lockIter &&
- vote.BlockHash != a.data.lockValue {
+ if vote.Period > a.data.lockIter {
a.data.lockValue = hash
a.data.lockIter = vote.Period
- return nil
}
// Condition 2.
if vote.Period > a.data.period {
- if vote.Period > a.data.lockIter {
- a.data.lockValue = hash
- a.data.lockIter = vote.Period
- }
a.fastForward <- vote.Period
if a.doneChan != nil {
close(a.doneChan)
@@ -508,6 +545,54 @@ func (a *agreement) processVote(vote *types.Vote) error {
return nil
}
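+// processFinalizedBlock confirms a finalized block directly, without counting
+// votes, as long as no output has been produced yet and the block is not for
+// a position newer than the current agreement.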
+func (a *agreement) processFinalizedBlock(block *types.Block) {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ if a.hasOutput {
+ return
+ }
+ aID := a.agreementID()
+ if aID.Older(block.Position) {
+ return
+ }
+ a.addCandidateBlockNoLock(block)
+ a.hasOutput = true
+ a.data.lock.Lock()
+ defer a.data.lock.Unlock()
+ a.data.recv.ConfirmBlock(block.Hash, nil)
+ if a.doneChan != nil {
+ close(a.doneChan)
+ a.doneChan = nil
+ }
+}
+
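+// processAgreementResult caches results for future positions and confirms the
+// block of a result matching the current position, pulling the block first if
+// it is not yet a known candidate.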
+func (a *agreement) processAgreementResult(result *types.AgreementResult) error {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ aID := a.agreementID()
+ if result.Position.Older(aID) {
+ return nil
+ } else if result.Position.Newer(aID) {
+ a.pendingAgreementResult[result.Position] = result
+ return nil
+ }
+ if a.hasOutput {
+ return nil
+ }
+ a.data.lock.Lock()
+ defer a.data.lock.Unlock()
+ if _, exist := a.findCandidateBlockNoLock(result.BlockHash); !exist {
+ a.data.recv.PullBlocks(common.Hashes{result.BlockHash})
+ }
+ a.hasOutput = true
+ a.data.recv.ConfirmBlock(result.BlockHash, nil)
+ if a.doneChan != nil {
+ close(a.doneChan)
+ a.doneChan = nil
+ }
+ return nil
+}
+
func (a *agreement) done() <-chan struct{} {
a.lock.Lock()
defer a.lock.Unlock()
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/blockchain.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/blockchain.go
index 610ab28bd..51747d83b 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/blockchain.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/blockchain.go
@@ -32,18 +32,19 @@ import (
// Errors for sanity check error.
var (
- ErrBlockFromOlderPosition = errors.New("block from older position")
- ErrNotGenesisBlock = errors.New("not a genesis block")
- ErrIsGenesisBlock = errors.New("is a genesis block")
- ErrIncorrectParentHash = errors.New("incorrect parent hash")
- ErrInvalidBlockHeight = errors.New("invalid block height")
- ErrInvalidRoundID = errors.New("invalid round id")
- ErrNotFollowTipPosition = errors.New("not follow tip position")
- ErrDuplicatedPendingBlock = errors.New("duplicated pending block")
- ErrRetrySanityCheckLater = errors.New("retry sanity check later")
- ErrRoundNotSwitch = errors.New("round not switch")
- ErrIncorrectBlockRandomnessResult = errors.New(
+ ErrBlockFromOlderPosition = errors.New("block from older position")
+ ErrNotGenesisBlock = errors.New("not a genesis block")
+ ErrIsGenesisBlock = errors.New("is a genesis block")
+ ErrIncorrectParentHash = errors.New("incorrect parent hash")
+ ErrInvalidBlockHeight = errors.New("invalid block height")
+ ErrInvalidRoundID = errors.New("invalid round id")
+ ErrNotFollowTipPosition = errors.New("not follow tip position")
+ ErrDuplicatedPendingBlock = errors.New("duplicated pending block")
+ ErrRetrySanityCheckLater = errors.New("retry sanity check later")
+ ErrRoundNotSwitch = errors.New("round not switch")
+ ErrIncorrectAgreementResult = errors.New(
"incorrect block randomness result")
+ ErrMissingRandomness = errors.New("missing block randomness")
)
type pendingBlockRecord struct {
@@ -134,7 +135,7 @@ type blockChain struct {
vGetter tsigVerifierGetter
app Application
logger common.Logger
- pendingRandomnesses map[types.Position]*types.BlockRandomnessResult
+ pendingRandomnesses map[types.Position]*types.AgreementResult
configs []blockChainConfig
pendingBlocks pendingBlockRecords
confirmedBlocks types.BlocksByPosition
@@ -154,7 +155,7 @@ func newBlockChain(nID types.NodeID, dMoment time.Time, initBlock *types.Block,
logger: logger,
dMoment: dMoment,
pendingRandomnesses: make(
- map[types.Position]*types.BlockRandomnessResult),
+ map[types.Position]*types.AgreementResult),
}
}
@@ -211,10 +212,10 @@ func (bc *blockChain) notifyRoundEvents(evts []utils.RoundEventParam) error {
}
func (bc *blockChain) proposeBlock(position types.Position,
- proposeTime time.Time) (b *types.Block, err error) {
+ proposeTime time.Time, isEmpty bool) (b *types.Block, err error) {
bc.lock.RLock()
defer bc.lock.RUnlock()
- return bc.prepareBlock(position, proposeTime, false)
+ return bc.prepareBlock(position, proposeTime, isEmpty)
}
func (bc *blockChain) extractBlocks() (ret []*types.Block) {
@@ -222,7 +223,9 @@ func (bc *blockChain) extractBlocks() (ret []*types.Block) {
defer bc.lock.Unlock()
for len(bc.confirmedBlocks) > 0 {
c := bc.confirmedBlocks[0]
- if c.Position.Round >= DKGDelayRound && len(c.Finalization.Randomness) == 0 {
+ if c.Position.Round >= DKGDelayRound &&
+ len(c.Finalization.Randomness) == 0 &&
+ !bc.setRandomnessFromPending(c) {
break
}
c, bc.confirmedBlocks = bc.confirmedBlocks[0], bc.confirmedBlocks[1:]
@@ -230,8 +233,6 @@ func (bc *blockChain) extractBlocks() (ret []*types.Block) {
// to single chain.
c.Finalization.ParentHash = c.ParentHash
c.Finalization.Timestamp = c.Timestamp
- // It's a workaround, the height for application is one-based.
- c.Finalization.Height = c.Position.Height + 1
ret = append(ret, c)
bc.lastDelivered = c
}
@@ -292,6 +293,7 @@ func (bc *blockChain) addEmptyBlock(position types.Position) (
// to be confirmed.
panic(err)
}
+ emptyB.Finalization.Height = emptyB.Position.Height + 1
bc.confirmBlock(emptyB)
bc.checkIfBlocksConfirmed()
return emptyB
@@ -315,6 +317,11 @@ func (bc *blockChain) addEmptyBlock(position types.Position) (
// addBlock should be called when the block is confirmed by BA, we won't perform
// sanity check against this block, it's ok to add block with skipping height.
func (bc *blockChain) addBlock(b *types.Block) error {
+ if b.Position.Round >= DKGDelayRound &&
+ len(b.Finalization.Randomness) == 0 &&
+ !bc.setRandomnessFromPending(b) {
+ return ErrMissingRandomness
+ }
bc.lock.Lock()
defer bc.lock.Unlock()
confirmed := false
@@ -330,6 +337,7 @@ func (bc *blockChain) addBlock(b *types.Block) error {
} else if b.IsGenesis() {
confirmed = true
}
+ delete(bc.pendingRandomnesses, b.Position)
if !confirmed {
return bc.addPendingBlockRecord(pendingBlockRecord{b.Position, b})
}
@@ -338,45 +346,6 @@ func (bc *blockChain) addBlock(b *types.Block) error {
return nil
}
-func (bc *blockChain) shouldAddRandomness(r *types.BlockRandomnessResult) bool {
- bc.lock.RLock()
- defer bc.lock.RUnlock()
- if bc.lastDelivered != nil &&
- bc.lastDelivered.Position.Newer(r.Position) {
- return false
- }
- _, exists := bc.pendingRandomnesses[r.Position]
- if exists {
- return false
- }
- b := bc.findPendingBlock(r.Position)
- return b == nil || len(b.Finalization.Randomness) == 0
-}
-
-func (bc *blockChain) addRandomness(r *types.BlockRandomnessResult) error {
- if !bc.shouldAddRandomness(r) {
- return nil
- }
- ok, err := bc.verifyRandomness(r.BlockHash, r.Position.Round, r.Randomness)
- if err != nil {
- return err
- }
- if !ok {
- return ErrIncorrectBlockRandomnessResult
- }
- bc.lock.Lock()
- defer bc.lock.Unlock()
- if b := bc.findPendingBlock(r.Position); b != nil {
- if !r.BlockHash.Equal(b.Hash) {
- panic(fmt.Errorf("mismathed randomness: %s %s", b, r))
- }
- b.Finalization.Randomness = r.Randomness
- } else {
- bc.pendingRandomnesses[r.Position] = r
- }
- return nil
-}
-
// TODO(mission): remove this method after removing the strong binding between
// BA and blockchain.
func (bc *blockChain) tipRound() uint64 {
@@ -421,24 +390,29 @@ func (bc *blockChain) nextBlock() (uint64, time.Time) {
return tip.Position.Height + 1, tip.Timestamp.Add(config.minBlockInterval)
}
-func (bc *blockChain) pendingBlocksWithoutRandomness() (hashes common.Hashes) {
+func (bc *blockChain) pendingBlocksWithoutRandomness() []*types.Block {
bc.lock.RLock()
defer bc.lock.RUnlock()
+ blocks := make([]*types.Block, 0)
for _, b := range bc.confirmedBlocks {
- if b.Position.Round == 0 || len(b.Finalization.Randomness) > 0 {
+ if b.Position.Round < DKGDelayRound ||
+ len(b.Finalization.Randomness) > 0 ||
+ bc.setRandomnessFromPending(b) {
continue
}
- hashes = append(hashes, b.Hash)
+ blocks = append(blocks, b)
}
for _, r := range bc.pendingBlocks {
- if r.position.Round == 0 {
+ if r.position.Round < DKGDelayRound {
continue
}
- if r.block != nil && len(r.block.Finalization.Randomness) == 0 {
- hashes = append(hashes, r.block.Hash)
+ if r.block != nil &&
+ len(r.block.Finalization.Randomness) == 0 &&
+ !bc.setRandomnessFromPending(r.block) {
+ blocks = append(blocks, r.block)
}
}
- return
+ return blocks
}
func (bc *blockChain) lastDeliveredBlock() *types.Block {
@@ -456,14 +430,6 @@ func (bc *blockChain) lastPendingBlock() *types.Block {
return bc.confirmedBlocks[0]
}
-func (bc *blockChain) processFinalizedBlock(b *types.Block) error {
- return bc.addRandomness(&types.BlockRandomnessResult{
- BlockHash: b.Hash,
- Position: b.Position,
- Randomness: b.Finalization.Randomness,
- })
-}
-
/////////////////////////////////////////////
//
// internal helpers
@@ -492,9 +458,6 @@ func (bc *blockChain) addPendingBlockRecord(p pendingBlockRecord) error {
}
return err
}
- if p.block != nil {
- bc.setRandomnessFromPending(p.block)
- }
return nil
}
@@ -656,17 +619,39 @@ func (bc *blockChain) confirmBlock(b *types.Block) {
bc.logger.Debug("Calling Application.BlockConfirmed", "block", b)
bc.app.BlockConfirmed(*b)
bc.lastConfirmed = b
- bc.setRandomnessFromPending(b)
bc.confirmedBlocks = append(bc.confirmedBlocks, b)
bc.purgeConfig()
}
-func (bc *blockChain) setRandomnessFromPending(b *types.Block) {
+func (bc *blockChain) setRandomnessFromPending(b *types.Block) bool {
if r, exist := bc.pendingRandomnesses[b.Position]; exist {
if !r.BlockHash.Equal(b.Hash) {
			panic(fmt.Errorf("mismatched randomness: %s %s", b, r))
}
b.Finalization.Randomness = r.Randomness
delete(bc.pendingRandomnesses, b.Position)
+ return true
}
+ return false
+}
+
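+// processAgreementResult verifies the randomness carried by an agreement
+// result and, when the result is newer than the last delivered block, keeps
+// it in pendingRandomnesses so the matching block can be finalized later.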
+func (bc *blockChain) processAgreementResult(result *types.AgreementResult) error {
+ if result.Position.Round < DKGDelayRound {
+ return nil
+ }
+ ok, err := bc.verifyRandomness(
+ result.BlockHash, result.Position.Round, result.Randomness)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return ErrIncorrectAgreementResult
+ }
+ bc.lock.Lock()
+ defer bc.lock.Unlock()
+ if !result.Position.Newer(bc.lastDelivered.Position) {
+ return nil
+ }
+ bc.pendingRandomnesses[result.Position] = result
+ return nil
}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go
index 48b0f2a89..92b283098 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go
@@ -76,7 +76,7 @@ type configurationChain struct {
tsigReady *sync.Cond
cache *utils.NodeSetCache
db db.Database
- dkgSet map[types.NodeID]struct{}
+ notarySet map[types.NodeID]struct{}
mpkReady bool
pendingPrvShare map[types.NodeID]*typesDKG.PrivateShare
// TODO(jimmy-dexon): add timeout to pending psig.
@@ -194,12 +194,12 @@ func (cc *configurationChain) registerDKG(
})
}
}
- dkgSet, err := cc.cache.GetDKGSet(round)
+ notarySet, err := cc.cache.GetNotarySet(round)
if err != nil {
- cc.logger.Error("Error getting DKG set from cache", "error", err)
+ cc.logger.Error("Error getting notary set from cache", "error", err)
return
}
- cc.dkgSet = dkgSet
+ cc.notarySet = notarySet
cc.pendingPrvShare = make(map[types.NodeID]*typesDKG.PrivateShare)
cc.mpkReady = false
cc.dkg, err = recoverDKGProtocol(cc.ID, cc.recv, round, reset, cc.db)
@@ -479,7 +479,7 @@ func (cc *configurationChain) runDKG(round uint64, reset uint64) (err error) {
cc.dkgRunning = false
}()
// Check if corresponding DKG signer is ready.
- if _, _, err = cc.getDKGInfo(round); err == nil {
+ if _, _, err = cc.getDKGInfo(round, true); err == nil {
return ErrSkipButNoError
}
tickStartAt := 1
@@ -518,12 +518,13 @@ func (cc *configurationChain) isDKGFinal(round uint64) bool {
if !cc.gov.IsDKGFinal(round) {
return false
}
- _, _, err := cc.getDKGInfo(round)
+ _, _, err := cc.getDKGInfo(round, false)
return err == nil
}
func (cc *configurationChain) getDKGInfo(
- round uint64) (*typesDKG.NodePublicKeys, *dkgShareSecret, error) {
+ round uint64, ignoreSigner bool) (
+ *typesDKG.NodePublicKeys, *dkgShareSecret, error) {
getFromCache := func() (*typesDKG.NodePublicKeys, *dkgShareSecret) {
cc.dkgResult.RLock()
defer cc.dkgResult.RUnlock()
@@ -532,19 +533,20 @@ func (cc *configurationChain) getDKGInfo(
return npks, signer
}
npks, signer := getFromCache()
- if npks == nil || signer == nil {
- if err := cc.recoverDKGInfo(round); err != nil {
+ if npks == nil || (!ignoreSigner && signer == nil) {
+ if err := cc.recoverDKGInfo(round, ignoreSigner); err != nil {
return nil, nil, err
}
npks, signer = getFromCache()
}
- if npks == nil || signer == nil {
+ if npks == nil || (!ignoreSigner && signer == nil) {
return nil, nil, ErrDKGNotReady
}
return npks, signer, nil
}
-func (cc *configurationChain) recoverDKGInfo(round uint64) error {
+func (cc *configurationChain) recoverDKGInfo(
+ round uint64, ignoreSigner bool) error {
var npksExists, signerExists bool
func() {
cc.dkgResult.Lock()
@@ -582,7 +584,7 @@ func (cc *configurationChain) recoverDKGInfo(round uint64) error {
cc.npks[round] = npks
}()
}
- if !signerExists {
+ if !signerExists && !ignoreSigner {
// Check if we have private shares in DB.
prvKey, err := cc.db.GetDKGPrivateKey(round)
if err != nil {
@@ -603,7 +605,7 @@ func (cc *configurationChain) recoverDKGInfo(round uint64) error {
func (cc *configurationChain) preparePartialSignature(
round uint64, hash common.Hash) (*typesDKG.PartialSignature, error) {
- _, signer, _ := cc.getDKGInfo(round)
+ _, signer, _ := cc.getDKGInfo(round, false)
if signer == nil {
return nil, ErrDKGNotReady
}
@@ -632,7 +634,7 @@ func (cc *configurationChain) untouchTSigHash(hash common.Hash) {
func (cc *configurationChain) runTSig(
round uint64, hash common.Hash) (
crypto.Signature, error) {
- npks, _, _ := cc.getDKGInfo(round)
+ npks, _, _ := cc.getDKGInfo(round, false)
if npks == nil {
return crypto.Signature{}, ErrDKGNotReady
}
@@ -697,7 +699,7 @@ func (cc *configurationChain) processPrivateShare(
if cc.dkg == nil {
return nil
}
- if _, exist := cc.dkgSet[prvShare.ProposerID]; !exist {
+ if _, exist := cc.notarySet[prvShare.ProposerID]; !exist {
return ErrNotDKGParticipant
}
if !cc.mpkReady {
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
index d74a4a290..4a95eac6f 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
@@ -27,6 +27,7 @@ import (
"github.com/dexon-foundation/dexon-consensus/common"
"github.com/dexon-foundation/dexon-consensus/core/crypto"
+ cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
"github.com/dexon-foundation/dexon-consensus/core/db"
"github.com/dexon-foundation/dexon-consensus/core/types"
typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
@@ -51,6 +52,10 @@ var (
"CRS not ready")
ErrConfigurationNotReady = fmt.Errorf(
"Configuration not ready")
+ ErrIncorrectBlockRandomness = fmt.Errorf(
+ "randomness of block is incorrect")
+ ErrCannotVerifyBlockRandomness = fmt.Errorf(
+ "cannot verify block randomness")
)
// consensusBAReceiver implements agreementReceiver.
@@ -60,8 +65,11 @@ type consensusBAReceiver struct {
agreementModule *agreement
changeNotaryHeightValue *atomic.Value
roundValue *atomic.Value
+ emptyBlockHashMap *sync.Map
isNotary bool
restartNotary chan types.Position
+ npks *typesDKG.NodePublicKeys
+ psigSigner *dkgShareSecret
}
func (recv *consensusBAReceiver) round() uint64 {
@@ -72,10 +80,85 @@ func (recv *consensusBAReceiver) changeNotaryHeight() uint64 {
return recv.changeNotaryHeightValue.Load().(uint64)
}
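+// emptyBlockHash lazily computes and caches the hash of the empty block at
+// the given position, so partial signatures over empty blocks are produced
+// and verified against a consistent hash.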
+func (recv *consensusBAReceiver) emptyBlockHash(pos types.Position) (
+ common.Hash, error) {
+ hashVal, ok := recv.emptyBlockHashMap.Load(pos)
+ if ok {
+ return hashVal.(common.Hash), nil
+ }
+ emptyBlock, err := recv.consensus.bcModule.prepareBlock(
+ pos, time.Time{}, true)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ hash, err := utils.HashBlock(emptyBlock)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ recv.emptyBlockHashMap.Store(pos, hash)
+ return hash, nil
+}
+
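+// VerifyPartialSignature checks the DKG partial signature attached to commit
+// and fast-commit votes after DKGDelayRound. Votes for the empty block are
+// verified against the empty block hash; all other votes must not carry a
+// partial signature.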
+func (recv *consensusBAReceiver) VerifyPartialSignature(vote *types.Vote) bool {
+ if recv.round() >= DKGDelayRound && vote.BlockHash != types.SkipBlockHash {
+ if vote.Type == types.VoteCom || vote.Type == types.VoteFastCom {
+ if recv.npks == nil || recv.npks.Round != vote.Position.Round {
+ var err error
+ recv.npks, _, err =
+ recv.consensus.cfgModule.getDKGInfo(vote.Position.Round, true)
+ if err != nil || recv.npks == nil {
+ recv.consensus.logger.Warn("cannot get npks",
+ "round", vote.Position.Round, "error", err)
+ return false
+ }
+ }
+ pubKey, exist := recv.npks.PublicKeys[vote.ProposerID]
+ if !exist {
+ return false
+ }
+ blockHash := vote.BlockHash
+ if blockHash == types.NullBlockHash {
+ var err error
+ blockHash, err = recv.emptyBlockHash(vote.Position)
+ if err != nil {
+ recv.consensus.logger.Error(
+ "Failed to verify vote for empty block",
+ "position", vote.Position,
+ "error", err)
+ return false
+ }
+ }
+			return pubKey.VerifySignature(
+				blockHash, crypto.Signature(vote.PartialSignature))
+ }
+ }
+ return len(vote.PartialSignature.Signature) == 0
+}
+
func (recv *consensusBAReceiver) ProposeVote(vote *types.Vote) {
if !recv.isNotary {
return
}
+ if recv.round() >= DKGDelayRound && vote.BlockHash != types.SkipBlockHash {
+ if vote.Type == types.VoteCom || vote.Type == types.VoteFastCom {
+ if recv.psigSigner == nil {
+ return
+ }
+ if vote.BlockHash == types.NullBlockHash {
+ hash, err := recv.emptyBlockHash(vote.Position)
+ if err != nil {
+ recv.consensus.logger.Error(
+ "Failed to propose vote for empty block",
+ "position", vote.Position,
+ "error", err)
+ return
+ }
+ vote.PartialSignature = recv.psigSigner.sign(hash)
+ } else {
+ vote.PartialSignature = recv.psigSigner.sign(vote.BlockHash)
+ }
+ }
+ }
if err := recv.agreementModule.prepareVote(vote); err != nil {
recv.consensus.logger.Error("Failed to prepare vote", "error", err)
return
@@ -120,6 +203,7 @@ func (recv *consensusBAReceiver) ConfirmBlock(
block *types.Block
aID = recv.agreementModule.agreementID()
)
+
isEmptyBlockConfirmed := hash == common.Hash{}
if isEmptyBlockConfirmed {
recv.consensus.logger.Info("Empty block is confirmed", "position", aID)
@@ -177,6 +261,72 @@ func (recv *consensusBAReceiver) ConfirmBlock(
return
}
}
+
+ // It's a workaround, the height for application is one-based.
+ block.Finalization.Height = block.Position.Height + 1
+
+ if len(votes) == 0 && len(block.Finalization.Randomness) == 0 {
+ recv.consensus.logger.Error("No votes to recover randomness",
+ "block", block)
+ } else if votes != nil {
+ voteList := make([]types.Vote, 0, len(votes))
+ IDs := make(cryptoDKG.IDs, 0, len(votes))
+ psigs := make([]cryptoDKG.PartialSignature, 0, len(votes))
+ for _, vote := range votes {
+ if vote.BlockHash != hash {
+ continue
+ }
+ if recv.round() >= DKGDelayRound {
+ ID, exist := recv.npks.IDMap[vote.ProposerID]
+ if !exist {
+ continue
+ }
+ IDs = append(IDs, ID)
+ psigs = append(psigs, vote.PartialSignature)
+ }
+ voteList = append(voteList, *vote)
+ }
+ if recv.round() >= DKGDelayRound {
+ rand, err := cryptoDKG.RecoverSignature(psigs, IDs)
+ if err != nil {
+ recv.consensus.logger.Warn("Unable to recover randomness",
+ "block", block,
+ "error", err)
+ } else {
+ block.Finalization.Randomness = rand.Signature[:]
+ }
+ }
+
+ if recv.isNotary {
+ result := &types.AgreementResult{
+ BlockHash: block.Hash,
+ Position: block.Position,
+ Votes: voteList,
+ FinalizationHeight: block.Finalization.Height,
+ IsEmptyBlock: isEmptyBlockConfirmed,
+ Randomness: block.Finalization.Randomness,
+ }
+ recv.consensus.logger.Debug("Broadcast AgreementResult",
+ "result", result)
+ recv.consensus.network.BroadcastAgreementResult(result)
+ if block.IsEmpty() {
+ if err :=
+ recv.consensus.bcModule.processAgreementResult(
+ result); err != nil {
+ recv.consensus.logger.Warn(
+ "Failed to process agreement result",
+						"result", result, "error", err)
+ }
+ }
+ if block.Position.Round >= DKGDelayRound {
+ recv.consensus.logger.Debug(
+ "Broadcast finalized block",
+ "block", block)
+ recv.consensus.network.BroadcastBlock(block)
+ }
+ }
+ }
+
if block.Position.Height != 0 &&
!recv.consensus.bcModule.confirmed(block.Position.Height-1) {
go func(hash common.Hash) {
@@ -212,6 +362,11 @@ func (recv *consensusBAReceiver) ConfirmBlock(
recv.consensus.logger.Info("Receive parent block",
"parent-hash", block.ParentHash.String()[:6],
"cur-position", block.Position)
+ if block.Finalization.Height == 0 {
+					// TODO(jimmy): use a separate message to pull the finalized
+					// block. Here, we pull it again as a workaround.
+ continue
+ }
recv.consensus.processBlockChan <- block
parentHash = block.ParentHash
if block.Position.Height == 0 ||
@@ -222,25 +377,9 @@ func (recv *consensusBAReceiver) ConfirmBlock(
}
}(block.ParentHash)
}
- if recv.isNotary {
- voteList := make([]types.Vote, 0, len(votes))
- for _, vote := range votes {
- if vote.BlockHash != hash {
- continue
- }
- voteList = append(voteList, *vote)
- }
- result := &types.AgreementResult{
- BlockHash: block.Hash,
- Position: block.Position,
- Votes: voteList,
- IsEmptyBlock: isEmptyBlockConfirmed,
- }
- recv.consensus.logger.Debug("Propose AgreementResult",
- "result", result)
- recv.consensus.network.BroadcastAgreementResult(result)
+ if !block.IsEmpty() {
+ recv.consensus.processBlockChan <- block
}
- recv.consensus.processBlockChan <- block
// Clean the restartNotary channel so BA will not stuck by deadlock.
CleanChannelLoop:
for {
@@ -253,14 +392,14 @@ CleanChannelLoop:
newPos := block.Position
if block.Position.Height+1 == recv.changeNotaryHeight() {
newPos.Round++
- recv.roundValue.Store(newPos.Round)
+ recv.updateRound(newPos.Round)
}
currentRound := recv.round()
changeNotaryHeight := recv.changeNotaryHeight()
if block.Position.Height > changeNotaryHeight &&
block.Position.Round <= currentRound {
panic(fmt.Errorf(
- "round not switch when confirmig: %s, %d, should switch at %d, %s",
+ "round not switch when confirming: %s, %d, should switch at %d, %s",
block, currentRound, changeNotaryHeight, newPos))
}
recv.restartNotary <- newPos
@@ -282,6 +421,18 @@ func (recv *consensusBAReceiver) ReportForkBlock(b1, b2 *types.Block) {
recv.consensus.gov.ReportForkBlock(b1, b2)
}
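+// updateRound records the new round and refreshes the cached DKG partial
+// signature signer for that round; the signer is cleared if DKG info is not
+// available.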
+func (recv *consensusBAReceiver) updateRound(round uint64) {
+ recv.roundValue.Store(round)
+ var err error
+ _, recv.psigSigner, err =
+ recv.consensus.cfgModule.getDKGInfo(round, false)
+ if err != nil {
+ recv.consensus.logger.Warn("cannot get dkg info",
+ "round", round, "error", err)
+ recv.psigSigner = nil
+ }
+}
+
// consensusDKGReceiver implements dkgReceiver.
type consensusDKGReceiver struct {
ID types.NodeID
@@ -401,13 +552,13 @@ type Consensus struct {
bcModule *blockChain
dMoment time.Time
nodeSetCache *utils.NodeSetCache
+ tsigVerifierCache *TSigVerifierCache
lock sync.RWMutex
ctx context.Context
ctxCancel context.CancelFunc
event *common.Event
roundEvent *utils.RoundEvent
logger common.Logger
- resetRandomnessTicker chan struct{}
resetDeliveryGuardTicker chan struct{}
msgChan chan interface{}
waitGroup sync.WaitGroup
@@ -465,7 +616,6 @@ func NewConsensusFromSyncer(
networkModule Network,
prv crypto.PrivateKey,
confirmedBlocks []*types.Block,
- randomnessResults []*types.BlockRandomnessResult,
cachedMessages []interface{},
logger common.Logger) (*Consensus, error) {
// Setup Consensus instance.
@@ -492,30 +642,13 @@ func NewConsensusFromSyncer(
}
refBlock = b
}
- // Dump all randomness result to the consensus instance.
- for _, r := range randomnessResults {
- if err := con.ProcessBlockRandomnessResult(r, false); err != nil {
- con.logger.Error("failed to process randomness result when syncing",
- "result", r)
- continue
- }
- }
if startWithEmpty {
pos := initBlock.Position
pos.Height++
- block, err := con.bcModule.addEmptyBlock(pos)
+ _, err := con.bcModule.addEmptyBlock(pos)
if err != nil {
panic(err)
}
- con.processBlockChan <- block
- if pos.Round >= DKGDelayRound {
- rand := &types.AgreementResult{
- BlockHash: block.Hash,
- Position: block.Position,
- IsEmptyBlock: true,
- }
- go con.prepareRandomnessResult(rand)
- }
}
return con, nil
}
@@ -566,8 +699,9 @@ func newConsensusForRound(
if usingNonBlocking {
appModule = newNonBlocking(app, debugApp)
}
+ tsigVerifierCache := NewTSigVerifierCache(gov, 7)
bcModule := newBlockChain(ID, dMoment, initBlock, appModule,
- NewTSigVerifierCache(gov, 7), signer, logger)
+ tsigVerifierCache, signer, logger)
// Construct Consensus instance.
con := &Consensus{
ID: ID,
@@ -582,10 +716,10 @@ func newConsensusForRound(
bcModule: bcModule,
dMoment: dMoment,
nodeSetCache: nodeSetCache,
+ tsigVerifierCache: tsigVerifierCache,
signer: signer,
event: common.NewEvent(),
logger: logger,
- resetRandomnessTicker: make(chan struct{}),
resetDeliveryGuardTicker: make(chan struct{}),
msgChan: make(chan interface{}, 1024),
processBlockChan: make(chan *types.Block, 1024),
@@ -600,7 +734,7 @@ func newConsensusForRound(
baConfig := agreementMgrConfig{}
baConfig.from(initRound, initConfig, initCRS)
baConfig.SetRoundBeginHeight(gov.GetRoundHeight(initRound))
- con.baMgr, err = newAgreementMgr(con, initRound, baConfig)
+ con.baMgr, err = newAgreementMgr(con, baConfig)
if err != nil {
panic(err)
}
@@ -690,14 +824,14 @@ func (con *Consensus) prepare(initBlock *types.Block) (err error) {
if nextRound < DKGDelayRound {
return
}
- curDKGSet, err := con.nodeSetCache.GetDKGSet(e.Round)
+ curNotarySet, err := con.nodeSetCache.GetNotarySet(e.Round)
if err != nil {
- con.logger.Error("Error getting DKG set when proposing CRS",
+ con.logger.Error("Error getting notary set when proposing CRS",
"round", e.Round,
"error", err)
return
}
- if _, exist := curDKGSet[con.ID]; !exist {
+ if _, exist := curNotarySet[con.ID]; !exist {
return
}
isDKGValid := func() bool {
@@ -733,18 +867,18 @@ func (con *Consensus) prepare(initBlock *types.Block) (err error) {
// Register round event handler to propose new CRS.
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
// We don't have to propose new CRS during DKG reset, the reset of DKG
- // would be done by the DKG set in previous round.
+ // would be done by the notary set in previous round.
e := evts[len(evts)-1]
defer elapse("propose-CRS", e)()
if e.Reset != 0 || e.Round < DKGDelayRound {
return
}
- if curDkgSet, err := con.nodeSetCache.GetDKGSet(e.Round); err != nil {
- con.logger.Error("Error getting DKG set when proposing CRS",
+ if curNotarySet, err := con.nodeSetCache.GetNotarySet(e.Round); err != nil {
+ con.logger.Error("Error getting notary set when proposing CRS",
"round", e.Round,
"error", err)
} else {
- if _, exist := curDkgSet[con.ID]; !exist {
+ if _, exist := curNotarySet[con.ID]; !exist {
return
}
con.event.RegisterHeight(e.NextCRSProposingHeight(), func(uint64) {
@@ -809,26 +943,26 @@ func (con *Consensus) prepare(initBlock *types.Block) (err error) {
// of unexpected network fluctuation and ensure the robustness.
if !checkWithCancel(
con.ctx, 500*time.Millisecond, checkCRS(nextRound)) {
- con.logger.Debug("unable to prepare CRS for DKG set",
+ con.logger.Debug("unable to prepare CRS for notary set",
"round", nextRound,
"reset", e.Reset)
return
}
- nextDkgSet, err := con.nodeSetCache.GetDKGSet(nextRound)
+ nextNotarySet, err := con.nodeSetCache.GetNotarySet(nextRound)
if err != nil {
- con.logger.Error("Error getting DKG set for next round",
+ con.logger.Error("Error getting notary set for next round",
"round", nextRound,
"reset", e.Reset,
"error", err)
return
}
- if _, exist := nextDkgSet[con.ID]; !exist {
- con.logger.Info("Not selected as DKG set",
+ if _, exist := nextNotarySet[con.ID]; !exist {
+ con.logger.Info("Not selected as notary set",
"round", nextRound,
"reset", e.Reset)
return
}
- con.logger.Info("Selected as DKG set",
+ con.logger.Info("Selected as notary set",
"round", nextRound,
"reset", e.Reset)
nextConfig := utils.GetConfigWithPanic(con.gov, nextRound,
@@ -851,11 +985,14 @@ func (con *Consensus) prepare(initBlock *types.Block) (err error) {
if initBlock != nil {
con.event.NotifyHeight(initBlock.Finalization.Height)
}
+ con.baMgr.prepare()
return
}
// Run starts running DEXON Consensus.
func (con *Consensus) Run() {
+	// There may be empty blocks in the blockchain added by force sync.
+ blocksWithoutRandomness := con.bcModule.pendingBlocksWithoutRandomness()
// Launch BA routines.
con.baMgr.run()
// Launch network handler.
@@ -865,12 +1002,6 @@ func (con *Consensus) Run() {
con.waitGroup.Add(1)
go con.processMsg()
go con.processBlockLoop()
- // Sleep until dMoment come.
- time.Sleep(con.dMoment.Sub(time.Now().UTC()))
- // Take some time to bootstrap.
- time.Sleep(3 * time.Second)
- con.waitGroup.Add(1)
- go con.pullRandomness()
// Stop dummy receiver if launched.
if con.dummyCancel != nil {
con.logger.Trace("Stop dummy receiver")
@@ -893,6 +1024,11 @@ func (con *Consensus) Run() {
}
con.logger.Trace("Finish dumping cached messages")
}
+ con.generateBlockRandomness(blocksWithoutRandomness)
+ // Sleep until dMoment come.
+ time.Sleep(con.dMoment.Sub(time.Now().UTC()))
+ // Take some time to bootstrap.
+ time.Sleep(3 * time.Second)
con.waitGroup.Add(1)
go con.deliveryGuard()
// Block until done.
@@ -901,6 +1037,76 @@ func (con *Consensus) Run() {
}
}
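+// generateBlockRandomness recovers randomness for blocks that were force
+// synced without it: for rounds in which this node is a notary, it signs the
+// block hash with its DKG share, runs the threshold signature, and broadcasts
+// the resulting agreement result.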
+func (con *Consensus) generateBlockRandomness(blocks []*types.Block) {
+ con.logger.Debug("Start generating block randomness", "blocks", blocks)
+ isNotarySet := make(map[uint64]bool)
+ for _, block := range blocks {
+ if block.Position.Round < DKGDelayRound {
+ continue
+ }
+ doRun, exist := isNotarySet[block.Position.Round]
+ if !exist {
+ curNotarySet, err := con.nodeSetCache.GetNotarySet(block.Position.Round)
+ if err != nil {
+				con.logger.Error("Error getting notary set when generating block tsig",
+ "round", block.Position.Round,
+ "error", err)
+ continue
+ }
+ _, exist := curNotarySet[con.ID]
+ isNotarySet[block.Position.Round] = exist
+ doRun = exist
+ }
+ if !doRun {
+ continue
+ }
+ go func(block *types.Block) {
+ psig, err := con.cfgModule.preparePartialSignature(
+ block.Position.Round, block.Hash)
+ if err != nil {
+ con.logger.Error("Failed to prepare partial signature",
+ "block", block,
+ "error", err)
+ } else if err = con.signer.SignDKGPartialSignature(psig); err != nil {
+ con.logger.Error("Failed to sign DKG partial signature",
+ "block", block,
+ "error", err)
+ } else if err = con.cfgModule.processPartialSignature(psig); err != nil {
+ con.logger.Error("Failed to process partial signature",
+ "block", block,
+ "error", err)
+ } else {
+ con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
+ "proposer", psig.ProposerID,
+ "block", block)
+ con.network.BroadcastDKGPartialSignature(psig)
+ sig, err := con.cfgModule.runTSig(block.Position.Round, block.Hash)
+ if err != nil {
+ con.logger.Error("Failed to run Block Tsig",
+ "block", block,
+ "error", err)
+ return
+ }
+ result := &types.AgreementResult{
+ BlockHash: block.Hash,
+ Position: block.Position,
+ Randomness: sig.Signature[:],
+ }
+ if err := con.bcModule.processAgreementResult(result); err != nil {
+ con.logger.Error("Failed to process BlockRandomness",
+ "result", result,
+ "error", err)
+ return
+ }
+ con.logger.Debug("Broadcast BlockRandomness",
+ "block", block,
+ "result", result)
+ con.network.BroadcastAgreementResult(result)
+ }
+ }(block)
+ }
+}
+
// runDKG starts running DKG protocol.
func (con *Consensus) runDKG(round, reset uint64, config *types.Config) {
con.dkgReady.L.Lock()
@@ -964,7 +1170,6 @@ func (con *Consensus) Stop() {
con.event.Reset()
con.waitGroup.Wait()
if nbApp, ok := con.app.(*nonBlocking); ok {
- fmt.Println("Stopping nonBlocking App")
nbApp.wait()
}
}
@@ -1019,11 +1224,47 @@ MessageLoop:
ch, e := con.baConfirmedBlock[val.Hash]
return ch, e
}(); exist {
- if err := utils.VerifyBlockSignature(val); err != nil {
- con.logger.Error("VerifyBlockSignature failed",
- "block", val,
- "error", err)
- continue MessageLoop
+ if val.IsEmpty() {
+ hash, err := utils.HashBlock(val)
+ if err != nil {
+ con.logger.Error("error verifying empty block hash",
+ "block", val,
+						"error", err)
+ continue MessageLoop
+ }
+ if hash != val.Hash {
+ con.logger.Error("incorrect confirmed empty block hash",
+ "block", val,
+ "hash", hash)
+ continue MessageLoop
+ }
+ if _, err := con.bcModule.proposeBlock(
+ val.Position, time.Time{}, true); err != nil {
+ con.logger.Error("error adding empty block",
+ "block", val,
+ "error", err)
+ continue MessageLoop
+ }
+ } else {
+ ok, err := con.bcModule.verifyRandomness(
+ val.Hash, val.Position.Round, val.Finalization.Randomness)
+ if err != nil {
+ con.logger.Error("error verifying confirmed block randomness",
+ "block", val,
+ "error", err)
+ continue MessageLoop
+ }
+ if !ok {
+ con.logger.Error("incorrect confirmed block randomness",
+ "block", val)
+ continue MessageLoop
+ }
+ if err := utils.VerifyBlockSignature(val); err != nil {
+ con.logger.Error("VerifyBlockSignature failed",
+ "block", val,
+ "error", err)
+ continue MessageLoop
+ }
}
func() {
con.lock.Lock()
@@ -1036,7 +1277,6 @@ MessageLoop:
ch <- val
}()
} else if val.IsFinalized() {
- // For sync mode.
if err := con.processFinalizedBlock(val); err != nil {
con.logger.Error("Failed to process finalized block",
"block", val,
@@ -1061,13 +1301,6 @@ MessageLoop:
"result", val,
"error", err)
}
- case *types.BlockRandomnessResult:
- if err := con.ProcessBlockRandomnessResult(val, true); err != nil {
- con.logger.Error("Failed to process block randomness result",
- "hash", val.BlockHash.String()[:6],
- "position", val.Position,
- "error", err)
- }
case *typesDKG.PrivateShare:
if err := con.cfgModule.processPrivateShare(val); err != nil {
con.logger.Error("Failed to process private share",
@@ -1101,139 +1334,67 @@ func (con *Consensus) ProcessAgreementResult(
con.baMgr.untouchAgreementResult(rand)
return err
}
+ if err := con.bcModule.processAgreementResult(rand); err != nil {
+ con.baMgr.untouchAgreementResult(rand)
+ return err
+ }
// Syncing BA Module.
if err := con.baMgr.processAgreementResult(rand); err != nil {
+ con.baMgr.untouchAgreementResult(rand)
return err
}
- // Calculating randomness.
- if rand.Position.Round == 0 {
- return nil
- }
- // TODO(mission): find a way to avoid spamming by older agreement results.
- // Sanity check done.
- if !con.cfgModule.touchTSigHash(rand.BlockHash) {
- return nil
- }
con.logger.Debug("Rebroadcast AgreementResult",
"result", rand)
con.network.BroadcastAgreementResult(rand)
- go con.prepareRandomnessResult(rand)
- return nil
+
+ return con.deliverFinalizedBlocks()
}
-func (con *Consensus) prepareRandomnessResult(rand *types.AgreementResult) {
- dkgSet, err := con.nodeSetCache.GetDKGSet(rand.Position.Round)
- if err != nil {
- con.logger.Error("Failed to get dkg set",
- "round", rand.Position.Round, "error", err)
- return
+// preProcessBlock performs Byzantine Agreement on the block.
+func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
+ err = con.baMgr.processBlock(b)
+ if err == nil && con.debugApp != nil {
+ con.debugApp.BlockReceived(b.Hash)
}
- if _, exist := dkgSet[con.ID]; !exist {
+ return
+}
+
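+// processFinalizedBlock verifies the signature and threshold-signature
+// randomness of a finalized block received from the network, then hands it
+// to the BA manager for fast syncing.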
+func (con *Consensus) processFinalizedBlock(b *types.Block) (err error) {
+ if b.Position.Round < DKGDelayRound {
return
}
- con.logger.Debug("PrepareRandomness", "round", rand.Position.Round, "hash", rand.BlockHash)
- psig, err := con.cfgModule.preparePartialSignature(rand.Position.Round, rand.BlockHash)
- if err != nil {
- con.logger.Error("Failed to prepare psig",
- "round", rand.Position.Round,
- "hash", rand.BlockHash.String()[:6],
- "error", err)
+ if err = utils.VerifyBlockSignature(b); err != nil {
return
}
- if err = con.signer.SignDKGPartialSignature(psig); err != nil {
- con.logger.Error("Failed to sign psig",
- "hash", rand.BlockHash.String()[:6],
- "error", err)
+ verifier, ok, err := con.tsigVerifierCache.UpdateAndGet(b.Position.Round)
+ if err != nil {
return
}
- if err = con.cfgModule.processPartialSignature(psig); err != nil {
- con.logger.Error("Failed process psig",
- "hash", rand.BlockHash.String()[:6],
- "error", err)
+ if !ok {
+ err = ErrCannotVerifyBlockRandomness
return
}
- con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
- "proposer", psig.ProposerID,
- "round", psig.Round,
- "hash", psig.Hash.String()[:6])
- con.network.BroadcastDKGPartialSignature(psig)
- tsig, err := con.cfgModule.runTSig(rand.Position.Round, rand.BlockHash)
- if err != nil {
- if err != ErrTSigAlreadyRunning {
- con.logger.Error("Failed to run TSIG",
- "position", rand.Position,
- "hash", rand.BlockHash.String()[:6],
- "error", err)
- }
+ if !verifier.VerifySignature(b.Hash, crypto.Signature{
+ Type: "bls",
+ Signature: b.Finalization.Randomness,
+ }) {
+ err = ErrIncorrectBlockRandomness
return
}
- result := &types.BlockRandomnessResult{
- BlockHash: rand.BlockHash,
- Position: rand.Position,
- Randomness: tsig.Signature,
- }
- // ProcessBlockRandomnessResult is not thread-safe so we put the result in
- // the message channnel to be processed in the main thread.
- con.msgChan <- result
-}
-
-// ProcessBlockRandomnessResult processes the randomness result.
-func (con *Consensus) ProcessBlockRandomnessResult(
- rand *types.BlockRandomnessResult, needBroadcast bool) error {
- if rand.Position.Round == 0 {
- return nil
- }
- if !con.bcModule.shouldAddRandomness(rand) {
- return nil
- }
- if err := con.bcModule.addRandomness(rand); err != nil {
- return err
- }
- if needBroadcast {
- con.logger.Debug("Calling Network.BroadcastRandomnessResult",
- "randomness", rand)
- con.network.BroadcastRandomnessResult(rand)
- }
- return con.deliverFinalizedBlocks()
-}
-
-// preProcessBlock performs Byzantine Agreement on the block.
-func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
- err = con.baMgr.processBlock(b)
+ err = con.baMgr.processFinalizedBlock(b)
if err == nil && con.debugApp != nil {
con.debugApp.BlockReceived(b.Hash)
}
return
}
-func (con *Consensus) pullRandomness() {
- defer con.waitGroup.Done()
- for {
- select {
- case <-con.ctx.Done():
- return
- default:
- }
- select {
- case <-con.ctx.Done():
- return
- case <-con.resetRandomnessTicker:
- case <-time.After(1500 * time.Millisecond):
- // TODO(jimmy): pulling period should be related to lambdaBA.
- hashes := con.bcModule.pendingBlocksWithoutRandomness()
- if len(hashes) > 0 {
- con.logger.Debug(
- "Calling Network.PullRandomness", "blocks", hashes)
- con.network.PullRandomness(hashes)
- }
- }
- }
-}
-
func (con *Consensus) deliveryGuard() {
defer con.waitGroup.Done()
- time.Sleep(con.dMoment.Sub(time.Now()))
+ select {
+ case <-con.ctx.Done():
+ case <-time.After(con.dMoment.Sub(time.Now())):
+ }
// Node takes time to start.
select {
case <-con.ctx.Done():
@@ -1259,10 +1420,6 @@ func (con *Consensus) deliveryGuard() {
// deliverBlock deliver a block to application layer.
func (con *Consensus) deliverBlock(b *types.Block) {
select {
- case con.resetRandomnessTicker <- struct{}{}:
- default:
- }
- select {
case con.resetDeliveryGuardTicker <- struct{}{}:
default:
}
@@ -1274,7 +1431,6 @@ func (con *Consensus) deliverBlock(b *types.Block) {
b.Hash, b.Finalization.Height); err != nil {
panic(err)
}
- con.cfgModule.untouchTSigHash(b.Hash)
con.logger.Debug("Calling Application.BlockDelivered", "block", b)
con.app.BlockDelivered(b.Hash, b.Position, b.Finalization.Clone())
if con.debugApp != nil {
@@ -1338,15 +1494,10 @@ func (con *Consensus) processBlock(block *types.Block) (err error) {
return
}
-// processFinalizedBlock is the entry point for handling finalized blocks.
-func (con *Consensus) processFinalizedBlock(block *types.Block) error {
- return con.bcModule.processFinalizedBlock(block)
-}
-
// PrepareBlock would setup header fields of block based on its ProposerID.
func (con *Consensus) proposeBlock(position types.Position) (
*types.Block, error) {
- b, err := con.bcModule.proposeBlock(position, time.Now().UTC())
+ b, err := con.bcModule.proposeBlock(position, time.Now().UTC(), false)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
index 06838e019..407eaeace 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
@@ -64,9 +64,6 @@ type Network interface {
// PullVotes tries to pull votes from the DEXON network.
PullVotes(position types.Position)
- // PullRandomness tries to pull randomness from the DEXON network.
- PullRandomness(hashes common.Hashes)
-
// BroadcastVote broadcasts vote to all nodes in DEXON network.
BroadcastVote(vote *types.Vote)
@@ -76,9 +73,6 @@ type Network interface {
// BroadcastAgreementResult broadcasts agreement result to DKG set.
BroadcastAgreementResult(randRequest *types.AgreementResult)
- // BroadcastRandomnessResult broadcasts rand request to Notary set.
- BroadcastRandomnessResult(randResult *types.BlockRandomnessResult)
-
// SendDKGPrivateShare sends PrivateShare to a DKG participant.
SendDKGPrivateShare(pub crypto.PublicKey, prvShare *typesDKG.PrivateShare)
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go
index 214b4cb6e..8c063286e 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go
@@ -93,7 +93,7 @@ func (l *leaderSelector) restart(crs common.Hash) {
l.numCRS = numCRS
l.hashCRS = crs
l.minCRSBlock = maxHash
- l.minBlockHash = common.Hash{}
+ l.minBlockHash = types.NullBlockHash
l.pendingBlocks = make(map[common.Hash]*types.Block)
}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/agreement.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/agreement.go
index f172b3b4c..a4a0f2023 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/agreement.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/agreement.go
@@ -19,10 +19,12 @@ package syncer
import (
"context"
+ "fmt"
"time"
"github.com/dexon-foundation/dexon-consensus/common"
"github.com/dexon-foundation/dexon-consensus/core"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
"github.com/dexon-foundation/dexon-consensus/core/types"
"github.com/dexon-foundation/dexon-consensus/core/utils"
)
@@ -30,33 +32,42 @@ import (
// Struct agreement implements struct of BA (Byzantine Agreement) protocol
// needed in syncer, which only receives agreement results.
type agreement struct {
- cache *utils.NodeSetCache
- inputChan chan interface{}
- outputChan chan<- *types.Block
- pullChan chan<- common.Hash
- blocks map[types.Position]map[common.Hash]*types.Block
- agreementResults map[common.Hash]struct{}
- latestCRSRound uint64
- pendings map[uint64]map[common.Hash]*types.AgreementResult
- logger common.Logger
- confirmedBlocks map[common.Hash]struct{}
- ctx context.Context
- ctxCancel context.CancelFunc
+ chainTip uint64
+ cache *utils.NodeSetCache
+ tsigVerifierCache *core.TSigVerifierCache
+ inputChan chan interface{}
+ outputChan chan<- *types.Block
+ pullChan chan<- common.Hash
+ blocks map[types.Position]map[common.Hash]*types.Block
+ agreementResults map[common.Hash]struct{}
+ latestCRSRound uint64
+ pendingAgrs map[uint64]map[common.Hash]*types.AgreementResult
+ pendingBlocks map[uint64]map[common.Hash]*types.Block
+ logger common.Logger
+ confirmedBlocks map[common.Hash]struct{}
+ ctx context.Context
+ ctxCancel context.CancelFunc
}
// newAgreement creates a new agreement instance.
-func newAgreement(ch chan<- *types.Block, pullChan chan<- common.Hash,
- cache *utils.NodeSetCache, logger common.Logger) *agreement {
+func newAgreement(chainTip uint64,
+ ch chan<- *types.Block, pullChan chan<- common.Hash,
+ cache *utils.NodeSetCache, verifier *core.TSigVerifierCache,
+ logger common.Logger) *agreement {
a := &agreement{
- cache: cache,
- inputChan: make(chan interface{}, 1000),
- outputChan: ch,
- pullChan: pullChan,
- blocks: make(map[types.Position]map[common.Hash]*types.Block),
- agreementResults: make(map[common.Hash]struct{}),
- logger: logger,
- pendings: make(
+ chainTip: chainTip,
+ cache: cache,
+ tsigVerifierCache: verifier,
+ inputChan: make(chan interface{}, 1000),
+ outputChan: ch,
+ pullChan: pullChan,
+ blocks: make(map[types.Position]map[common.Hash]*types.Block),
+ agreementResults: make(map[common.Hash]struct{}),
+ logger: logger,
+ pendingAgrs: make(
map[uint64]map[common.Hash]*types.AgreementResult),
+ pendingBlocks: make(
+ map[uint64]map[common.Hash]*types.Block),
confirmedBlocks: make(map[common.Hash]struct{}),
}
a.ctx, a.ctxCancel = context.WithCancel(context.Background())
@@ -76,7 +87,11 @@ func (a *agreement) run() {
}
switch v := val.(type) {
case *types.Block:
- a.processBlock(v)
+ if v.IsFinalized() {
+ a.processFinalizedBlock(v)
+ } else {
+ a.processBlock(v)
+ }
case *types.AgreementResult:
a.processAgreementResult(v)
case uint64:
@@ -100,6 +115,49 @@ func (a *agreement) processBlock(b *types.Block) {
}
}
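+// processFinalizedBlock confirms a finalized block received while syncing.
+// Blocks from rounds whose CRS is not known yet are cached; otherwise the
+// block signature and its randomness are verified before confirmation.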
+func (a *agreement) processFinalizedBlock(block *types.Block) {
+ if block.Position.Round < core.DKGDelayRound {
+ return
+ }
+	// Cache blocks whose CRS is not ready yet.
+ if _, exists := a.confirmedBlocks[block.Hash]; exists {
+ a.logger.Trace("finalized block already confirmed", "block", block)
+ return
+ }
+ if block.Position.Round > a.latestCRSRound {
+ pendingsForRound, exists := a.pendingBlocks[block.Position.Round]
+ if !exists {
+ pendingsForRound = make(map[common.Hash]*types.Block)
+ a.pendingBlocks[block.Position.Round] = pendingsForRound
+ }
+ pendingsForRound[block.Hash] = block
+ a.logger.Trace("finalized block cached", "block", block)
+ return
+ }
+ if err := utils.VerifyBlockSignature(block); err != nil {
+ return
+ }
+ verifier, ok, err := a.tsigVerifierCache.UpdateAndGet(block.Position.Round)
+ if err != nil {
+ a.logger.Error("error verifying block randomness",
+ "block", block,
+ "error", err)
+ return
+ }
+ if !ok {
+ a.logger.Error("cannot verify block randomness", "block", block)
+ return
+ }
+ if !verifier.VerifySignature(block.Hash, crypto.Signature{
+ Type: "bls",
+ Signature: block.Finalization.Randomness,
+ }) {
+ a.logger.Error("incorrect block randomness", "block", block)
+ return
+ }
+ a.confirm(block)
+}
+
func (a *agreement) processAgreementResult(r *types.AgreementResult) {
// Cache those results that CRS is not ready yet.
if _, exists := a.confirmedBlocks[r.BlockHash]; exists {
@@ -107,10 +165,10 @@ func (a *agreement) processAgreementResult(r *types.AgreementResult) {
return
}
if r.Position.Round > a.latestCRSRound {
- pendingsForRound, exists := a.pendings[r.Position.Round]
+ pendingsForRound, exists := a.pendingAgrs[r.Position.Round]
if !exists {
pendingsForRound = make(map[common.Hash]*types.AgreementResult)
- a.pendings[r.Position.Round] = pendingsForRound
+ a.pendingAgrs[r.Position.Round] = pendingsForRound
}
pendingsForRound[r.BlockHash] = r
a.logger.Trace("Agreement result cached", "result", r)
@@ -122,9 +180,32 @@ func (a *agreement) processAgreementResult(r *types.AgreementResult) {
"error", err)
return
}
+ if r.Position.Round >= core.DKGDelayRound {
+ verifier, ok, err := a.tsigVerifierCache.UpdateAndGet(r.Position.Round)
+ if err != nil {
+ a.logger.Error("error verifying agreement result randomness",
+ "result", r,
+ "error", err)
+ return
+ }
+ if !ok {
+ a.logger.Error("cannot verify agreement result randomness", "result", r)
+ return
+ }
+ if !verifier.VerifySignature(r.BlockHash, crypto.Signature{
+ Type: "bls",
+ Signature: r.Randomness,
+ }) {
+ a.logger.Error("incorrect agreement result randomness", "result", r)
+ return
+ }
+ }
if r.IsEmptyBlock {
b := &types.Block{
Position: r.Position,
+ Finalization: types.FinalizationResult{
+ Randomness: r.Randomness,
+ },
}
// Empty blocks should be confirmed directly, they won't be sent over
// the wire.
@@ -133,6 +214,7 @@ func (a *agreement) processAgreementResult(r *types.AgreementResult) {
}
if bs, exist := a.blocks[r.Position]; exist {
if b, exist := bs[r.BlockHash]; exist {
+ b.Finalization.Randomness = r.Randomness
a.confirm(b)
return
}
@@ -164,11 +246,11 @@ func (a *agreement) processNewCRS(round uint64) {
a.latestCRSRound = round
// Verify all pending results.
for r := prevRound; r <= a.latestCRSRound; r++ {
- pendingsForRound := a.pendings[r]
+ pendingsForRound := a.pendingAgrs[r]
if pendingsForRound == nil {
continue
}
- delete(a.pendings, r)
+ delete(a.pendingAgrs, r)
for _, res := range pendingsForRound {
if err := core.VerifyAgreementResult(res, a.cache); err != nil {
a.logger.Error("Invalid agreement result",
@@ -185,6 +267,11 @@ func (a *agreement) processNewCRS(round uint64) {
// confirm notifies consensus the confirmation of a block in BA.
func (a *agreement) confirm(b *types.Block) {
+ if b.Position.Round >= core.DKGDelayRound &&
+ len(b.Finalization.Randomness) == 0 {
+ panic(fmt.Errorf("confirm a block %s without randomness", b))
+ }
+ b.Finalization.Height = b.Position.Height + 1
if _, exist := a.confirmedBlocks[b.Hash]; !exist {
delete(a.blocks, b.Position)
delete(a.agreementResults, b.Hash)
@@ -202,4 +289,9 @@ func (a *agreement) confirm(b *types.Block) {
}
a.confirmedBlocks[b.Hash] = struct{}{}
}
+ if b.Position.Height > a.chainTip+1 {
+ if _, exist := a.confirmedBlocks[b.ParentHash]; !exist {
+ a.pullChan <- b.ParentHash
+ }
+ }
}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
index 24c781aac..b692b56ef 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
@@ -63,7 +63,6 @@ type Consensus struct {
nodeSetCache *utils.NodeSetCache
tsigVerifier *core.TSigVerifierCache
- randomnessResults map[common.Hash]*types.BlockRandomnessResult
blocks types.BlocksByPosition
agreementModule *agreement
agreementRoundCut uint64
@@ -100,24 +99,28 @@ func NewConsensus(
logger common.Logger) *Consensus {
con := &Consensus{
- dMoment: dMoment,
- app: app,
- gov: gov,
- db: db,
- network: network,
- nodeSetCache: utils.NewNodeSetCache(gov),
- tsigVerifier: core.NewTSigVerifierCache(gov, 7),
- prv: prv,
- logger: logger,
- receiveChan: make(chan *types.Block, 1000),
- pullChan: make(chan common.Hash, 1000),
- randomnessResults: make(map[common.Hash]*types.BlockRandomnessResult),
- heightEvt: common.NewEvent(),
+ dMoment: dMoment,
+ app: app,
+ gov: gov,
+ db: db,
+ network: network,
+ nodeSetCache: utils.NewNodeSetCache(gov),
+ tsigVerifier: core.NewTSigVerifierCache(gov, 7),
+ prv: prv,
+ logger: logger,
+ receiveChan: make(chan *types.Block, 1000),
+ pullChan: make(chan common.Hash, 1000),
+ heightEvt: common.NewEvent(),
}
con.ctx, con.ctxCancel = context.WithCancel(context.Background())
_, con.initChainTipHeight = db.GetCompactionChainTipInfo()
con.agreementModule = newAgreement(
- con.receiveChan, con.pullChan, con.nodeSetCache, con.logger)
+ con.initChainTipHeight,
+ con.receiveChan,
+ con.pullChan,
+ con.nodeSetCache,
+ con.tsigVerifier,
+ con.logger)
con.agreementWaitGroup.Add(1)
go func() {
defer con.agreementWaitGroup.Done()
@@ -360,10 +363,6 @@ func (con *Consensus) GetSyncedConsensus() (*core.Consensus, error) {
}
// flush all blocks in con.blocks into core.Consensus, and build
// core.Consensus from syncer.
- randomnessResults := []*types.BlockRandomnessResult{}
- for _, r := range con.randomnessResults {
- randomnessResults = append(randomnessResults, r)
- }
con.dummyCancel()
<-con.dummyFinished
var err error
@@ -377,7 +376,6 @@ func (con *Consensus) GetSyncedConsensus() (*core.Consensus, error) {
con.network,
con.prv,
con.blocks,
- randomnessResults,
con.dummyMsgBuffer,
con.logger)
return con.syncedConsensus, err
@@ -475,55 +473,6 @@ func (con *Consensus) startAgreement() {
}()
}
-func (con *Consensus) cacheRandomnessResult(r *types.BlockRandomnessResult) {
- // There is no block randomness at round-0.
- if r.Position.Round == 0 {
- return
- }
- // We only have to cache randomness result after cutting round.
- if func() bool {
- con.lock.RLock()
- defer con.lock.RUnlock()
- if len(con.blocks) > 0 && r.Position.Older(con.blocks[0].Position) {
- return true
- }
- if r.Position.Round > con.latestCRSRound {
- // We can't process randomness from rounds that its CRS is still
- // unknown.
- return true
- }
- _, exists := con.randomnessResults[r.BlockHash]
- return exists
- }() {
- return
- }
- v, ok, err := con.tsigVerifier.UpdateAndGet(r.Position.Round)
- if err != nil {
- con.logger.Error("Unable to get tsig verifier",
- "hash", r.BlockHash.String()[:6],
- "position", r.Position,
- "error", err,
- )
- return
- }
- if !ok {
- con.logger.Error("Tsig is not ready", "position", &r.Position)
- return
- }
- if !v.VerifySignature(r.BlockHash, crypto.Signature{
- Type: "bls",
- Signature: r.Randomness}) {
- con.logger.Info("Block randomness is not valid",
- "position", r.Position,
- "hash", r.BlockHash.String()[:6],
- )
- return
- }
- con.lock.Lock()
- defer con.lock.Unlock()
- con.randomnessResults[r.BlockHash] = r
-}
-
// startNetwork starts network for receiving blocks and agreement results.
func (con *Consensus) startNetwork() {
con.waitGroup.Add(1)
@@ -542,9 +491,6 @@ func (con *Consensus) startNetwork() {
if v.Position.Height <= con.initChainTipHeight {
continue loop
}
- case *types.BlockRandomnessResult:
- con.cacheRandomnessResult(v)
- continue loop
default:
continue loop
}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/block-randomness.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/block-randomness.go
index 74360c735..b87e8a11d 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/block-randomness.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/block-randomness.go
@@ -26,27 +26,20 @@ import (
// AgreementResult describes an agreement result.
type AgreementResult struct {
- BlockHash common.Hash `json:"block_hash"`
- Position Position `json:"position"`
- Votes []Vote `json:"votes"`
- IsEmptyBlock bool `json:"is_empty_block"`
+ BlockHash common.Hash `json:"block_hash"`
+ Position Position `json:"position"`
+ Votes []Vote `json:"votes"`
+ IsEmptyBlock bool `json:"is_empty_block"`
+ FinalizationHeight uint64 `json:"finalization_height"`
+ Randomness []byte `json:"randomness"`
}
func (r *AgreementResult) String() string {
- return fmt.Sprintf("agreementResult{Hash:%s %s}",
- r.BlockHash.String()[:6], r.Position)
-}
-
-// BlockRandomnessResult describes a block randomness result
-type BlockRandomnessResult struct {
- BlockHash common.Hash `json:"block_hash"`
- Position Position `json:"position"`
- Randomness []byte `json:"randomness"`
-}
-
-func (r *BlockRandomnessResult) String() string {
- return fmt.Sprintf("blockRandomness{Block:%s Pos:%s Rand:%s}",
+ if len(r.Randomness) == 0 {
+ return fmt.Sprintf("agreementResult{Block:%s Pos:%s}",
+ r.BlockHash.String()[:6], r.Position)
+ }
+ return fmt.Sprintf("agreementResult{Block:%s Pos:%s Rand:%s}",
r.BlockHash.String()[:6], r.Position,
- hex.EncodeToString(r.Randomness)[:6],
- )
+ hex.EncodeToString(r.Randomness)[:6])
}
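
Note: since BlockRandomnessResult is removed, code that used to ship randomness as a separate message now sets it directly on the agreement result. A small sketch of the new shape and of the two String() formats; the sample hash and values are illustrative.

package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus/common"
	"github.com/dexon-foundation/dexon-consensus/core/types"
)

func main() {
	r := &types.AgreementResult{
		BlockHash: common.Hash{0x12, 0x34, 0x56},
		Position:  types.Position{Round: 1, Height: 42},
	}
	fmt.Println(r) // agreementResult{Block:<hash prefix> Pos:<position>}

	// The threshold-signature randomness and the finalization height now
	// ride on the result itself.
	r.Randomness = []byte{0xde, 0xad, 0xbe, 0xef}
	r.FinalizationHeight = 43
	fmt.Println(r) // agreementResult{Block:<hash prefix> Pos:<position> Rand:deadbe}
}
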
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/config.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/config.go
index eda09f06e..dce38369e 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/config.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/config.go
@@ -30,7 +30,6 @@ type Config struct {
// Set related.
NotarySetSize uint32
- DKGSetSize uint32
// Time related.
RoundLength uint64
@@ -43,7 +42,6 @@ func (c *Config) Clone() *Config {
LambdaBA: c.LambdaBA,
LambdaDKG: c.LambdaDKG,
NotarySetSize: c.NotarySetSize,
- DKGSetSize: c.DKGSetSize,
RoundLength: c.RoundLength,
MinBlockInterval: c.MinBlockInterval,
}
@@ -60,8 +58,6 @@ func (c *Config) Bytes() []byte {
binaryNotarySetSize := make([]byte, 4)
binary.LittleEndian.PutUint32(binaryNotarySetSize, c.NotarySetSize)
- binaryDKGSetSize := make([]byte, 4)
- binary.LittleEndian.PutUint32(binaryDKGSetSize, c.DKGSetSize)
binaryRoundLength := make([]byte, 8)
binary.LittleEndian.PutUint64(binaryRoundLength, c.RoundLength)
@@ -73,7 +69,6 @@ func (c *Config) Bytes() []byte {
enc = append(enc, binaryLambdaBA...)
enc = append(enc, binaryLambdaDKG...)
enc = append(enc, binaryNotarySetSize...)
- enc = append(enc, binaryDKGSetSize...)
enc = append(enc, binaryRoundLength...)
enc = append(enc, binaryMinBlockInterval...)
return enc
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go
index fccfbb6aa..806000763 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go
@@ -40,7 +40,6 @@ type subSetTargetType byte
const (
targetNotarySet subSetTargetType = iota
- targetDKGSet
targetNodeLeader
)
@@ -89,11 +88,6 @@ func NewNotarySetTarget(crs common.Hash) *SubSetTarget {
return newTarget(targetNotarySet, crs[:])
}
-// NewDKGSetTarget is the target for getting DKG Set.
-func NewDKGSetTarget(crs common.Hash) *SubSetTarget {
- return newTarget(targetDKGSet, crs[:])
-}
-
// NewNodeLeaderTarget is the target for getting leader of fast BA.
func NewNodeLeaderTarget(crs common.Hash, height uint64) *SubSetTarget {
binaryHeight := make([]byte, 8)
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go
index c4a625edd..8bc0c78c2 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/vote.go
@@ -22,6 +22,7 @@ import (
"github.com/dexon-foundation/dexon-consensus/common"
"github.com/dexon-foundation/dexon-consensus/core/crypto"
+ cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
)
// VoteType is the type of vote.
@@ -61,8 +62,9 @@ type VoteHeader struct {
// Vote is the vote structure defined in Crypto Shuffle Algorithm.
type Vote struct {
- VoteHeader `json:"header"`
- Signature crypto.Signature `json:"signature"`
+ VoteHeader `json:"header"`
+ PartialSignature cryptoDKG.PartialSignature `json:"partial_signature"`
+ Signature crypto.Signature `json:"signature"`
}
func (v *Vote) String() string {
@@ -91,6 +93,8 @@ func (v *Vote) Clone() *Vote {
Period: v.Period,
Position: v.Position,
},
+ PartialSignature: cryptoDKG.PartialSignature(
+ crypto.Signature(v.PartialSignature).Clone()),
Signature: v.Signature.Clone(),
}
}
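
Note: votes now carry the DKG partial signature that feeds the threshold signature above. A minimal sketch (sample values are illustrative) showing that PartialSignature shares crypto.Signature's underlying type and survives Clone().

package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus/core/crypto"
	cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
	"github.com/dexon-foundation/dexon-consensus/core/types"
)

func main() {
	v := &types.Vote{}
	// PartialSignature is a named type over crypto.Signature, so a plain
	// conversion attaches one.
	v.PartialSignature = cryptoDKG.PartialSignature(crypto.Signature{
		Type:      "bls",
		Signature: []byte{0x01, 0x02},
	})
	c := v.Clone()
	fmt.Println(len(c.PartialSignature.Signature)) // 2: the field is carried by Clone
}
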
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils.go
index bd3170153..0250cf29b 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils.go
@@ -159,6 +159,12 @@ func HashConfigurationBlock(
// instance.
func VerifyAgreementResult(
res *types.AgreementResult, cache *utils.NodeSetCache) error {
+ if res.Position.Round >= DKGDelayRound {
+ if len(res.Randomness) == 0 {
+ return ErrMissingRandomness
+ }
+ return nil
+ }
notarySet, err := cache.GetNotarySet(res.Position.Round)
if err != nil {
return err
@@ -203,7 +209,7 @@ func VerifyAgreementResult(
}
voted[vote.ProposerID] = struct{}{}
}
- if len(voted) < len(notarySet)/3*2+1 {
+ if len(voted) < len(notarySet)*2/3+1 {
return ErrNotEnoughVotes
}
return nil
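
Note: besides short-circuiting the vote check once randomness takes over (rounds at or above DKGDelayRound only need a non-empty Randomness here; the signature itself is verified against the tsig cache, as in the syncer hunk above), this hunk fixes an operator-ordering bug in the vote threshold. Go evaluates integer division left to right, so len/3*2+1 can fall below a strict two-thirds supermajority while len*2/3+1 cannot. A self-contained arithmetic check:

package main

import "fmt"

func main() {
	for _, n := range []int{4, 5, 7, 8, 10} {
		old := n/3*2 + 1   // previous threshold
		fixed := n*2/3 + 1 // corrected threshold, always strictly above 2n/3
		fmt.Printf("notary set %2d: old=%d fixed=%d\n", n, old, fixed)
	}
	// For n=5 the old bound (3) is below a 2/3 supermajority (2n/3 is about 3.33),
	// while the fixed bound is 4; n=8 similarly moves from 5 to 6.
}
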
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go
index 8be503fe3..34bf08ff3 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go
@@ -36,7 +36,7 @@ func hashWitness(witness *types.Witness) (common.Hash, error) {
// HashBlock generates hash of a types.Block.
func HashBlock(block *types.Block) (common.Hash, error) {
- hashPosition := hashPosition(block.Position)
+ hashPosition := HashPosition(block.Position)
binaryTimestamp, err := block.Timestamp.UTC().MarshalBinary()
if err != nil {
return common.Hash{}, err
@@ -88,13 +88,14 @@ func HashVote(vote *types.Vote) common.Hash {
binaryPeriod := make([]byte, 8)
binary.LittleEndian.PutUint64(binaryPeriod, vote.Period)
- hashPosition := hashPosition(vote.Position)
+ hashPosition := HashPosition(vote.Position)
hash := crypto.Keccak256Hash(
vote.ProposerID.Hash[:],
vote.BlockHash[:],
binaryPeriod,
hashPosition[:],
+ vote.PartialSignature.Signature[:],
[]byte{byte(vote.Type)},
)
return hash
@@ -114,7 +115,7 @@ func VerifyVoteSignature(vote *types.Vote) (bool, error) {
}
func hashCRS(block *types.Block, crs common.Hash) common.Hash {
- hashPos := hashPosition(block.Position)
+ hashPos := HashPosition(block.Position)
return crypto.Keccak256Hash(crs[:], hashPos[:])
}
@@ -132,7 +133,8 @@ func VerifyCRSSignature(block *types.Block, crs common.Hash) (
return true, nil
}
-func hashPosition(position types.Position) common.Hash {
+// HashPosition generates hash of a types.Position.
+func HashPosition(position types.Position) common.Hash {
binaryRound := make([]byte, 8)
binary.LittleEndian.PutUint64(binaryRound, position.Round)
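
Note: exporting HashPosition lets packages outside core/utils hash positions, and HashVote now folds the partial signature into the vote hash. A minimal sketch of both; the sample position and signature bytes are illustrative.

package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus/core/crypto"
	cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
	"github.com/dexon-foundation/dexon-consensus/core/types"
	"github.com/dexon-foundation/dexon-consensus/core/utils"
)

func main() {
	pos := types.Position{Round: 2, Height: 100}
	fmt.Println(utils.HashPosition(pos)) // callable from outside core/utils now

	// Two votes that differ only in their partial signature no longer share a hash.
	v1 := &types.Vote{VoteHeader: types.VoteHeader{Position: pos}}
	v2 := v1.Clone()
	v2.PartialSignature = cryptoDKG.PartialSignature(crypto.Signature{
		Type: "bls", Signature: []byte{0xaa}})
	fmt.Println(utils.HashVote(v1).String() == utils.HashVote(v2).String()) // false
}
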
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go
index 00901237d..89ebd2409 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go
@@ -39,7 +39,6 @@ type sets struct {
crs common.Hash
nodeSet *types.NodeSet
notarySet map[types.NodeID]struct{}
- dkgSet map[types.NodeID]struct{}
leaderNode map[uint64]types.NodeID
}
@@ -134,16 +133,6 @@ func (cache *NodeSetCache) GetNotarySet(
return cache.cloneMap(IDs.notarySet), nil
}
-// GetDKGSet returns of DKG set of this round.
-func (cache *NodeSetCache) GetDKGSet(
- round uint64) (map[types.NodeID]struct{}, error) {
- IDs, err := cache.getOrUpdate(round)
- if err != nil {
- return nil, err
- }
- return cache.cloneMap(IDs.dkgSet), nil
-}
-
// GetLeaderNode returns the BA leader of the position.
func (cache *NodeSetCache) GetLeaderNode(pos types.Position) (
types.NodeID, error) {
@@ -254,10 +243,6 @@ func (cache *NodeSetCache) update(round uint64) (nIDs *sets, err error) {
notarySet: make(map[types.NodeID]struct{}),
leaderNode: make(map[uint64]types.NodeID, cfg.RoundLength),
}
- if round >= dkgDelayRound {
- nIDs.dkgSet = nodeSet.GetSubSet(
- int(cfg.DKGSetSize), types.NewDKGSetTarget(crs))
- }
nIDs.notarySet = nodeSet.GetSubSet(
int(cfg.NotarySetSize), types.NewNotarySetTarget(crs))
cache.rounds[round] = nIDs
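
Note: with GetDKGSet gone, the cache derives a single CRS-based subset per round, and former DKG-set callers simply query the notary set. A one-line migration sketch; the helper name is illustrative.

package sketch

import (
	"github.com/dexon-foundation/dexon-consensus/core/types"
	"github.com/dexon-foundation/dexon-consensus/core/utils"
)

// dkgParticipants shows the migration path for former GetDKGSet callers:
// DKG participation is now just membership in the round's notary set.
func dkgParticipants(
	cache *utils.NodeSetCache, round uint64) (map[types.NodeID]struct{}, error) {
	return cache.GetNotarySet(round)
}
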
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/utils.go
index 14687d6ac..fcdf4224e 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/utils.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/utils.go
@@ -136,7 +136,7 @@ func LaunchDummyReceiver(
// GetDKGThreshold returns the expected threshold for the given DKG set size.
func GetDKGThreshold(config *types.Config) int {
- return int(config.DKGSetSize/3) + 1
+ return int(config.NotarySetSize*2/3) + 1
}
// GetNextRoundValidationHeight returns the block height to check if the next
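
Note: the DKG threshold is now derived from the notary set size, matching the two-thirds bound used for vote counting above. A quick check of the new formula for a few set sizes:

package main

import "fmt"

func main() {
	// GetDKGThreshold now computes int(NotarySetSize*2/3) + 1.
	for _, n := range []uint32{4, 7, 13, 21} {
		fmt.Printf("NotarySetSize=%2d -> DKG threshold=%d\n", n, int(n*2/3)+1)
	}
	// NotarySetSize= 7 -> DKG threshold=5
	// NotarySetSize=21 -> DKG threshold=15
}
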
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 53474352d..7c3c37469 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -141,16 +141,16 @@
{
"checksumSHA1": "8EuKVkP1v/w5fRuuvUaXX5k/F+I=",
"path": "github.com/dexon-foundation/dexon-consensus/common",
- "revision": "fa3b5a29499739e90b3cf17f9a0cf60a72a64fc0",
- "revisionTime": "2019-03-24T06:43:33Z",
+ "revision": "e41fcf0bb3f5fe6d473ef2056b6143b92c65faf3",
+ "revisionTime": "2019-03-27T10:11:57Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
- "checksumSHA1": "MSq3BukG5RnWbWAVNGibFcHzEAI=",
+ "checksumSHA1": "BXM+kUVDhV8uQt0MuE9mwdHNB2c=",
"path": "github.com/dexon-foundation/dexon-consensus/core",
- "revision": "fa3b5a29499739e90b3cf17f9a0cf60a72a64fc0",
- "revisionTime": "2019-03-24T06:43:33Z",
+ "revision": "e41fcf0bb3f5fe6d473ef2056b6143b92c65faf3",
+ "revisionTime": "2019-03-27T10:11:57Z",
"version": "single-chain",
"versionExact": "single-chain"
},
@@ -165,64 +165,64 @@
{
"checksumSHA1": "tQSbYCu5P00lUhKsx3IbBZCuSLY=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto",
- "revision": "fa3b5a29499739e90b3cf17f9a0cf60a72a64fc0",
- "revisionTime": "2019-03-24T06:43:33Z",
+ "revision": "e41fcf0bb3f5fe6d473ef2056b6143b92c65faf3",
+ "revisionTime": "2019-03-27T10:11:57Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
"checksumSHA1": "mMdctxTa/jNwAwZjjYoyEZdLoF8=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg",
- "revision": "fa3b5a29499739e90b3cf17f9a0cf60a72a64fc0",
- "revisionTime": "2019-03-24T06:43:33Z",
+ "revision": "e41fcf0bb3f5fe6d473ef2056b6143b92c65faf3",
+ "revisionTime": "2019-03-27T10:11:57Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
"checksumSHA1": "BhLKK8RveoLaeXc9UyUKMwQqchU=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa",
- "revision": "fa3b5a29499739e90b3cf17f9a0cf60a72a64fc0",
- "revisionTime": "2019-03-24T06:43:33Z",
+ "revision": "e41fcf0bb3f5fe6d473ef2056b6143b92c65faf3",
+ "revisionTime": "2019-03-27T10:11:57Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
"checksumSHA1": "b99zZvbWvBimv1NiPGGF1yQ4dKY=",
"path": "github.com/dexon-foundation/dexon-consensus/core/db",
- "revision": "fa3b5a29499739e90b3cf17f9a0cf60a72a64fc0",
- "revisionTime": "2019-03-24T06:43:33Z",
+ "revision": "e41fcf0bb3f5fe6d473ef2056b6143b92c65faf3",
+ "revisionTime": "2019-03-27T10:11:57Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
- "checksumSHA1": "//GAtzIAkSWkRZhDh/+WhssGUnQ=",
+ "checksumSHA1": "+k6JeUI7CGgSK6aQBV5l3d9/YaE=",
"path": "github.com/dexon-foundation/dexon-consensus/core/syncer",
- "revision": "fa3b5a29499739e90b3cf17f9a0cf60a72a64fc0",
- "revisionTime": "2019-03-24T06:43:33Z",
+ "revision": "e41fcf0bb3f5fe6d473ef2056b6143b92c65faf3",
+ "revisionTime": "2019-03-27T10:11:57Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
- "checksumSHA1": "id8imcgp3SqYhIx0k3Chd0VZrUQ=",
+ "checksumSHA1": "/Dys3UcnQ3w8Yc9YzN0GAVXFwwQ=",
"path": "github.com/dexon-foundation/dexon-consensus/core/types",
- "revision": "fa3b5a29499739e90b3cf17f9a0cf60a72a64fc0",
- "revisionTime": "2019-03-24T06:43:33Z",
+ "revision": "e41fcf0bb3f5fe6d473ef2056b6143b92c65faf3",
+ "revisionTime": "2019-03-27T10:11:57Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
"checksumSHA1": "yoVRmvJDCp/1jSfY7wMt2LBQ9e8=",
"path": "github.com/dexon-foundation/dexon-consensus/core/types/dkg",
- "revision": "fa3b5a29499739e90b3cf17f9a0cf60a72a64fc0",
- "revisionTime": "2019-03-24T06:43:33Z",
+ "revision": "e41fcf0bb3f5fe6d473ef2056b6143b92c65faf3",
+ "revisionTime": "2019-03-27T10:11:57Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
- "checksumSHA1": "H0xXSiixBuBEXD/n2rwJCy8rZ/I=",
+ "checksumSHA1": "hZq/A+YpLzTjL+cCsIly+qZiEfM=",
"path": "github.com/dexon-foundation/dexon-consensus/core/utils",
- "revision": "fa3b5a29499739e90b3cf17f9a0cf60a72a64fc0",
- "revisionTime": "2019-03-24T06:43:33Z",
+ "revision": "e41fcf0bb3f5fe6d473ef2056b6143b92c65faf3",
+ "revisionTime": "2019-03-27T10:11:57Z",
"version": "single-chain",
"versionExact": "single-chain"
},