author    Mission Liao <mission.liao@dexon.org>  2018-12-29 20:56:05 +0800
committer Wei-Ning Huang <w@byzantine-lab.io>    2019-06-12 17:27:20 +0800
commit    dca9e3e2521648b6632062429e4e3deabf5b54f3 (patch)
tree      0906bbc49c07bae1f735aa5afb1a3daa00d06617 /vendor/github.com
parent    17b7cf172b16b91f458d17f2aa38832cd0120d0d (diff)
Sync latest DEXON core (#111)
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/consensus-timestamp.go |   9
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go    | 125
2 files changed, 90 insertions(+), 44 deletions(-)
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus-timestamp.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus-timestamp.go
index a1ace97f2..d7ce8e23e 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus-timestamp.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus-timestamp.go
@@ -80,6 +80,15 @@ func (ct *consensusTimestamp) appendConfig(
if round != uint64(len(ct.numChainsOfRounds))+ct.numChainsBase {
return ErrRoundNotIncreasing
}
+ // This segment is to handle the corner case for config checking logic in
+ // processBlock method.
+ if len(ct.numChainsOfRounds) == 1 {
+ if ct.numChainsOfRounds[0] > config.NumChains {
+ ct.resizeTimetamps(ct.numChainsOfRounds[0])
+ } else {
+ ct.resizeTimetamps(config.NumChains)
+ }
+ }
ct.numChainsOfRounds = append(ct.numChainsOfRounds, config.NumChains)
return nil
}
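
Note on the hunk above: with only round 0's config recorded, the appended config must grow the timestamp buffer to the larger of the two chain counts, otherwise the config check in processBlock can index past it. A minimal sketch of that resize rule, assuming nothing beyond the diff; targetSize is a hypothetical stand-in for the branch around resizeTimetamps:

package main

import "fmt"

// targetSize mirrors the corner case handled above: with only round 0's
// config known, the timestamp buffer must cover the larger of the current
// and the appended round's chain counts.
func targetSize(current, appended uint32) uint32 {
	if current > appended {
		return current
	}
	return appended
}

func main() {
	fmt.Println(targetSize(8, 4))  // shrinking rounds keep the larger buffer: 8
	fmt.Println(targetSize(8, 12)) // growing rounds expand it: 12
}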
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
index c767a6d53..d334bbd88 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
@@ -18,6 +18,7 @@
package syncer
import (
+ "bytes"
"context"
"fmt"
"sort"
@@ -65,7 +66,7 @@ type Consensus struct {
validatedChains map[uint32]struct{}
finalizedBlockHashes common.Hashes
latticeLastRound uint64
- randomnessResults []*types.BlockRandomnessResult
+ randomnessResults map[common.Hash]*types.BlockRandomnessResult
blocks []types.ByPosition
agreements []*agreement
configs []*types.Config
@@ -107,9 +108,10 @@ func NewConsensus(
configs: []*types.Config{
utils.GetConfigWithPanic(gov, 0, logger),
},
- roundBeginTimes: []time.Time{dMoment},
- receiveChan: make(chan *types.Block, 1000),
- pullChan: make(chan common.Hash, 1000),
+ roundBeginTimes: []time.Time{dMoment},
+ receiveChan: make(chan *types.Block, 1000),
+ pullChan: make(chan common.Hash, 1000),
+ randomnessResults: make(map[common.Hash]*types.BlockRandomnessResult),
}
con.ctx, con.ctxCancel = context.WithCancel(context.Background())
return con
@@ -260,6 +262,9 @@ func (con *Consensus) ensureAgreementOverlapRound() bool {
for r = range tipRoundMap {
break
}
+ con.logger.Info("check agreement round cut",
+ "tip-round", r,
+ "configs", len(con.configs))
if tipRoundMap[r] == con.configs[r].NumChains {
con.agreementRoundCut = r
con.logger.Debug("agreement round cut found, round", r)
@@ -500,12 +505,16 @@ func (con *Consensus) GetSyncedConsensus() (*core.Consensus, error) {
// flush all blocks in con.blocks into core.Consensus, and build
// core.Consensus from syncer.
confirmedBlocks := []*types.Block{}
+ randomnessResults := []*types.BlockRandomnessResult{}
func() {
con.lock.Lock()
defer con.lock.Unlock()
for _, bs := range con.blocks {
confirmedBlocks = append(confirmedBlocks, bs...)
}
+ for _, r := range con.randomnessResults {
+ randomnessResults = append(randomnessResults, r)
+ }
}()
var err error
con.syncedConsensus, err = core.NewConsensusFromSyncer(
@@ -518,7 +527,7 @@ func (con *Consensus) GetSyncedConsensus() (*core.Consensus, error) {
con.prv,
con.lattice,
confirmedBlocks,
- con.randomnessResults,
+ randomnessResults,
con.logger)
return con.syncedConsensus, err
}
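
Note on the hunk above: because randomnessResults is now a map, GetSyncedConsensus snapshots its values into a slice under the lock before handing them to core.NewConsensusFromSyncer. A minimal sketch of that copy-under-lock pattern, using simplified stand-in types rather than the real dexon-consensus ones:

package main

import (
	"fmt"
	"sync"
)

type result struct{ payload string }

type cache struct {
	lock    sync.RWMutex
	results map[string]*result
}

// snapshot copies the map's values into a slice while holding the lock,
// so the consumer gets a stable point-in-time view.
func (c *cache) snapshot() []*result {
	c.lock.RLock()
	defer c.lock.RUnlock()
	out := make([]*result, 0, len(c.results))
	for _, r := range c.results {
		out = append(out, r)
	}
	return out
}

func main() {
	c := &cache{results: map[string]*result{"a": {"x"}, "b": {"y"}}}
	fmt.Println(len(c.snapshot())) // 2
}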
@@ -552,28 +561,12 @@ func (con *Consensus) buildEmptyBlock(b *types.Block, parent *types.Block) {
b.Acks = common.NewSortedHashes(common.Hashes{parent.Hash})
}
-// setupConfigs is called by SyncBlocks with blocks from compaction chain. In
-// the first time, setupConfigs setups from round 0.
-func (con *Consensus) setupConfigs(blocks []*types.Block) {
- // Find max round in blocks.
- var maxRound uint64
- for _, b := range blocks {
- if b.Position.Round > maxRound {
- maxRound = b.Position.Round
- }
- }
- // Get configs from governance.
- //
- // In fullnode, the notification of new round is yet another TX, which
- // needs to be executed after corresponding block delivered. Thus, the
- // configuration for 'maxRound + core.ConfigRoundShift' won't be ready when
- // seeing this block.
- untilRound := maxRound + core.ConfigRoundShift - 1
+func (con *Consensus) setupConfigsUntilRound(round uint64) {
curMaxNumChains := uint32(0)
func() {
con.lock.Lock()
defer con.lock.Unlock()
- for r := uint64(len(con.configs)); r <= untilRound; r++ {
+ for r := uint64(len(con.configs)); r <= round; r++ {
cfg := utils.GetConfigWithPanic(con.gov, r, con.logger)
con.configs = append(con.configs, cfg)
con.roundBeginTimes = append(
@@ -583,19 +576,41 @@ func (con *Consensus) setupConfigs(blocks []*types.Block) {
curMaxNumChains = cfg.NumChains
}
}
+ // Notify core.Lattice for new configs.
+ if con.lattice != nil {
+ for con.latticeLastRound+1 <= round {
+ con.latticeLastRound++
+ if err := con.lattice.AppendConfig(
+ con.latticeLastRound,
+ con.configs[con.latticeLastRound]); err != nil {
+ panic(err)
+ }
+ }
+ }
}()
con.resizeByNumChains(curMaxNumChains)
- // Notify core.Lattice for new configs.
- if con.lattice != nil {
- for con.latticeLastRound+1 <= untilRound {
- con.latticeLastRound++
- if err := con.lattice.AppendConfig(
- con.latticeLastRound,
- con.configs[con.latticeLastRound]); err != nil {
- panic(err)
- }
+}
+
+// setupConfigs is called by SyncBlocks with blocks from compaction chain. In
+// the first time, setupConfigs setups from round 0.
+func (con *Consensus) setupConfigs(blocks []*types.Block) {
+ // Find max round in blocks.
+ var maxRound uint64
+ for _, b := range blocks {
+ if b.Position.Round > maxRound {
+ maxRound = b.Position.Round
}
}
+ con.logger.Info("syncer setupConfigs",
+ "max", maxRound,
+ "lattice", con.latticeLastRound)
+ // Get configs from governance.
+ //
+ // In fullnode, the notification of new round is yet another TX, which
+ // needs to be executed after corresponding block delivered. Thus, the
+ // configuration for 'maxRound + core.ConfigRoundShift' won't be ready when
+ // seeing this block.
+ con.setupConfigsUntilRound(maxRound + core.ConfigRoundShift - 1)
}
// resizeByNumChains resizes fake lattice and agreement if numChains increases.
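
Note on the refactor above: the extend-configs loop is factored into setupConfigsUntilRound so startCRSMonitor can reuse it (see the last hunk below). A condensed sketch of the loop, under the assumption that getConfig and roundInterval stand in for utils.GetConfigWithPanic and the per-round duration; the real method additionally appends each new config to con.lattice under the same lock:

package main

import (
	"fmt"
	"time"
)

type config struct{ roundInterval time.Duration }

// getConfig stands in for utils.GetConfigWithPanic.
func getConfig(round uint64) *config {
	return &config{roundInterval: 100 * time.Millisecond}
}

// setupConfigsUntilRound appends one config per missing round and derives
// each round's begin time from the previous one, as in the loop above.
func setupConfigsUntilRound(configs []*config, begins []time.Time, round uint64) ([]*config, []time.Time) {
	for r := uint64(len(configs)); r <= round; r++ {
		cfg := getConfig(r)
		configs = append(configs, cfg)
		begins = append(begins, begins[len(begins)-1].Add(cfg.roundInterval))
	}
	return configs, begins
}

func main() {
	configs := []*config{getConfig(0)}
	begins := []time.Time{time.Now()}
	configs, begins = setupConfigsUntilRound(configs, begins, 4)
	fmt.Println(len(configs), len(begins)) // 5 5
}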
@@ -648,6 +663,28 @@ func (con *Consensus) startAgreement(numChains uint32) {
}()
}
+func (con *Consensus) cacheRandomnessResult(r *types.BlockRandomnessResult) {
+ // We only have to cache randomness result after cutting round.
+ if r.Position.Round < func() uint64 {
+ con.lock.RLock()
+ defer con.lock.RUnlock()
+ return con.agreementRoundCut
+ }() {
+ return
+ }
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ if old, exists := con.randomnessResults[r.BlockHash]; exists {
+ if bytes.Compare(old.Randomness, r.Randomness) != 0 {
+ panic(fmt.Errorf("receive different randomness result: %s, %s",
+ r.BlockHash.String()[:6], &r.Position))
+ }
+ // We don't have to assign the map again.
+ return
+ }
+ con.randomnessResults[r.BlockHash] = r
+}
+
// startNetwork starts network for receiving blocks and agreement results.
func (con *Consensus) startNetwork() {
go func() {
@@ -664,13 +701,7 @@ func (con *Consensus) startNetwork() {
case *types.AgreementResult:
pos = v.Position
case *types.BlockRandomnessResult:
- func() {
- con.lock.Lock()
- defer con.lock.Unlock()
- if v.Position.Round >= con.agreementRoundCut {
- con.randomnessResults = append(con.randomnessResults, v)
- }
- }()
+ con.cacheRandomnessResult(v)
continue Loop
default:
continue Loop
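
Note on the hunk above: routing *types.BlockRandomnessResult through cacheRandomnessResult makes duplicate deliveries idempotent, since the map is keyed by block hash, while conflicting payloads for the same block are rejected loudly. A sketch of that check-then-insert rule with simplified stand-in types; the real method panics where this one returns an error:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

type randomness struct {
	blockHash string
	payload   []byte
}

type store struct {
	lock  sync.Mutex
	cache map[string]*randomness
}

// put ignores an exact duplicate and rejects a conflicting payload for the
// same block hash.
func (s *store) put(r *randomness) error {
	s.lock.Lock()
	defer s.lock.Unlock()
	if old, exists := s.cache[r.blockHash]; exists {
		if !bytes.Equal(old.payload, r.payload) {
			return fmt.Errorf("conflicting randomness for block %s", r.blockHash)
		}
		return nil // duplicate with identical payload: no-op
	}
	s.cache[r.blockHash] = r
	return nil
}

func main() {
	s := &store{cache: make(map[string]*randomness)}
	fmt.Println(s.put(&randomness{"b1", []byte{1}})) // <nil>
	fmt.Println(s.put(&randomness{"b1", []byte{1}})) // <nil> (idempotent)
	fmt.Println(s.put(&randomness{"b1", []byte{2}})) // conflict reported
}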
@@ -697,12 +728,13 @@ func (con *Consensus) startCRSMonitor() {
var lastNotifiedRound uint64
// Notify all agreements for new CRS.
notifyNewCRS := func(round uint64) {
+ con.setupConfigsUntilRound(round)
+ con.lock.Lock()
+ defer con.lock.Unlock()
if round == lastNotifiedRound {
return
}
- con.logger.Debug("CRS is ready", "round", round)
- con.lock.RLock()
- defer con.lock.RUnlock()
+ con.logger.Info("CRS is ready", "round", round)
lastNotifiedRound = round
for _, a := range con.agreements {
a.inputChan <- round
@@ -719,8 +751,13 @@ func (con *Consensus) startCRSMonitor() {
}
// Notify agreement modules for the latest round that CRS is
// available if the round is not notified yet.
- if (con.gov.CRS(lastNotifiedRound+1) != common.Hash{}) {
- notifyNewCRS(lastNotifiedRound + 1)
+ checked := lastNotifiedRound + 1
+ for (con.gov.CRS(checked) != common.Hash{}) {
+ checked++
+ }
+ checked--
+ if checked > lastNotifiedRound {
+ notifyNewCRS(checked)
}
}
}()
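
Note on the final hunk: the old code probed only lastNotifiedRound + 1, so the monitor advanced at most one round per tick; the new loop walks forward while a CRS is published and notifies once for the highest ready round. A sketch of that catch-up scan, where crsReady is a hypothetical stand-in for con.gov.CRS(round) != common.Hash{}:

package main

import "fmt"

// latestReadyRound walks forward while a CRS is published and reports the
// highest ready round, so one tick can catch up several rounds at once.
func latestReadyRound(lastNotified uint64, crsReady func(uint64) bool) (uint64, bool) {
	checked := lastNotified + 1
	for crsReady(checked) {
		checked++
	}
	checked-- // step back to the last round whose CRS was present
	return checked, checked > lastNotified
}

func main() {
	ready := func(r uint64) bool { return r <= 5 } // CRS published through round 5
	if round, ok := latestReadyRound(2, ready); ok {
		fmt.Println("notify round", round) // notify round 5
	}
}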