author     Mission Liao <mission.liao@dexon.org>   2018-10-17 16:33:51 +0800
committer  GitHub <noreply@github.com>             2018-10-17 16:33:51 +0800
commit     6f1df59f8b32d30d5a7a0d9449f2dca698a8ac39 (patch)
tree       20dbf51d8185f01bfdca924b6c220c63505d2862
parent     1fafb0a4b992ff225796d01f2271c2573967abae (diff)
core: genesis consensus timestamp (#217)
* Refine the initial value for empty time slots.
* Fix DATA RACE: networkConnection is reset for each test, but our
  Consensus instances are not stopped when a test finishes, so they may
  keep using the network interface for a while.
-rw-r--r--  core/consensus-timestamp.go       | 37
-rw-r--r--  core/consensus-timestamp_test.go  |  8
-rw-r--r--  core/consensus_test.go            | 25
-rw-r--r--  core/lattice.go                   |  4
-rw-r--r--  integration_test/node.go          |  2
-rw-r--r--  integration_test/utils.go         |  2
6 files changed, 48 insertions, 30 deletions
diff --git a/core/consensus-timestamp.go b/core/consensus-timestamp.go
index e04cd07..9551328 100644
--- a/core/consensus-timestamp.go
+++ b/core/consensus-timestamp.go
@@ -31,6 +31,9 @@ type consensusTimestamp struct {
// This part keeps configs for each round.
numChainsForRounds []uint32
numChainsRoundBase uint64
+
+ // dMoment represents the genesis time.
+ dMoment time.Time
}
var (
@@ -40,10 +43,12 @@ var (
)
// newConsensusTimestamp creates a timestamper object.
-func newConsensusTimestamp(numChains uint32) *consensusTimestamp {
+func newConsensusTimestamp(
+ dMoment time.Time, numChains uint32) *consensusTimestamp {
return &consensusTimestamp{
numChainsForRounds: []uint32{numChains},
numChainsRoundBase: uint64(0),
+ dMoment: dMoment,
}
}
@@ -59,29 +64,31 @@ func (ct *consensusTimestamp) appendConfig(
return nil
}
+func (ct *consensusTimestamp) getNumChains(round uint64) uint32 {
+ roundIndex := round - ct.numChainsRoundBase
+ return ct.numChainsForRounds[roundIndex]
+}
+
// processBlocks is the entry function.
func (ct *consensusTimestamp) processBlocks(blocks []*types.Block) (err error) {
for _, block := range blocks {
- if !block.IsGenesis() {
- round := block.Position.Round - ct.numChainsRoundBase
- ts := ct.chainTimestamps[:ct.numChainsForRounds[round]]
- if block.Finalization.Timestamp, err = getMedianTime(ts); err != nil {
- return
- }
- } else {
- block.Finalization.Timestamp = time.Time{}
+ numChains := ct.getNumChains(block.Position.Round)
+ // Fill empty time slots with the d-moment. This also means that each
+ // time we increase the number of chains, the increase can't exceed
+ // 49% of the previous number of chains.
+ for uint32(len(ct.chainTimestamps)) < numChains {
+ ct.chainTimestamps = append(ct.chainTimestamps, ct.dMoment)
}
-
- for uint32(len(ct.chainTimestamps)) <= block.Position.ChainID {
- ct.chainTimestamps = append(ct.chainTimestamps, time.Time{})
+ ts := ct.chainTimestamps[:numChains]
+ if block.Finalization.Timestamp, err = getMedianTime(ts); err != nil {
+ return
}
-
if !block.Timestamp.After(ct.chainTimestamps[block.Position.ChainID]) {
return ErrTimestampNotIncrease
}
-
ct.chainTimestamps[block.Position.ChainID] = block.Timestamp
-
+ // Purge configs for older rounds; the rounds of blocks delivered from
+ // total ordering only increase.
if block.Position.Round > ct.numChainsRoundBase {
ct.numChainsRoundBase++
ct.numChainsForRounds = ct.numChainsForRounds[1:]
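
To make the intent of the new processBlocks path concrete, here is a minimal,
self-contained sketch (not the project's actual code; medianTime below is an
illustrative stand-in for core's getMedianTime): empty chain slots are
pre-filled with dMoment, so the median consensus timestamp starts at the
genesis time instead of the zero time.Time{}.

package main

import (
	"fmt"
	"sort"
	"time"
)

// medianTime is an illustrative stand-in for core's getMedianTime: it
// returns the lower-middle element of the sorted timestamps.
func medianTime(ts []time.Time) time.Time {
	sorted := append([]time.Time(nil), ts...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].Before(sorted[j]) })
	return sorted[(len(sorted)-1)/2]
}

func main() {
	dMoment := time.Date(2018, time.October, 17, 8, 0, 0, 0, time.UTC)
	numChains := 4

	// Empty time slots start at dMoment instead of the zero time.Time{}.
	chainTimestamps := make([]time.Time, 0, numChains)
	for len(chainTimestamps) < numChains {
		chainTimestamps = append(chainTimestamps, dMoment)
	}

	// Before any block is processed, the median is the genesis time.
	fmt.Println("genesis median:", medianTime(chainTimestamps))

	// One chain delivering a later block moves only its own slot; the
	// median stays anchored at dMoment until a majority of slots move.
	chainTimestamps[0] = dMoment.Add(2 * time.Second)
	fmt.Println("after one block:", medianTime(chainTimestamps))
}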
diff --git a/core/consensus-timestamp_test.go b/core/consensus-timestamp_test.go
index a6cc435..49002aa 100644
--- a/core/consensus-timestamp_test.go
+++ b/core/consensus-timestamp_test.go
@@ -94,7 +94,7 @@ func (s *ConsensusTimestampTest) TestTimestampPartition() {
chainNum := 19
sigma := 100 * time.Millisecond
totalTimestamps := make([]time.Time, 0)
- ct := newConsensusTimestamp(uint32(chainNum))
+ ct := newConsensusTimestamp(time.Time{}, uint32(chainNum))
totalBlockNum := 0
for _, blockNum := range blockNums {
totalBlockNum += blockNum
@@ -110,7 +110,7 @@ func (s *ConsensusTimestampTest) TestTimestampPartition() {
totalChain = append(totalChain, chain...)
totalTimestamps = append(totalTimestamps, timestamps...)
}
- ct2 := newConsensusTimestamp(uint32(chainNum))
+ ct2 := newConsensusTimestamp(time.Time{}, uint32(chainNum))
err := ct2.processBlocks(totalChain)
s.Require().NoError(err)
timestamps2 := s.extractTimestamps(totalChain)
@@ -120,7 +120,7 @@ func (s *ConsensusTimestampTest) TestTimestampPartition() {
func (s *ConsensusTimestampTest) TestTimestampIncrease() {
chainNum := 19
sigma := 100 * time.Millisecond
- ct := newConsensusTimestamp(uint32(chainNum))
+ ct := newConsensusTimestamp(time.Time{}, uint32(chainNum))
chain := s.generateBlocksWithTimestamp(1000, chainNum, time.Second, sigma)
err := ct.processBlocks(chain)
s.Require().NoError(err)
@@ -129,7 +129,7 @@ func (s *ConsensusTimestampTest) TestTimestampIncrease() {
s.False(timestamps[i].Before(timestamps[i-1]))
}
// Test if the processBlocks is stable.
- ct2 := newConsensusTimestamp(uint32(chainNum))
+ ct2 := newConsensusTimestamp(time.Time{}, uint32(chainNum))
ct2.processBlocks(chain)
s.Require().NoError(err)
timestamps2 := s.extractTimestamps(chain)
diff --git a/core/consensus_test.go b/core/consensus_test.go
index 555e7dd..39aac7a 100644
--- a/core/consensus_test.go
+++ b/core/consensus_test.go
@@ -135,8 +135,8 @@ type ConsensusTestSuite struct {
conn *networkConnection
}
-func (s *ConsensusTestSuite) SetupTest() {
- s.conn = &networkConnection{
+func (s *ConsensusTestSuite) newNetworkConnection() *networkConnection {
+ return &networkConnection{
s: s,
cons: make(map[types.NodeID]*Consensus),
}
@@ -157,17 +157,20 @@ func (s *ConsensusTestSuite) prepareGenesisBlock(
}
func (s *ConsensusTestSuite) prepareConsensus(
- dMoment time.Time, gov *test.Governance, prvKey crypto.PrivateKey) (
+ dMoment time.Time,
+ gov *test.Governance,
+ prvKey crypto.PrivateKey,
+ conn *networkConnection) (
*test.App, *Consensus) {
app := test.NewApp()
db, err := blockdb.NewMemBackedBlockDB()
s.Require().Nil(err)
nID := types.NewNodeID(prvKey.PublicKey())
- network := s.conn.newNetwork(nID)
+ network := conn.newNetwork(nID)
con := NewConsensus(dMoment, app, gov, db,
network, prvKey)
- s.conn.setCon(nID, con)
+ conn.setCon(nID, con)
return app, con
}
@@ -191,6 +194,7 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
req = s.Require()
prvKeys = gov.PrivateKeys()
nodes []types.NodeID
+ conn = s.newNetworkConnection()
)
s.Require().Nil(err)
// Setup core.Consensus and test.App.
@@ -201,7 +205,7 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() {
dMoment := time.Now().UTC()
for _, key := range prvKeys {
nID := types.NewNodeID(key.PublicKey())
- app, con := s.prepareConsensus(dMoment, gov, key)
+ app, con := s.prepareConsensus(dMoment, gov, key, conn)
objs[nID] = &struct {
app *test.App
con *Consensus
@@ -409,13 +413,14 @@ func (s *ConsensusTestSuite) TestPrepareBlock() {
req = s.Require()
nodes []types.NodeID
prvKeys = gov.PrivateKeys()
+ conn = s.newNetworkConnection()
)
s.Require().Nil(err)
dMoment := time.Now().UTC()
// Setup core.Consensus and test.App.
cons := map[types.NodeID]*Consensus{}
for _, key := range prvKeys {
- _, con := s.prepareConsensus(dMoment, gov, key)
+ _, con := s.prepareConsensus(dMoment, gov, key, conn)
nID := types.NewNodeID(key.PublicKey())
cons[nID] = con
nodes = append(nodes, nID)
@@ -448,10 +453,11 @@ func (s *ConsensusTestSuite) TestPrepareBlock() {
}
func (s *ConsensusTestSuite) TestPrepareGenesisBlock() {
+ conn := s.newNetworkConnection()
gov, err := test.NewGovernance(4, time.Second)
s.Require().NoError(err)
prvKey := gov.PrivateKeys()[0]
- _, con := s.prepareConsensus(time.Now().UTC(), gov, prvKey)
+ _, con := s.prepareConsensus(time.Now().UTC(), gov, prvKey, conn)
block := &types.Block{
Position: types.Position{ChainID: 0},
}
@@ -467,6 +473,7 @@ func (s *ConsensusTestSuite) TestDKGCRS() {
n = 7
lambda = 100
}
+ conn := s.newNetworkConnection()
gov, err := test.NewGovernance(n, lambda*time.Millisecond)
s.Require().Nil(err)
gov.RoundInterval = 200 * lambda * time.Millisecond
@@ -474,7 +481,7 @@ func (s *ConsensusTestSuite) TestDKGCRS() {
cons := map[types.NodeID]*Consensus{}
dMoment := time.Now().UTC()
for _, key := range prvKeys {
- _, con := s.prepareConsensus(dMoment, gov, key)
+ _, con := s.prepareConsensus(dMoment, gov, key, conn)
nID := types.NewNodeID(key.PublicKey())
cons[nID] = con
con.cfgModule.registerDKG(uint64(0), n/3+1)
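
The consensus_test.go changes above address the DATA RACE from the commit
message by giving each test its own networkConnection instead of resetting a
shared suite field that a still-running Consensus from an earlier test might
read. A minimal sketch of the pattern, with simplified stand-in types (not the
actual test suite API):

package main

import "fmt"

// network is a simplified stand-in for the networkConnection test helper.
type network struct{ owner string }

// consensus is a stand-in for core.Consensus: it keeps the connection it
// was constructed with, so it can never observe a later test's connection.
type consensus struct{ net *network }

// prepareConsensus mirrors the reshaped signature in consensus_test.go,
// which now takes the connection explicitly instead of reading a suite field.
func prepareConsensus(net *network, node string) *consensus {
	fmt.Printf("%s: %s wired to its own connection\n", net.owner, node)
	return &consensus{net: net}
}

func main() {
	// Each test builds a fresh connection (cf. newNetworkConnection), so
	// nothing is mutated underneath a Consensus that has not stopped yet.
	for _, test := range []string{"TestSimpleDeliverBlock", "TestPrepareBlock"} {
		conn := &network{owner: test}
		_ = prepareConsensus(conn, "node-0")
	}
}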
diff --git a/core/lattice.go b/core/lattice.go
index 442214b..ea5286d 100644
--- a/core/lattice.go
+++ b/core/lattice.go
@@ -58,7 +58,7 @@ func NewLattice(
pool: newBlockPool(cfg.NumChains),
data: newLatticeData(db, dataConfig),
toModule: newTotalOrdering(toConfig),
- ctModule: newConsensusTimestamp(cfg.NumChains),
+ ctModule: newConsensusTimestamp(dMoment, cfg.NumChains),
}
return
}
@@ -140,6 +140,8 @@ func (s *Lattice) ProcessBlock(
s.lock.Lock()
defer s.lock.Unlock()
if inLattice, err = s.data.addBlock(input); err != nil {
+ // TODO(mission): if the sanity check fails with "acking block doesn't
+ // exists", we should keep the block in a pool.
return
}
// TODO(mission): remove this hack, BA related stuffs should not
diff --git a/integration_test/node.go b/integration_test/node.go
index 7e230a9..cf8be8c 100644
--- a/integration_test/node.go
+++ b/integration_test/node.go
@@ -81,11 +81,11 @@ func NewNode(
gov core.Governance,
db blockdb.BlockDatabase,
privateKey crypto.PrivateKey,
+ dMoment time.Time,
networkLatency test.LatencyModel,
proposingLatency test.LatencyModel) *Node {
var (
- dMoment = time.Now().UTC()
chainID = uint32(math.MaxUint32)
governanceConfig = gov.Configuration(0)
nodeSetKeys = gov.NodeSet(0)
diff --git a/integration_test/utils.go b/integration_test/utils.go
index 6c665ad..d9c4995 100644
--- a/integration_test/utils.go
+++ b/integration_test/utils.go
@@ -25,6 +25,7 @@ func PrepareNodes(
if err != nil {
return
}
+ dMoment := time.Now().UTC()
for _, prvKey := range gov.PrivateKeys() {
nID := types.NewNodeID(prvKey.PublicKey())
apps[nID] = test.NewApp()
@@ -37,6 +38,7 @@ func PrepareNodes(
gov,
dbs[nID],
prvKey,
+ dMoment,
networkLatency,
proposingLatency,
)