aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xbin/install_tools.sh2
-rw-r--r--core/consensus-timestamp.go6
-rw-r--r--core/consensus-timestamp_test.go10
-rw-r--r--core/consensus.go16
-rw-r--r--core/lattice-data.go522
-rw-r--r--core/lattice-data_test.go630
-rw-r--r--core/lattice.go13
-rw-r--r--core/lattice_test.go20
-rw-r--r--core/total-ordering.go8
-rw-r--r--core/total-ordering_test.go27
-rw-r--r--core/types/position.go35
-rw-r--r--core/types/position_test.go98
-rw-r--r--core/utils.go8
-rw-r--r--integration_test/node.go4
14 files changed, 802 insertions, 597 deletions
diff --git a/bin/install_tools.sh b/bin/install_tools.sh
index e22056b..ca0a04b 100755
--- a/bin/install_tools.sh
+++ b/bin/install_tools.sh
@@ -4,5 +4,5 @@ if ! which dep >/dev/null 2>&1; then
go get -u github.com/golang/dep/cmd/dep
fi
if ! which golint >/dev/null 2>&1; then
- go get -u github.com/golang/lint/golint
+ go get -u golang.org/x/lint/golint
fi
diff --git a/core/consensus-timestamp.go b/core/consensus-timestamp.go
index b21b3ed..55a6a9e 100644
--- a/core/consensus-timestamp.go
+++ b/core/consensus-timestamp.go
@@ -30,7 +30,6 @@ type consensusTimestamp struct {
// This part keeps configs for each round.
numChainsForRounds []uint32
- minRound uint64
}
var (
@@ -40,10 +39,9 @@ var (
)
// newConsensusTimestamp create timestamper object.
-func newConsensusTimestamp(round uint64, numChains uint32) *consensusTimestamp {
+func newConsensusTimestamp(numChains uint32) *consensusTimestamp {
return &consensusTimestamp{
numChainsForRounds: []uint32{numChains},
- minRound: round,
}
}
@@ -52,7 +50,7 @@ func newConsensusTimestamp(round uint64, numChains uint32) *consensusTimestamp {
func (ct *consensusTimestamp) appendConfig(
round uint64, config *types.Config) error {
- if round != ct.minRound+uint64(len(ct.numChainsForRounds)) {
+ if round != uint64(len(ct.numChainsForRounds)) {
return ErrRoundNotIncreasing
}
ct.numChainsForRounds = append(ct.numChainsForRounds, config.NumChains)
diff --git a/core/consensus-timestamp_test.go b/core/consensus-timestamp_test.go
index 7128f16..a6cc435 100644
--- a/core/consensus-timestamp_test.go
+++ b/core/consensus-timestamp_test.go
@@ -90,12 +90,11 @@ func (s *ConsensusTimestampTest) extractTimestamps(
// TestTimestampPartition verifies that processing segments of compatction chain
// should have the same result as processing the whole chain at once.
func (s *ConsensusTimestampTest) TestTimestampPartition() {
- var round uint64
blockNums := []int{50, 100, 30}
chainNum := 19
sigma := 100 * time.Millisecond
totalTimestamps := make([]time.Time, 0)
- ct := newConsensusTimestamp(round, uint32(chainNum))
+ ct := newConsensusTimestamp(uint32(chainNum))
totalBlockNum := 0
for _, blockNum := range blockNums {
totalBlockNum += blockNum
@@ -111,7 +110,7 @@ func (s *ConsensusTimestampTest) TestTimestampPartition() {
totalChain = append(totalChain, chain...)
totalTimestamps = append(totalTimestamps, timestamps...)
}
- ct2 := newConsensusTimestamp(round, uint32(chainNum))
+ ct2 := newConsensusTimestamp(uint32(chainNum))
err := ct2.processBlocks(totalChain)
s.Require().NoError(err)
timestamps2 := s.extractTimestamps(totalChain)
@@ -119,10 +118,9 @@ func (s *ConsensusTimestampTest) TestTimestampPartition() {
}
func (s *ConsensusTimestampTest) TestTimestampIncrease() {
- var round uint64
chainNum := 19
sigma := 100 * time.Millisecond
- ct := newConsensusTimestamp(round, uint32(chainNum))
+ ct := newConsensusTimestamp(uint32(chainNum))
chain := s.generateBlocksWithTimestamp(1000, chainNum, time.Second, sigma)
err := ct.processBlocks(chain)
s.Require().NoError(err)
@@ -131,7 +129,7 @@ func (s *ConsensusTimestampTest) TestTimestampIncrease() {
s.False(timestamps[i].Before(timestamps[i-1]))
}
// Test if the processBlocks is stable.
- ct2 := newConsensusTimestamp(round, uint32(chainNum))
+ ct2 := newConsensusTimestamp(uint32(chainNum))
ct2.processBlocks(chain)
s.Require().NoError(err)
timestamps2 := s.extractTimestamps(chain)
diff --git a/core/consensus.go b/core/consensus.go
index dcc6f38..90a8f09 100644
--- a/core/consensus.go
+++ b/core/consensus.go
@@ -232,15 +232,15 @@ func NewConsensus(
prv crypto.PrivateKey) *Consensus {
// TODO(w): load latest blockHeight from DB, and use config at that height.
- var round uint64
+ var (
+ round uint64
+ dMoment = time.Now().UTC()
+ )
config := gov.Configuration(round)
- // TODO(w): notarySet is different for each chain, need to write a
- // GetNotarySetForChain(nodeSet, chainID, crs) function to get the
- // correct notary set for a given chain.
nodeSetCache := NewNodeSetCache(gov)
crs := gov.CRS(round)
// Setup acking by information returned from Governace.
- nodes, err := nodeSetCache.GetNodeSet(0)
+ nodes, err := nodeSetCache.GetNodeSet(round)
if err != nil {
panic(err)
}
@@ -253,7 +253,7 @@ func NewConsensus(
// Setup nonblocking module.
nbModule := newNonBlocking(app, debugApp)
// Init lattice.
- lattice := NewLattice(round, config, authModule, nbModule, nbModule, db)
+ lattice := NewLattice(dMoment, config, authModule, nbModule, nbModule, db)
// Init configuration chain.
ID := types.NewNodeID(prv.PublicKey())
cfgModule := newConfigurationChain(
@@ -268,7 +268,7 @@ func NewConsensus(
gov)
// Register DKG for the initial round. This is a temporary function call for
// simulation.
- cfgModule.registerDKG(0, int(config.DKGSetSize)/3)
+ cfgModule.registerDKG(round, int(config.DKGSetSize)/3)
// Construct Consensus instance.
con := &Consensus{
ID: ID,
@@ -279,7 +279,7 @@ func NewConsensus(
gov: gov,
db: db,
network: network,
- tickerObj: newTicker(gov, 0, TickerBA),
+ tickerObj: newTicker(gov, round, TickerBA),
dkgReady: sync.NewCond(&sync.Mutex{}),
cfgModule: cfgModule,
nodeSetCache: nodeSetCache,
diff --git a/core/lattice-data.go b/core/lattice-data.go
index a674f3c..f0bda11 100644
--- a/core/lattice-data.go
+++ b/core/lattice-data.go
@@ -20,6 +20,7 @@ package core
import (
"errors"
"fmt"
+ "sort"
"time"
"github.com/dexon-foundation/dexon-consensus-core/common"
@@ -30,60 +31,87 @@ import (
// Errors for sanity check error.
var (
ErrAckingBlockNotExists = fmt.Errorf("acking block not exists")
- ErrInvalidParentChain = fmt.Errorf("invalid parent chain")
ErrDuplicatedAckOnOneChain = fmt.Errorf("duplicated ack on one chain")
- ErrChainStatusCorrupt = fmt.Errorf("chain status corrupt")
ErrInvalidChainID = fmt.Errorf("invalid chain id")
ErrInvalidProposerID = fmt.Errorf("invalid proposer id")
- ErrInvalidTimestamp = fmt.Errorf("invalid timestamp")
ErrInvalidWitness = fmt.Errorf("invalid witness data")
ErrInvalidBlock = fmt.Errorf("invalid block")
- ErrForkBlock = fmt.Errorf("fork block")
ErrNotAckParent = fmt.Errorf("not ack parent")
ErrDoubleAck = fmt.Errorf("double ack")
ErrAcksNotSorted = fmt.Errorf("acks not sorted")
ErrInvalidBlockHeight = fmt.Errorf("invalid block height")
ErrAlreadyInLattice = fmt.Errorf("block already in lattice")
ErrIncorrectBlockTime = fmt.Errorf("block timestamp is incorrect")
+ ErrInvalidRoundID = fmt.Errorf("invalid round id")
ErrUnknownRoundID = fmt.Errorf("unknown round id")
+ ErrRoundOutOfRange = fmt.Errorf("round out of range")
+ ErrRoundNotSwitch = fmt.Errorf("round not switch")
+ ErrNotGenesisBlock = fmt.Errorf("not a genesis block")
+ ErrUnexpectedGenesisBlock = fmt.Errorf("unexpected genesis block")
)
// Errors for method usage
var (
- ErrRoundNotIncreasing = errors.New("round not increasing")
- ErrPurgedBlockNotFound = errors.New("purged block not found")
+ ErrRoundNotIncreasing = errors.New("round not increasing")
+ ErrPurgedBlockNotFound = errors.New("purged block not found")
+ ErrPurgeNotDeliveredBlock = errors.New("not purge from head")
)
// latticeDataConfig is the configuration for latticeData for each round.
type latticeDataConfig struct {
// Number of chains between runs
- prevNumChains uint32
- numChains uint32
+ numChains uint32
// Block interval specifies reasonable time difference between
// parent/child blocks.
minBlockTimeInterval time.Duration
maxBlockTimeInterval time.Duration
- // the range of clean cut at the beginning of this round.
- cleanCutRange int
+ // roundBeginTime is the beginning of round, as local time.
+ roundBeginTime time.Time
+ roundInterval time.Duration
+ // roundEndTime is a cache for begin + interval.
+ roundEndTime time.Time
+}
+
+// Initiate latticeDataConfig from types.Config.
+func (config *latticeDataConfig) fromConfig(cfg *types.Config) {
+ config.numChains = cfg.NumChains
+ config.minBlockTimeInterval = cfg.MinBlockInterval
+ config.maxBlockTimeInterval = cfg.MaxBlockInterval
+ config.roundInterval = cfg.RoundInterval
+}
+
+func (config *latticeDataConfig) setRoundBeginTime(begin time.Time) {
+ config.roundBeginTime = begin
+ config.roundEndTime = begin.Add(config.roundInterval)
+}
+
+// Check if timestamp of a block is valid according to a reference time.
+func (config *latticeDataConfig) isValidBlockTime(
+ b *types.Block, ref time.Time) bool {
+ return !(b.Timestamp.Before(ref.Add(config.minBlockTimeInterval)) ||
+ b.Timestamp.After(ref.Add(config.maxBlockTimeInterval)))
+}
+
+// isValidGenesisBlockTime checks whether a timestamp is valid for a genesis block.
+func (config *latticeDataConfig) isValidGenesisBlockTime(b *types.Block) bool {
+ return !(b.Timestamp.Before(config.roundBeginTime) || b.Timestamp.After(
+ config.roundBeginTime.Add(config.maxBlockTimeInterval)))
+}
+
+// newGenesisLatticeDataConfig constructs a latticeDataConfig instance.
+func newGenesisLatticeDataConfig(
+ dMoment time.Time, config *types.Config) *latticeDataConfig {
+ c := &latticeDataConfig{}
+ c.fromConfig(config)
+ c.setRoundBeginTime(dMoment)
+ return c
}
// newLatticeDataConfig constructs a latticeDataConfig instance.
func newLatticeDataConfig(prev, cur *types.Config) *latticeDataConfig {
- config := &latticeDataConfig{
- numChains: cur.NumChains,
- minBlockTimeInterval: cur.MinBlockInterval,
- maxBlockTimeInterval: cur.MaxBlockInterval,
- }
- if prev != nil {
- config.prevNumChains = prev.NumChains
- if prev.K != cur.K ||
- prev.PhiRatio != cur.PhiRatio ||
- prev.NumChains != cur.NumChains {
- // K plus 2 is a magic from GOD.
- config.cleanCutRange = prev.K + 2
- }
- }
- return config
+ c := &latticeDataConfig{}
+ c.fromConfig(cur)
+ return c
}
// latticeData is a module for storing lattice.
@@ -95,56 +123,29 @@ type latticeData struct {
// blockByHash stores blocks, indexed by block hash.
blockByHash map[common.Hash]*types.Block
// This stores configuration for each round.
- configs []*latticeDataConfig
- minRound uint64
+ configs []*latticeDataConfig
}
// newLatticeData creates a new latticeData struct.
-func newLatticeData(db blockdb.Reader,
- round uint64, config *latticeDataConfig) (data *latticeData) {
+func newLatticeData(
+ db blockdb.Reader, genesisConfig *latticeDataConfig) (data *latticeData) {
data = &latticeData{
db: db,
- chains: make([]*chainStatus, config.numChains),
+ chains: make([]*chainStatus, genesisConfig.numChains),
blockByHash: make(map[common.Hash]*types.Block),
- configs: []*latticeDataConfig{config},
- minRound: round,
+ configs: []*latticeDataConfig{genesisConfig},
}
for i := range data.chains {
data.chains[i] = &chainStatus{
- ID: uint32(i),
- blocks: []*types.Block{},
- nextAck: make([]uint64, config.numChains),
+ ID: uint32(i),
+ blocks: []*types.Block{},
+ lastAckPos: make([]*types.Position, genesisConfig.numChains),
}
}
return
}
-func (data *latticeData) sanityCheck(b *types.Block) error {
- config := data.getConfig(b.Position.Round)
- if config == nil {
- return ErrUnknownRoundID
- }
- // Check if the chain id is valid.
- if b.Position.ChainID >= config.numChains {
- return ErrInvalidChainID
- }
-
- // TODO(mission): Check if its proposer is in validator set somewhere,
- // lattice doesn't have to know about node set.
-
- // Check if it forks
- if bInLattice := data.chains[b.Position.ChainID].getBlockByHeight(
- b.Position.Height); bInLattice != nil {
-
- if b.Hash != bInLattice.Hash {
- return ErrForkBlock
- }
- return ErrAlreadyInLattice
- }
- // TODO(mission): check if fork by loading blocks from DB if the block
- // doesn't exists because forking is serious.
-
- // Check if it acks older blocks.
+func (data *latticeData) checkAckingRelations(b *types.Block) error {
acksByChainID := make(map[uint32]struct{}, len(data.chains))
for _, hash := range b.Acks {
bAck, err := data.findBlock(hash)
@@ -154,8 +155,15 @@ func (data *latticeData) sanityCheck(b *types.Block) error {
}
return err
}
- if bAck.Position.Height <
- data.chains[bAck.Position.ChainID].nextAck[b.Position.ChainID] {
+ // Check if it acks blocks from old rounds; the allowed round difference
+ // is 1.
+ if DiffUint64(bAck.Position.Round, b.Position.Round) > 1 {
+ return ErrRoundOutOfRange
+ }
+ // Check if it acks older blocks than blocks on the same chain.
+ lastAckPos :=
+ data.chains[bAck.Position.ChainID].lastAckPos[b.Position.ChainID]
+ if lastAckPos != nil && !bAck.Position.Newer(lastAckPos) {
return ErrDoubleAck
}
// Check if ack two blocks on the same chain. This would need
@@ -165,39 +173,94 @@ func (data *latticeData) sanityCheck(b *types.Block) error {
}
acksByChainID[bAck.Position.ChainID] = struct{}{}
}
+ return nil
+}
- // Check non-genesis blocks if it acks its parent.
- if b.Position.Height > 0 {
+func (data *latticeData) sanityCheck(b *types.Block) error {
+ // TODO(mission): Check if its proposer is in validator set somewhere,
+ // lattice doesn't have to know about node set.
+ config := data.getConfig(b.Position.Round)
+ if config == nil {
+ return ErrInvalidRoundID
+ }
+ // Check if the chain id is valid.
+ if b.Position.ChainID >= config.numChains {
+ return ErrInvalidChainID
+ }
+ // Make sure parent block is arrived.
+ chain := data.chains[b.Position.ChainID]
+ chainTip := chain.tip
+ if chainTip == nil {
+ if !b.ParentHash.Equal(common.Hash{}) {
+ return ErrAckingBlockNotExists
+ }
+ if !b.IsGenesis() {
+ return ErrNotGenesisBlock
+ }
+ if !config.isValidGenesisBlockTime(b) {
+ return ErrIncorrectBlockTime
+ }
+ return data.checkAckingRelations(b)
+ }
+ // Check parent block if parent hash is specified.
+ if !b.ParentHash.Equal(common.Hash{}) {
+ if !b.ParentHash.Equal(chainTip.Hash) {
+ return ErrAckingBlockNotExists
+ }
if !b.IsAcking(b.ParentHash) {
return ErrNotAckParent
}
- bParent, err := data.findBlock(b.ParentHash)
- if err != nil {
- // This error should never happened, a parent block is always
- // an acked block.
- panic(err)
+ }
+ chainTipConfig := data.getConfig(chainTip.Position.Round)
+ // Round can't be rewinded.
+ if chainTip.Position.Round > b.Position.Round {
+ return ErrInvalidRoundID
+ }
+ checkTip := false
+ if chainTip.Timestamp.After(chainTipConfig.roundEndTime) {
+ // Round switching should happen when chainTip has already passed
+ // the end time of its round.
+ if chainTip.Position.Round == b.Position.Round {
+ return ErrRoundNotSwitch
}
- if bParent.Position.ChainID != b.Position.ChainID {
- return ErrInvalidParentChain
+ // The round ID is continuous.
+ if b.Position.Round-chainTip.Position.Round == 1 {
+ checkTip = true
+ } else {
+ // This block should be genesis block of new round because round
+ // ID is not continuous.
+ if !b.IsGenesis() {
+ return ErrNotGenesisBlock
+ }
+ if !config.isValidGenesisBlockTime(b) {
+ return ErrIncorrectBlockTime
+ }
}
- if bParent.Position.Height != b.Position.Height-1 {
+ } else {
+ if chainTip.Position.Round != b.Position.Round {
+ // Round should not switch.
+ return ErrInvalidRoundID
+ }
+ checkTip = true
+ }
+ // Validate the relation between chain tip when needed.
+ if checkTip {
+ if b.Position.Height != chainTip.Position.Height+1 {
return ErrInvalidBlockHeight
}
- if bParent.Witness.Height > b.Witness.Height {
+ if b.Witness.Height < chainTip.Witness.Height {
return ErrInvalidWitness
}
- // Check if its timestamp is valid.
- if !b.Timestamp.After(bParent.Timestamp) {
- return ErrInvalidTimestamp
- }
- // Check if its timestamp is in expected range.
- if b.Timestamp.Before(
- bParent.Timestamp.Add(config.minBlockTimeInterval)) ||
- b.Timestamp.After(
- bParent.Timestamp.Add(config.maxBlockTimeInterval)) {
-
+ if !config.isValidBlockTime(b, chainTip.Timestamp) {
return ErrIncorrectBlockTime
}
+ // Chain tip should be acked.
+ if !b.IsAcking(chainTip.Hash) {
+ return ErrNotAckParent
+ }
+ }
+ if err := data.checkAckingRelations(b); err != nil {
+ return err
}
return nil
}
@@ -220,13 +283,13 @@ func (data *latticeData) addBlock(
return
}
data.blockByHash[block.Hash] = block
- // Update nextAcks.
+ // Update lastAckPos.
for _, ack := range block.Acks {
if bAck, err = data.findBlock(ack); err != nil {
return
}
- data.chains[bAck.Position.ChainID].nextAck[block.Position.ChainID] =
- bAck.Position.Height + 1
+ data.chains[bAck.Position.ChainID].lastAckPos[block.Position.ChainID] =
+ bAck.Position.Clone()
}
// Extract blocks that deliverable to total ordering.
// A block is deliverable to total ordering iff:
@@ -234,26 +297,28 @@ func (data *latticeData) addBlock(
for {
updated = false
for _, status := range data.chains {
- tip := status.getBlockByHeight(status.nextOutput)
- if tip == nil {
+ if status.nextOutputIndex >= len(status.blocks) {
continue
}
+ tip := status.blocks[status.nextOutputIndex]
allAckingBlockDelivered := true
for _, ack := range tip.Acks {
if bAck, err = data.findBlock(ack); err != nil {
return
}
- if data.chains[bAck.Position.ChainID].nextOutput >
- bAck.Position.Height {
-
+ // Check whether this block has been output or not.
+ idx := data.chains[bAck.Position.ChainID].findBlock(
+ &bAck.Position)
+ if idx == -1 ||
+ idx < data.chains[bAck.Position.ChainID].nextOutputIndex {
continue
}
// This acked block exists and not delivered yet.
allAckingBlockDelivered = false
}
if allAckingBlockDelivered {
+ status.nextOutputIndex++
deliverable = append(deliverable, tip)
- status.nextOutput++
updated = true
}
}
@@ -261,67 +326,96 @@ func (data *latticeData) addBlock(
break
}
}
-
- // Delete old blocks in "chains" and "blocks" to release memory space.
- //
- // A block is safe to be deleted iff:
- // - It's delivered to total ordering
- // - All chains (including its proposing chain) acks some block with
- // higher height in its proposing chain.
- //
- // This works because blocks of height below this minimum are not going to be
- // acked anymore, the ackings of these blocks are illegal.
- for _, status := range data.chains {
- status.purge()
- }
return
}
-// prepareBlock helps to setup fields of block based on its ProposerID,
+// prepareBlock helps to setup fields of block based on its ChainID and Round,
// including:
-// - Set 'Acks' and 'Timestamps' for the highest block of each validator not
-// acked by this proposer before.
-// - Set 'ParentHash' and 'Height' from parent block, if we can't find a
-// parent, these fields would be setup like a genesis block.
-func (data *latticeData) prepareBlock(block *types.Block) {
- config := data.getConfig(block.Position.Round)
+// - Acks
+// - Timestamp
+// - ParentHash and Height from parent block. If there is no valid parent block
+// (e.g. a newly added chain or bootstrap), these fields would be set up as
+// genesis block.
+func (data *latticeData) prepareBlock(b *types.Block) error {
+ var (
+ minTimestamp, maxTimestamp time.Time
+ config *latticeDataConfig
+ acks common.Hashes
+ bindTip bool
+ chainTip *types.Block
+ )
+ if config = data.getConfig(b.Position.Round); config == nil {
+ return ErrUnknownRoundID
+ }
// Reset fields to make sure we got these information from parent block.
- block.Position.Height = 0
- block.ParentHash = common.Hash{}
- acks := common.Hashes{}
- for chainID := range data.chains {
- // find height of the latest block for that validator.
- var (
- curBlock *types.Block
- nextHeight = data.chains[chainID].nextAck[block.Position.ChainID]
- )
- for {
- tmpBlock := data.chains[chainID].getBlockByHeight(nextHeight)
- if tmpBlock == nil {
- break
+ b.Position.Height = 0
+ b.ParentHash = common.Hash{}
+ // Decide valid timestamp range.
+ homeChain := data.chains[b.Position.ChainID]
+ if homeChain.tip != nil {
+ chainTip = homeChain.tip
+ if b.Position.Round < chainTip.Position.Round {
+ return ErrInvalidRoundID
+ }
+ chainTipConfig := data.getConfig(chainTip.Position.Round)
+ if chainTip.Timestamp.After(chainTipConfig.roundEndTime) {
+ if b.Position.Round == chainTip.Position.Round {
+ return ErrRoundNotSwitch
+ }
+ if b.Position.Round == chainTip.Position.Round+1 {
+ bindTip = true
+ }
+ } else {
+ if b.Position.Round != chainTip.Position.Round {
+ return ErrInvalidRoundID
}
- curBlock = tmpBlock
- nextHeight++
+ bindTip = true
}
- if curBlock == nil {
+ // TODO(mission): find a way to prevent us from assigning a witness height
+ // from Jurassic period.
+ b.Witness.Height = chainTip.Witness.Height
+ }
+ // For blocks with continuous round ID, assign timestamp range based on
+ // parent block and bound config.
+ if bindTip {
+ minTimestamp = chainTip.Timestamp.Add(config.minBlockTimeInterval)
+ maxTimestamp = chainTip.Timestamp.Add(config.maxBlockTimeInterval)
+ // When a chain is removed and added back, the reference block
+ // of previous round can't be used as parent block.
+ b.ParentHash = chainTip.Hash
+ b.Position.Height = chainTip.Position.Height + 1
+ } else {
+ // Discontinuous round ID detected, another fresh start of
+ // new round.
+ minTimestamp = config.roundBeginTime
+ maxTimestamp = config.roundBeginTime.Add(config.maxBlockTimeInterval)
+ }
+ // Fix timestamp if the given one is invalid.
+ if b.Timestamp.Before(minTimestamp) {
+ b.Timestamp = minTimestamp
+ } else if b.Timestamp.After(maxTimestamp) {
+ b.Timestamp = maxTimestamp
+ }
+ // Setup acks fields.
+ for _, status := range data.chains {
+ // Check if we can ack latest block on that chain.
+ if status.tip == nil {
continue
}
- acks = append(acks, curBlock.Hash)
- if uint32(chainID) == block.Position.ChainID {
- block.ParentHash = curBlock.Hash
- block.Position.Height = curBlock.Position.Height + 1
- block.Witness.Height = curBlock.Witness.Height
- minTimestamp := curBlock.Timestamp.Add(config.minBlockTimeInterval)
- maxTimestamp := curBlock.Timestamp.Add(config.maxBlockTimeInterval)
- if block.Timestamp.Before(minTimestamp) {
- block.Timestamp = minTimestamp
- } else if block.Timestamp.After(maxTimestamp) {
- block.Timestamp = maxTimestamp
- }
+ lastAckPos := status.lastAckPos[b.Position.ChainID]
+ if lastAckPos != nil && !status.tip.Position.Newer(lastAckPos) {
+ // The reference block is already acked.
+ continue
+ }
+ // Can't ack block too old or too new to us.
+ if DiffUint64(
+ status.tip.Position.Round, b.Position.Round) > 1 {
+ continue
}
+ acks = append(acks, status.tip.Hash)
}
- block.Acks = common.NewSortedHashes(acks)
- return
+ b.Acks = common.NewSortedHashes(acks)
+ return nil
}
// TODO(mission): make more abstraction for this method.
@@ -350,122 +444,118 @@ func (data *latticeData) purgeBlocks(blocks []*types.Block) error {
return ErrPurgedBlockNotFound
}
delete(data.blockByHash, b.Hash)
+ // Blocks would be purged in ascending order of position.
+ if err := data.chains[b.Position.ChainID].purgeBlock(b); err != nil {
+ return err
+ }
}
return nil
}
// getConfig get configuration for lattice-data by round ID.
func (data *latticeData) getConfig(round uint64) (config *latticeDataConfig) {
- if round < data.minRound {
- return
- }
- diff := round - data.minRound
- if diff >= uint64(len(data.configs)) {
+ if round >= uint64(len(data.configs)) {
return
}
- return data.configs[diff]
+ return data.configs[round]
}
// appendConfig appends a configuration for upcoming round. When you append
// a config for round R, next time you can only append the config for round R+1.
func (data *latticeData) appendConfig(
round uint64, config *latticeDataConfig) (err error) {
-
- if round != data.minRound+uint64(len(data.configs)) {
+ // Make sure caller knows which round this config belongs to.
+ if round != uint64(len(data.configs)) {
return ErrRoundNotIncreasing
}
+ // Set round beginning time.
+ config.setRoundBeginTime(data.configs[len(data.configs)-1].roundEndTime)
data.configs = append(data.configs, config)
+ // Resize each slice if incoming config contains larger number of chains.
+ if uint32(len(data.chains)) < config.numChains {
+ count := config.numChains - uint32(len(data.chains))
+ for _, status := range data.chains {
+ status.lastAckPos = append(
+ status.lastAckPos, make([]*types.Position, count)...)
+ }
+ for i := uint32(len(data.chains)); i < config.numChains; i++ {
+ data.chains = append(data.chains, &chainStatus{
+ ID: i,
+ blocks: []*types.Block{},
+ lastAckPos: make([]*types.Position, config.numChains),
+ })
+ }
+ }
return nil
}
type chainStatus struct {
// ID keeps the chainID of this chain status.
ID uint32
-
// blocks stores blocks proposed for this chain, sorted by height.
blocks []*types.Block
-
- // minHeight keeps minimum height in blocks.
- minHeight uint64
-
- // nextAck stores the height of next height that should be acked, i.e. last
- // acked height + 1. Initialized to 0.
- // being acked. For example, rb.chains[vid1].nextAck[vid2] - 1 is the last
- // acked height by vid2 acking vid1.
- nextAck []uint64
-
- // nextOutput is the next output height of block, default to 0.
- nextOutput uint64
+ // tip is the last block on this chain.
+ tip *types.Block
+ // lastAckPos caches last acking position from other chains. Nil means
+ // not acked yet.
+ lastAckPos []*types.Position
+ // the index to be output next time.
+ nextOutputIndex int
}
-func (s *chainStatus) getBlockByHeight(height uint64) (b *types.Block) {
- if height < s.minHeight {
- return
+// findBlock finds index of block in current pending blocks on this chain.
+// -1 means not found.
+func (s *chainStatus) findBlock(pos *types.Position) (idx int) {
+ idx = sort.Search(len(s.blocks), func(i int) bool {
+ return s.blocks[i].Position.Newer(pos) ||
+ s.blocks[i].Position.Equal(pos)
+ })
+ if idx == len(s.blocks) {
+ idx = -1
+ } else if !s.blocks[idx].Position.Equal(pos) {
+ idx = -1
}
- idx := int(height - s.minHeight)
- if idx >= len(s.blocks) {
+ return idx
+}
+
+// getBlock returns a pending block by giving its index from findBlock method.
+func (s *chainStatus) getBlock(idx int) (b *types.Block) {
+ if idx < 0 || idx >= len(s.blocks) {
return
}
b = s.blocks[idx]
return
}
+// addBlock adds a block to pending blocks on this chain.
func (s *chainStatus) addBlock(b *types.Block) error {
- if len(s.blocks) > 0 {
- // Make sure the height of incoming block should be
- // plus one to current latest blocks if exists.
- if s.blocks[len(s.blocks)-1].Position.Height != b.Position.Height-1 {
- return ErrChainStatusCorrupt
- }
- } else {
- if b.Position.Height != 0 {
- return ErrChainStatusCorrupt
- }
- }
s.blocks = append(s.blocks, b)
+ s.tip = b
return nil
}
-func (s *chainStatus) calcPurgeHeight() (safe uint64, ok bool) {
- // blocks with height less than min(nextOutput, nextAck...)
- // are safe to be purged.
- safe = s.nextOutput
- for _, ackedHeight := range s.nextAck {
- if safe > ackedHeight {
- safe = ackedHeight
+// TODO(mission): change back to nextHeight.
+// nextPosition returns a valid position for new block in this chain.
+func (s *chainStatus) nextPosition() types.Position {
+ if s.tip == nil {
+ return types.Position{
+ ChainID: s.ID,
+ Height: 0,
}
}
- // Both 'nextOutput' and 'nextAck' represents some block to be
- // outputed/acked. To find a block already outputed/acked, the height
- // needs to be minus 1.
- if safe == 0 {
- // Avoid underflow.
- return
- }
- safe--
- if safe < s.minHeight {
- return
- }
- ok = true
- return
-}
-
-// purge blocks if they are safe to be deleted from working set.
-func (s *chainStatus) purge() {
- safe, ok := s.calcPurgeHeight()
- if !ok {
- return
+ return types.Position{
+ ChainID: s.ID,
+ Height: s.tip.Position.Height + 1,
}
- newMinIndex := safe - s.minHeight + 1
- s.blocks = s.blocks[newMinIndex:]
- s.minHeight = safe + 1
- return
}
-// nextPosition returns a valid position for new block in this chain.
-func (s *chainStatus) nextPosition() types.Position {
- return types.Position{
- ChainID: s.ID,
- Height: s.minHeight + uint64(len(s.blocks)),
+// purgeBlock purges a block from the cache; the caller must make sure this
+// block has already been persisted to blockdb.
+func (s *chainStatus) purgeBlock(b *types.Block) error {
+ if b.Hash != s.blocks[0].Hash || s.nextOutputIndex <= 0 {
+ return ErrPurgeNotDeliveredBlock
}
+ s.blocks = s.blocks[1:]
+ s.nextOutputIndex--
+ return nil
}
diff --git a/core/lattice-data_test.go b/core/lattice-data_test.go
index 9e3ce52..cb7d892 100644
--- a/core/lattice-data_test.go
+++ b/core/lattice-data_test.go
@@ -43,130 +43,127 @@ type LatticeDataTestSuite struct {
// | | |
// 0 0 0 0 (block height)
// 0 1 2 3 (validator)
-func (s *LatticeDataTestSuite) genTestCase1() (data *latticeData) {
- // Create new reliableBroadcast instance with 4 validators
+func (s *LatticeDataTestSuite) genTestCase1() (
+ data *latticeData, blocks map[uint32]map[uint64]*types.Block) {
+ // Create new latticeData instance with 4 validators
var (
- round uint64
- b *types.Block
delivered []*types.Block
- h common.Hash
chainNum uint32 = 4
req = s.Require()
+ now = time.Now().UTC()
err error
)
+ // Setup stuffs.
+ genesisConfig := &latticeDataConfig{
+ numChains: chainNum,
+ minBlockTimeInterval: 2 * time.Nanosecond,
+ maxBlockTimeInterval: 1000 * time.Second,
+ roundInterval: 500 * time.Second,
+ }
+ genesisConfig.setRoundBeginTime(now)
db, err := blockdb.NewMemBackedBlockDB()
req.NoError(err)
- data = newLatticeData(
- db, round, s.newConfig(chainNum, 2*time.Nanosecond, 1000*time.Second))
+ data = newLatticeData(db, genesisConfig)
+ config := &latticeDataConfig{
+ numChains: chainNum,
+ minBlockTimeInterval: 2 * time.Nanosecond,
+ maxBlockTimeInterval: 1000 * time.Second,
+ roundInterval: 1000 * time.Second,
+ }
+ config.setRoundBeginTime(now)
+ data.appendConfig(1, config)
// Add genesis blocks.
- for i := uint32(0); i < chainNum; i++ {
- b = s.prepareGenesisBlock(i)
+ addBlock := func(b *types.Block) {
+ s.hashBlock(b)
delivered, err = data.addBlock(b)
- // Genesis blocks are safe to be added to DAG, they acks no one.
+ req.NoError(err)
req.Len(delivered, 1)
- req.Nil(err)
+ req.Equal(delivered[0].Hash, b.Hash)
}
-
+ // Genesis blocks are safe to be added to DAG, they acks no one.
+ b00 := s.prepareGenesisBlock(0)
+ addBlock(b00)
+ b10 := s.prepareGenesisBlock(1)
+ addBlock(b10)
+ b20 := s.prepareGenesisBlock(2)
+ addBlock(b20)
+ b30 := s.prepareGenesisBlock(3)
+ addBlock(b30)
// Add block 0-1 which acks 0-0.
- h = data.chains[0].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
+ b01 := &types.Block{
+ ParentHash: b00.Hash,
Hash: common.NewRandomHash(),
Timestamp: time.Now().UTC(),
Position: types.Position{
ChainID: 0,
Height: 1,
},
- Acks: common.NewSortedHashes(common.Hashes{h}),
+ Acks: common.NewSortedHashes(common.Hashes{b00.Hash}),
Witness: types.Witness{
Height: 1,
},
}
- s.hashBlock(b)
- delivered, err = data.addBlock(b)
- req.Len(delivered, 1)
- req.Equal(delivered[0].Hash, b.Hash)
- req.Nil(err)
- req.NotNil(data.chains[0].getBlockByHeight(1))
-
+ addBlock(b01)
// Add block 0-2 which acks 0-1 and 1-0.
- h = data.chains[0].getBlockByHeight(1).Hash
- b = &types.Block{
- ParentHash: h,
+ b02 := &types.Block{
+ ParentHash: b01.Hash,
Position: types.Position{
ChainID: 0,
Height: 2,
},
Timestamp: time.Now().UTC(),
Acks: common.NewSortedHashes(common.Hashes{
- h,
- data.chains[1].getBlockByHeight(0).Hash,
+ b01.Hash,
+ b10.Hash,
}),
Witness: types.Witness{
Height: 2,
},
}
- s.hashBlock(b)
- delivered, err = data.addBlock(b)
- req.Len(delivered, 1)
- req.Equal(delivered[0].Hash, b.Hash)
- req.Nil(err)
- req.NotNil(data.chains[0].getBlockByHeight(2))
-
+ addBlock(b02)
// Add block 0-3 which acks 0-2.
- h = data.chains[0].getBlockByHeight(2).Hash
- b = &types.Block{
- ParentHash: h,
+ b03 := &types.Block{
+ ParentHash: b02.Hash,
Hash: common.NewRandomHash(),
Timestamp: time.Now().UTC(),
Position: types.Position{
ChainID: 0,
Height: 3,
},
- Acks: common.NewSortedHashes(common.Hashes{h}),
+ Acks: common.NewSortedHashes(common.Hashes{b02.Hash}),
Witness: types.Witness{
Height: 3,
},
}
- s.hashBlock(b)
- delivered, err = data.addBlock(b)
- req.Len(delivered, 1)
- req.Equal(delivered[0].Hash, b.Hash)
- req.Nil(err)
- req.NotNil(data.chains[0].getBlockByHeight(3))
-
+ addBlock(b03)
// Add block 3-1 which acks 3-0.
- h = data.chains[3].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
+ b31 := &types.Block{
+ ParentHash: b30.Hash,
Hash: common.NewRandomHash(),
Timestamp: time.Now().UTC(),
Position: types.Position{
ChainID: 3,
Height: 1,
},
- Acks: common.NewSortedHashes(common.Hashes{h}),
+ Acks: common.NewSortedHashes(common.Hashes{b30.Hash}),
Witness: types.Witness{
Height: 1,
},
}
- s.hashBlock(b)
- delivered, err = data.addBlock(b)
- req.Len(delivered, 1)
- req.Equal(delivered[0].Hash, b.Hash)
- req.Nil(err)
- req.NotNil(data.chains[3].getBlockByHeight(0))
- return
-}
-
-func (s *LatticeDataTestSuite) newConfig(numChains uint32,
- minBlockInterval, maxBlockInterval time.Duration) *latticeDataConfig {
-
- return &latticeDataConfig{
- numChains: numChains,
- minBlockTimeInterval: minBlockInterval,
- maxBlockTimeInterval: maxBlockInterval,
+ addBlock(b31)
+ // Return created blocks.
+ blocks = map[uint32]map[uint64]*types.Block{
+ 0: map[uint64]*types.Block{
+ 0: b00,
+ 1: b01,
+ 2: b02,
+ 3: b03,
+ },
+ 1: map[uint64]*types.Block{0: b10},
+ 2: map[uint64]*types.Block{0: b20},
+ 3: map[uint64]*types.Block{0: b30},
}
+ return
}
// hashBlock is a helper to hash a block and check if any error.
@@ -192,262 +189,168 @@ func (s *LatticeDataTestSuite) prepareGenesisBlock(
return
}
-func (s *LatticeDataTestSuite) TestSanityCheckInDataLayer() {
+func (s *LatticeDataTestSuite) TestSanityCheck() {
var (
- b *types.Block
- h common.Hash
- data = s.genTestCase1()
- req = s.Require()
- err error
+ data, blocks = s.genTestCase1()
+ req = s.Require()
)
-
+ check := func(expectedErr error, b *types.Block) {
+ s.hashBlock(b)
+ err := data.sanityCheck(b)
+ req.NotNil(err)
+ req.Equal(expectedErr.Error(), err.Error())
+ }
// Non-genesis block with no ack, should get error.
- b = &types.Block{
- ParentHash: common.NewRandomHash(),
+ check(ErrNotAckParent, &types.Block{
+ ParentHash: blocks[1][0].Hash,
Position: types.Position{
- ChainID: 0,
- Height: 10,
+ ChainID: 1,
+ Height: 1,
},
- Acks: common.NewSortedHashes(common.Hashes{}),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(ErrNotAckParent.Error(), err.Error())
-
+ Acks: common.NewSortedHashes(common.Hashes{}),
+ Timestamp: time.Now().UTC(),
+ })
// Non-genesis block which acks its parent but the height is invalid.
- h = data.chains[1].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
+ check(ErrInvalidBlockHeight, &types.Block{
+ ParentHash: blocks[1][0].Hash,
Position: types.Position{
ChainID: 1,
Height: 2,
},
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(ErrInvalidBlockHeight.Error(), err.Error())
-
+ Acks: common.NewSortedHashes(common.Hashes{blocks[1][0].Hash}),
+ Timestamp: time.Now().UTC(),
+ })
// Invalid chain ID.
- h = data.chains[1].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
+ check(ErrInvalidChainID, &types.Block{
+ ParentHash: blocks[1][0].Hash,
Position: types.Position{
ChainID: 100,
Height: 1,
},
- Acks: common.NewSortedHashes(common.Hashes{h}),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(ErrInvalidChainID.Error(), err.Error())
-
- // Fork block.
- h = data.chains[0].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
- Position: types.Position{
- ChainID: 0,
- Height: 1,
- },
- Acks: common.NewSortedHashes(common.Hashes{h}),
+ Acks: common.NewSortedHashes(common.Hashes{blocks[1][0].Hash}),
Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(ErrForkBlock.Error(), err.Error())
-
+ })
// Replicated ack.
- h = data.chains[0].getBlockByHeight(3).Hash
- b = &types.Block{
- ParentHash: h,
+ check(ErrDoubleAck, &types.Block{
+ ParentHash: blocks[0][3].Hash,
Position: types.Position{
ChainID: 0,
Height: 4,
},
Acks: common.NewSortedHashes(common.Hashes{
- h,
- data.chains[1].getBlockByHeight(0).Hash,
+ blocks[0][3].Hash,
+ blocks[1][0].Hash,
}),
Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(ErrDoubleAck.Error(), err.Error())
-
+ Witness: types.Witness{
+ Height: 4,
+ },
+ })
// Acking block doesn't exists.
- h = data.chains[1].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
+ check(ErrAckingBlockNotExists, &types.Block{
+ ParentHash: blocks[1][0].Hash,
Position: types.Position{
ChainID: 1,
Height: 1,
},
Acks: common.NewSortedHashes(common.Hashes{
- h,
+ blocks[1][0].Hash,
common.NewRandomHash(),
}),
Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err.Error(), ErrAckingBlockNotExists.Error())
-
+ })
// Parent block on different chain.
- h = data.chains[1].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
+ check(ErrAckingBlockNotExists, &types.Block{
+ ParentHash: blocks[1][0].Hash,
Position: types.Position{
ChainID: 2,
Height: 1,
},
Acks: common.NewSortedHashes(common.Hashes{
- h,
- data.chains[2].getBlockByHeight(0).Hash,
+ blocks[1][0].Hash,
+ blocks[2][0].Hash,
}),
Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err.Error(), ErrInvalidParentChain.Error())
-
+ })
// Ack two blocks on the same chain.
- h = data.chains[2].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
+ check(ErrDuplicatedAckOnOneChain, &types.Block{
+ ParentHash: blocks[2][0].Hash,
Position: types.Position{
ChainID: 2,
Height: 1,
},
Acks: common.NewSortedHashes(common.Hashes{
- h,
- data.chains[0].getBlockByHeight(0).Hash,
- data.chains[0].getBlockByHeight(1).Hash,
+ blocks[2][0].Hash,
+ blocks[0][0].Hash,
+ blocks[0][1].Hash,
}),
Timestamp: time.Now().UTC(),
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err.Error(), ErrDuplicatedAckOnOneChain.Error())
-
+ })
// Witness height decreases.
- h = data.chains[0].getBlockByHeight(3).Hash
- b = &types.Block{
- ParentHash: h,
+ check(ErrInvalidWitness, &types.Block{
+ ParentHash: blocks[0][3].Hash,
Position: types.Position{
ChainID: 0,
Height: 4,
},
Timestamp: time.Now().UTC(),
Acks: common.NewSortedHashes(common.Hashes{
- h,
+ blocks[0][3].Hash,
}),
Witness: types.Witness{
Height: 2,
},
- }
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err.Error(), ErrInvalidWitness.Error())
-
+ })
+ // Add block 3-1 which acks 3-0, and violates reasonable block time interval.
- h = data.chains[2].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
+ b := &types.Block{
+ ParentHash: blocks[2][0].Hash,
Hash: common.NewRandomHash(),
Position: types.Position{
ChainID: 2,
Height: 1,
},
- Acks: common.NewSortedHashes(common.Hashes{h}),
+ Acks: common.NewSortedHashes(common.Hashes{blocks[2][0].Hash}),
+ Timestamp: time.Now().UTC(),
}
- b.Timestamp = data.chains[2].getBlockByHeight(0).Timestamp.Add(
+ b.Timestamp = blocks[2][0].Timestamp.Add(
data.getConfig(0).maxBlockTimeInterval + time.Nanosecond)
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err, ErrIncorrectBlockTime)
+ check(ErrIncorrectBlockTime, b)
// Violet minimum block time interval.
- b.Timestamp =
- data.chains[2].getBlockByHeight(0).Timestamp.Add(1 * time.Nanosecond)
- s.hashBlock(b)
- err = data.sanityCheck(b)
- req.NotNil(err)
- req.Equal(err, ErrIncorrectBlockTime)
-
- // Normal block.
- h = data.chains[1].getBlockByHeight(0).Hash
- b = &types.Block{
- ParentHash: h,
+ b.Timestamp = blocks[2][0].Timestamp.Add(1 * time.Nanosecond)
+ check(ErrIncorrectBlockTime, b)
+ // Add a normal block with timestamp passing round cutting time.
+ b11 := &types.Block{
+ ParentHash: blocks[1][0].Hash,
Position: types.Position{
ChainID: 1,
Height: 1,
},
- Acks: common.NewSortedHashes(common.Hashes{h}),
- Timestamp: time.Now().UTC(),
+ Acks: common.NewSortedHashes(common.Hashes{blocks[1][0].Hash}),
+ Timestamp: time.Now().UTC().Add(500 * time.Second),
}
- s.hashBlock(b)
- req.Nil(data.sanityCheck(b))
-}
-
-func (s *LatticeDataTestSuite) TestRandomIntensiveAcking() {
- var (
- round uint64
- chainNum uint32 = 19
- req = s.Require()
- delivered []*types.Block
- extracted []*types.Block
- b *types.Block
- err error
- )
- db, err := blockdb.NewMemBackedBlockDB()
+ s.hashBlock(b11)
+ req.NoError(data.sanityCheck(b11))
+ _, err := data.addBlock(b11)
req.NoError(err)
- data := newLatticeData(
- db, round, s.newConfig(chainNum, 0, 1000*time.Second))
- // Generate genesis blocks.
- for i := uint32(0); i < chainNum; i++ {
- b = s.prepareGenesisBlock(i)
- delivered, err = data.addBlock(b)
- req.Len(delivered, 1)
- req.Nil(err)
- }
-
- for i := 0; i < 5000; i++ {
- b := &types.Block{
- Position: types.Position{
- ChainID: uint32(rand.Intn(int(chainNum))),
- },
- Timestamp: time.Now().UTC(),
- }
- data.prepareBlock(b)
- s.hashBlock(b)
- delivered, err = data.addBlock(b)
- req.Nil(err)
- for _, b := range delivered {
- req.NoError(db.Put(*b))
- }
- req.NoError(data.purgeBlocks(delivered))
- extracted = append(extracted, delivered...)
+ // A block didn't perform round switching.
+ b12 := &types.Block{
+ ParentHash: b11.Hash,
+ Position: types.Position{
+ ChainID: 1,
+ Height: 2,
+ },
+ Acks: common.NewSortedHashes(common.Hashes{b11.Hash}),
+ Timestamp: time.Now().UTC().Add(501 * time.Second),
}
-
- // The len of array extractedBlocks should be about 5000.
- req.True(len(extracted) > 4500)
- // The len of data.blockInfos should be small if deleting mechanism works.
- req.True(len(data.blockByHash) < 500)
+ check(ErrRoundNotSwitch, b12)
+ // A block with expected new round ID should be OK.
+ b12.Position.Round = 1
+ s.hashBlock(b12)
+ req.NoError(data.sanityCheck(b12))
}
func (s *LatticeDataTestSuite) TestRandomlyGeneratedBlocks() {
var (
- round uint64
chainNum uint32 = 19
blockNum = 50
repeat = 20
@@ -456,20 +359,27 @@ func (s *LatticeDataTestSuite) TestRandomlyGeneratedBlocks() {
req = s.Require()
datum []*latticeData
)
-
if testing.Short() {
chainNum = 7
repeat = 3
}
-
+ // Setup a configuration with no restriction on block interval and
+ // round cutting.
+ genesisConfig := &latticeDataConfig{
+ numChains: chainNum,
+ minBlockTimeInterval: 0,
+ maxBlockTimeInterval: 1000 * time.Second,
+ roundInterval: 1000 * time.Second,
+ }
+ genesisConfig.setRoundBeginTime(time.Now().UTC())
// Prepare a randomly generated blocks.
db, err := blockdb.NewMemBackedBlockDB()
- req.Nil(err)
+ req.NoError(err)
gen := test.NewBlocksGenerator(nil, hashBlock)
_, err = gen.Generate(chainNum, blockNum, nil, db)
- req.Nil(err)
+ req.NoError(err)
iter, err := db.GetAll()
- req.Nil(err)
+ req.NoError(err)
// Setup a revealer that would reveal blocks randomly but still form
// valid DAG without holes.
revealer, err := test.NewRandomDAGRevealer(iter)
@@ -478,8 +388,9 @@ func (s *LatticeDataTestSuite) TestRandomlyGeneratedBlocks() {
revealedHashesAsString := map[string]struct{}{}
deliveredHashesAsString := map[string]struct{}{}
for i := 0; i < repeat; i++ {
- data := newLatticeData(
- nil, round, s.newConfig(chainNum, 0, 1000*time.Second))
+ db, err := blockdb.NewMemBackedBlockDB()
+ req.NoError(err)
+ data := newLatticeData(db, genesisConfig)
deliveredHashes := common.Hashes{}
revealedHashes := common.Hashes{}
revealer.Reset()
@@ -492,18 +403,18 @@ func (s *LatticeDataTestSuite) TestRandomlyGeneratedBlocks() {
break
}
}
- s.Require().Nil(err)
+ req.NoError(err)
revealedHashes = append(revealedHashes, b.Hash)
-
// Pass blocks to lattice.
+ req.NoError(data.sanityCheck(&b))
delivered, err = data.addBlock(&b)
- req.Nil(err)
+ req.NoError(err)
for _, b := range delivered {
deliveredHashes = append(deliveredHashes, b.Hash)
}
}
// To make it easier to check, sort hashes of
- // strongly acked blocks, and concatenate them into
+ // delivered blocks, and concatenate them into
// a string.
sort.Sort(deliveredHashes)
asString := ""
@@ -519,10 +430,10 @@ func (s *LatticeDataTestSuite) TestRandomlyGeneratedBlocks() {
revealedHashesAsString[asString] = struct{}{}
datum = append(datum, data)
}
- // Make sure concatenated hashes of strongly acked blocks are identical.
+ // Make sure concatenated hashes of delivered blocks are identical.
req.Len(deliveredHashesAsString, 1)
for h := range deliveredHashesAsString {
- // Make sure at least some blocks are strongly acked.
+ // Make sure at least some blocks are delivered.
req.True(len(h) > 0)
}
// Make sure we test for more than 1 revealing sequence.
@@ -534,36 +445,43 @@ func (s *LatticeDataTestSuite) TestRandomlyGeneratedBlocks() {
if i == j {
continue
}
+ // Check chain status of this pair.
for chainID, statusI := range bI.chains {
- req.Equal(statusI.minHeight, bJ.chains[chainID].minHeight)
- req.Equal(statusI.nextOutput, bJ.chains[chainID].nextOutput)
+ req.Equal(statusI.tip, bJ.chains[chainID].tip)
req.Equal(len(statusI.blocks), len(bJ.chains[chainID].blocks))
- // Check nextAck.
- for x, ackI := range statusI.nextAck {
- req.Equal(ackI, bJ.chains[chainID].nextAck[x])
+ // Check lastAckPos.
+ for x, pos := range statusI.lastAckPos {
+ req.Equal(pos, bJ.chains[chainID].lastAckPos[x])
}
// Check blocks.
if len(statusI.blocks) > 0 {
req.Equal(statusI.blocks[0], bJ.chains[chainID].blocks[0])
}
}
- // Check blockByHash.
- req.Equal(bI.blockByHash, bJ.blockByHash)
}
}
}
func (s *LatticeDataTestSuite) TestPrepareBlock() {
var (
- round uint64
chainNum uint32 = 4
req = s.Require()
minInterval = 50 * time.Millisecond
delivered []*types.Block
err error
- data = newLatticeData(
- nil, round, s.newConfig(chainNum, 0, 3000*time.Second))
)
+ // Setup a configuration with no restriction on block interval and
+ // round cutting.
+ genesisConfig := &latticeDataConfig{
+ numChains: chainNum,
+ minBlockTimeInterval: 0,
+ maxBlockTimeInterval: 3000 * time.Second,
+ roundInterval: 3000 * time.Second,
+ }
+ genesisConfig.setRoundBeginTime(time.Now().UTC())
+ db, err := blockdb.NewMemBackedBlockDB()
+ req.NoError(err)
+ data := newLatticeData(db, genesisConfig)
// Setup genesis blocks.
b00 := s.prepareGenesisBlock(0)
time.Sleep(minInterval)
@@ -574,17 +492,17 @@ func (s *LatticeDataTestSuite) TestPrepareBlock() {
b30 := s.prepareGenesisBlock(3)
// Submit these blocks to lattice.
delivered, err = data.addBlock(b00)
+ req.NoError(err)
req.Len(delivered, 1)
- req.Nil(err)
delivered, err = data.addBlock(b10)
+ req.NoError(err)
req.Len(delivered, 1)
- req.Nil(err)
delivered, err = data.addBlock(b20)
+ req.NoError(err)
req.Len(delivered, 1)
- req.Nil(err)
delivered, err = data.addBlock(b30)
+ req.NoError(err)
req.Len(delivered, 1)
- req.Nil(err)
// We should be able to collect all 4 genesis blocks by calling
// prepareBlock.
b11 := &types.Block{
@@ -593,7 +511,7 @@ func (s *LatticeDataTestSuite) TestPrepareBlock() {
},
Timestamp: time.Now().UTC(),
}
- data.prepareBlock(b11)
+ req.NoError(data.prepareBlock(b11))
s.hashBlock(b11)
req.Contains(b11.Acks, b00.Hash)
req.Contains(b11.Acks, b10.Hash)
@@ -603,7 +521,7 @@ func (s *LatticeDataTestSuite) TestPrepareBlock() {
req.Equal(b11.Position.Height, uint64(1))
delivered, err = data.addBlock(b11)
req.Len(delivered, 1)
- req.Nil(err)
+ req.NoError(err)
// Propose/Process a block based on collected info.
b12 := &types.Block{
Position: types.Position{
@@ -611,7 +529,7 @@ func (s *LatticeDataTestSuite) TestPrepareBlock() {
},
Timestamp: time.Now().UTC(),
}
- data.prepareBlock(b12)
+ req.NoError(data.prepareBlock(b12))
s.hashBlock(b12)
// This time we only need to ack b11.
req.Len(b12.Acks, 1)
@@ -625,7 +543,7 @@ func (s *LatticeDataTestSuite) TestPrepareBlock() {
ChainID: 0,
},
}
- data.prepareBlock(b01)
+ req.NoError(data.prepareBlock(b01))
s.hashBlock(b01)
req.Len(b01.Acks, 4)
req.Contains(b01.Acks, b00.Hash)
@@ -636,63 +554,129 @@ func (s *LatticeDataTestSuite) TestPrepareBlock() {
req.Equal(b01.Position.Height, uint64(1))
}
-func (s *LatticeDataTestSuite) TestCalcPurgeHeight() {
- // Test chainStatus.calcPurgeHeight, we don't have
- // to prepare blocks to test it.
- var req = s.Require()
- chain := &chainStatus{
- minHeight: 0,
- nextOutput: 0,
- nextAck: []uint64{1, 1, 1, 1},
- }
- // When calculated safe is underflow, nok.
- safe, ok := chain.calcPurgeHeight()
- req.False(ok)
- // height=1 is outputed, and acked by everyone else.
- chain.nextOutput = 1
- safe, ok = chain.calcPurgeHeight()
- req.True(ok)
- req.Equal(safe, uint64(0))
- // Should take nextAck's height into consideration.
- chain.nextOutput = 2
- safe, ok = chain.calcPurgeHeight()
- req.True(ok)
- req.Equal(safe, uint64(0))
- // When minHeight is large that safe height, return nok.
- chain.minHeight = 1
- chain.nextOutput = 1
- safe, ok = chain.calcPurgeHeight()
- req.False(ok)
-}
-
-func (s *LatticeDataTestSuite) TestPurge() {
- // Make a simplest test case to test chainStatus.purge.
- // Make sure status after purge 1 block expected.
- b00 := &types.Block{Hash: common.NewRandomHash()}
- b01 := &types.Block{Hash: common.NewRandomHash()}
- b02 := &types.Block{Hash: common.NewRandomHash()}
- chain := &chainStatus{
- blocks: []*types.Block{b00, b01, b02},
- nextAck: []uint64{1, 1, 1, 1},
- nextOutput: 1,
- }
- chain.purge()
- s.Equal(chain.minHeight, uint64(1))
- s.Require().Len(chain.blocks, 2)
- s.Equal(chain.blocks[0].Hash, b01.Hash)
- s.Equal(chain.blocks[1].Hash, b02.Hash)
-}
-
func (s *LatticeDataTestSuite) TestNextPosition() {
// Test 'NextPosition' method when lattice is ready.
- data := s.genTestCase1()
+ data, _ := s.genTestCase1()
s.Equal(data.nextPosition(0), types.Position{ChainID: 0, Height: 4})
-
// Test 'NextPosition' method when lattice is empty.
- data = newLatticeData(nil, 0, s.newConfig(4, 0, 1000*time.Second))
+ // Setup a configuration with no restriction on block interval and
+ // round cutting.
+ genesisConfig := &latticeDataConfig{
+ numChains: 4,
+ minBlockTimeInterval: 0,
+ maxBlockTimeInterval: 1000 * time.Second,
+ roundInterval: 1000 * time.Second,
+ }
+ genesisConfig.setRoundBeginTime(time.Now().UTC())
+ data = newLatticeData(nil, genesisConfig)
s.Equal(data.nextPosition(0), types.Position{ChainID: 0, Height: 0})
}
+func (s *LatticeDataTestSuite) TestNumChainsChange() {
+ // This test case verify the behavior when NumChains
+ // changes. We only rely on methods of latticeData
+ // for test. It would run in this way:
+ // - Several configs would be prepared in advance, scenario for NumChains
+ // increasing and decreasing would be included.
+ // - Blocks would be prepared from candidate chains.
+ // - Once a block is detected as last block of that chain in that round,
+ // that chain would be removed from candidate chains.
+ // - Once candidate chains are empty, proceed to next round until the last
+ // round.
+ //
+ // These scenarios would be checked in this test:
+ // - Each block generated successfully by latticeData.prepareBlock
+ // should be no error when passing to latticeData.sanityCheck
+ // and latticeData.addBlock.
+ // - The delivered blocks should form a valid DAG.
+
+ begin := time.Now().UTC()
+ fixConfig := func(config *latticeDataConfig) *latticeDataConfig {
+ config.minBlockTimeInterval = 10 * time.Second
+ config.maxBlockTimeInterval = time.Hour // We don't care time.
+ config.roundInterval = 100 * time.Second
+ config.setRoundBeginTime(begin)
+ begin = config.roundEndTime
+ return config
+ }
+ var (
+ req = s.Require()
+ maxChains = uint32(16)
+ configs = []*latticeDataConfig{
+ fixConfig(&latticeDataConfig{numChains: 13}),
+ fixConfig(&latticeDataConfig{numChains: 10}),
+ fixConfig(&latticeDataConfig{numChains: maxChains}),
+ fixConfig(&latticeDataConfig{numChains: 7}),
+ }
+ randObj = rand.New(rand.NewSource(time.Now().UnixNano()))
+ )
+ // Setup blockdb instance.
+ db, err := blockdb.NewMemBackedBlockDB()
+ req.NoError(err)
+ // Set up latticeData instance.
+ lattice := newLatticeData(db, configs[0])
+ req.NoError(lattice.appendConfig(1, configs[1]))
+ req.NoError(lattice.appendConfig(2, configs[2]))
+ req.NoError(lattice.appendConfig(3, configs[3]))
+ // Run until candidate chains are empty.
+ var (
+ delivered []*types.Block
+ candidateChainIDs []uint32
+ nextRound uint64
+ )
+ for {
+ // Decide chainID.
+ if len(candidateChainIDs) == 0 {
+ // Proceed to next round.
+ if nextRound >= uint64(len(configs)) {
+ break
+ }
+ c := configs[nextRound]
+ nextRound++
+ for i := uint32(0); i < c.numChains; i++ {
+ candidateChainIDs = append(candidateChainIDs, i)
+ }
+ }
+ chainID := candidateChainIDs[randObj.Intn(len(candidateChainIDs))]
+ // Prepare blocks, see if we are legal to propose block at
+ // this position.
+ b := &types.Block{
+ Position: types.Position{
+ ChainID: chainID,
+ Round: nextRound - 1,
+ }}
+ err = lattice.prepareBlock(b)
+ if err == ErrRoundNotSwitch {
+ // This round is done, remove this chain from candidates.
+ for i := range candidateChainIDs {
+ if candidateChainIDs[i] != chainID {
+ continue
+ }
+ candidateChainIDs = append(
+ candidateChainIDs[:i], candidateChainIDs[i+1:]...)
+ break
+ }
+ continue
+ }
+ req.NoError(err)
+ s.hashBlock(b)
+ // Do the actual lattice usage.
+ req.NoError(lattice.sanityCheck(b))
+ d, err := lattice.addBlock(b)
+ req.NoError(err)
+ delivered = append(delivered, d...)
+ }
+ // Verify that delivered blocks form a DAG.
+ dag := map[common.Hash]struct{}{}
+ for _, b := range delivered {
+ for _, ack := range b.Acks {
+ _, exists := dag[ack]
+ req.True(exists)
+ }
+ dag[b.Hash] = struct{}{}
+ }
+}
+
func TestLatticeData(t *testing.T) {
suite.Run(t, new(LatticeDataTestSuite))
}
diff --git a/core/lattice.go b/core/lattice.go
index 18e7ae6..d634650 100644
--- a/core/lattice.go
+++ b/core/lattice.go
@@ -43,12 +43,14 @@ type Lattice struct {
// NewLattice constructs an Lattice instance.
func NewLattice(
- round uint64,
+ dMoment time.Time,
cfg *types.Config,
authModule *Authenticator,
app Application,
debug Debug,
db blockdb.BlockDatabase) (s *Lattice) {
+ // Create genesis latticeDataConfig.
+ dataConfig := newGenesisLatticeDataConfig(dMoment, cfg)
s = &Lattice{
authModule: authModule,
chainNum: cfg.NumChains,
@@ -56,13 +58,12 @@ func NewLattice(
debug: debug,
lastConfig: cfg,
pool: newBlockPool(cfg.NumChains),
- data: newLatticeData(db, round, newLatticeDataConfig(nil, cfg)),
+ data: newLatticeData(db, dataConfig),
toModule: newTotalOrdering(
- round,
uint64(cfg.K),
uint64(float32(cfg.NumChains-1)*cfg.PhiRatio+1),
cfg.NumChains),
- ctModule: newConsensusTimestamp(round, cfg.NumChains),
+ ctModule: newConsensusTimestamp(cfg.NumChains),
}
return
}
@@ -75,7 +76,9 @@ func (s *Lattice) PrepareBlock(
defer s.lock.RUnlock()
b.Timestamp = proposeTime
- s.data.prepareBlock(b)
+ if err = s.data.prepareBlock(b); err != nil {
+ return
+ }
b.Payload = s.app.PreparePayload(b.Position)
b.Witness = s.app.PrepareWitness(b.Witness.Height)
if err = s.authModule.SignBlock(b); err != nil {
diff --git a/core/lattice_test.go b/core/lattice_test.go
index 3ba5ed5..f84182c 100644
--- a/core/lattice_test.go
+++ b/core/lattice_test.go
@@ -94,11 +94,8 @@ type LatticeTestSuite struct {
}
func (s *LatticeTestSuite) newTestLatticeMgr(
- cfg *types.Config) *testLatticeMgr {
- var (
- req = s.Require()
- round uint64
- )
+ cfg *types.Config, dMoment time.Time) *testLatticeMgr {
+ var req = s.Require()
// Setup private key.
prvKey, err := ecdsa.NewPrivateKey()
req.Nil(err)
@@ -113,7 +110,7 @@ func (s *LatticeTestSuite) newTestLatticeMgr(
app: app,
db: db,
lattice: NewLattice(
- round,
+ dMoment,
cfg,
NewAuthenticator(prvKey),
app,
@@ -137,8 +134,10 @@ func (s *LatticeTestSuite) TestBasicUsage() {
K: 0,
MinBlockInterval: 0,
MaxBlockInterval: 3000 * time.Second,
+ RoundInterval: time.Hour,
}
- master = s.newTestLatticeMgr(&cfg)
+ dMoment = time.Now().UTC()
+ master = s.newTestLatticeMgr(&cfg, dMoment)
apps = []*test.App{master.app}
revealSeq = map[string]struct{}{}
)
@@ -167,7 +166,7 @@ func (s *LatticeTestSuite) TestBasicUsage() {
for i := 0; i < otherLatticeNum; i++ {
revealer.Reset()
revealed := ""
- other := s.newTestLatticeMgr(&cfg)
+ other := s.newTestLatticeMgr(&cfg, dMoment)
for {
b, err := revealer.Next()
if err != nil {
@@ -208,14 +207,15 @@ func (s *LatticeTestSuite) TestSanityCheck() {
MinBlockInterval: 0,
MaxBlockInterval: 3000 * time.Second,
}
- lattice = s.newTestLatticeMgr(&cfg).lattice
+ lattice = s.newTestLatticeMgr(&cfg, time.Now().UTC()).lattice
auth = lattice.authModule // Steal auth module from lattice, :(
req = s.Require()
err error
)
// A block properly signed should pass sanity check.
b := &types.Block{
- Position: types.Position{ChainID: 0},
+ Position: types.Position{ChainID: 0},
+ Timestamp: time.Now().UTC(),
}
req.NoError(auth.SignBlock(b))
req.NoError(lattice.SanityCheck(b))
diff --git a/core/total-ordering.go b/core/total-ordering.go
index dd2d99c..c8e0b25 100644
--- a/core/total-ordering.go
+++ b/core/total-ordering.go
@@ -588,11 +588,10 @@ type totalOrdering struct {
candidateChainIDs []uint32
// configs keeps configuration for each round in continuous way.
- configs []*totalOrderingConfig
- minRound uint64
+ configs []*totalOrderingConfig
}
-func newTotalOrdering(round, k, phi uint64, chainNum uint32) *totalOrdering {
+func newTotalOrdering(k, phi uint64, chainNum uint32) *totalOrdering {
to := &totalOrdering{
pendings: make(map[common.Hash]*types.Block),
k: k,
@@ -612,7 +611,6 @@ func newTotalOrdering(round, k, phi uint64, chainNum uint32) *totalOrdering {
phi: phi,
numChains: chainNum,
}}
- to.minRound = round
return to
}
@@ -621,7 +619,7 @@ func newTotalOrdering(round, k, phi uint64, chainNum uint32) *totalOrdering {
func (to *totalOrdering) appendConfig(
round uint64, config *types.Config) error {
- if round != to.minRound+uint64(len(to.configs)) {
+ if round != uint64(len(to.configs)) {
return ErrRoundNotIncreasing
}
to.configs = append(to.configs, &totalOrderingConfig{
diff --git a/core/total-ordering_test.go b/core/total-ordering_test.go
index fcadc89..d66a8fe 100644
--- a/core/total-ordering_test.go
+++ b/core/total-ordering_test.go
@@ -77,7 +77,6 @@ func (s *TotalOrderingTestSuite) TestBlockRelation() {
//
// The DAG used below is:
// A <- B <- C
- var round uint64
nodes := test.GenerateRandomNodeIDs(5)
vID := nodes[0]
blockA := s.genGenesisBlock(nodes, 0, common.Hashes{})
@@ -102,7 +101,7 @@ func (s *TotalOrderingTestSuite) TestBlockRelation() {
Acks: common.NewSortedHashes(common.Hashes{blockB.Hash}),
}
- to := newTotalOrdering(round, 1, 3, uint32(len(nodes)))
+ to := newTotalOrdering(1, 3, uint32(len(nodes)))
s.checkNotDeliver(to, blockA)
s.checkNotDeliver(to, blockB)
s.checkNotDeliver(to, blockC)
@@ -242,7 +241,6 @@ func (s *TotalOrderingTestSuite) TestGrade() {
func (s *TotalOrderingTestSuite) TestCycleDetection() {
// Make sure we don't get hang by cycle from
// block's acks.
- var round uint64
nodes := test.GenerateRandomNodeIDs(5)
// create blocks with cycles in acking relation.
@@ -284,7 +282,7 @@ func (s *TotalOrderingTestSuite) TestCycleDetection() {
b10.Acks = append(b10.Acks, b10.Hash)
// Make sure we won't hang when cycle exists.
- to := newTotalOrdering(round, 1, 3, uint32(len(nodes)))
+ to := newTotalOrdering(1, 3, uint32(len(nodes)))
s.checkNotDeliver(to, b00)
s.checkNotDeliver(to, b01)
s.checkNotDeliver(to, b02)
@@ -296,9 +294,8 @@ func (s *TotalOrderingTestSuite) TestCycleDetection() {
}
func (s *TotalOrderingTestSuite) TestNotValidDAGDetection() {
- var round uint64
nodes := test.GenerateRandomNodeIDs(5)
- to := newTotalOrdering(round, 1, 3, uint32(len(nodes)))
+ to := newTotalOrdering(1, 3, uint32(len(nodes)))
b00 := s.genGenesisBlock(nodes, 0, common.Hashes{})
b01 := &types.Block{
@@ -329,9 +326,8 @@ func (s *TotalOrderingTestSuite) TestEarlyDeliver() {
// A B
// Even when B is not received, A should
// be able to be delivered.
- var round uint64
nodes := test.GenerateRandomNodeIDs(5)
- to := newTotalOrdering(round, 2, 3, uint32(len(nodes)))
+ to := newTotalOrdering(2, 3, uint32(len(nodes)))
genNextBlock := func(b *types.Block) *types.Block {
return &types.Block{
ProposerID: b.ProposerID,
@@ -435,9 +431,8 @@ func (s *TotalOrderingTestSuite) TestEarlyDeliver() {
func (s *TotalOrderingTestSuite) TestBasicCaseForK2() {
// It's a handcrafted test case.
- var round uint64
nodes := test.GenerateRandomNodeIDs(5)
- to := newTotalOrdering(round, 2, 3, uint32(len(nodes)))
+ to := newTotalOrdering(2, 3, uint32(len(nodes)))
// Setup blocks.
b00 := s.genGenesisBlock(nodes, 0, common.Hashes{})
b10 := s.genGenesisBlock(nodes, 1, common.Hashes{})
@@ -771,10 +766,9 @@ func (s *TotalOrderingTestSuite) TestBasicCaseForK0() {
// v v v v
// o o o <- o Height: 0
var (
- round uint64
req = s.Require()
nodes = test.GenerateRandomNodeIDs(5)
- to = newTotalOrdering(round, 0, 3, uint32(len(nodes)))
+ to = newTotalOrdering(0, 3, uint32(len(nodes)))
)
// Setup blocks.
b00 := s.genGenesisBlock(nodes, 0, common.Hashes{})
@@ -944,7 +938,6 @@ func (s *TotalOrderingTestSuite) baseTestRandomlyGeneratedBlocks(
func (s *TotalOrderingTestSuite) TestRandomlyGeneratedBlocks() {
var (
- round uint64
chainNum = uint32(23)
blockNum = 50
phi uint64 = 10
@@ -965,25 +958,25 @@ func (s *TotalOrderingTestSuite) TestRandomlyGeneratedBlocks() {
for _, gen := range ackingCountGenerators {
// Test for K=0.
constructor := func(chainNum uint32) *totalOrdering {
- return newTotalOrdering(round, 0, phi, chainNum)
+ return newTotalOrdering(0, phi, chainNum)
}
s.baseTestRandomlyGeneratedBlocks(
constructor, chainNum, blockNum, gen, repeat)
// Test for K=1,
constructor = func(chainNum uint32) *totalOrdering {
- return newTotalOrdering(round, 1, phi, chainNum)
+ return newTotalOrdering(1, phi, chainNum)
}
s.baseTestRandomlyGeneratedBlocks(
constructor, chainNum, blockNum, gen, repeat)
// Test for K=2,
constructor = func(chainNum uint32) *totalOrdering {
- return newTotalOrdering(round, 2, phi, chainNum)
+ return newTotalOrdering(2, phi, chainNum)
}
s.baseTestRandomlyGeneratedBlocks(
constructor, chainNum, blockNum, gen, repeat)
// Test for K=3,
constructor = func(chainNum uint32) *totalOrdering {
- return newTotalOrdering(round, 3, phi, chainNum)
+ return newTotalOrdering(3, phi, chainNum)
}
s.baseTestRandomlyGeneratedBlocks(
constructor, chainNum, blockNum, gen, repeat)
diff --git a/core/types/position.go b/core/types/position.go
index af57b29..4e254ba 100644
--- a/core/types/position.go
+++ b/core/types/position.go
@@ -17,9 +17,44 @@
package types
+import "errors"
+
+// ErrComparePositionOnDifferentChains is raised when attempting to
+// compare two positions with different chain ID.
+var ErrComparePositionOnDifferentChains = errors.New(
+ "position on different chain")
+
// Position describes the position in the block lattice of an entity.
type Position struct {
ChainID uint32 `json:"chain_id"`
Round uint64 `json:"round"`
Height uint64 `json:"height"`
}
+
+// Equal checks if two positions are equal, it panics when their chainIDs
+// are different.
+func (pos *Position) Equal(other *Position) bool {
+ if pos.ChainID != other.ChainID {
+ panic(ErrComparePositionOnDifferentChains)
+ }
+ return pos.Round == other.Round && pos.Height == other.Height
+}
+
+// Newer checks if one block is newer than another one on the same chain.
+// If two blocks on different chain compared by this function, it would panic.
+func (pos *Position) Newer(other *Position) bool {
+ if pos.ChainID != other.ChainID {
+ panic(ErrComparePositionOnDifferentChains)
+ }
+ return pos.Round > other.Round ||
+ (pos.Round == other.Round && pos.Height > other.Height)
+}
+
+// Clone a position instance.
+func (pos *Position) Clone() *Position {
+ return &Position{
+ ChainID: pos.ChainID,
+ Round: pos.Round,
+ Height: pos.Height,
+ }
+}
diff --git a/core/types/position_test.go b/core/types/position_test.go
new file mode 100644
index 0000000..48f8dbd
--- /dev/null
+++ b/core/types/position_test.go
@@ -0,0 +1,98 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type PositionTestSuite struct {
+ suite.Suite
+}
+
+func (s *PositionTestSuite) TestNewer() {
+ pos := Position{
+ Round: 1,
+ ChainID: 1,
+ Height: 1,
+ }
+ s.Panics(func() {
+ pos.Newer(&Position{ChainID: 2})
+ })
+ s.False(pos.Newer(&Position{
+ Round: 2,
+ ChainID: 1,
+ Height: 0,
+ }))
+ s.False(pos.Newer(&Position{
+ Round: 1,
+ ChainID: 1,
+ Height: 2,
+ }))
+ s.True(pos.Newer(&Position{
+ Round: 0,
+ ChainID: 1,
+ Height: 100,
+ }))
+ s.True(pos.Newer(&Position{
+ Round: 1,
+ ChainID: 1,
+ Height: 0,
+ }))
+}
+
+func (s *PositionTestSuite) TestSearchInAscendingOrder() {
+ positions := []*Position{
+ &Position{Round: 0, Height: 1},
+ &Position{Round: 0, Height: 2},
+ &Position{Round: 0, Height: 3},
+ &Position{Round: 2, Height: 0},
+ &Position{Round: 2, Height: 1},
+ &Position{Round: 2, Height: 2},
+ &Position{Round: 4, Height: 0},
+ &Position{Round: 4, Height: 1},
+ &Position{Round: 4, Height: 2},
+ }
+ search := func(pos *Position) int {
+ return sort.Search(len(positions), func(i int) bool {
+ return positions[i].Newer(pos) || positions[i].Equal(pos)
+ })
+ }
+ s.Equal(0, search(&Position{Round: 0, Height: 0}))
+ s.Equal(len(positions), search(&Position{Round: 4, Height: 4}))
+ s.Equal(0, search(&Position{Round: 0, Height: 1}))
+ s.Equal(len(positions)-1, search(&Position{Round: 4, Height: 2}))
+ s.Equal(2, search(&Position{Round: 0, Height: 3}))
+}
+
+func (s *PositionTestSuite) TestEqual() {
+ pos := Position{}
+ s.Panics(func() {
+ pos.Equal(&Position{ChainID: 1})
+ })
+ s.True(pos.Equal(&Position{}))
+ s.False(pos.Equal(&Position{Round: 1}))
+ s.False(pos.Equal(&Position{Height: 1}))
+}
+
+func TestPosition(t *testing.T) {
+ suite.Run(t, new(PositionTestSuite))
+}
diff --git a/core/utils.go b/core/utils.go
index 3c1d211..ac95678 100644
--- a/core/utils.go
+++ b/core/utils.go
@@ -130,3 +130,11 @@ func HashConfigurationBlock(
prevHash[:],
)
}
+
+// DiffUint64 calculates difference between two uint64.
+func DiffUint64(a, b uint64) uint64 {
+ if a > b {
+ return a - b
+ }
+ return b - a
+}
diff --git a/integration_test/node.go b/integration_test/node.go
index d75f1ae..7e230a9 100644
--- a/integration_test/node.go
+++ b/integration_test/node.go
@@ -85,7 +85,7 @@ func NewNode(
proposingLatency test.LatencyModel) *Node {
var (
- round uint64
+ dMoment = time.Now().UTC()
chainID = uint32(math.MaxUint32)
governanceConfig = gov.Configuration(0)
nodeSetKeys = gov.NodeSet(0)
@@ -116,7 +116,7 @@ func NewNode(
app: app,
db: db,
lattice: core.NewLattice(
- round,
+ dMoment,
governanceConfig,
core.NewAuthenticator(privateKey),
app,