 cmd/dexcon-simulation-with-scheduler/main.go |  26 +-
 core/consensus_test.go                       |   2 +-
 core/lattice.go                              |   4 +-
 core/lattice_test.go                         |   4 +-
 core/test/app.go                             |  50 ++-
 core/test/app_test.go                        |  20 +-
 core/test/stopper_test.go                    |   4 +-
 integration_test/node.go                     | 259 +++--
 integration_test/non-byzantine_test.go       | 101 ++-
 integration_test/stats_test.go               |  18 +-
 integration_test/utils.go                    | 106 ++-
 integration_test/utils_test.go               |  61 ++
 12 files changed, 499 insertions(+), 156 deletions(-)
diff --git a/cmd/dexcon-simulation-with-scheduler/main.go b/cmd/dexcon-simulation-with-scheduler/main.go
index 5e04538..64147b6 100644
--- a/cmd/dexcon-simulation-with-scheduler/main.go
+++ b/cmd/dexcon-simulation-with-scheduler/main.go
@@ -57,23 +57,31 @@ func main() {
Sigma: cfg.Node.Legacy.ProposeIntervalSigma,
Mean: cfg.Node.Legacy.ProposeIntervalMean,
}
+ // Setup key pairs.
+ prvKeys, pubKeys, err := test.NewKeys(cfg.Node.Num)
+ if err != nil {
+ log.Fatal("could not setup key pairs: ", err)
+ }
+ // Setup governance instance.
+ gov, err := test.NewGovernance(
+ pubKeys, time.Duration(cfg.Networking.Mean)*time.Millisecond)
+ if err != nil {
+ log.Fatal("could not setup governance: ", err)
+ }
// Setup nodes and other consensus-related stuff.
- apps, dbs, nodes, err := integration.PrepareNodes(
- cfg.Node.Num, networkLatency, proposingLatency)
+ nodes, err := integration.PrepareNodes(
+ gov, prvKeys, uint32(cfg.Node.Num), networkLatency, proposingLatency)
if err != nil {
log.Fatal("could not setup nodes: ", err)
}
+ apps, dbs := integration.CollectAppAndDBFromNodes(nodes)
blockPerNode := int(math.Ceil(
float64(cfg.Node.MaxBlock) / float64(cfg.Node.Num)))
sch := test.NewScheduler(
test.NewStopByConfirmedBlocks(blockPerNode, apps, dbs))
- for nID, v := range nodes {
- sch.RegisterEventHandler(nID, v)
- if err = sch.Seed(integration.NewProposeBlockEvent(
- nID, time.Now().UTC())); err != nil {
-
- log.Fatal("unable to set seed simulation events: ", err)
- }
+ now := time.Now().UTC()
+ for _, v := range nodes {
+ v.Bootstrap(sch, now)
}
// Run the simulation.
sch.Run(cfg.Scheduler.WorkerNum)
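Taken together, the new setup sequence in main.go reduces to the following sketch (a minimal sketch assuming the test and integration packages from this tree; nodeCount, networkMean, maxNumChains, blockPerNode, and workerNum are placeholder values, and error handling is abbreviated):

    // Sketch of the new bootstrap flow for the scheduler-based simulation.
    prvKeys, pubKeys, _ := test.NewKeys(nodeCount)     // key pairs first
    gov, _ := test.NewGovernance(pubKeys, networkMean) // then governance
    nodes, _ := integration.PrepareNodes(
        gov, prvKeys, maxNumChains, networkLatency, proposingLatency)
    apps, dbs := integration.CollectAppAndDBFromNodes(nodes)
    sch := test.NewScheduler(
        test.NewStopByConfirmedBlocks(blockPerNode, apps, dbs))
    now := time.Now().UTC()
    for _, n := range nodes {
        _ = n.Bootstrap(sch, now) // registers handler, seeds proposing events
    }
    sch.Run(workerNum)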
diff --git a/core/consensus_test.go b/core/consensus_test.go
index 1ba9fa0..40fad9b 100644
--- a/core/consensus_test.go
+++ b/core/consensus_test.go
@@ -178,7 +178,7 @@ func (s *ConsensusTestSuite) prepareConsensus(
conn *networkConnection) (
*test.App, *Consensus) {
- app := test.NewApp()
+ app := test.NewApp(nil)
db, err := blockdb.NewMemBackedBlockDB()
s.Require().NoError(err)
nID := types.NewNodeID(prvKey.PublicKey())
diff --git a/core/lattice.go b/core/lattice.go
index 68b05c2..c1339be 100644
--- a/core/lattice.go
+++ b/core/lattice.go
@@ -36,7 +36,6 @@ var (
type Lattice struct {
lock sync.RWMutex
authModule *Authenticator
- chainNum uint32
app Application
debug Debug
pool blockPool
@@ -61,7 +60,6 @@ func NewLattice(
toConfig := newGenesisTotalOrderingConfig(dMoment, cfg)
s = &Lattice{
authModule: authModule,
- chainNum: cfg.NumChains,
app: app,
debug: debug,
pool: newBlockPool(cfg.NumChains),
@@ -180,7 +178,7 @@ func (s *Lattice) addBlockToLattice(
// Replay tips in pool to check their validity.
for {
hasOutput := false
- for i := uint32(0); i < s.chainNum; i++ {
+ for i := uint32(0); i < uint32(len(s.pool)); i++ {
var tip *types.Block
if tip = s.pool.tip(i); tip == nil {
continue
diff --git a/core/lattice_test.go b/core/lattice_test.go
index ec40d05..bace8a2 100644
--- a/core/lattice_test.go
+++ b/core/lattice_test.go
@@ -101,13 +101,13 @@ func (s *LatticeTestSuite) newTestLatticeMgr(
// Setup blockdb.
db, err := blockdb.NewMemBackedBlockDB()
req.NoError(err)
- // Setup application.
- app := test.NewApp()
// Setup governance.
_, pubKeys, err := test.NewKeys(int(cfg.NotarySetSize))
req.NoError(err)
gov, err := test.NewGovernance(pubKeys, cfg.LambdaBA)
req.NoError(err)
+ // Setup application.
+ app := test.NewApp(gov.State())
// Setup compaction chain.
cc := newCompactionChain(gov)
cc.init(&types.Block{})
diff --git a/core/test/app.go b/core/test/app.go
index 546c9e5..4976ae0 100644
--- a/core/test/app.go
+++ b/core/test/app.go
@@ -88,22 +88,30 @@ type App struct {
Delivered map[common.Hash]*AppDeliveredRecord
DeliverSequence common.Hashes
deliveredLock sync.RWMutex
+ blocks map[common.Hash]*types.Block
+ blocksLock sync.Mutex
+ state *State
}
// NewApp constructs a TestApp instance.
-func NewApp() *App {
+func NewApp(state *State) *App {
return &App{
Acked: make(map[common.Hash]*AppAckedRecord),
TotalOrdered: []*AppTotalOrderRecord{},
TotalOrderedByHash: make(map[common.Hash]*AppTotalOrderRecord),
Delivered: make(map[common.Hash]*AppDeliveredRecord),
DeliverSequence: common.Hashes{},
+ blocks: make(map[common.Hash]*types.Block),
+ state: state,
}
}
// PreparePayload implements Application interface.
func (app *App) PreparePayload(position types.Position) ([]byte, error) {
- return []byte{}, nil
+ if app.state == nil {
+ return []byte{}, nil
+ }
+ return app.state.PackRequests()
}
// PrepareWitness implements Application interface.
@@ -119,7 +127,10 @@ func (app *App) VerifyBlock(block *types.Block) types.BlockVerifyStatus {
}
// BlockConfirmed implements Application interface.
-func (app *App) BlockConfirmed(_ types.Block) {
+func (app *App) BlockConfirmed(b types.Block) {
+ app.blocksLock.Lock()
+ defer app.blocksLock.Unlock()
+ app.blocks[b.Hash] = &b
}
// StronglyAcked implements Application interface.
@@ -152,15 +163,30 @@ func (app *App) TotalOrderingDelivered(blockHashes common.Hashes, mode uint32) {
// BlockDelivered implements Application interface.
func (app *App) BlockDelivered(
blockHash common.Hash, result types.FinalizationResult) {
- app.deliveredLock.Lock()
- defer app.deliveredLock.Unlock()
-
- app.Delivered[blockHash] = &AppDeliveredRecord{
- ConsensusTime: result.Timestamp,
- ConsensusHeight: result.Height,
- When: time.Now().UTC(),
- }
- app.DeliverSequence = append(app.DeliverSequence, blockHash)
+ func() {
+ app.deliveredLock.Lock()
+ defer app.deliveredLock.Unlock()
+ app.Delivered[blockHash] = &AppDeliveredRecord{
+ ConsensusTime: result.Timestamp,
+ ConsensusHeight: result.Height,
+ When: time.Now().UTC(),
+ }
+ app.DeliverSequence = append(app.DeliverSequence, blockHash)
+ }()
+ // Apply packed state change requests in payload.
+ func() {
+ if app.state == nil {
+ return
+ }
+ app.blocksLock.Lock()
+ defer app.blocksLock.Unlock()
+ b := app.blocks[blockHash]
+ if err := app.state.Apply(b.Payload); err != nil {
+ if err != ErrDuplicatedChange {
+ panic(err)
+ }
+ }
+ }()
}
// Compare performs these checks against another App instance
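The net effect of the test.App changes: an App may now carry a test.State, so pending state-change requests are packed into block payloads and applied on delivery. A minimal usage sketch, assuming only the APIs shown in this diff (error handling elided):

    gov, _ := test.NewGovernance(pubKeys, lambdaBA)
    app := test.NewApp(gov.State()) // pass nil to keep the old stateless behavior
    payload, _ := app.PreparePayload(types.Position{}) // packs pending requests
    // Once the block carrying this payload is confirmed and later delivered,
    // BlockDelivered applies the packed requests via state.Apply(payload),
    // tolerating ErrDuplicatedChange for requests already applied elsewhere.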
diff --git a/core/test/app_test.go b/core/test/app_test.go
index 823bde0..933343f 100644
--- a/core/test/app_test.go
+++ b/core/test/app_test.go
@@ -91,12 +91,12 @@ func (s *AppTestSuite) deliverBlock(
func (s *AppTestSuite) TestCompare() {
req := s.Require()
- app1 := NewApp()
+ app1 := NewApp(nil)
s.setupAppByTotalOrderDeliver(app1, s.to1)
s.setupAppByTotalOrderDeliver(app1, s.to2)
s.setupAppByTotalOrderDeliver(app1, s.to3)
// An App with different deliver sequence.
- app2 := NewApp()
+ app2 := NewApp(nil)
s.setupAppByTotalOrderDeliver(app2, s.to1)
s.setupAppByTotalOrderDeliver(app2, s.to2)
hash := common.NewRandomHash()
@@ -105,7 +105,7 @@ func (s *AppTestSuite) TestCompare() {
s.deliverBlockWithTimeFromSequenceLength(app2, hash)
req.Equal(ErrMismatchBlockHashSequence, app1.Compare(app2))
// An App with different consensus time for the same block.
- app3 := NewApp()
+ app3 := NewApp(nil)
s.setupAppByTotalOrderDeliver(app3, s.to1)
s.setupAppByTotalOrderDeliver(app3, s.to2)
for _, h := range s.to3.BlockHashes {
@@ -120,7 +120,7 @@ func (s *AppTestSuite) TestCompare() {
req.Equal(ErrMismatchConsensusTime, app1.Compare(app3))
req.Equal(ErrMismatchConsensusTime, app3.Compare(app1))
// An App without any delivered blocks.
- app4 := NewApp()
+ app4 := NewApp(nil)
req.Equal(ErrEmptyDeliverSequence, app4.Compare(app1))
req.Equal(ErrEmptyDeliverSequence, app1.Compare(app4))
}
@@ -129,7 +129,7 @@ func (s *AppTestSuite) TestVerify() {
req := s.Require()
// An OK App instance.
- app1 := NewApp()
+ app1 := NewApp(nil)
s.setupAppByTotalOrderDeliver(app1, s.to1)
s.setupAppByTotalOrderDeliver(app1, s.to2)
s.setupAppByTotalOrderDeliver(app1, s.to3)
@@ -139,7 +139,7 @@ func (s *AppTestSuite) TestVerify() {
uint64(len(app1.DeliverSequence)))
req.Equal(ErrDeliveredBlockNotAcked, app1.Verify())
// The consensus time is out of order.
- app2 := NewApp()
+ app2 := NewApp(nil)
s.setupAppByTotalOrderDeliver(app2, s.to1)
for _, h := range s.to2.BlockHashes {
app2.StronglyAcked(h)
@@ -149,14 +149,14 @@ func (s *AppTestSuite) TestVerify() {
uint64(len(app2.DeliverSequence)+1))
req.Equal(ErrConsensusTimestampOutOfOrder, app2.Verify())
// A delivered block is not found in total ordering delivers.
- app3 := NewApp()
+ app3 := NewApp(nil)
s.setupAppByTotalOrderDeliver(app3, s.to1)
hash := common.NewRandomHash()
app3.StronglyAcked(hash)
s.deliverBlockWithTimeFromSequenceLength(app3, hash)
req.Equal(ErrMismatchTotalOrderingAndDelivered, app3.Verify())
// A delivered block is not found in total ordering delivers.
- app4 := NewApp()
+ app4 := NewApp(nil)
s.setupAppByTotalOrderDeliver(app4, s.to1)
for _, h := range s.to2.BlockHashes {
app4.StronglyAcked(h)
@@ -167,10 +167,10 @@ func (s *AppTestSuite) TestVerify() {
app4.TotalOrderingDelivered(common.Hashes{hash}, core.TotalOrderingModeNormal)
s.deliverBlockWithTimeFromSequenceLength(app4, hash)
// Witness ack on unknown block.
- app5 := NewApp()
+ app5 := NewApp(nil)
s.setupAppByTotalOrderDeliver(app5, s.to1)
// The consensus height is out of order.
- app6 := NewApp()
+ app6 := NewApp(nil)
s.setupAppByTotalOrderDeliver(app6, s.to1)
for _, h := range s.to2.BlockHashes {
app6.StronglyAcked(h)
diff --git a/core/test/stopper_test.go b/core/test/stopper_test.go
index 23c0137..f54c2d1 100644
--- a/core/test/stopper_test.go
+++ b/core/test/stopper_test.go
@@ -70,7 +70,7 @@ func (s *StopperTestSuite) TestStopByConfirmedBlocks() {
nodes = GenerateRandomNodeIDs(2)
)
for _, nID := range nodes {
- apps[nID] = NewApp()
+ apps[nID] = NewApp(nil)
db, err := blockdb.NewMemBackedBlockDB()
req.NoError(err)
dbs[nID] = db
@@ -118,7 +118,7 @@ func (s *StopperTestSuite) TestStopByRound() {
nodes = GenerateRandomNodeIDs(2)
)
for _, nID := range nodes {
- apps[nID] = NewApp()
+ apps[nID] = NewApp(nil)
db, err := blockdb.NewMemBackedBlockDB()
req.NoError(err)
dbs[nID] = db
diff --git a/integration_test/node.go b/integration_test/node.go
index bcb44bb..facd153 100644
--- a/integration_test/node.go
+++ b/integration_test/node.go
@@ -19,8 +19,6 @@ package integration
import (
"fmt"
- "math"
- "sort"
"time"
"github.com/dexon-foundation/dexon-consensus-core/common"
@@ -43,17 +41,22 @@ type consensusEventPayload struct {
PiggyBack interface{}
}
-// NewProposeBlockEvent constructs an test.Event that would trigger
+// newProposeBlockEvent constructs a test.Event that would trigger
// block proposing.
-func NewProposeBlockEvent(nID types.NodeID, when time.Time) *test.Event {
+func newProposeBlockEvent(nID types.NodeID,
+ roundID uint64, chainID uint32, when time.Time) *test.Event {
return test.NewEvent(nID, when, &consensusEventPayload{
Type: evtProposeBlock,
+ PiggyBack: &struct {
+ round uint64
+ chain uint32
+ }{roundID, chainID},
})
}
-// NewReceiveBlockEvent constructs an test.Event that would trigger
+// newReceiveBlockEvent constructs a test.Event that would trigger
// block received.
-func NewReceiveBlockEvent(
+func newReceiveBlockEvent(
nID types.NodeID, when time.Time, block *types.Block) *test.Event {
return test.NewEvent(nID, when, &consensusEventPayload{
@@ -65,67 +68,75 @@ func NewReceiveBlockEvent(
// Node is designed to work with test.Scheduler.
type Node struct {
ID types.NodeID
- chainNum uint32
- chainID uint32
+ ownChains []uint32
+ roundEndTimes []time.Time
+ roundToNotify uint64
lattice *core.Lattice
- app *test.App
- db blockdb.BlockDatabase
+ appModule *test.App
+ stateModule *test.State
+ govModule *test.Governance
+ dbModule blockdb.BlockDatabase
broadcastTargets map[types.NodeID]struct{}
networkLatency test.LatencyModel
proposingLatency test.LatencyModel
prevFinalHeight uint64
pendings []*types.Block
+ prevHash common.Hash
+ // This variable caches the maximum NumChains seen by this node when
+ // it is notified of round switching.
+ latticeMaxNumChains uint32
}
-// NewNode constructs an instance of Node.
-func NewNode(
- app *test.App,
- gov core.Governance,
- db blockdb.BlockDatabase,
+// newNode constructs an instance of Node.
+func newNode(
+ gov *test.Governance,
privateKey crypto.PrivateKey,
dMoment time.Time,
+ ownChains []uint32,
networkLatency test.LatencyModel,
- proposingLatency test.LatencyModel) *Node {
-
- var (
- chainID = uint32(math.MaxUint32)
- governanceConfig = gov.Configuration(0)
- nodeSetKeys = gov.NodeSet(0)
- nodeID = types.NewNodeID(privateKey.PublicKey())
- )
- broadcastTargets := make(map[types.NodeID]struct{})
- for _, k := range nodeSetKeys {
- broadcastTargets[types.NewNodeID(k)] = struct{}{}
- }
- hashes := common.Hashes{}
- for nID := range broadcastTargets {
- hashes = append(hashes, nID.Hash)
- }
- sort.Sort(hashes)
- for i, h := range hashes {
- if h == nodeID.Hash {
- chainID = uint32(i)
- }
+ proposingLatency test.LatencyModel) (*Node, error) {
+ // Load all configs prepared in core.Governance into core.Lattice.
+ copiedGov := gov.Clone()
+ configs := loadAllConfigs(copiedGov)
+ // Setup blockdb.
+ db, err := blockdb.NewMemBackedBlockDB()
+ if err != nil {
+ return nil, err
+ }
+ // Setup test.App
+ app := test.NewApp(copiedGov.State())
+ // Setup lattice instance.
+ lattice := core.NewLattice(
+ dMoment,
+ configs[0],
+ core.NewAuthenticator(privateKey),
+ app,
+ app,
+ db,
+ &common.NullLogger{})
+ n := &Node{
+ ID: types.NewNodeID(privateKey.PublicKey()),
+ ownChains: ownChains,
+ roundEndTimes: genRoundEndTimes(configs, dMoment),
+ roundToNotify: 2,
+ networkLatency: networkLatency,
+ proposingLatency: proposingLatency,
+ appModule: app,
+ stateModule: copiedGov.State(),
+ dbModule: db,
+ govModule: copiedGov,
+ lattice: lattice,
+ latticeMaxNumChains: configs[0].NumChains,
}
- delete(broadcastTargets, nodeID)
- return &Node{
- ID: nodeID,
- chainID: chainID,
- chainNum: governanceConfig.NumChains,
- broadcastTargets: broadcastTargets,
- networkLatency: networkLatency,
- proposingLatency: proposingLatency,
- app: app,
- db: db,
- lattice: core.NewLattice(
- dMoment,
- governanceConfig,
- core.NewAuthenticator(privateKey),
- app,
- app,
- db,
- &common.NullLogger{}),
+ for idx, config := range configs[1:] {
+ if err := lattice.AppendConfig(uint64(idx+1), config); err != nil {
+ return nil, err
+ }
+ if config.NumChains > n.latticeMaxNumChains {
+ n.latticeMaxNumChains = config.NumChains
+ }
}
+ return n, nil
}
// Handle implements test.EventHandler interface.
@@ -142,47 +153,65 @@ func (n *Node) Handle(e *test.Event) (events []*test.Event) {
return
}
-func (n *Node) handleProposeBlock(when time.Time, _ interface{}) (
+func (n *Node) handleProposeBlock(when time.Time, payload interface{}) (
events []*test.Event, err error) {
-
- b, err := n.prepareBlock(when)
+ pos := payload.(*struct {
+ round uint64
+ chain uint32
+ })
+ b, err := n.prepareBlock(pos.round, pos.chain, when)
if err != nil {
+ if err == core.ErrInvalidChainID {
+ // This chain is not included in this round; retry in the next round.
+ events = append(events, newProposeBlockEvent(
+ n.ID, b.Position.Round+1, b.Position.ChainID,
+ n.roundEndTimes[b.Position.Round]))
+ }
return
}
- if err = n.processBlock(b); err != nil {
- return
+ if events, err = n.processBlock(b); err != nil {
+ // Processing a block we just prepared shouldn't fail.
+ panic(err)
}
// Create a 'block received' event for every other node.
for nID := range n.broadcastTargets {
- events = append(events, NewReceiveBlockEvent(
+ events = append(events, newReceiveBlockEvent(
nID, when.Add(n.networkLatency.Delay()), b.Clone()))
}
// Create the next 'block proposing' event for this node.
- events = append(events, NewProposeBlockEvent(
- n.ID, when.Add(n.proposingLatency.Delay())))
+ events = append(events, newProposeBlockEvent(n.ID,
+ b.Position.Round,
+ b.Position.ChainID,
+ when.Add(n.proposingLatency.Delay())))
return
}
func (n *Node) handleReceiveBlock(piggyback interface{}) (
events []*test.Event, err error) {
-
- err = n.processBlock(piggyback.(*types.Block))
+ events, err = n.processBlock(piggyback.(*types.Block))
if err != nil {
panic(err)
}
return
}
-func (n *Node) prepareBlock(when time.Time) (b *types.Block, err error) {
+func (n *Node) prepareBlock(
+ round uint64, chainID uint32, when time.Time) (b *types.Block, err error) {
b = &types.Block{
Position: types.Position{
- ChainID: n.chainID,
+ Round: round,
+ ChainID: chainID,
}}
- err = n.lattice.PrepareBlock(b, when)
+ if err = n.lattice.PrepareBlock(b, when); err != nil {
+ if err == core.ErrRoundNotSwitch {
+ b.Position.Round++
+ err = n.lattice.PrepareBlock(b, when)
+ }
+ }
return
}
-func (n *Node) processBlock(b *types.Block) (err error) {
+func (n *Node) processBlock(b *types.Block) (events []*test.Event, err error) {
// TODO(mission): this segment of code is identical to testLatticeMgr in
// core/lattice_test.go, except the compaction-chain part.
var (
@@ -223,15 +252,105 @@ func (n *Node) processBlock(b *types.Block) (err error) {
}
// Deliver blocks.
for _, b = range delivered {
- if err = n.db.Put(*b); err != nil {
+ if err = n.dbModule.Put(*b); err != nil {
panic(err)
}
b.Finalization.Height = n.prevFinalHeight + 1
- n.app.BlockDelivered(b.Hash, b.Finalization)
+ b.Finalization.ParentHash = n.prevHash
+ n.appModule.BlockDelivered(b.Hash, b.Finalization)
n.prevFinalHeight++
+ n.prevHash = b.Hash
+ events = append(events, n.checkRoundSwitch(b)...)
}
if err = n.lattice.PurgeBlocks(delivered); err != nil {
panic(err)
}
return
}
+
+func (n *Node) checkRoundSwitch(b *types.Block) (evts []*test.Event) {
+ if !b.Timestamp.After(n.roundEndTimes[b.Position.Round]) {
+ return
+ }
+ if b.Position.Round+2 != n.roundToNotify {
+ return
+ }
+ // Handle round switching logic.
+ n.govModule.NotifyRoundHeight(n.roundToNotify, b.Finalization.Height)
+ if n.roundToNotify == uint64(len(n.roundEndTimes)) {
+ config := n.govModule.Configuration(n.roundToNotify)
+ if config == nil {
+ panic(fmt.Errorf(
+ "config is not ready for round: %v", n.roundToNotify))
+ }
+ // Cache the end time of the new round.
+ n.roundEndTimes = append(n.roundEndTimes,
+ n.roundEndTimes[len(n.roundEndTimes)-1].Add(
+ config.RoundInterval))
+ // Add new config to lattice module.
+ if err := n.lattice.AppendConfig(n.roundToNotify, config); err != nil {
+ panic(err)
+ }
+ if config.NumChains > n.latticeMaxNumChains {
+ // The lattice module is now guaranteed to support this number of
+ // chains.
+ for _, chainID := range n.ownChains {
+ if chainID < n.latticeMaxNumChains {
+ continue
+ }
+ if chainID >= config.NumChains {
+ continue
+ }
+ // For newly added chains, seed a block proposing event.
+ evts = append(evts, newProposeBlockEvent(n.ID, n.roundToNotify,
+ chainID, n.roundEndTimes[n.roundToNotify-1]))
+ }
+ n.latticeMaxNumChains = config.NumChains
+ }
+ } else if n.roundToNotify > uint64(len(n.roundEndTimes)) {
+ panic(fmt.Errorf(
+ "config notification not incremental: %v, cached configs: %v",
+ n.roundToNotify, len(n.roundEndTimes)))
+ }
+ n.roundToNotify++
+ return
+}
+
+// Bootstrap registers this node to the scheduler and seeds its initial
+// block proposing events.
+func (n *Node) Bootstrap(sch *test.Scheduler, now time.Time) (err error) {
+ sch.RegisterEventHandler(n.ID, n)
+ for _, chainID := range n.ownChains {
+ if chainID >= n.latticeMaxNumChains {
+ continue
+ }
+ err = sch.Seed(newProposeBlockEvent(n.ID, 0, chainID, now))
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (n *Node) setBroadcastTargets(targets map[types.NodeID]struct{}) {
+ // Clone targets, except self.
+ targetsCopy := make(map[types.NodeID]struct{})
+ for nID := range targets {
+ if nID == n.ID {
+ continue
+ }
+ targetsCopy[nID] = struct{}{}
+ }
+ n.broadcastTargets = targetsCopy
+}
+
+func (n *Node) app() *test.App {
+ return n.appModule
+}
+
+func (n *Node) db() blockdb.BlockDatabase {
+ return n.dbModule
+}
+
+func (n *Node) gov() *test.Governance {
+ return n.govModule
+}
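The round-switch trigger at the top of checkRoundSwitch can be read as a small predicate. A hypothetical helper, not part of this diff, shown only to make the two guards explicit:

    // shouldNotify mirrors the guards in checkRoundSwitch: a delivered block
    // triggers notification for round r+2 only after its timestamp passes
    // the end of its own round r.
    func shouldNotify(
        b *types.Block, roundEnds []time.Time, roundToNotify uint64) bool {
        return b.Timestamp.After(roundEnds[b.Position.Round]) &&
            b.Position.Round+2 == roundToNotify
    }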
diff --git a/integration_test/non-byzantine_test.go b/integration_test/non-byzantine_test.go
index a95b10c..395a61f 100644
--- a/integration_test/non-byzantine_test.go
+++ b/integration_test/non-byzantine_test.go
@@ -21,9 +21,7 @@ import (
"testing"
"time"
- "github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
"github.com/dexon-foundation/dexon-consensus-core/core/test"
- "github.com/dexon-foundation/dexon-consensus-core/core/types"
"github.com/stretchr/testify/suite"
)
@@ -32,10 +30,48 @@ type NonByzantineTestSuite struct {
}
func (s *NonByzantineTestSuite) TestNonByzantine() {
- numNodes := 25
+ var (
+ networkLatency = &test.NormalLatencyModel{
+ Sigma: 20,
+ Mean: 250,
+ }
+ proposingLatency = &test.NormalLatencyModel{
+ Sigma: 30,
+ Mean: 500,
+ }
+ numNodes = 25
+ req = s.Require()
+ )
if testing.Short() {
numNodes = 7
}
+ // Setup key pairs.
+ prvKeys, pubKeys, err := test.NewKeys(numNodes)
+ req.NoError(err)
+ // Setup governance.
+ gov, err := test.NewGovernance(pubKeys, 250*time.Millisecond)
+ req.NoError(err)
+ // Setup nodes.
+ nodes, err := PrepareNodes(
+ gov, prvKeys, uint32(numNodes), networkLatency, proposingLatency)
+ req.NoError(err)
+ // Setup scheduler.
+ apps, dbs := CollectAppAndDBFromNodes(nodes)
+ now := time.Now().UTC()
+ sch := test.NewScheduler(test.NewStopByConfirmedBlocks(50, apps, dbs))
+ for _, n := range nodes {
+ req.NoError(n.Bootstrap(sch, now))
+ }
+ sch.Run(4)
+ // Check results by comparing test.App instances.
+ req.NoError(VerifyApps(apps))
+}
+
+func (s *NonByzantineTestSuite) TestConfigurationChange() {
+ // This test case verifies the correctness of core.Lattice when the
+ // configuration changes.
+ // - Configuration changes are registered at 'pickedNode', carried in
+ // block payloads, and broadcast to other nodes.
var (
networkLatency = &test.NormalLatencyModel{
Sigma: 20,
@@ -45,20 +81,57 @@ func (s *NonByzantineTestSuite) TestNonByzantine() {
Sigma: 30,
Mean: 500,
}
- apps = make(map[types.NodeID]*test.App)
- dbs = make(map[types.NodeID]blockdb.BlockDatabase)
- req = s.Require()
+ numNodes = 4
+ req = s.Require()
+ maxNumChains = uint32(9)
)
-
- apps, dbs, nodes, err := PrepareNodes(
- numNodes, networkLatency, proposingLatency)
- req.Nil(err)
+ // Setup key pairs.
+ prvKeys, pubKeys, err := test.NewKeys(numNodes)
+ req.NoError(err)
+ // Setup governance.
+ gov, err := test.NewGovernance(pubKeys, 250*time.Millisecond)
+ req.NoError(err)
+ // Change the default round interval; expect each round to produce 30 blocks.
+ gov.State().RequestChange(test.StateChangeRoundInterval, 15*time.Second)
+ // Setup nodes.
+ nodes, err := PrepareNodes(
+ gov, prvKeys, maxNumChains, networkLatency, proposingLatency)
+ req.NoError(err)
+ // Setup scheduler.
+ apps, dbs := CollectAppAndDBFromNodes(nodes)
now := time.Now().UTC()
- sch := test.NewScheduler(test.NewStopByConfirmedBlocks(50, apps, dbs))
- for vID, v := range nodes {
- sch.RegisterEventHandler(vID, v)
- req.Nil(sch.Seed(NewProposeBlockEvent(vID, now)))
+ sch := test.NewScheduler(test.NewStopByRound(9, apps, dbs))
+ for _, n := range nodes {
+ req.NoError(n.Bootstrap(sch, now))
+ }
+ // Register some configuration changes at some node.
+ var pickedNode *Node
+ for _, pickedNode = range nodes {
+ break
}
+ // Config changes for round 4, numChains from 4 to 7.
+ req.NoError(pickedNode.gov().RegisterConfigChange(
+ 4, test.StateChangeNumChains, uint32(7)))
+ req.NoError(pickedNode.gov().RegisterConfigChange(
+ 4, test.StateChangeK, 3))
+ req.NoError(pickedNode.gov().RegisterConfigChange(
+ 4, test.StateChangePhiRatio, float32(0.5)))
+ // Config changes for round 5, numChains from 7 to 9.
+ req.NoError(pickedNode.gov().RegisterConfigChange(
+ 5, test.StateChangeNumChains, maxNumChains))
+ req.NoError(pickedNode.gov().RegisterConfigChange(
+ 5, test.StateChangeK, 0))
+ // Config changes for round 6, numChains from 9 to 7.
+ req.NoError(pickedNode.gov().RegisterConfigChange(
+ 6, test.StateChangeNumChains, uint32(7)))
+ req.NoError(pickedNode.gov().RegisterConfigChange(
+ 6, test.StateChangeK, 1))
+ // Config changes for round 7, numChains from 7 to 5.
+ req.NoError(pickedNode.gov().RegisterConfigChange(
+ 7, test.StateChangeNumChains, uint32(5)))
+ req.NoError(pickedNode.gov().RegisterConfigChange(
+ 7, test.StateChangeK, 1))
+ // Perform test.
sch.Run(4)
// Check results by comparing test.App instances.
req.NoError(VerifyApps(apps))
diff --git a/integration_test/stats_test.go b/integration_test/stats_test.go
index 54c827d..c6bf4f2 100644
--- a/integration_test/stats_test.go
+++ b/integration_test/stats_test.go
@@ -20,16 +20,18 @@ func (s *EventStatsTestSuite) TestCalculate() {
proposingLatency = &test.FixedLatencyModel{Latency: 300}
req = s.Require()
)
-
- apps, dbs, nodes, err := PrepareNodes(
- 7, networkLatency, proposingLatency)
- req.Nil(err)
-
+ prvKeys, pubKeys, err := test.NewKeys(7)
+ req.NoError(err)
+ gov, err := test.NewGovernance(pubKeys, 100*time.Millisecond)
+ req.NoError(err)
+ nodes, err := PrepareNodes(
+ gov, prvKeys, 7, networkLatency, proposingLatency)
+ req.NoError(err)
+ apps, dbs := CollectAppAndDBFromNodes(nodes)
sch := test.NewScheduler(test.NewStopByConfirmedBlocks(50, apps, dbs))
now := time.Now().UTC()
- for vID, v := range nodes {
- sch.RegisterEventHandler(vID, v)
- req.Nil(sch.Seed(NewProposeBlockEvent(vID, now)))
+ for _, n := range nodes {
+ req.NoError(n.Bootstrap(sch, now))
}
sch.Run(10)
req.Nil(VerifyApps(apps))
diff --git a/integration_test/utils.go b/integration_test/utils.go
index df6c215..2efacda 100644
--- a/integration_test/utils.go
+++ b/integration_test/utils.go
@@ -1,49 +1,91 @@
package integration
import (
+ "errors"
"time"
+ "github.com/dexon-foundation/dexon-consensus-core/core"
"github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
+ "github.com/dexon-foundation/dexon-consensus-core/core/crypto"
"github.com/dexon-foundation/dexon-consensus-core/core/test"
"github.com/dexon-foundation/dexon-consensus-core/core/types"
)
+func genRoundEndTimes(
+ configs []*types.Config, dMoment time.Time) (ends []time.Time) {
+ now := dMoment
+ for _, config := range configs {
+ now = now.Add(config.RoundInterval)
+ ends = append(ends, now)
+ }
+ return
+}
+
+// loadAllConfigs loads all prepared configurations from governance,
+// starting from round 0.
+func loadAllConfigs(gov core.Governance) (configs []*types.Config) {
+ var round uint64
+ for {
+ config := gov.Configuration(round)
+ if config == nil {
+ break
+ }
+ configs = append(configs, config)
+ round++
+ }
+ return
+}
+
+// decideOwnChains computes which chainIDs belong to this node.
+func decideOwnChains(numChains uint32, numNodes, id int) (own []uint32) {
+ var cur = uint32(id)
+ if numNodes == 0 {
+ panic(errors.New("attempt to arrange chains on 0 nodes"))
+ }
+ for {
+ if cur >= numChains {
+ break
+ }
+ own = append(own, cur)
+ cur += uint32(numNodes)
+ }
+ return
+}
+
// PrepareNodes sets up nodes for testing.
func PrepareNodes(
- nodeCount int,
+ gov *test.Governance,
+ prvKeys []crypto.PrivateKey,
+ maxNumChains uint32,
networkLatency, proposingLatency test.LatencyModel) (
- apps map[types.NodeID]*test.App,
- dbs map[types.NodeID]blockdb.BlockDatabase,
- nodes map[types.NodeID]*Node,
- err error) {
- apps = make(map[types.NodeID]*test.App)
- dbs = make(map[types.NodeID]blockdb.BlockDatabase)
- nodes = make(map[types.NodeID]*Node)
- prvKeys, pubKeys, err := test.NewKeys(nodeCount)
- if err != nil {
- return
- }
- gov, err := test.NewGovernance(pubKeys, 700*time.Millisecond)
- if err != nil {
+ nodes map[types.NodeID]*Node, err error) {
+ if maxNumChains == 0 {
+ err = errors.New("zero NumChains is unexpected")
return
}
+ // Setup nodes; the node count is derived from the number of private
+ // keys given.
+ nodes = make(map[types.NodeID]*Node)
dMoment := time.Now().UTC()
+ broadcastTargets := make(map[types.NodeID]struct{})
for idx, prvKey := range prvKeys {
- nID := types.NewNodeID(pubKeys[idx])
- apps[nID] = test.NewApp()
- dbs[nID], err = blockdb.NewMemBackedBlockDB()
- if err != nil {
- return
- }
- nodes[nID] = NewNode(
- apps[nID],
+ nID := types.NewNodeID(prvKey.PublicKey())
+ broadcastTargets[nID] = struct{}{}
+ // Decide which chains are owned by this node.
+ if nodes[nID], err = newNode(
gov,
- dbs[nID],
prvKey,
dMoment,
+ decideOwnChains(maxNumChains, len(prvKeys), idx),
networkLatency,
- proposingLatency,
- )
+ proposingLatency); err != nil {
+ return
+ }
+ }
+ // Assign broadcast targets.
+ for _, n := range nodes {
+ n.setBroadcastTargets(broadcastTargets)
+ n.gov().State().SwitchToRemoteMode()
}
return
}
@@ -65,3 +107,17 @@ func VerifyApps(apps map[types.NodeID]*test.App) (err error) {
}
return
}
+
+// CollectAppAndDBFromNodes collects test.App and blockdb.BlockDatabase
+// from nodes.
+func CollectAppAndDBFromNodes(nodes map[types.NodeID]*Node) (
+ apps map[types.NodeID]*test.App,
+ dbs map[types.NodeID]blockdb.BlockDatabase) {
+ apps = make(map[types.NodeID]*test.App)
+ dbs = make(map[types.NodeID]blockdb.BlockDatabase)
+ for nID, node := range nodes {
+ apps[nID] = node.app()
+ dbs[nID] = node.db()
+ }
+ return
+}
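decideOwnChains assigns chains to nodes round-robin by node index. A self-contained sketch that duplicates the logic above for illustration (the numNodes == 0 guard is omitted):

    package main

    import "fmt"

    // Round-robin chain assignment: node `id` owns chains id, id+numNodes, ...
    func decideOwnChains(numChains uint32, numNodes, id int) (own []uint32) {
        for cur := uint32(id); cur < numChains; cur += uint32(numNodes) {
            own = append(own, cur)
        }
        return
    }

    func main() {
        // With 30 chains and 7 nodes, node 4 owns {4, 11, 18, 25}; together
        // the 7 nodes cover every chain exactly once.
        fmt.Println(decideOwnChains(30, 7, 4)) // [4 11 18 25]
    }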
diff --git a/integration_test/utils_test.go b/integration_test/utils_test.go
new file mode 100644
index 0000000..3b1e769
--- /dev/null
+++ b/integration_test/utils_test.go
@@ -0,0 +1,61 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package integration
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type UtilsTestSuite struct {
+ suite.Suite
+}
+
+func (s *UtilsTestSuite) TestDecideOwnChains() {
+ // Basic test for each node index.
+ s.Empty(decideOwnChains(1, 1, 1))
+ s.Equal(decideOwnChains(1, 1, 0), []uint32{0})
+ s.Equal(decideOwnChains(30, 7, 4), []uint32{4, 11, 18, 25})
+ // Make sure every chain is covered.
+ isAllCovered := func(numChains uint32, numNodes int) bool {
+ if numNodes == 0 {
+ decideOwnChains(numChains, numNodes, 0)
+ return false
+ }
+ covered := make(map[uint32]struct{})
+ for i := 0; i < numNodes; i++ {
+ for _, chainID := range decideOwnChains(numChains, numNodes, i) {
+ s.Require().True(chainID < numChains)
+ covered[chainID] = struct{}{}
+ }
+ }
+ return uint32(len(covered)) == numChains
+ }
+ s.True(isAllCovered(100, 33))
+ s.True(isAllCovered(100, 200))
+ s.True(isAllCovered(100, 50))
+ s.True(isAllCovered(100, 1))
+ s.Panics(func() {
+ isAllCovered(100, 0)
+ })
+}
+
+func TestUtils(t *testing.T) {
+ suite.Run(t, new(UtilsTestSuite))
+}
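The new suite runs with the standard Go toolchain; for example (paths assume the repository root, and -short trims the node count in TestNonByzantine via testing.Short()):

    go test ./integration_test/...
    go test -short ./integration_test/...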