diff options
author | Mission Liao <mission.liao@dexon.org> | 2018-11-01 14:53:31 +0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2018-11-01 14:53:31 +0800 |
commit | ebfa4a6164dab7db29859538c1aa0e9659bd951a (patch) | |
tree | 317ee8ee45194ec63b4475565bf91cc7862494db /cmd/dexcon-simulation-with-scheduler/main.go | |
parent | 56fe2ee9435a89a46c0f3d527580aac43c85dc65 (diff) | |
download | dexon-consensus-ebfa4a6164dab7db29859538c1aa0e9659bd951a.tar dexon-consensus-ebfa4a6164dab7db29859538c1aa0e9659bd951a.tar.gz dexon-consensus-ebfa4a6164dab7db29859538c1aa0e9659bd951a.tar.bz2 dexon-consensus-ebfa4a6164dab7db29859538c1aa0e9659bd951a.tar.lz dexon-consensus-ebfa4a6164dab7db29859538c1aa0e9659bd951a.tar.xz dexon-consensus-ebfa4a6164dab7db29859538c1aa0e9659bd951a.tar.zst dexon-consensus-ebfa4a6164dab7db29859538c1aa0e9659bd951a.zip |
core: core.Lattice supports config change (#276)
Besides making core.Lattice support config changes,
this PR also includes the first test for the scenario below:
- Configuration changes are registered before the test
runs
- Those changes are carried/broadcasted as payload
of blocks
- Only one node initiates these changes; however,
all nodes eventually receive and apply them
to their own test.Governance instance.
Diffstat (limited to 'cmd/dexcon-simulation-with-scheduler/main.go')
-rw-r--r-- | cmd/dexcon-simulation-with-scheduler/main.go | 26 |
1 files changed, 17 insertions, 9 deletions
diff --git a/cmd/dexcon-simulation-with-scheduler/main.go b/cmd/dexcon-simulation-with-scheduler/main.go index 5e04538..64147b6 100644 --- a/cmd/dexcon-simulation-with-scheduler/main.go +++ b/cmd/dexcon-simulation-with-scheduler/main.go @@ -57,23 +57,31 @@ func main() { Sigma: cfg.Node.Legacy.ProposeIntervalSigma, Mean: cfg.Node.Legacy.ProposeIntervalMean, } + // Setup key pairs. + prvKeys, pubKeys, err := test.NewKeys(cfg.Node.Num) + if err != nil { + log.Fatal("could not setup key pairs: ", err) + } + // Setup governance instance. + gov, err := test.NewGovernance( + pubKeys, time.Duration(cfg.Networking.Mean)*time.Millisecond) + if err != nil { + log.Fatal("could not setup governance: ", err) + } // Setup nodes and other consensus related stuffs. - apps, dbs, nodes, err := integration.PrepareNodes( - cfg.Node.Num, networkLatency, proposingLatency) + nodes, err := integration.PrepareNodes( + gov, prvKeys, uint32(cfg.Node.Num), networkLatency, proposingLatency) if err != nil { log.Fatal("could not setup nodes: ", err) } + apps, dbs := integration.CollectAppAndDBFromNodes(nodes) blockPerNode := int(math.Ceil( float64(cfg.Node.MaxBlock) / float64(cfg.Node.Num))) sch := test.NewScheduler( test.NewStopByConfirmedBlocks(blockPerNode, apps, dbs)) - for nID, v := range nodes { - sch.RegisterEventHandler(nID, v) - if err = sch.Seed(integration.NewProposeBlockEvent( - nID, time.Now().UTC())); err != nil { - - log.Fatal("unable to set seed simulation events: ", err) - } + now := time.Now().UTC() + for _, v := range nodes { + v.Bootstrap(sch, now) } // Run the simulation. sch.Run(cfg.Scheduler.WorkerNum) |