| author    | Mission Liao <mission.liao@dexon.org>    | 2018-11-01 14:53:31 +0800 |
|-----------|------------------------------------------|---------------------------|
| committer | GitHub <noreply@github.com>              | 2018-11-01 14:53:31 +0800 |
| commit    | ebfa4a6164dab7db29859538c1aa0e9659bd951a |                           |
| tree      | 317ee8ee45194ec63b4475565bf91cc7862494db /integration_test |         |
| parent    | 56fe2ee9435a89a46c0f3d527580aac43c85dc65 |                           |
core: core.Lattice supports config change (#276)
Besides making core.Lattice support config changes,
this PR also includes the first test for the following scenario:
- Configuration changes are registered before the test
  runs.
- Those changes are carried/broadcast as the payload
  of blocks.
- Only one node initiates these changes; however,
  all nodes eventually receive/apply those changes
  to their own test.Governance instance.
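For orientation, here is a minimal sketch of that scenario, condensed from the new TestConfigurationChange in non-byzantine_test.go. The function name `sketchConfigChange`, the latency value, and the node/chain counts are illustrative, not part of the commit:

```go
package integration

import (
	"time"

	"github.com/dexon-foundation/dexon-consensus-core/core/test"
)

// sketchConfigChange condenses the scenario above: one node registers a
// config change, the change travels as block payload, and every node's
// test.Governance instance eventually applies it.
func sketchConfigChange() error {
	latency := &test.FixedLatencyModel{Latency: 300} // illustrative value
	// Four nodes sharing at most 9 chains.
	prvKeys, pubKeys, err := test.NewKeys(4)
	if err != nil {
		return err
	}
	gov, err := test.NewGovernance(pubKeys, 250*time.Millisecond)
	if err != nil {
		return err
	}
	nodes, err := PrepareNodes(gov, prvKeys, 9, latency, latency)
	if err != nil {
		return err
	}
	// Register a change at a single node only; it takes effect at round 4.
	for _, n := range nodes {
		if err := n.gov().RegisterConfigChange(
			4, test.StateChangeNumChains, uint32(7)); err != nil {
			return err
		}
		break
	}
	// Drive all nodes from one scheduler until round 9 is reached.
	apps, dbs := CollectAppAndDBFromNodes(nodes)
	sch := test.NewScheduler(test.NewStopByRound(9, apps, dbs))
	now := time.Now().UTC()
	for _, n := range nodes {
		if err := n.Bootstrap(sch, now); err != nil {
			return err
		}
	}
	sch.Run(4) // four scheduler workers
	// If the change propagated correctly, all apps deliver identical chains.
	return VerifyApps(apps)
}
```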
Diffstat (limited to 'integration_test')

| integration_test/node.go               | 259 |
|----------------------------------------|-----|
| integration_test/non-byzantine_test.go | 101 |
| integration_test/stats_test.go         |  18 |
| integration_test/utils.go              | 106 |
| integration_test/utils_test.go         |  61 |

5 files changed, 428 insertions, 117 deletions
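The node.go changes below key off precomputed round boundaries. As a standalone restatement of the genRoundEndTimes helper added in utils.go (the local `config` struct is a stand-in for types.Config; the 15-second interval mirrors what the new test configures):

```go
package main

import (
	"fmt"
	"time"
)

// config stands in for the RoundInterval field of types.Config.
type config struct{ RoundInterval time.Duration }

// genRoundEndTimes restates the helper added in integration_test/utils.go:
// round i ends at dMoment plus the sum of the first i+1 round intervals.
func genRoundEndTimes(configs []*config, dMoment time.Time) (ends []time.Time) {
	now := dMoment
	for _, c := range configs {
		now = now.Add(c.RoundInterval)
		ends = append(ends, now)
	}
	return
}

func main() {
	dMoment := time.Date(2018, 11, 1, 0, 0, 0, 0, time.UTC)
	configs := []*config{
		{RoundInterval: 15 * time.Second},
		{RoundInterval: 15 * time.Second},
		{RoundInterval: 15 * time.Second},
	}
	// Prints three boundaries: dMoment+15s, +30s, +45s. A delivered block
	// whose timestamp falls past ends[r] signals that round r is over.
	for i, end := range genRoundEndTimes(configs, dMoment) {
		fmt.Printf("round %d ends at %v\n", i, end)
	}
}
```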
```diff
diff --git a/integration_test/node.go b/integration_test/node.go
index bcb44bb..facd153 100644
--- a/integration_test/node.go
+++ b/integration_test/node.go
@@ -19,8 +19,6 @@ package integration
 
 import (
 	"fmt"
-	"math"
-	"sort"
 	"time"
 
 	"github.com/dexon-foundation/dexon-consensus-core/common"
@@ -43,17 +41,22 @@
 type consensusEventPayload struct {
 	PiggyBack interface{}
 }
 
-// NewProposeBlockEvent constructs an test.Event that would trigger
+// newProposeBlockEvent constructs a test.Event that would trigger
 // block proposing.
-func NewProposeBlockEvent(nID types.NodeID, when time.Time) *test.Event {
+func newProposeBlockEvent(nID types.NodeID,
+	roundID uint64, chainID uint32, when time.Time) *test.Event {
 	return test.NewEvent(nID, when, &consensusEventPayload{
 		Type: evtProposeBlock,
+		PiggyBack: &struct {
+			round uint64
+			chain uint32
+		}{roundID, chainID},
 	})
 }
 
-// NewReceiveBlockEvent constructs an test.Event that would trigger
+// newReceiveBlockEvent constructs a test.Event that would trigger
 // block received.
-func NewReceiveBlockEvent(
+func newReceiveBlockEvent(
 	nID types.NodeID, when time.Time, block *types.Block) *test.Event {
 	return test.NewEvent(nID, when, &consensusEventPayload{
@@ -65,67 +68,75 @@ func NewReceiveBlockEvent(
 // Node is designed to work with test.Scheduler.
 type Node struct {
 	ID               types.NodeID
-	chainNum         uint32
-	chainID          uint32
+	ownChains        []uint32
+	roundEndTimes    []time.Time
+	roundToNotify    uint64
 	lattice          *core.Lattice
-	app              *test.App
-	db               blockdb.BlockDatabase
+	appModule        *test.App
+	stateModule      *test.State
+	govModule        *test.Governance
+	dbModule         blockdb.BlockDatabase
 	broadcastTargets map[types.NodeID]struct{}
 	networkLatency   test.LatencyModel
 	proposingLatency test.LatencyModel
 	prevFinalHeight  uint64
 	pendings         []*types.Block
+	prevHash         common.Hash
+	// This variable caches the maximum NumChains seen by this node when
+	// it is notified of round switching.
+	latticeMaxNumChains uint32
 }
 
-// NewNode constructs an instance of Node.
-func NewNode(
-	app *test.App,
-	gov core.Governance,
-	db blockdb.BlockDatabase,
+// newNode constructs an instance of Node.
+func newNode(
+	gov *test.Governance,
 	privateKey crypto.PrivateKey,
 	dMoment time.Time,
+	ownChains []uint32,
 	networkLatency test.LatencyModel,
-	proposingLatency test.LatencyModel) *Node {
-
-	var (
-		chainID          = uint32(math.MaxUint32)
-		governanceConfig = gov.Configuration(0)
-		nodeSetKeys      = gov.NodeSet(0)
-		nodeID           = types.NewNodeID(privateKey.PublicKey())
-	)
-	broadcastTargets := make(map[types.NodeID]struct{})
-	for _, k := range nodeSetKeys {
-		broadcastTargets[types.NewNodeID(k)] = struct{}{}
-	}
-	hashes := common.Hashes{}
-	for nID := range broadcastTargets {
-		hashes = append(hashes, nID.Hash)
-	}
-	sort.Sort(hashes)
-	for i, h := range hashes {
-		if h == nodeID.Hash {
-			chainID = uint32(i)
-		}
+	proposingLatency test.LatencyModel) (*Node, error) {
+	// Load all configs prepared in core.Governance into core.Lattice.
+	copiedGov := gov.Clone()
+	configs := loadAllConfigs(copiedGov)
+	// Setup blockdb.
+	db, err := blockdb.NewMemBackedBlockDB()
+	if err != nil {
+		return nil, err
+	}
+	// Setup test.App.
+	app := test.NewApp(copiedGov.State())
+	// Setup lattice instance.
+	lattice := core.NewLattice(
+		dMoment,
+		configs[0],
+		core.NewAuthenticator(privateKey),
+		app,
+		app,
+		db,
+		&common.NullLogger{})
+	n := &Node{
+		ID:                  types.NewNodeID(privateKey.PublicKey()),
+		ownChains:           ownChains,
+		roundEndTimes:       genRoundEndTimes(configs, dMoment),
+		roundToNotify:       2,
+		networkLatency:      networkLatency,
+		proposingLatency:    proposingLatency,
+		appModule:           app,
+		stateModule:         copiedGov.State(),
+		dbModule:            db,
+		govModule:           copiedGov,
+		lattice:             lattice,
+		latticeMaxNumChains: configs[0].NumChains,
 	}
-	delete(broadcastTargets, nodeID)
-	return &Node{
-		ID:               nodeID,
-		chainID:          chainID,
-		chainNum:         governanceConfig.NumChains,
-		broadcastTargets: broadcastTargets,
-		networkLatency:   networkLatency,
-		proposingLatency: proposingLatency,
-		app:              app,
-		db:               db,
-		lattice: core.NewLattice(
-			dMoment,
-			governanceConfig,
-			core.NewAuthenticator(privateKey),
-			app,
-			app,
-			db,
-			&common.NullLogger{}),
+	for idx, config := range configs[1:] {
+		if err := lattice.AppendConfig(uint64(idx+1), config); err != nil {
+			return nil, err
+		}
+		if config.NumChains > n.latticeMaxNumChains {
+			n.latticeMaxNumChains = config.NumChains
+		}
 	}
+	return n, nil
 }
 
 // Handle implements test.EventHandler interface.
@@ -142,47 +153,65 @@ func (n *Node) Handle(e *test.Event) (events []*test.Event) {
 	return
 }
 
-func (n *Node) handleProposeBlock(when time.Time, _ interface{}) (
+func (n *Node) handleProposeBlock(when time.Time, payload interface{}) (
 	events []*test.Event, err error) {
-
-	b, err := n.prepareBlock(when)
+	pos := payload.(*struct {
+		round uint64
+		chain uint32
+	})
+	b, err := n.prepareBlock(pos.round, pos.chain, when)
 	if err != nil {
+		if err == core.ErrInvalidChainID {
+			// This chain is not included in this round; retry in the next round.
+			events = append(events, newProposeBlockEvent(
+				n.ID, b.Position.Round+1, b.Position.ChainID,
+				n.roundEndTimes[b.Position.Round]))
+		}
 		return
 	}
-	if err = n.processBlock(b); err != nil {
-		return
+	if events, err = n.processBlock(b); err != nil {
+		// This shouldn't fail for a block we just prepared.
+		panic(err)
 	}
 	// Create a 'block received' event for each other node.
 	for nID := range n.broadcastTargets {
-		events = append(events, NewReceiveBlockEvent(
+		events = append(events, newReceiveBlockEvent(
 			nID, when.Add(n.networkLatency.Delay()), b.Clone()))
 	}
 	// Create the next 'block proposing' event for this node.
-	events = append(events, NewProposeBlockEvent(
-		n.ID, when.Add(n.proposingLatency.Delay())))
+	events = append(events, newProposeBlockEvent(n.ID,
+		b.Position.Round,
+		b.Position.ChainID,
+		when.Add(n.proposingLatency.Delay())))
 	return
 }
 
 func (n *Node) handleReceiveBlock(piggyback interface{}) (
 	events []*test.Event, err error) {
-
-	err = n.processBlock(piggyback.(*types.Block))
+	events, err = n.processBlock(piggyback.(*types.Block))
 	if err != nil {
 		panic(err)
 	}
 	return
 }
 
-func (n *Node) prepareBlock(when time.Time) (b *types.Block, err error) {
+func (n *Node) prepareBlock(
+	round uint64, chainID uint32, when time.Time) (b *types.Block, err error) {
 	b = &types.Block{
 		Position: types.Position{
-			ChainID: n.chainID,
+			Round:   round,
+			ChainID: chainID,
 		}}
-	err = n.lattice.PrepareBlock(b, when)
+	if err = n.lattice.PrepareBlock(b, when); err != nil {
+		if err == core.ErrRoundNotSwitch {
+			b.Position.Round++
+			err = n.lattice.PrepareBlock(b, when)
+		}
+	}
 	return
 }
 
-func (n *Node) processBlock(b *types.Block) (err error) {
+func (n *Node) processBlock(b *types.Block) (events []*test.Event, err error) {
 	// TODO(mission): this segment of code is identical to testLatticeMgr in
 	// core/lattice_test.go, except the compaction-chain part.
 	var (
@@ -223,15 +252,105 @@ func (n *Node) processBlock(b *types.Block) (err error) {
 	}
 	// Deliver blocks.
 	for _, b = range delivered {
-		if err = n.db.Put(*b); err != nil {
+		if err = n.dbModule.Put(*b); err != nil {
 			panic(err)
 		}
 		b.Finalization.Height = n.prevFinalHeight + 1
-		n.app.BlockDelivered(b.Hash, b.Finalization)
+		b.Finalization.ParentHash = n.prevHash
+		n.appModule.BlockDelivered(b.Hash, b.Finalization)
 		n.prevFinalHeight++
+		n.prevHash = b.Hash
+		events = append(events, n.checkRoundSwitch(b)...)
 	}
 	if err = n.lattice.PurgeBlocks(delivered); err != nil {
 		panic(err)
 	}
 	return
 }
+
+func (n *Node) checkRoundSwitch(b *types.Block) (evts []*test.Event) {
+	if !b.Timestamp.After(n.roundEndTimes[b.Position.Round]) {
+		return
+	}
+	if b.Position.Round+2 != n.roundToNotify {
+		return
+	}
+	// Handle round switching logic.
+	n.govModule.NotifyRoundHeight(n.roundToNotify, b.Finalization.Height)
+	if n.roundToNotify == uint64(len(n.roundEndTimes)) {
+		config := n.govModule.Configuration(n.roundToNotify)
+		if config == nil {
+			panic(fmt.Errorf(
+				"config is not ready for round: %v", n.roundToNotify-1))
+		}
+		// Cache the end time of each round.
+		n.roundEndTimes = append(n.roundEndTimes,
+			n.roundEndTimes[len(n.roundEndTimes)-1].Add(
+				config.RoundInterval))
+		// Add the new config to the lattice module.
+		if err := n.lattice.AppendConfig(n.roundToNotify, config); err != nil {
+			panic(err)
+		}
+		if config.NumChains > n.latticeMaxNumChains {
+			// We can be sure that the lattice module can support this number
+			// of chains.
+			for _, chainID := range n.ownChains {
+				if chainID < n.latticeMaxNumChains {
+					continue
+				}
+				if chainID >= config.NumChains {
+					continue
+				}
+				// For newly added chains, seed a block proposing event.
+				evts = append(evts, newProposeBlockEvent(n.ID, n.roundToNotify,
+					chainID, n.roundEndTimes[n.roundToNotify-1]))
+			}
+			n.latticeMaxNumChains = config.NumChains
+		}
+	} else if n.roundToNotify > uint64(len(n.roundEndTimes)) {
+		panic(fmt.Errorf(
+			"config notification not incremental: %v, cached configs: %v",
+			n.roundToNotify, len(n.roundEndTimes)))
+	}
+	n.roundToNotify++
+	return
+}
+
+// Bootstrap this node with block proposing events.
+func (n *Node) Bootstrap(sch *test.Scheduler, now time.Time) (err error) {
+	sch.RegisterEventHandler(n.ID, n)
+	for _, chainID := range n.ownChains {
+		if chainID >= n.latticeMaxNumChains {
+			continue
+		}
+		err = sch.Seed(newProposeBlockEvent(n.ID, 0, chainID, now))
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (n *Node) setBroadcastTargets(targets map[types.NodeID]struct{}) {
+	// Clone targets, except self.
+	targetsCopy := make(map[types.NodeID]struct{})
+	for nID := range targets {
+		if nID == n.ID {
+			continue
+		}
+		targetsCopy[nID] = struct{}{}
+	}
+	n.broadcastTargets = targetsCopy
+}
+
+func (n *Node) app() *test.App {
+	return n.appModule
+}
+
+func (n *Node) db() blockdb.BlockDatabase {
+	return n.dbModule
+}
+
+func (n *Node) gov() *test.Governance {
+	return n.govModule
+}
diff --git a/integration_test/non-byzantine_test.go b/integration_test/non-byzantine_test.go
index a95b10c..395a61f 100644
--- a/integration_test/non-byzantine_test.go
+++ b/integration_test/non-byzantine_test.go
@@ -21,9 +21,7 @@
 import (
 	"testing"
 	"time"
 
-	"github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
 	"github.com/dexon-foundation/dexon-consensus-core/core/test"
-	"github.com/dexon-foundation/dexon-consensus-core/core/types"
 	"github.com/stretchr/testify/suite"
 )
@@ -32,10 +30,48 @@ type NonByzantineTestSuite struct {
 }
 
 func (s *NonByzantineTestSuite) TestNonByzantine() {
-	numNodes := 25
+	var (
+		networkLatency = &test.NormalLatencyModel{
+			Sigma: 20,
+			Mean:  250,
+		}
+		proposingLatency = &test.NormalLatencyModel{
+			Sigma: 30,
+			Mean:  500,
+		}
+		numNodes = 25
+		req      = s.Require()
+	)
 	if testing.Short() {
 		numNodes = 7
 	}
+	// Setup key pairs.
+	prvKeys, pubKeys, err := test.NewKeys(numNodes)
+	req.NoError(err)
+	// Setup governance.
+	gov, err := test.NewGovernance(pubKeys, 250*time.Millisecond)
+	req.NoError(err)
+	// Setup nodes.
+	nodes, err := PrepareNodes(
+		gov, prvKeys, 25, networkLatency, proposingLatency)
+	req.NoError(err)
+	// Setup scheduler.
+	apps, dbs := CollectAppAndDBFromNodes(nodes)
+	now := time.Now().UTC()
+	sch := test.NewScheduler(test.NewStopByConfirmedBlocks(50, apps, dbs))
+	for _, n := range nodes {
+		req.NoError(n.Bootstrap(sch, now))
+	}
+	sch.Run(4)
+	// Check results by comparing test.App instances.
+	req.NoError(VerifyApps(apps))
+}
+
+func (s *NonByzantineTestSuite) TestConfigurationChange() {
+	// This test case verifies the correctness of core.Lattice when the
+	// configuration changes.
+	// - Configuration changes are registered at 'pickedNode', carried as
+	//   block payload, and broadcast to other nodes.
 	var (
 		networkLatency = &test.NormalLatencyModel{
 			Sigma: 20,
@@ -45,20 +81,57 @@
 			Sigma: 30,
 			Mean:  500,
 		}
-		apps = make(map[types.NodeID]*test.App)
-		dbs  = make(map[types.NodeID]blockdb.BlockDatabase)
-		req  = s.Require()
+		numNodes     = 4
+		req          = s.Require()
+		maxNumChains = uint32(9)
 	)
-
-	apps, dbs, nodes, err := PrepareNodes(
-		numNodes, networkLatency, proposingLatency)
-	req.Nil(err)
+	// Setup key pairs.
+	prvKeys, pubKeys, err := test.NewKeys(numNodes)
+	req.NoError(err)
+	// Setup governance.
+	gov, err := test.NewGovernance(pubKeys, 250*time.Millisecond)
+	req.NoError(err)
+	// Change the default round interval; expect one round to produce 30 blocks.
+	gov.State().RequestChange(test.StateChangeRoundInterval, 15*time.Second)
+	// Setup nodes.
+	nodes, err := PrepareNodes(
+		gov, prvKeys, maxNumChains, networkLatency, proposingLatency)
+	req.NoError(err)
+	// Setup scheduler.
+	apps, dbs := CollectAppAndDBFromNodes(nodes)
 	now := time.Now().UTC()
-	sch := test.NewScheduler(test.NewStopByConfirmedBlocks(50, apps, dbs))
-	for vID, v := range nodes {
-		sch.RegisterEventHandler(vID, v)
-		req.Nil(sch.Seed(NewProposeBlockEvent(vID, now)))
+	sch := test.NewScheduler(test.NewStopByRound(9, apps, dbs))
+	for _, n := range nodes {
+		req.NoError(n.Bootstrap(sch, now))
+	}
+	// Register some configuration changes at one node.
+	var pickedNode *Node
+	for _, pickedNode = range nodes {
+		break
 	}
+	// Config changes for round 4, numChains from 4 to 7.
+	req.NoError(pickedNode.gov().RegisterConfigChange(
+		4, test.StateChangeNumChains, uint32(7)))
+	req.NoError(pickedNode.gov().RegisterConfigChange(
+		4, test.StateChangeK, 3))
+	req.NoError(pickedNode.gov().RegisterConfigChange(
+		4, test.StateChangePhiRatio, float32(0.5)))
+	// Config changes for round 5, numChains from 7 to 9.
+	req.NoError(pickedNode.gov().RegisterConfigChange(
+		5, test.StateChangeNumChains, maxNumChains))
+	req.NoError(pickedNode.gov().RegisterConfigChange(
+		5, test.StateChangeK, 0))
+	// Config changes for round 6, numChains from 9 to 7.
+	req.NoError(pickedNode.gov().RegisterConfigChange(
+		6, test.StateChangeNumChains, uint32(7)))
+	req.NoError(pickedNode.gov().RegisterConfigChange(
+		6, test.StateChangeK, 1))
+	// Config changes for round 7, numChains from 7 to 5.
+	req.NoError(pickedNode.gov().RegisterConfigChange(
+		7, test.StateChangeNumChains, uint32(5)))
+	req.NoError(pickedNode.gov().RegisterConfigChange(
+		7, test.StateChangeK, 1))
+	// Perform the test.
 	sch.Run(4)
 	// Check results by comparing test.App instances.
 	req.NoError(VerifyApps(apps))
diff --git a/integration_test/stats_test.go b/integration_test/stats_test.go
index 54c827d..c6bf4f2 100644
--- a/integration_test/stats_test.go
+++ b/integration_test/stats_test.go
@@ -20,16 +20,18 @@ func (s *EventStatsTestSuite) TestCalculate() {
 		proposingLatency = &test.FixedLatencyModel{Latency: 300}
 		req              = s.Require()
 	)
-
-	apps, dbs, nodes, err := PrepareNodes(
-		7, networkLatency, proposingLatency)
-	req.Nil(err)
-
+	prvKeys, pubKeys, err := test.NewKeys(7)
+	req.NoError(err)
+	gov, err := test.NewGovernance(pubKeys, 100*time.Millisecond)
+	req.NoError(err)
+	nodes, err := PrepareNodes(
+		gov, prvKeys, 7, networkLatency, proposingLatency)
+	req.NoError(err)
+	apps, dbs := CollectAppAndDBFromNodes(nodes)
 	sch := test.NewScheduler(test.NewStopByConfirmedBlocks(50, apps, dbs))
 	now := time.Now().UTC()
-	for vID, v := range nodes {
-		sch.RegisterEventHandler(vID, v)
-		req.Nil(sch.Seed(NewProposeBlockEvent(vID, now)))
+	for _, n := range nodes {
+		req.NoError(n.Bootstrap(sch, now))
 	}
 	sch.Run(10)
 	req.Nil(VerifyApps(apps))
diff --git a/integration_test/utils.go b/integration_test/utils.go
index df6c215..2efacda 100644
--- a/integration_test/utils.go
+++ b/integration_test/utils.go
@@ -1,49 +1,91 @@
 package integration
 
 import (
+	"errors"
 	"time"
 
+	"github.com/dexon-foundation/dexon-consensus-core/core"
 	"github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
+	"github.com/dexon-foundation/dexon-consensus-core/core/crypto"
 	"github.com/dexon-foundation/dexon-consensus-core/core/test"
 	"github.com/dexon-foundation/dexon-consensus-core/core/types"
 )
 
+func genRoundEndTimes(
+	configs []*types.Config, dMoment time.Time) (ends []time.Time) {
+	now := dMoment
+	for _, config := range configs {
+		now = now.Add(config.RoundInterval)
+		ends = append(ends, now)
+	}
+	return
+}
+
+// loadAllConfigs loads all prepared configurations from governance,
+// starting from round 0.
+func loadAllConfigs(gov core.Governance) (configs []*types.Config) {
+	var round uint64
+	for {
+		config := gov.Configuration(round)
+		if config == nil {
+			break
+		}
+		configs = append(configs, config)
+		round++
+	}
+	return
+}
+
+// decideOwnChains computes which chainIDs belong to this node.
+func decideOwnChains(numChains uint32, numNodes, id int) (own []uint32) {
+	var cur = uint32(id)
+	if numNodes == 0 {
+		panic(errors.New("attempt to arrange chains on 0 nodes"))
+	}
+	for {
+		if cur >= numChains {
+			break
+		}
+		own = append(own, cur)
+		cur += uint32(numNodes)
+	}
+	return
+}
+
 // PrepareNodes sets up nodes for testing.
 func PrepareNodes(
-	nodeCount int,
+	gov *test.Governance,
+	prvKeys []crypto.PrivateKey,
+	maxNumChains uint32,
 	networkLatency, proposingLatency test.LatencyModel) (
-	apps map[types.NodeID]*test.App,
-	dbs map[types.NodeID]blockdb.BlockDatabase,
-	nodes map[types.NodeID]*Node,
-	err error) {
-	apps = make(map[types.NodeID]*test.App)
-	dbs = make(map[types.NodeID]blockdb.BlockDatabase)
-	nodes = make(map[types.NodeID]*Node)
-	prvKeys, pubKeys, err := test.NewKeys(nodeCount)
-	if err != nil {
-		return
-	}
-	gov, err := test.NewGovernance(pubKeys, 700*time.Millisecond)
-	if err != nil {
+	nodes map[types.NodeID]*Node, err error) {
+	if maxNumChains == 0 {
+		err = errors.New("zero NumChains is unexpected")
 		return
 	}
+	// Setup nodes; the node count is derived from the number of private
+	// keys held in Governance.
+	nodes = make(map[types.NodeID]*Node)
 	dMoment := time.Now().UTC()
+	broadcastTargets := make(map[types.NodeID]struct{})
 	for idx, prvKey := range prvKeys {
-		nID := types.NewNodeID(pubKeys[idx])
-		apps[nID] = test.NewApp()
-		dbs[nID], err = blockdb.NewMemBackedBlockDB()
-		if err != nil {
-			return
-		}
-		nodes[nID] = NewNode(
-			apps[nID],
+		nID := types.NewNodeID(prvKey.PublicKey())
+		broadcastTargets[nID] = struct{}{}
+		// Decide which chains are owned by this node.
+		if nodes[nID], err = newNode(
 			gov,
-			dbs[nID],
 			prvKey,
 			dMoment,
+			decideOwnChains(maxNumChains, len(prvKeys), idx),
 			networkLatency,
-			proposingLatency,
-		)
+			proposingLatency); err != nil {
+			return
+		}
+	}
+	// Assign broadcast targets.
+	for _, n := range nodes {
+		n.setBroadcastTargets(broadcastTargets)
+		n.gov().State().SwitchToRemoteMode()
 	}
 	return
 }
@@ -65,3 +107,17 @@ func VerifyApps(apps map[types.NodeID]*test.App) (err error) {
 	}
 	return
 }
+
+// CollectAppAndDBFromNodes collects test.App and blockdb.BlockDatabase
+// from nodes.
+func CollectAppAndDBFromNodes(nodes map[types.NodeID]*Node) (
+	apps map[types.NodeID]*test.App,
+	dbs map[types.NodeID]blockdb.BlockDatabase) {
+	apps = make(map[types.NodeID]*test.App)
+	dbs = make(map[types.NodeID]blockdb.BlockDatabase)
+	for nID, node := range nodes {
+		apps[nID] = node.app()
+		dbs[nID] = node.db()
+	}
+	return
+}
diff --git a/integration_test/utils_test.go b/integration_test/utils_test.go
new file mode 100644
index 0000000..3b1e769
--- /dev/null
+++ b/integration_test/utils_test.go
@@ -0,0 +1,61 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package integration
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/suite"
+)
+
+type UtilsTestSuite struct {
+	suite.Suite
+}
+
+func (s *UtilsTestSuite) TestDecideOwnChains() {
+	// Basic test for each node index.
+	s.Empty(decideOwnChains(1, 1, 1))
+	s.Equal(decideOwnChains(1, 1, 0), []uint32{0})
+	s.Equal(decideOwnChains(30, 7, 4), []uint32{4, 11, 18, 25})
+	// Make sure every chain is covered.
+	isAllCovered := func(numChains uint32, numNodes int) bool {
+		if numNodes == 0 {
+			decideOwnChains(numChains, numNodes, 0)
+			return false
+		}
+		covered := make(map[uint32]struct{})
+		for i := 0; i < numNodes; i++ {
+			for _, chainID := range decideOwnChains(numChains, numNodes, i) {
+				s.Require().True(chainID < numChains)
+				covered[chainID] = struct{}{}
+			}
+		}
+		return uint32(len(covered)) == numChains
+	}
+	s.True(isAllCovered(100, 33))
+	s.True(isAllCovered(100, 200))
+	s.True(isAllCovered(100, 50))
+	s.True(isAllCovered(100, 1))
+	s.Panics(func() {
+		isAllCovered(100, 0)
+	})
+}
+
+func TestUtils(t *testing.T) {
+	suite.Run(t, new(UtilsTestSuite))
+}
```
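As a closing illustration, the round-robin chain assignment that PrepareNodes relies on can be run in isolation. The helper body below is lightly restated from utils.go above, while the main driver is illustrative only; its expected output matches the values asserted in TestDecideOwnChains:

```go
package main

import "fmt"

// decideOwnChains is restated from integration_test/utils.go: node `id`
// owns chainIDs id, id+numNodes, id+2*numNodes, ... below numChains.
func decideOwnChains(numChains uint32, numNodes, id int) (own []uint32) {
	if numNodes == 0 {
		panic("attempt to arrange chains on 0 nodes")
	}
	for cur := uint32(id); cur < numChains; cur += uint32(numNodes) {
		own = append(own, cur)
	}
	return
}

func main() {
	// With 30 chains spread over 7 nodes, node 4 owns every 7th chain.
	fmt.Println(decideOwnChains(30, 7, 4)) // [4 11 18 25]
}
```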