-rw-r--r--  core/test/network.go                  8
-rw-r--r--  core/test/network_test.go            16
-rw-r--r--  integration_test/byzantine_test.go  183
3 files changed, 195 insertions(+), 12 deletions(-)
diff --git a/core/test/network.go b/core/test/network.go
index c922b7b..000037f 100644
--- a/core/test/network.go
+++ b/core/test/network.go
@@ -37,6 +37,8 @@ import (
const (
// Maximum count of peers to pull votes from.
maxPullingPeerCount = 3
+ maxBlockCache = 1000
+ maxVoteCache = 128
)
// NetworkType is the simulation network type.
@@ -181,7 +183,7 @@ func NewNetwork(pubKey crypto.PublicKey, config NetworkConfig) (
toNode: make(chan interface{}, 1000),
sentRandomness: make(map[common.Hash]struct{}),
sentAgreement: make(map[common.Hash]struct{}),
- blockCache: make(map[common.Hash]*types.Block),
+ blockCache: make(map[common.Hash]*types.Block, maxBlockCache),
randomnessCache: make(map[common.Hash]*types.BlockRandomnessResult),
unreceivedBlocks: make(map[common.Hash]chan<- common.Hash),
unreceivedRandomness: make(map[common.Hash]chan<- common.Hash),
@@ -632,7 +634,7 @@ Loop:
func (n *Network) addBlockToCache(b *types.Block) {
n.blockCacheLock.Lock()
defer n.blockCacheLock.Unlock()
- if len(n.blockCache) > 1000 {
+ if len(n.blockCache) > maxBlockCache {
// Randomly purge one block from cache.
for k := range n.blockCache {
delete(n.blockCache, k)
@@ -645,7 +647,7 @@ func (n *Network) addBlockToCache(b *types.Block) {
func (n *Network) addVoteToCache(v *types.Vote) {
n.voteCacheLock.Lock()
defer n.voteCacheLock.Unlock()
- if n.voteCacheSize >= 128 {
+ if n.voteCacheSize >= maxVoteCache {
pos := n.votePositions[0]
n.voteCacheSize -= len(n.voteCache[pos])
delete(n.voteCache, pos)
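
Both caches now evict against named limits rather than magic numbers. The block-cache purge relies on Go's randomized map iteration order: ranging over the map and deleting the first key visited drops an effectively random entry. A minimal sketch of that eviction idiom, using illustrative names that are not taken from the package:

package cache

const maxEntries = 1000

// evictOneIfFull drops one effectively random entry once the cache
// exceeds its limit; Go randomizes map iteration order, so the first
// key visited differs between runs.
func evictOneIfFull(cache map[string]struct{}) {
	if len(cache) <= maxEntries {
		return
	}
	for k := range cache {
		delete(cache, k)
		break
	}
}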
diff --git a/core/test/network_test.go b/core/test/network_test.go
index d040a16..fd5f97f 100644
--- a/core/test/network_test.go
+++ b/core/test/network_test.go
@@ -166,13 +166,11 @@ func (s *NetworkTestSuite) TestPullBlocks() {
}
func (s *NetworkTestSuite) TestPullVotes() {
- // The functionality of pulling votes is not deterministic, so the test here
- // only tries to "retry pulling votes until we can get some votes back".
var (
- peerCount = 10
+ peerCount = maxPullingPeerCount
maxRound = uint64(5)
- voteCount = 200
- voteTestCount = 15
+ voteCount = maxVoteCache
+ voteTestCount = maxVoteCache / 2
req = s.Require()
)
_, pubKeys, err := NewKeys(peerCount)
@@ -218,14 +216,15 @@ func (s *NetworkTestSuite) TestPullVotes() {
break
}
}
+ time.Sleep(1 * time.Second)
// Try to pull all votes with timeout.
- ctx, cancelFunc := context.WithTimeout(context.Background(), 3*time.Second)
- defer func() { cancelFunc() }()
for len(votesToTest) > 0 {
for vHeader := range votesToTest {
master.PullVotes(vHeader.Position)
break
}
+ ctx, cancelFunc := context.WithTimeout(context.Background(), 500*time.Millisecond)
+ defer cancelFunc()
select {
case v := <-master.ReceiveChan():
vv, ok := v.(*types.Vote)
@@ -234,8 +233,7 @@ func (s *NetworkTestSuite) TestPullVotes() {
}
delete(votesToTest, vv.VoteHeader)
case <-ctx.Done():
- req.True(false)
- default:
+ s.FailNow("PullVote Fail")
}
}
}
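
The rewritten TestPullVotes trades the single 3-second deadline for a fresh 500 ms deadline per pull attempt and fails the test the first time an attempt times out. A stripped-down sketch of that retry-with-per-attempt-timeout shape, using stand-in names (recv, request) rather than the suite's ReceiveChan and PullVotes; note that the cancelFunc deferred inside the test's loop only runs when the enclosing test function returns:

package nettest

import (
	"context"
	"errors"
	"time"
)

// pollUntil re-requests missing items and waits up to 500 ms for each
// reply; it returns an error on the first attempt that times out.
func pollUntil(recv <-chan string, request func(), remaining map[string]struct{}) error {
	for len(remaining) > 0 {
		request()
		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
		select {
		case v := <-recv:
			delete(remaining, v)
		case <-ctx.Done():
			cancel()
			return errors.New("pull timed out")
		}
		cancel()
	}
	return nil
}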
diff --git a/integration_test/byzantine_test.go b/integration_test/byzantine_test.go
new file mode 100644
index 0000000..fac8c0a
--- /dev/null
+++ b/integration_test/byzantine_test.go
@@ -0,0 +1,183 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package integration
+
+import (
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/db"
+ "github.com/dexon-foundation/dexon-consensus/core/test"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ "github.com/dexon-foundation/dexon-consensus/core/utils"
+ "github.com/stretchr/testify/suite"
+)
+
+// There is no scheduler in these tests, so we need to wait a long period to
+// make sure these tests complete correctly.
+type ByzantineTestSuite struct {
+ suite.Suite
+
+ directLatencyModel map[types.NodeID]test.LatencyModel
+}
+
+func (s *ByzantineTestSuite) SetupTest() {
+ s.directLatencyModel = make(map[types.NodeID]test.LatencyModel)
+}
+
+func (s *ByzantineTestSuite) setupNodes(
+ dMoment time.Time,
+ prvKeys []crypto.PrivateKey,
+ seedGov *test.Governance) map[types.NodeID]*node {
+ var (
+ wg sync.WaitGroup
+ )
+ // Setup peer server at transport layer.
+ server := test.NewFakeTransportServer()
+ serverChannel, err := server.Host()
+ s.Require().NoError(err)
+ // Setup nodes.
+ nodes := make(map[types.NodeID]*node)
+ wg.Add(len(prvKeys))
+ for _, k := range prvKeys {
+ dbInst, err := db.NewMemBackedDB()
+ s.Require().NoError(err)
+ nID := types.NewNodeID(k.PublicKey())
+ // Prepare essential modules: app, gov, db.
+ var directLatencyModel test.LatencyModel
+ if model, exist := s.directLatencyModel[nID]; exist {
+ directLatencyModel = model
+ } else {
+ directLatencyModel = &test.FixedLatencyModel{}
+ }
+ networkModule := test.NewNetwork(k.PublicKey(), test.NetworkConfig{
+ Type: test.NetworkTypeFake,
+ DirectLatency: directLatencyModel,
+ GossipLatency: &test.FixedLatencyModel{},
+ Marshaller: test.NewDefaultMarshaller(nil)},
+ )
+ gov := seedGov.Clone()
+ gov.SwitchToRemoteMode(networkModule)
+ gov.NotifyRound(0)
+ networkModule.AddNodeSetCache(utils.NewNodeSetCache(gov))
+ app := test.NewApp(1, gov)
+ nodes[nID] = &node{nID, nil, app, gov, dbInst, networkModule}
+ go func() {
+ defer wg.Done()
+ s.Require().NoError(networkModule.Setup(serverChannel))
+ go networkModule.Run()
+ }()
+ }
+ // Make sure transport layer is ready.
+ s.Require().NoError(server.WaitForPeers(uint32(len(prvKeys))))
+ wg.Wait()
+ for _, k := range prvKeys {
+ node := nodes[types.NewNodeID(k.PublicKey())]
+ // Now construct the consensus module.
+ node.con = core.NewConsensus(
+ dMoment,
+ node.app,
+ node.gov,
+ node.db,
+ node.network,
+ k,
+ &common.NullLogger{},
+ )
+ }
+ return nodes
+}
+
+func (s *ByzantineTestSuite) verifyNodes(nodes map[types.NodeID]*node) {
+ for ID, node := range nodes {
+ s.Require().NoError(test.VerifyDB(node.db))
+ s.Require().NoError(node.app.Verify())
+ for otherID, otherNode := range nodes {
+ if ID == otherID {
+ continue
+ }
+ s.Require().NoError(node.app.Compare(otherNode.app))
+ }
+ }
+}
+
+func (s *ByzantineTestSuite) TestOneSlowNodeOneDeadNode() {
+ // A 4-node setup with one slow node and one dead node.
+ // The slow node's network is very slow.
+ var (
+ req = s.Require()
+ peerCount = 4
+ dMoment = time.Now().UTC()
+ untilRound = uint64(3)
+ )
+ if testing.Short() {
+ untilRound = 1
+ }
+ prvKeys, pubKeys, err := test.NewKeys(peerCount)
+ req.NoError(err)
+ // Setup seed governance instance. Give a short latency to make this test
+ // run faster.
+ lambda := 100 * time.Millisecond
+ seedGov, err := test.NewGovernance(
+ test.NewState(
+ pubKeys, lambda, &common.NullLogger{}, true),
+ core.ConfigRoundShift)
+ req.NoError(err)
+ req.NoError(seedGov.State().RequestChange(
+ test.StateChangeRoundInterval, 100*time.Second))
+ slowNodeID := types.NewNodeID(pubKeys[0])
+ deadNodeID := types.NewNodeID(pubKeys[1])
+ s.directLatencyModel[slowNodeID] = &test.FixedLatencyModel{
+ Latency: lambda.Seconds() * 1000 * 2,
+ }
+ nodes := s.setupNodes(dMoment, prvKeys, seedGov)
+ for _, n := range nodes {
+ if n.ID == deadNodeID {
+ continue
+ }
+ go n.con.Run()
+ defer n.con.Stop()
+ }
+Loop:
+ for {
+ <-time.After(5 * time.Second)
+ fmt.Println("check latest position delivered by each node")
+ for _, n := range nodes {
+ if n.ID == deadNodeID {
+ continue
+ }
+ latestPos := n.app.GetLatestDeliveredPosition()
+ fmt.Println("latestPos", n.ID, &latestPos)
+ if latestPos.Round < untilRound {
+ continue Loop
+ }
+ }
+ // Every live node has delivered a block at round >= untilRound.
+ break
+ }
+ delete(nodes, deadNodeID)
+ s.verifyNodes(nodes)
+}
+
+func TestByzantine(t *testing.T) {
+ suite.Run(t, new(ByzantineTestSuite))
+}
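
The 4-node sizing lines up with the usual BFT bound, assuming dexon-consensus tolerates f faulty nodes out of n = 3f + 1: with f = 1, the quorum of 2f + 1 = 3 is exactly the three live nodes (slow node included), which is why progress past untilRound is still expected while the dead node stays silent. The -short path in TestOneSlowNodeOneDeadNode lowers untilRound from 3 to 1, so a quicker run of just this suite would be:

go test -run TestByzantine -short ./integration_test/

and the full three-round run simply drops the -short flag.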