Diffstat (limited to 'core/test')
 core/test/blocks-generator.go      | 157
 core/test/blocks-generator_test.go |  44
 core/test/fake-transport.go        |  28
 core/test/governance.go            |  28
 core/test/interface.go             |  12
 core/test/revealer.go              |  36
 core/test/revealer_test.go         |  12
 core/test/scheduler-event.go       |   8
 core/test/scheduler.go             |  20
 core/test/scheduler_test.go        |  60
 core/test/stopper.go               |  42
 core/test/stopper_test.go          |  34
 core/test/tcp-transport.go         |  86
 core/test/transport_test.go        |  64
 core/test/utils.go                 |  10
15 files changed, 322 insertions, 319 deletions
diff --git a/core/test/blocks-generator.go b/core/test/blocks-generator.go
index 93867f1..759e3a6 100644
--- a/core/test/blocks-generator.go
+++ b/core/test/blocks-generator.go
@@ -28,94 +28,97 @@ import (
"github.com/dexon-foundation/dexon-consensus-core/core/types"
)
+// TODO(mission): blocks generator should generate blocks based on chain,
+// not nodes.
+
// ErrParentNotAcked would be raised when some block doesn't
// ack its parent block.
var ErrParentNotAcked = errors.New("parent is not acked")
-// validatorStatus is a state holder for each validator
+// nodeStatus is a state holder for each node
// during generating blocks.
-type validatorStatus struct {
+type nodeStatus struct {
blocks []*types.Block
- lastAckingHeight map[types.ValidatorID]uint64
+ lastAckingHeight map[types.NodeID]uint64
}
type hashBlockFn func(*types.Block) (common.Hash, error)
// getAckedBlockHash would randomly pick one block between
// the last acked one and the current head.
-func (vs *validatorStatus) getAckedBlockHash(
- ackedVID types.ValidatorID,
- ackedValidator *validatorStatus,
+func (vs *nodeStatus) getAckedBlockHash(
+ ackedNID types.NodeID,
+ ackedNode *nodeStatus,
randGen *rand.Rand) (
hash common.Hash, ok bool) {
- baseAckingHeight, exists := vs.lastAckingHeight[ackedVID]
+ baseAckingHeight, exists := vs.lastAckingHeight[ackedNID]
if exists {
// Do not ack the same block(height) twice.
baseAckingHeight++
}
- totalBlockCount := uint64(len(ackedValidator.blocks))
+ totalBlockCount := uint64(len(ackedNode.blocks))
if totalBlockCount <= baseAckingHeight {
// There is no new block to ack.
return
}
ackableRange := totalBlockCount - baseAckingHeight
height := uint64((randGen.Uint64() % ackableRange) + baseAckingHeight)
- vs.lastAckingHeight[ackedVID] = height
- hash = ackedValidator.blocks[height].Hash
+ vs.lastAckingHeight[ackedNID] = height
+ hash = ackedNode.blocks[height].Hash
ok = true
return
}
-// validatorSetStatus is a state holder for all validators
+// nodeSetStatus is a state holder for all nodes
// during generating blocks.
-type validatorSetStatus struct {
- status map[types.ValidatorID]*validatorStatus
- proposerChain map[types.ValidatorID]uint32
+type nodeSetStatus struct {
+ status map[types.NodeID]*nodeStatus
+ proposerChain map[types.NodeID]uint32
timestamps []time.Time
- validatorIDs []types.ValidatorID
+ nodeIDs []types.NodeID
randGen *rand.Rand
hashBlock hashBlockFn
}
-func newValidatorSetStatus(vIDs []types.ValidatorID, hashBlock hashBlockFn) *validatorSetStatus {
- status := make(map[types.ValidatorID]*validatorStatus)
- timestamps := make([]time.Time, 0, len(vIDs))
- proposerChain := make(map[types.ValidatorID]uint32)
- for i, vID := range vIDs {
- status[vID] = &validatorStatus{
+func newNodeSetStatus(nIDs []types.NodeID, hashBlock hashBlockFn) *nodeSetStatus {
+ status := make(map[types.NodeID]*nodeStatus)
+ timestamps := make([]time.Time, 0, len(nIDs))
+ proposerChain := make(map[types.NodeID]uint32)
+ for i, nID := range nIDs {
+ status[nID] = &nodeStatus{
blocks: []*types.Block{},
- lastAckingHeight: make(map[types.ValidatorID]uint64),
+ lastAckingHeight: make(map[types.NodeID]uint64),
}
timestamps = append(timestamps, time.Now().UTC())
- proposerChain[vID] = uint32(i)
+ proposerChain[nID] = uint32(i)
}
- return &validatorSetStatus{
+ return &nodeSetStatus{
status: status,
proposerChain: proposerChain,
timestamps: timestamps,
- validatorIDs: vIDs,
+ nodeIDs: nIDs,
randGen: rand.New(rand.NewSource(time.Now().UnixNano())),
hashBlock: hashBlock,
}
}
-// findIncompleteValidators is a helper to check which validator
+// findIncompleteNodes is a helper to check which node
// doesn't generate enough blocks.
-func (vs *validatorSetStatus) findIncompleteValidators(
- blockCount int) (vIDs []types.ValidatorID) {
+func (vs *nodeSetStatus) findIncompleteNodes(
+ blockCount int) (nIDs []types.NodeID) {
- for vID, status := range vs.status {
+ for nID, status := range vs.status {
if len(status.blocks) < blockCount {
- vIDs = append(vIDs, vID)
+ nIDs = append(nIDs, nID)
}
}
return
}
// prepareAcksForNewBlock collects acks for one block.
-func (vs *validatorSetStatus) prepareAcksForNewBlock(
- proposerID types.ValidatorID, ackingCount int) (
+func (vs *nodeSetStatus) prepareAcksForNewBlock(
+ proposerID types.NodeID, ackingCount int) (
acks common.Hashes, err error) {
acks = common.Hashes{}
@@ -123,22 +126,22 @@ func (vs *validatorSetStatus) prepareAcksForNewBlock(
// The 'Acks' field of genesis blocks would always be empty.
return
}
- // Pick validatorIDs to be acked.
- ackingVIDs := map[types.ValidatorID]struct{}{
+ // Pick nodeIDs to be acked.
+ ackingNIDs := map[types.NodeID]struct{}{
proposerID: struct{}{}, // Acking parent block is always required.
}
if ackingCount > 0 {
ackingCount-- // We would always include ack to parent block.
}
- for _, i := range vs.randGen.Perm(len(vs.validatorIDs))[:ackingCount] {
- ackingVIDs[vs.validatorIDs[i]] = struct{}{}
+ for _, i := range vs.randGen.Perm(len(vs.nodeIDs))[:ackingCount] {
+ ackingNIDs[vs.nodeIDs[i]] = struct{}{}
}
// Generate acks.
- for vID := range ackingVIDs {
+ for nID := range ackingNIDs {
ack, ok := vs.status[proposerID].getAckedBlockHash(
- vID, vs.status[vID], vs.randGen)
+ nID, vs.status[nID], vs.randGen)
if !ok {
- if vID == proposerID {
+ if nID == proposerID {
err = ErrParentNotAcked
}
continue
@@ -148,9 +151,9 @@ func (vs *validatorSetStatus) prepareAcksForNewBlock(
return
}
-// proposeBlock propose new block and update validator status.
-func (vs *validatorSetStatus) proposeBlock(
- proposerID types.ValidatorID,
+// proposeBlock proposes a new block and updates node status.
+func (vs *nodeSetStatus) proposeBlock(
+ proposerID types.NodeID,
acks common.Hashes) (*types.Block, error) {
status := vs.status[proposerID]
@@ -171,8 +174,8 @@ func (vs *validatorSetStatus) proposeBlock(
Acks: common.NewSortedHashes(acks),
Timestamp: vs.timestamps[chainID],
}
- for i, vID := range vs.validatorIDs {
- if vID == proposerID {
+ for i, nID := range vs.nodeIDs {
+ if nID == proposerID {
newBlock.Position.ChainID = uint32(i)
}
}
@@ -188,13 +191,13 @@ func (vs *validatorSetStatus) proposeBlock(
// normalAckingCountGenerator would randomly pick acking count
// by a normal distribution.
func normalAckingCountGenerator(
- validatorCount int, mean, deviation float64) func() int {
+ nodeCount int, mean, deviation float64) func() int {
return func() int {
var expected float64
for {
expected = rand.NormFloat64()*deviation + mean
- if expected >= 0 && expected <= float64(validatorCount) {
+ if expected >= 0 && expected <= float64(nodeCount) {
break
}
}
@@ -208,32 +211,32 @@ func MaxAckingCountGenerator(count int) func() int {
return func() int { return count }
}
-// generateValidatorPicker is a function generator, which would generate
-// a function to randomly pick one validator ID from a slice of validator ID.
-func generateValidatorPicker() func([]types.ValidatorID) types.ValidatorID {
+// generateNodePicker is a function generator, which would generate
+// a function to randomly pick one node ID from a slice of node IDs.
+func generateNodePicker() func([]types.NodeID) types.NodeID {
privateRand := rand.New(rand.NewSource(time.Now().UnixNano()))
- return func(vIDs []types.ValidatorID) types.ValidatorID {
- return vIDs[privateRand.Intn(len(vIDs))]
+ return func(nIDs []types.NodeID) types.NodeID {
+ return nIDs[privateRand.Intn(len(nIDs))]
}
}
// BlocksGenerator could generate blocks forming valid DAGs.
type BlocksGenerator struct {
- validatorPicker func([]types.ValidatorID) types.ValidatorID
- hashBlock hashBlockFn
+ nodePicker func([]types.NodeID) types.NodeID
+ hashBlock hashBlockFn
}
// NewBlocksGenerator constructs a BlocksGenerator.
-func NewBlocksGenerator(validatorPicker func(
- []types.ValidatorID) types.ValidatorID,
+func NewBlocksGenerator(nodePicker func(
+ []types.NodeID) types.NodeID,
hashBlock hashBlockFn) *BlocksGenerator {
- if validatorPicker == nil {
- validatorPicker = generateValidatorPicker()
+ if nodePicker == nil {
+ nodePicker = generateNodePicker()
}
return &BlocksGenerator{
- validatorPicker: validatorPicker,
- hashBlock: hashBlock,
+ nodePicker: nodePicker,
+ hashBlock: hashBlock,
}
}
@@ -244,45 +247,45 @@ func NewBlocksGenerator(validatorPicker func(
// has maximum 2 acks.
// func () int { return 2 }
// The default ackingCountGenerator would randomly pick a number based on
-// the validatorCount you provided with a normal distribution.
+// the nodeCount you provided with a normal distribution.
func (gen *BlocksGenerator) Generate(
- validatorCount int,
+ nodeCount int,
blockCount int,
ackingCountGenerator func() int,
writer blockdb.Writer) (
- validators types.ValidatorIDs, err error) {
+ nodes types.NodeIDs, err error) {
if ackingCountGenerator == nil {
ackingCountGenerator = normalAckingCountGenerator(
- validatorCount,
- float64(validatorCount/2),
- float64(validatorCount/4+1))
+ nodeCount,
+ float64(nodeCount/2),
+ float64(nodeCount/4+1))
}
- validators = types.ValidatorIDs{}
- for i := 0; i < validatorCount; i++ {
- validators = append(
- validators, types.ValidatorID{Hash: common.NewRandomHash()})
+ nodes = types.NodeIDs{}
+ for i := 0; i < nodeCount; i++ {
+ nodes = append(
+ nodes, types.NodeID{Hash: common.NewRandomHash()})
}
- status := newValidatorSetStatus(validators, gen.hashBlock)
+ status := newNodeSetStatus(nodes, gen.hashBlock)
// We would record the smallest height of block that could be acked
- // from each validator's point-of-view.
- toAck := make(map[types.ValidatorID]map[types.ValidatorID]uint64)
- for _, vID := range validators {
- toAck[vID] = make(map[types.ValidatorID]uint64)
+ // from each node's point-of-view.
+ toAck := make(map[types.NodeID]map[types.NodeID]uint64)
+ for _, nID := range nodes {
+ toAck[nID] = make(map[types.NodeID]uint64)
}
for {
- // Find validators that doesn't propose enough blocks and
+ // Find nodes that don't propose enough blocks and
// pick one from them randomly.
- notYet := status.findIncompleteValidators(blockCount)
+ notYet := status.findIncompleteNodes(blockCount)
if len(notYet) == 0 {
break
}
// Propose a new block.
var (
- proposerID = gen.validatorPicker(notYet)
+ proposerID = gen.nodePicker(notYet)
acks common.Hashes
)
acks, err = status.prepareAcksForNewBlock(
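
Pulling the renamed pieces together, usage of the generator now reads roughly as follows. This is a minimal sketch, assuming it runs inside the core/test package; the counts and the acking generator are illustrative, not taken from the diff:

	db, err := blockdb.NewMemBackedBlockDB()
	if err != nil {
		panic(err)
	}
	// A nil nodePicker falls back to generateNodePicker().
	gen := NewBlocksGenerator(nil, stableRandomHash)
	nodes, err := gen.Generate(
		4,                          // nodeCount (illustrative)
		10,                         // blockCount per node (illustrative)
		MaxAckingCountGenerator(2), // each block acks at most 2 blocks
		db)
	if err != nil {
		panic(err)
	}
	_ = nodes // a types.NodeIDs slice after the rename
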
diff --git a/core/test/blocks-generator_test.go b/core/test/blocks-generator_test.go
index e607796..f8aa325 100644
--- a/core/test/blocks-generator_test.go
+++ b/core/test/blocks-generator_test.go
@@ -33,21 +33,21 @@ type BlocksGeneratorTestCase struct {
func (s *BlocksGeneratorTestCase) TestGenerate() {
// This test case is to make sure the generated blocks are legitimate.
- validatorCount := 19
+ nodeCount := 19
blockCount := 50
gen := NewBlocksGenerator(nil, stableRandomHash)
db, err := blockdb.NewMemBackedBlockDB()
s.Require().Nil(err)
- validators, err := gen.Generate(
- validatorCount, blockCount, nil, db)
+ nodes, err := gen.Generate(
+ nodeCount, blockCount, nil, db)
s.Require().Nil(err)
- s.Require().Len(validators, validatorCount)
+ s.Require().Len(nodes, nodeCount)
// Load all blocks in that database for further checking.
iter, err := db.GetAll()
s.Require().Nil(err)
- blocksByValidator := make(map[types.ValidatorID][]*types.Block)
+ blocksByNode := make(map[types.NodeID][]*types.Block)
blocksByHash := make(map[common.Hash]*types.Block)
for {
block, err := iter.Next()
@@ -56,9 +56,9 @@ func (s *BlocksGeneratorTestCase) TestGenerate() {
}
s.Nil(err)
- blocksByValidator[block.ProposerID] =
- append(blocksByValidator[block.ProposerID], &block)
- sort.Sort(types.ByHeight(blocksByValidator[block.ProposerID]))
+ blocksByNode[block.ProposerID] =
+ append(blocksByNode[block.ProposerID], &block)
+ sort.Sort(types.ByHeight(blocksByNode[block.ProposerID]))
blocksByHash[block.Hash] = &block
}
@@ -67,8 +67,8 @@ func (s *BlocksGeneratorTestCase) TestGenerate() {
// compared to its parent block.
// - Parent Ack: always ack its parent block.
// - No Acks in genesis block
- for _, blocks := range blocksByValidator {
- lastAckingHeights := map[types.ValidatorID]uint64{}
+ for _, blocks := range blocksByNode {
+ lastAckingHeights := map[types.NodeID]uint64{}
s.Require().NotEmpty(blocks)
// Check genesis block.
@@ -106,19 +106,19 @@ func (s *BlocksGeneratorTestCase) TestGenerate() {
func (s *BlocksGeneratorTestCase) TestGenerateWithMaxAckCount() {
var (
- validatorCount = 13
- blockCount = 50
- gen = NewBlocksGenerator(nil, stableRandomHash)
- req = s.Require()
+ nodeCount = 13
+ blockCount = 50
+ gen = NewBlocksGenerator(nil, stableRandomHash)
+ req = s.Require()
)
// Generate with 0 acks.
db, err := blockdb.NewMemBackedBlockDB()
req.Nil(err)
- validators, err := gen.Generate(
- validatorCount, blockCount, MaxAckingCountGenerator(0), db)
+ nodes, err := gen.Generate(
+ nodeCount, blockCount, MaxAckingCountGenerator(0), db)
req.Nil(err)
- req.Len(validators, validatorCount)
+ req.Len(nodes, nodeCount)
// Load blocks to check their acking count.
iter, err := db.GetAll()
req.Nil(err)
@@ -137,11 +137,11 @@ func (s *BlocksGeneratorTestCase) TestGenerateWithMaxAckCount() {
// Generate with acks as many as possible.
db, err = blockdb.NewMemBackedBlockDB()
req.Nil(err)
- validators, err = gen.Generate(
- validatorCount, blockCount, MaxAckingCountGenerator(
- validatorCount), db)
+ nodes, err = gen.Generate(
+ nodeCount, blockCount, MaxAckingCountGenerator(
+ nodeCount), db)
req.Nil(err)
- req.Len(validators, validatorCount)
+ req.Len(nodes, nodeCount)
// Load blocks to verify the average acking count.
totalAckingCount := 0
totalBlockCount := 0
@@ -160,7 +160,7 @@ func (s *BlocksGeneratorTestCase) TestGenerateWithMaxAckCount() {
totalBlockCount++
}
req.NotZero(totalBlockCount)
- req.True((totalAckingCount / totalBlockCount) >= (validatorCount / 2))
+ req.True((totalAckingCount / totalBlockCount) >= (nodeCount / 2))
}
func TestBlocksGenerator(t *testing.T) {
diff --git a/core/test/fake-transport.go b/core/test/fake-transport.go
index 2615bd4..2f1686e 100644
--- a/core/test/fake-transport.go
+++ b/core/test/fake-transport.go
@@ -28,10 +28,10 @@ import (
// by using golang channel.
type FakeTransport struct {
peerType TransportPeerType
- vID types.ValidatorID
+ nID types.NodeID
recvChannel chan *TransportEnvelope
serverChannel chan<- *TransportEnvelope
- peers map[types.ValidatorID]chan<- *TransportEnvelope
+ peers map[types.NodeID]chan<- *TransportEnvelope
latency LatencyModel
}
@@ -45,19 +45,19 @@ func NewFakeTransportServer() TransportServer {
// NewFakeTransportClient constructs FakeTransport instance for peer.
func NewFakeTransportClient(
- vID types.ValidatorID, latency LatencyModel) TransportClient {
+ nID types.NodeID, latency LatencyModel) TransportClient {
return &FakeTransport{
peerType: TransportPeer,
recvChannel: make(chan *TransportEnvelope, 1000),
- vID: vID,
+ nID: nID,
latency: latency,
}
}
// Send implements Transport.Send method.
func (t *FakeTransport) Send(
- endpoint types.ValidatorID, msg interface{}) (err error) {
+ endpoint types.NodeID, msg interface{}) (err error) {
ch, exists := t.peers[endpoint]
if !exists {
@@ -70,7 +70,7 @@ func (t *FakeTransport) Send(
}
ch <- &TransportEnvelope{
PeerType: t.peerType,
- From: t.vID,
+ From: t.nID,
Msg: msg,
}
}(ch)
@@ -82,7 +82,7 @@ func (t *FakeTransport) Report(msg interface{}) (err error) {
go func() {
t.serverChannel <- &TransportEnvelope{
PeerType: TransportPeer,
- From: t.vID,
+ From: t.nID,
Msg: msg,
}
}()
@@ -92,7 +92,7 @@ func (t *FakeTransport) Report(msg interface{}) (err error) {
// Broadcast implements Transport.Broadcast method.
func (t *FakeTransport) Broadcast(msg interface{}) (err error) {
for k := range t.peers {
- if k == t.vID {
+ if k == t.nID {
continue
}
t.Send(k, msg)
@@ -107,10 +107,10 @@ func (t *FakeTransport) Close() (err error) {
}
// Peers implements Transport.Peers method.
-func (t *FakeTransport) Peers() (peers map[types.ValidatorID]struct{}) {
- peers = make(map[types.ValidatorID]struct{})
- for vID := range t.peers {
- peers[vID] = struct{}{}
+func (t *FakeTransport) Peers() (peers map[types.NodeID]struct{}) {
+ peers = make(map[types.NodeID]struct{})
+ for nID := range t.peers {
+ peers[nID] = struct{}{}
}
return
}
@@ -135,7 +135,7 @@ func (t *FakeTransport) Join(
continue
}
if t.peers, ok =
- envelope.Msg.(map[types.ValidatorID]chan<- *TransportEnvelope); !ok {
+ envelope.Msg.(map[types.NodeID]chan<- *TransportEnvelope); !ok {
envelopes = append(envelopes, envelope)
continue
@@ -155,7 +155,7 @@ func (t *FakeTransport) Host() (chan *TransportEnvelope, error) {
// WaitForPeers implements TransportServer.WaitForPeers method.
func (t *FakeTransport) WaitForPeers(numPeers int) (err error) {
- t.peers = make(map[types.ValidatorID]chan<- *TransportEnvelope)
+ t.peers = make(map[types.NodeID]chan<- *TransportEnvelope)
for {
envelope := <-t.recvChannel
// Panic here if some peer sends other stuff before
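
For reference, a rough sketch of how the renamed fake transport wires together, assuming the core/test package; the node count and latency are illustrative, and the channel-based Host/Join flow follows transport_test.go below:

	server := NewFakeTransportServer()
	serverRecv, err := server.Host()
	if err != nil {
		panic(err)
	}
	latency := &FixedLatencyModel{Latency: 300}
	for _, nID := range GenerateRandomNodeIDs(2) {
		go func(nID types.NodeID) {
			client := NewFakeTransportClient(nID, latency)
			recv, err := client.Join(serverRecv)
			if err != nil {
				panic(err)
			}
			_ = recv // consume *TransportEnvelope from peers here
		}(nID)
	}
	// Blocks until both peers have joined and received the peer list.
	if err := server.WaitForPeers(2); err != nil {
		panic(err)
	}
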
diff --git a/core/test/governance.go b/core/test/governance.go
index c5746cb..a2e6f69 100644
--- a/core/test/governance.go
+++ b/core/test/governance.go
@@ -28,44 +28,44 @@ import (
var (
// ErrPrivateKeyNotExists means the caller requests a private key for an
- // unknown validator ID.
+ // unknown node ID.
ErrPrivateKeyNotExists = fmt.Errorf("private key not exists")
)
// Governance is an implementation of the Governance interface for testing purposes.
type Governance struct {
lambda time.Duration
- notarySet map[types.ValidatorID]struct{}
- privateKeys map[types.ValidatorID]crypto.PrivateKey
+ notarySet map[types.NodeID]struct{}
+ privateKeys map[types.NodeID]crypto.PrivateKey
DKGComplaint map[uint64][]*types.DKGComplaint
DKGMasterPublicKey map[uint64][]*types.DKGMasterPublicKey
}
// NewGovernance constructs a Governance instance.
-func NewGovernance(validatorCount int, lambda time.Duration) (
+func NewGovernance(nodeCount int, lambda time.Duration) (
g *Governance, err error) {
g = &Governance{
lambda: lambda,
- notarySet: make(map[types.ValidatorID]struct{}),
- privateKeys: make(map[types.ValidatorID]crypto.PrivateKey),
+ notarySet: make(map[types.NodeID]struct{}),
+ privateKeys: make(map[types.NodeID]crypto.PrivateKey),
DKGComplaint: make(map[uint64][]*types.DKGComplaint),
DKGMasterPublicKey: make(map[uint64][]*types.DKGMasterPublicKey),
}
- for i := 0; i < validatorCount; i++ {
+ for i := 0; i < nodeCount; i++ {
prv, err := eth.NewPrivateKey()
if err != nil {
return nil, err
}
- vID := types.NewValidatorID(prv.PublicKey())
- g.notarySet[vID] = struct{}{}
- g.privateKeys[vID] = prv
+ nID := types.NewNodeID(prv.PublicKey())
+ g.notarySet[nID] = struct{}{}
+ g.privateKeys[nID] = prv
}
return
}
// GetNotarySet implements Governance interface to return current
// notary set.
-func (g *Governance) GetNotarySet() map[types.ValidatorID]struct{} {
+func (g *Governance) GetNotarySet() map[types.NodeID]struct{} {
return g.notarySet
}
@@ -81,12 +81,12 @@ func (g *Governance) GetConfiguration(blockHeight uint64) *types.Config {
}
}
-// GetPrivateKey return the private key for that validator, this function
+// GetPrivateKey returns the private key for that node; this function
// is a test utility and not a general Governance interface.
func (g *Governance) GetPrivateKey(
- vID types.ValidatorID) (key crypto.PrivateKey, err error) {
+ nID types.NodeID) (key crypto.PrivateKey, err error) {
- key, exists := g.privateKeys[vID]
+ key, exists := g.privateKeys[nID]
if !exists {
err = ErrPrivateKeyNotExists
return
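
A short usage sketch of the renamed Governance, assuming the core/test package; the node count and lambda are illustrative:

	gov, err := NewGovernance(4, 100*time.Millisecond)
	if err != nil {
		panic(err)
	}
	// Every node in the notary set has a private key registered,
	// keyed by types.NodeID after the rename.
	for nID := range gov.GetNotarySet() {
		if _, err := gov.GetPrivateKey(nID); err != nil {
			panic(err) // would be ErrPrivateKeyNotExists for unknown IDs
		}
	}
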
diff --git a/core/test/interface.go b/core/test/interface.go
index 0dc2382..a422ee7 100644
--- a/core/test/interface.go
+++ b/core/test/interface.go
@@ -38,7 +38,7 @@ type Stopper interface {
// moment.
// The Stopper should check the state of that handler and return 'true'
// if the execution could be stopped.
- ShouldStop(vID types.ValidatorID) bool
+ ShouldStop(nID types.NodeID) bool
}
// EventHandler defines an interface to handle a Scheduler event.
@@ -62,8 +62,8 @@ type TransportEnvelope struct {
// PeerType defines the type of source peer, could be either "peer" or
// "server".
PeerType TransportPeerType
- // From defines the validatorID of the source peer.
- From types.ValidatorID
+ // From defines the nodeID of the source peer.
+ From types.NodeID
// Msg is the actual payload of this message.
Msg interface{}
}
@@ -92,14 +92,14 @@ type Transport interface {
// Broadcast a message to all peers in network.
Broadcast(msg interface{}) error
// Send one message to a peer.
- Send(endpoint types.ValidatorID, msg interface{}) error
+ Send(endpoint types.NodeID, msg interface{}) error
// Close would cleanup allocated resources.
Close() error
- // Peers return IDs of all connected validators in p2p favor.
+ // Peers returns IDs of all connected nodes in p2p flavor.
// This method should be accessed after either 'Join' or 'WaitForPeers'
// returned.
- Peers() map[types.ValidatorID]struct{}
+ Peers() map[types.NodeID]struct{}
}
// Marshaller defines an interface to convert between interface{} and []byte.
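
To illustrate the renamed Stopper contract, a minimal sketch of an implementation; the countdownStopper type is hypothetical, written for this note only. It stops all execution after a fixed number of ShouldStop probes, regardless of which node asks:

	type countdownStopper struct {
		lock      sync.Mutex
		remaining int
	}

	// ShouldStop now receives a types.NodeID; this toy implementation
	// ignores the node and simply counts down a shared budget.
	func (s *countdownStopper) ShouldStop(nID types.NodeID) bool {
		s.lock.Lock()
		defer s.lock.Unlock()
		s.remaining--
		return s.remaining <= 0
	}
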
diff --git a/core/test/revealer.go b/core/test/revealer.go
index b8eb9b4..b3af4d7 100644
--- a/core/test/revealer.go
+++ b/core/test/revealer.go
@@ -63,12 +63,12 @@ func loadAllBlocks(iter blockdb.BlockIterator) (
// all blocks from blockdb, and randomly pick one block to reveal if
// it still forms a valid DAG in revealed blocks.
type RandomDAGRevealer struct {
- // blocksByValidator group all blocks by validators and sorting
+ // blocksByNode groups all blocks by node, sorting
// them by height.
- blocksByValidator map[types.ValidatorID][]*types.Block
- // tipIndexes store the height of next block from one validator
+ blocksByNode map[types.NodeID][]*types.Block
+ // tipIndexes store the height of next block from one node
// to check if it is a candidate.
- tipIndexes map[types.ValidatorID]int
+ tipIndexes map[types.NodeID]int
// candidate are blocks that forms valid DAG with
// current revealed blocks.
candidates []*types.Block
@@ -86,19 +86,19 @@ func NewRandomDAGRevealer(
return
}
- // Rearrange blocks by validators and height.
- blocksByValidator := make(map[types.ValidatorID][]*types.Block)
+ // Rearrange blocks by nodes and height.
+ blocksByNode := make(map[types.NodeID][]*types.Block)
for _, block := range blocks {
- blocksByValidator[block.ProposerID] =
- append(blocksByValidator[block.ProposerID], block)
+ blocksByNode[block.ProposerID] =
+ append(blocksByNode[block.ProposerID], block)
}
// Make sure blocks are sorted by block heights, from lower to higher.
- for vID := range blocksByValidator {
- sort.Sort(types.ByHeight(blocksByValidator[vID]))
+ for nID := range blocksByNode {
+ sort.Sort(types.ByHeight(blocksByNode[nID]))
}
r = &RandomDAGRevealer{
- blocksByValidator: blocksByValidator,
- randGen: rand.New(rand.NewSource(time.Now().UnixNano())),
+ blocksByNode: blocksByNode,
+ randGen: rand.New(rand.NewSource(time.Now().UnixNano())),
}
// Make sure this revealer is ready to use.
r.Reset()
@@ -107,8 +107,8 @@ func NewRandomDAGRevealer(
// pickCandidates is a helper function to pick candidates from current tips.
func (r *RandomDAGRevealer) pickCandidates() {
- for vID, tip := range r.tipIndexes {
- blocks, exists := r.blocksByValidator[vID]
+ for nID, tip := range r.tipIndexes {
+ blocks, exists := r.blocksByNode[nID]
if !exists {
continue
}
@@ -117,7 +117,7 @@ func (r *RandomDAGRevealer) pickCandidates() {
}
block := blocks[tip]
if isAllAckingBlockRevealed(block, r.revealed) {
- r.tipIndexes[vID]++
+ r.tipIndexes[nID]++
r.candidates = append(r.candidates, block)
}
}
@@ -145,9 +145,9 @@ func (r *RandomDAGRevealer) Next() (types.Block, error) {
// Reset implements Revealer.Reset method, which would reset the revealing.
func (r *RandomDAGRevealer) Reset() {
- r.tipIndexes = make(map[types.ValidatorID]int)
- for vID := range r.blocksByValidator {
- r.tipIndexes[vID] = 0
+ r.tipIndexes = make(map[types.NodeID]int)
+ for nID := range r.blocksByNode {
+ r.tipIndexes[nID] = 0
}
r.revealed = make(map[common.Hash]struct{})
r.candidates = []*types.Block{}
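
Usage of the renamed revealer, sketched under the assumption that NewRandomDAGRevealer takes a blockdb iterator (as the loadAllBlocks helper above suggests) and that Next returns an error once every block has been revealed:

	iter, err := db.GetAll()
	if err != nil {
		panic(err)
	}
	revealer, err := NewRandomDAGRevealer(iter)
	if err != nil {
		panic(err)
	}
	for {
		b, err := revealer.Next()
		if err != nil {
			break // assumed to signal that iteration finished
		}
		_ = b // every block acked by b was already revealed
	}
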
diff --git a/core/test/revealer_test.go b/core/test/revealer_test.go
index 16d3b18..0c20520 100644
--- a/core/test/revealer_test.go
+++ b/core/test/revealer_test.go
@@ -35,9 +35,9 @@ type RevealerTestSuite struct {
func (s *RevealerTestSuite) SetupSuite() {
var (
- err error
- validatorCount = 19
- blockCount = 50
+ err error
+ nodeCount = 19
+ blockCount = 50
)
// Setup block database.
s.db, err = blockdb.NewMemBackedBlockDB()
@@ -45,10 +45,10 @@ func (s *RevealerTestSuite) SetupSuite() {
// Randomly generate blocks.
gen := NewBlocksGenerator(nil, stableRandomHash)
- validators, err := gen.Generate(
- validatorCount, blockCount, nil, s.db)
+ nodes, err := gen.Generate(
+ nodeCount, blockCount, nil, s.db)
s.Require().Nil(err)
- s.Require().Len(validators, validatorCount)
+ s.Require().Len(nodes, nodeCount)
// Cache the count of total generated block.
iter, err := s.db.GetAll()
diff --git a/core/test/scheduler-event.go b/core/test/scheduler-event.go
index 85968c5..2863b54 100644
--- a/core/test/scheduler-event.go
+++ b/core/test/scheduler-event.go
@@ -27,8 +27,8 @@ import (
type Event struct {
// HistoryIndex is the index of this event in history.
HistoryIndex int
- // ValidatorID is the ID of handler that this event deginated to.
- ValidatorID types.ValidatorID
+ // NodeID is the ID of the handler that this event is designated to.
+ NodeID types.NodeID
// Time is the expected execution time of this event.
Time time.Time
// ExecError record the error when handling this event.
@@ -67,12 +67,12 @@ func (eq *eventQueue) Pop() interface{} {
// NewEvent is the constructor for Event.
func NewEvent(
- vID types.ValidatorID, when time.Time, payload interface{}) *Event {
+ nID types.NodeID, when time.Time, payload interface{}) *Event {
return &Event{
HistoryIndex: -1,
ParentHistoryIndex: -1,
- ValidatorID: vID,
+ NodeID: nID,
Time: when,
Payload: payload,
}
diff --git a/core/test/scheduler.go b/core/test/scheduler.go
index 6a3a40a..7c5bbde 100644
--- a/core/test/scheduler.go
+++ b/core/test/scheduler.go
@@ -48,7 +48,7 @@ type Scheduler struct {
history []*Event
historyLock sync.RWMutex
isStarted bool
- handlers map[types.ValidatorID]*schedulerHandlerRecord
+ handlers map[types.NodeID]*schedulerHandlerRecord
handlersLock sync.RWMutex
eventNotification chan struct{}
ctx context.Context
@@ -62,7 +62,7 @@ func NewScheduler(stopper Stopper) *Scheduler {
return &Scheduler{
events: eventQueue{},
history: []*Event{},
- handlers: make(map[types.ValidatorID]*schedulerHandlerRecord),
+ handlers: make(map[types.NodeID]*schedulerHandlerRecord),
eventNotification: make(chan struct{}, 100000),
ctx: ctx,
cancelFunc: cancel,
@@ -98,15 +98,15 @@ func (sch *Scheduler) Seed(e *Event) error {
}
// RegisterEventHandler registers an event handler by providing the ID of the
-// corresponding validator.
+// corresponding node.
func (sch *Scheduler) RegisterEventHandler(
- vID types.ValidatorID,
+ nID types.NodeID,
handler EventHandler) {
sch.handlersLock.Lock()
defer sch.handlersLock.Unlock()
- sch.handlers[vID] = &schedulerHandlerRecord{handler: handler}
+ sch.handlers[nID] = &schedulerHandlerRecord{handler: handler}
}
// nextTick would pick the oldest event from eventQueue.
@@ -144,12 +144,12 @@ func (sch *Scheduler) workerRoutine(wg *sync.WaitGroup) {
handleEvent := func(e *Event) {
// Find the corresponding handler record.
- hRec := func(vID types.ValidatorID) *schedulerHandlerRecord {
+ hRec := func(nID types.NodeID) *schedulerHandlerRecord {
sch.handlersLock.RLock()
defer sch.handlersLock.RUnlock()
- return sch.handlers[vID]
- }(e.ValidatorID)
+ return sch.handlers[nID]
+ }(e.NodeID)
newEvents := func() []*Event {
// This lock makes sure there would be no concurrent access
@@ -161,8 +161,8 @@ func (sch *Scheduler) workerRoutine(wg *sync.WaitGroup) {
beforeExecution := time.Now().UTC()
newEvents := hRec.handler.Handle(e)
e.ExecInterval = time.Now().UTC().Sub(beforeExecution)
- // It's safe to check status of that validator under 'hRec.lock'.
- if sch.stopper.ShouldStop(e.ValidatorID) {
+ // It's safe to check status of that node under 'hRec.lock'.
+ if sch.stopper.ShouldStop(e.NodeID) {
sch.cancelFunc()
}
return newEvents
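
Putting the renamed scheduler API together: a sketch assuming the core/test package, reusing the hypothetical countdownStopper from the interface.go note above and the simpleHandler defined in scheduler_test.go below. Worker and event counts are illustrative:

	nodes := GenerateRandomNodeIDs(3)
	sch := NewScheduler(&countdownStopper{remaining: 6})
	for _, nID := range nodes {
		sch.RegisterEventHandler(nID, &simpleHandler{nID: nID})
		if err := sch.Seed(NewEvent(nID, time.Now().UTC(), nil)); err != nil {
			panic(err)
		}
	}
	sch.Run(4) // 4 worker routines; returns after the stopper fires
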
diff --git a/core/test/scheduler_test.go b/core/test/scheduler_test.go
index 5aef36e..1e6d52f 100644
--- a/core/test/scheduler_test.go
+++ b/core/test/scheduler_test.go
@@ -33,16 +33,16 @@ type SchedulerTestSuite struct {
type simpleStopper struct {
lock sync.Mutex
- touched map[types.ValidatorID]int
+ touched map[types.NodeID]int
touchedCount int
}
func newSimpleStopper(
- validators []types.ValidatorID, touchedCount int) *simpleStopper {
+ nodes []types.NodeID, touchedCount int) *simpleStopper {
- touched := make(map[types.ValidatorID]int)
- for _, vID := range validators {
- touched[vID] = 0
+ touched := make(map[types.NodeID]int)
+ for _, nID := range nodes {
+ touched[nID] = 0
}
return &simpleStopper{
touched: touched,
@@ -50,11 +50,11 @@ func newSimpleStopper(
}
}
-func (stopper *simpleStopper) ShouldStop(vID types.ValidatorID) bool {
+func (stopper *simpleStopper) ShouldStop(nID types.NodeID) bool {
stopper.lock.Lock()
defer stopper.lock.Unlock()
- stopper.touched[vID] = stopper.touched[vID] + 1
+ stopper.touched[nID] = stopper.touched[nID] + 1
for _, count := range stopper.touched {
if count < stopper.touchedCount {
return false
@@ -65,26 +65,26 @@ func (stopper *simpleStopper) ShouldStop(vID types.ValidatorID) bool {
type simpleHandler struct {
count int
- vID types.ValidatorID
+ nID types.NodeID
}
func (handler *simpleHandler) Handle(e *Event) (events []*Event) {
- if e.ValidatorID == handler.vID {
+ if e.NodeID == handler.nID {
handler.count++
}
return
}
type fixedLatencyHandler struct {
- vID types.ValidatorID
+ nID types.NodeID
}
func (handler *fixedLatencyHandler) Handle(e *Event) (events []*Event) {
// Simulate execution time.
time.Sleep(500 * time.Millisecond)
return []*Event{&Event{
- ValidatorID: handler.vID,
- Time: e.Time.Add(800 * time.Millisecond),
+ NodeID: handler.nID,
+ Time: e.Time.Add(800 * time.Millisecond),
}}
}
@@ -114,22 +114,22 @@ func (s *SchedulerTestSuite) TestEventSequence() {
func (s *SchedulerTestSuite) TestBasicRound() {
// This test case makes sure these facts:
- // - event is dispatched by validatorID attached to each handler.
+ // - event is dispatched by NodeID attached to each handler.
// - stopper can stop the execution when condition is met.
var (
- req = s.Require()
- validators = GenerateRandomValidatorIDs(3)
- stopper = newSimpleStopper(validators, 2)
- sch = NewScheduler(stopper)
- handlers = make(map[types.ValidatorID]*simpleHandler)
+ req = s.Require()
+ nodes = GenerateRandomNodeIDs(3)
+ stopper = newSimpleStopper(nodes, 2)
+ sch = NewScheduler(stopper)
+ handlers = make(map[types.NodeID]*simpleHandler)
)
- for _, vID := range validators {
- handler := &simpleHandler{vID: vID}
- handlers[vID] = handler
- sch.RegisterEventHandler(vID, handler)
- req.Nil(sch.Seed(&Event{ValidatorID: vID}))
- req.Nil(sch.Seed(&Event{ValidatorID: vID}))
+ for _, nID := range nodes {
+ handler := &simpleHandler{nID: nID}
+ handlers[nID] = handler
+ sch.RegisterEventHandler(nID, handler)
+ req.Nil(sch.Seed(&Event{NodeID: nID}))
+ req.Nil(sch.Seed(&Event{NodeID: nID}))
}
sch.Run(10)
// Verify result.
@@ -143,16 +143,16 @@ func (s *SchedulerTestSuite) TestChildEvent() {
// assigned correctly.
var (
req = s.Require()
- vID = types.ValidatorID{Hash: common.NewRandomHash()}
- stopper = newSimpleStopper(types.ValidatorIDs{vID}, 3)
- handler = &fixedLatencyHandler{vID: vID}
+ nID = types.NodeID{Hash: common.NewRandomHash()}
+ stopper = newSimpleStopper(types.NodeIDs{nID}, 3)
+ handler = &fixedLatencyHandler{nID: nID}
sch = NewScheduler(stopper)
)
- sch.RegisterEventHandler(vID, handler)
+ sch.RegisterEventHandler(nID, handler)
req.Nil(sch.Seed(&Event{
- ValidatorID: vID,
- Time: time.Now().UTC(),
+ NodeID: nID,
+ Time: time.Now().UTC(),
}))
sch.Run(1)
// Verify result.
diff --git a/core/test/stopper.go b/core/test/stopper.go
index 7c75958..9fe5592 100644
--- a/core/test/stopper.go
+++ b/core/test/stopper.go
@@ -24,13 +24,13 @@ import (
"github.com/dexon-foundation/dexon-consensus-core/core/types"
)
-// StopByConfirmedBlocks would make sure each validators confirms
+// StopByConfirmedBlocks would make sure each node confirms
// at least X blocks proposed by itself.
type StopByConfirmedBlocks struct {
- apps map[types.ValidatorID]*App
- dbs map[types.ValidatorID]blockdb.BlockDatabase
- lastCheckDelivered map[types.ValidatorID]int
- confirmedBlocks map[types.ValidatorID]int
+ apps map[types.NodeID]*App
+ dbs map[types.NodeID]blockdb.BlockDatabase
+ lastCheckDelivered map[types.NodeID]int
+ confirmedBlocks map[types.NodeID]int
blockCount int
lock sync.Mutex
}
@@ -38,45 +38,45 @@ type StopByConfirmedBlocks struct {
// NewStopByConfirmedBlocks constructs a StopByConfirmedBlocks instance.
func NewStopByConfirmedBlocks(
blockCount int,
- apps map[types.ValidatorID]*App,
- dbs map[types.ValidatorID]blockdb.BlockDatabase) *StopByConfirmedBlocks {
+ apps map[types.NodeID]*App,
+ dbs map[types.NodeID]blockdb.BlockDatabase) *StopByConfirmedBlocks {
- confirmedBlocks := make(map[types.ValidatorID]int)
- for vID := range apps {
- confirmedBlocks[vID] = 0
+ confirmedBlocks := make(map[types.NodeID]int)
+ for nID := range apps {
+ confirmedBlocks[nID] = 0
}
return &StopByConfirmedBlocks{
apps: apps,
dbs: dbs,
- lastCheckDelivered: make(map[types.ValidatorID]int),
+ lastCheckDelivered: make(map[types.NodeID]int),
confirmedBlocks: confirmedBlocks,
blockCount: blockCount,
}
}
// ShouldStop implements Stopper interface.
-func (s *StopByConfirmedBlocks) ShouldStop(vID types.ValidatorID) bool {
+func (s *StopByConfirmedBlocks) ShouldStop(nID types.NodeID) bool {
s.lock.Lock()
defer s.lock.Unlock()
- // Accumulate confirmed blocks proposed by this validator in this round.
- lastChecked := s.lastCheckDelivered[vID]
- currentConfirmedBlocks := s.confirmedBlocks[vID]
- db := s.dbs[vID]
- s.apps[vID].Check(func(app *App) {
+ // Accumulate confirmed blocks proposed by this node in this round.
+ lastChecked := s.lastCheckDelivered[nID]
+ currentConfirmedBlocks := s.confirmedBlocks[nID]
+ db := s.dbs[nID]
+ s.apps[nID].Check(func(app *App) {
for _, h := range app.DeliverSequence[lastChecked:] {
b, err := db.Get(h)
if err != nil {
panic(err)
}
- if b.ProposerID == vID {
+ if b.ProposerID == nID {
currentConfirmedBlocks++
}
}
- s.lastCheckDelivered[vID] = len(app.DeliverSequence)
+ s.lastCheckDelivered[nID] = len(app.DeliverSequence)
})
- s.confirmedBlocks[vID] = currentConfirmedBlocks
- // Check if all validators confirmed at least 'blockCount' blocks.
+ s.confirmedBlocks[nID] = currentConfirmedBlocks
+ // Check if all nodes confirmed at least 'blockCount' blocks.
for _, v := range s.confirmedBlocks {
if v < s.blockCount {
return false
diff --git a/core/test/stopper_test.go b/core/test/stopper_test.go
index 9a0e430..262e178 100644
--- a/core/test/stopper_test.go
+++ b/core/test/stopper_test.go
@@ -33,20 +33,20 @@ type StopperTestSuite struct {
func (s *StopperTestSuite) TestStopByConfirmedBlocks() {
// This test case makes sure this stopper would stop when
- // all validators confirmed at least 'x' count of blocks produced
+ // all nodes confirmed at least 'x' blocks produced
// by themselves.
var (
req = s.Require()
)
- apps := make(map[types.ValidatorID]*App)
- dbs := make(map[types.ValidatorID]blockdb.BlockDatabase)
- validators := GenerateRandomValidatorIDs(2)
+ apps := make(map[types.NodeID]*App)
+ dbs := make(map[types.NodeID]blockdb.BlockDatabase)
+ nodes := GenerateRandomNodeIDs(2)
db, err := blockdb.NewMemBackedBlockDB()
req.Nil(err)
- for _, vID := range validators {
- apps[vID] = NewApp()
- dbs[vID] = db
+ for _, nID := range nodes {
+ apps[nID] = NewApp()
+ dbs[nID] = db
}
deliver := func(blocks []*types.Block) {
hashes := common.Hashes{}
@@ -54,8 +54,8 @@ func (s *StopperTestSuite) TestStopByConfirmedBlocks() {
hashes = append(hashes, b.Hash)
req.Nil(db.Put(*b))
}
- for _, vID := range validators {
- app := apps[vID]
+ for _, nID := range nodes {
+ app := apps[nID]
for _, h := range hashes {
app.StronglyAcked(h)
}
@@ -67,35 +67,35 @@ func (s *StopperTestSuite) TestStopByConfirmedBlocks() {
}
stopper := NewStopByConfirmedBlocks(2, apps, dbs)
b00 := &types.Block{
- ProposerID: validators[0],
+ ProposerID: nodes[0],
Hash: common.NewRandomHash(),
}
deliver([]*types.Block{b00})
b10 := &types.Block{
- ProposerID: validators[1],
+ ProposerID: nodes[1],
Hash: common.NewRandomHash(),
}
b11 := &types.Block{
- ProposerID: validators[1],
+ ProposerID: nodes[1],
ParentHash: b10.Hash,
Hash: common.NewRandomHash(),
}
deliver([]*types.Block{b10, b11})
- req.False(stopper.ShouldStop(validators[1]))
+ req.False(stopper.ShouldStop(nodes[1]))
b12 := &types.Block{
- ProposerID: validators[1],
+ ProposerID: nodes[1],
ParentHash: b11.Hash,
Hash: common.NewRandomHash(),
}
deliver([]*types.Block{b12})
- req.False(stopper.ShouldStop(validators[1]))
+ req.False(stopper.ShouldStop(nodes[1]))
b01 := &types.Block{
- ProposerID: validators[0],
+ ProposerID: nodes[0],
ParentHash: b00.Hash,
Hash: common.NewRandomHash(),
}
deliver([]*types.Block{b01})
- req.True(stopper.ShouldStop(validators[0]))
+ req.True(stopper.ShouldStop(nodes[0]))
}
func TestStopper(t *testing.T) {
diff --git a/core/test/tcp-transport.go b/core/test/tcp-transport.go
index 2afea14..8bbaf9c 100644
--- a/core/test/tcp-transport.go
+++ b/core/test/tcp-transport.go
@@ -37,18 +37,18 @@ import (
// tcpMessage is the general message between peers and server.
type tcpMessage struct {
- ValidatorID types.ValidatorID `json:"vid"`
- Type string `json:"type"`
- Info string `json:"conn"`
+ NodeID types.NodeID `json:"nid"`
+ Type string `json:"type"`
+ Info string `json:"conn"`
}
// TCPTransport implements Transport interface via TCP connection.
type TCPTransport struct {
peerType TransportPeerType
- vID types.ValidatorID
+ nID types.NodeID
localPort int
- peersInfo map[types.ValidatorID]string
- peers map[types.ValidatorID]chan<- []byte
+ peersInfo map[types.NodeID]string
+ peers map[types.NodeID]chan<- []byte
peersLock sync.RWMutex
recvChannel chan *TransportEnvelope
ctx context.Context
@@ -60,7 +60,7 @@ type TCPTransport struct {
// NewTCPTransport constructs a TCPTransport instance.
func NewTCPTransport(
peerType TransportPeerType,
- vID types.ValidatorID,
+ nID types.NodeID,
latency LatencyModel,
marshaller Marshaller,
localPort int) *TCPTransport {
@@ -68,9 +68,9 @@ func NewTCPTransport(
ctx, cancel := context.WithCancel(context.Background())
return &TCPTransport{
peerType: peerType,
- vID: vID,
- peersInfo: make(map[types.ValidatorID]string),
- peers: make(map[types.ValidatorID]chan<- []byte),
+ nID: nID,
+ peersInfo: make(map[types.NodeID]string),
+ peers: make(map[types.NodeID]chan<- []byte),
recvChannel: make(chan *TransportEnvelope, 1000),
ctx: ctx,
cancel: cancel,
@@ -82,7 +82,7 @@ func NewTCPTransport(
// Send implements Transport.Send method.
func (t *TCPTransport) Send(
- endpoint types.ValidatorID, msg interface{}) (err error) {
+ endpoint types.NodeID, msg interface{}) (err error) {
payload, err := t.marshalMessage(msg)
if err != nil {
@@ -110,8 +110,8 @@ func (t *TCPTransport) Broadcast(msg interface{}) (err error) {
t.peersLock.RLock()
defer t.peersLock.RUnlock()
- for vID, ch := range t.peers {
- if vID == t.vID {
+ for nID, ch := range t.peers {
+ if nID == t.nID {
continue
}
go func(ch chan<- []byte) {
@@ -131,7 +131,7 @@ func (t *TCPTransport) Close() (err error) {
// Reset peers.
t.peersLock.Lock()
defer t.peersLock.Unlock()
- t.peers = make(map[types.ValidatorID]chan<- []byte)
+ t.peers = make(map[types.NodeID]chan<- []byte)
// Tell our user that this channel is closed.
close(t.recvChannel)
t.recvChannel = nil
@@ -139,10 +139,10 @@ func (t *TCPTransport) Close() (err error) {
}
// Peers implements Transport.Peers method.
-func (t *TCPTransport) Peers() (peers map[types.ValidatorID]struct{}) {
- peers = make(map[types.ValidatorID]struct{})
- for vID := range t.peersInfo {
- peers[vID] = struct{}{}
+func (t *TCPTransport) Peers() (peers map[types.NodeID]struct{}) {
+ peers = make(map[types.NodeID]struct{})
+ for nID := range t.peersInfo {
+ peers[nID] = struct{}{}
}
return
}
@@ -152,16 +152,16 @@ func (t *TCPTransport) marshalMessage(
msgCarrier := struct {
PeerType TransportPeerType `json:"peer_type"`
- From types.ValidatorID `json:"from"`
+ From types.NodeID `json:"from"`
Type string `json:"type"`
Payload interface{} `json:"payload"`
}{
PeerType: t.peerType,
- From: t.vID,
+ From: t.nID,
Payload: msg,
}
switch msg.(type) {
- case map[types.ValidatorID]string:
+ case map[types.NodeID]string:
msgCarrier.Type = "peerlist"
case *tcpMessage:
msgCarrier.Type = "trans-msg"
@@ -188,13 +188,13 @@ func (t *TCPTransport) marshalMessage(
func (t *TCPTransport) unmarshalMessage(
payload []byte) (
peerType TransportPeerType,
- from types.ValidatorID,
+ from types.NodeID,
msg interface{},
err error) {
msgCarrier := struct {
PeerType TransportPeerType `json:"peer_type"`
- From types.ValidatorID `json:"from"`
+ From types.NodeID `json:"from"`
Type string `json:"type"`
Payload json.RawMessage `json:"payload"`
}{}
@@ -205,7 +205,7 @@ func (t *TCPTransport) unmarshalMessage(
from = msgCarrier.From
switch msgCarrier.Type {
case "peerlist":
- var peers map[types.ValidatorID]string
+ var peers map[types.NodeID]string
if err = json.Unmarshal(msgCarrier.Payload, &peers); err != nil {
return
}
@@ -376,12 +376,12 @@ func (t *TCPTransport) listenerRoutine(listener *net.TCPListener) {
// we only utilize the write part for simplicity.
func (t *TCPTransport) buildConnectionsToPeers() (err error) {
var wg sync.WaitGroup
- for vID, addr := range t.peersInfo {
- if vID == t.vID {
+ for nID, addr := range t.peersInfo {
+ if nID == t.nID {
continue
}
wg.Add(1)
- go func(vID types.ValidatorID, addr string) {
+ go func(nID types.NodeID, addr string) {
defer wg.Done()
conn, localErr := net.Dial("tcp", addr)
@@ -394,8 +394,8 @@ func (t *TCPTransport) buildConnectionsToPeers() (err error) {
t.peersLock.Lock()
defer t.peersLock.Unlock()
- t.peers[vID] = t.connWriter(conn)
- }(vID, addr)
+ t.peers[nID] = t.connWriter(conn)
+ }(nID, addr)
}
wg.Wait()
return
@@ -410,13 +410,13 @@ type TCPTransportClient struct {
// NewTCPTransportClient constructs a TCPTransportClient instance.
func NewTCPTransportClient(
- vID types.ValidatorID,
+ nID types.NodeID,
latency LatencyModel,
marshaller Marshaller,
local bool) *TCPTransportClient {
return &TCPTransportClient{
- TCPTransport: *NewTCPTransport(TransportPeer, vID, latency, marshaller, 8080),
+ TCPTransport: *NewTCPTransport(TransportPeer, nID, latency, marshaller, 8080),
local: local,
}
}
@@ -492,15 +492,15 @@ func (t *TCPTransportClient) Join(
conn = net.JoinHostPort(ip, strconv.Itoa(t.localPort))
}
if err = t.Report(&tcpMessage{
- Type: "conn",
- ValidatorID: t.vID,
- Info: conn,
+ Type: "conn",
+ NodeID: t.nID,
+ Info: conn,
}); err != nil {
return
}
// Wait for peers list sent by server.
e := <-t.recvChannel
- if t.peersInfo, ok = e.Msg.(map[types.ValidatorID]string); !ok {
+ if t.peersInfo, ok = e.Msg.(map[types.NodeID]string); !ok {
panic(fmt.Errorf("expect peer list, not %v", e))
}
// Setup connections to other peers.
@@ -509,8 +509,8 @@ func (t *TCPTransportClient) Join(
}
// Report to server that the connections to other peers are ready.
if err = t.Report(&tcpMessage{
- Type: "conn-ready",
- ValidatorID: t.vID,
+ Type: "conn-ready",
+ NodeID: t.nID,
}); err != nil {
return
}
@@ -547,11 +547,11 @@ func NewTCPTransportServer(
serverPort int) *TCPTransportServer {
return &TCPTransportServer{
- // NOTE: the assumption here is the validator ID of peers
+ // NOTE: the assumption here is the node ID of peers
// won't be zero.
TCPTransport: *NewTCPTransport(
TransportPeerServer,
- types.ValidatorID{},
+ types.NodeID{},
nil,
marshaller,
serverPort),
@@ -586,7 +586,7 @@ func (t *TCPTransportServer) WaitForPeers(numPeers int) (err error) {
if msg.Type != "conn" {
panic(fmt.Errorf("expect connection report, not %v", e))
}
- t.peersInfo[msg.ValidatorID] = msg.Info
+ t.peersInfo[msg.NodeID] = msg.Info
// Check if we have already collected enough peers.
if len(t.peersInfo) == numPeers {
break
@@ -600,7 +600,7 @@ func (t *TCPTransportServer) WaitForPeers(numPeers int) (err error) {
return
}
// Wait for peers to send 'ready' report.
- readies := make(map[types.ValidatorID]struct{})
+ readies := make(map[types.NodeID]struct{})
for {
e := <-t.recvChannel
msg, ok := e.Msg.(*tcpMessage)
@@ -610,10 +610,10 @@ func (t *TCPTransportServer) WaitForPeers(numPeers int) (err error) {
if msg.Type != "conn-ready" {
panic(fmt.Errorf("expect connection ready, not %v", e))
}
- if _, reported := readies[msg.ValidatorID]; reported {
+ if _, reported := readies[msg.NodeID]; reported {
panic(fmt.Errorf("already report conn-ready message: %v", e))
}
- readies[msg.ValidatorID] = struct{}{}
+ readies[msg.NodeID] = struct{}{}
if len(readies) == numPeers {
break
}
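
The TCP flavor follows the same Host/Join/WaitForPeers handshake as the fake transport. A rough client-side sketch, assuming the core/test package and the testMarshaller from transport_test.go below; the port and the nil latency are illustrative, and the NewTCPTransportServer argument order is inferred from the constructor body shown above:

	nID := types.NodeID{Hash: common.NewRandomHash()}
	server := NewTCPTransportServer(&testMarshaller{}, 8080)
	serverRecv, err := server.Host()
	if err != nil {
		panic(err)
	}
	client := NewTCPTransportClient(nID, nil, &testMarshaller{}, true)
	recv, err := client.Join(serverRecv)
	if err != nil {
		panic(err)
	}
	_ = recv // envelopes now carry From as a types.NodeID
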
diff --git a/core/test/transport_test.go b/core/test/transport_test.go
index 9bfc12b..1d4b53d 100644
--- a/core/test/transport_test.go
+++ b/core/test/transport_test.go
@@ -32,21 +32,21 @@ import (
)
type testPeer struct {
- vID types.ValidatorID
+ nID types.NodeID
trans TransportClient
recv <-chan *TransportEnvelope
expectedEchoHash common.Hash
echoBlock *types.Block
myBlock *types.Block
myBlockSentTime time.Time
- blocks map[types.ValidatorID]*types.Block
+ blocks map[types.NodeID]*types.Block
blocksReceiveTime map[common.Hash]time.Time
}
type testPeerServer struct {
trans TransportServer
recv chan *TransportEnvelope
- peerBlocks map[types.ValidatorID]*types.Block
+ peerBlocks map[types.NodeID]*types.Block
}
type testMarshaller struct{}
@@ -88,7 +88,7 @@ type TransportTestSuite struct {
func (s *TransportTestSuite) baseTest(
server *testPeerServer,
- peers map[types.ValidatorID]*testPeer,
+ peers map[types.NodeID]*testPeer,
delay time.Duration) {
var (
@@ -98,11 +98,11 @@ func (s *TransportTestSuite) baseTest(
// For each peer, do the following:
// - broadcast 1 block.
- // - report one random block to server, along with its validator ID.
+ // - report one random block to server, along with its node ID.
// Server would echo the random block back to the peer.
handleServer := func(server *testPeerServer) {
defer wg.Done()
- server.peerBlocks = make(map[types.ValidatorID]*types.Block)
+ server.peerBlocks = make(map[types.NodeID]*types.Block)
for {
select {
case e := <-server.recv:
@@ -123,14 +123,14 @@ func (s *TransportTestSuite) baseTest(
}
handlePeer := func(peer *testPeer) {
defer wg.Done()
- peer.blocks = make(map[types.ValidatorID]*types.Block)
+ peer.blocks = make(map[types.NodeID]*types.Block)
peer.blocksReceiveTime = make(map[common.Hash]time.Time)
for {
select {
case e := <-peer.recv:
switch v := e.Msg.(type) {
case *types.Block:
- if v.ProposerID == peer.vID {
+ if v.ProposerID == peer.nID {
req.Equal(e.PeerType, TransportPeerServer)
peer.echoBlock = v
} else {
@@ -150,11 +150,11 @@ func (s *TransportTestSuite) baseTest(
}
wg.Add(len(peers) + 1)
go handleServer(server)
- for vID, peer := range peers {
+ for nID, peer := range peers {
go handlePeer(peer)
// Broadcast a block.
peer.myBlock = &types.Block{
- ProposerID: vID,
+ ProposerID: nID,
Hash: common.NewRandomHash(),
}
peer.myBlockSentTime = time.Now()
@@ -162,28 +162,28 @@ func (s *TransportTestSuite) baseTest(
// Report a block to server.
peer.expectedEchoHash = common.NewRandomHash()
peer.trans.Report(&types.Block{
- ProposerID: vID,
+ ProposerID: nID,
Hash: peer.expectedEchoHash,
})
}
wg.Wait()
// Make sure each sent block is received.
- for vID, peer := range peers {
+ for nID, peer := range peers {
req.NotNil(peer.echoBlock)
req.Equal(peer.echoBlock.Hash, peer.expectedEchoHash)
- for otherVID, otherPeer := range peers {
- if vID == otherVID {
+ for othernID, otherPeer := range peers {
+ if nID == othernID {
continue
}
req.Equal(
peer.myBlock.Hash,
- otherPeer.blocks[peer.vID].Hash)
+ otherPeer.blocks[peer.nID].Hash)
}
}
// Make sure the latency is as expected.
- for vID, peer := range peers {
- for otherVID, otherPeer := range peers {
- if otherVID == vID {
+ for nID, peer := range peers {
+ for othernID, otherPeer := range peers {
+ if othernID == nID {
continue
}
req.True(otherPeer.blocksReceiveTime[peer.myBlock.Hash].Sub(
@@ -196,8 +196,8 @@ func (s *TransportTestSuite) TestFake() {
var (
peerCount = 13
req = s.Require()
- peers = make(map[types.ValidatorID]*testPeer)
- vIDs = GenerateRandomValidatorIDs(peerCount)
+ peers = make(map[types.NodeID]*testPeer)
+ nIDs = GenerateRandomNodeIDs(peerCount)
err error
wg sync.WaitGroup
latency = &FixedLatencyModel{Latency: 300}
@@ -207,13 +207,13 @@ func (s *TransportTestSuite) TestFake() {
server.recv, err = server.trans.Host()
req.Nil(err)
// Setup Peers
- wg.Add(len(vIDs))
- for _, vID := range vIDs {
+ wg.Add(len(nIDs))
+ for _, nID := range nIDs {
peer := &testPeer{
- vID: vID,
- trans: NewFakeTransportClient(vID, latency),
+ nID: nID,
+ trans: NewFakeTransportClient(nID, latency),
}
- peers[vID] = peer
+ peers[nID] = peer
go func() {
defer wg.Done()
recv, err := peer.trans.Join(server.recv)
@@ -236,8 +236,8 @@ func (s *TransportTestSuite) TestTCPLocal() {
var (
peerCount = 25
req = s.Require()
- peers = make(map[types.ValidatorID]*testPeer)
- vIDs = GenerateRandomValidatorIDs(peerCount)
+ peers = make(map[types.NodeID]*testPeer)
+ nIDs = GenerateRandomNodeIDs(peerCount)
err error
wg sync.WaitGroup
latency = &FixedLatencyModel{Latency: 300}
@@ -250,13 +250,13 @@ func (s *TransportTestSuite) TestTCPLocal() {
server.recv, err = server.trans.Host()
req.Nil(err)
// Setup Peers
- wg.Add(len(vIDs))
- for _, vID := range vIDs {
+ wg.Add(len(nIDs))
+ for _, nID := range nIDs {
peer := &testPeer{
- vID: vID,
- trans: NewTCPTransportClient(vID, latency, &testMarshaller{}, true),
+ nID: nID,
+ trans: NewTCPTransportClient(nID, latency, &testMarshaller{}, true),
}
- peers[vID] = peer
+ peers[nID] = peer
go func() {
defer wg.Done()
diff --git a/core/test/utils.go b/core/test/utils.go
index 138e8a1..887ef14 100644
--- a/core/test/utils.go
+++ b/core/test/utils.go
@@ -34,11 +34,11 @@ func stableRandomHash(block *types.Block) (common.Hash, error) {
return common.NewRandomHash(), nil
}
-// GenerateRandomValidatorIDs generates randomly a slices of types.ValidatorID.
-func GenerateRandomValidatorIDs(validatorCount int) (vIDs types.ValidatorIDs) {
- vIDs = types.ValidatorIDs{}
- for i := 0; i < validatorCount; i++ {
- vIDs = append(vIDs, types.ValidatorID{Hash: common.NewRandomHash()})
+// GenerateRandomNodeIDs randomly generates a slice of types.NodeID.
+func GenerateRandomNodeIDs(nodeCount int) (nIDs types.NodeIDs) {
+ nIDs = types.NodeIDs{}
+ for i := 0; i < nodeCount; i++ {
+ nIDs = append(nIDs, types.NodeID{Hash: common.NewRandomHash()})
}
return
}