author     Mission Liao <mission.liao@dexon.org>  2018-10-19 18:39:17 +0800
committer  GitHub <noreply@github.com>            2018-10-19 18:39:17 +0800
commit     1f7491df37caf974ffa0c824c4c02a8fe2aafcd9 (patch)
tree       55c6e653c3d78ab34e3fbe8627ba1c0a62cb53a8
parent     bec97aadfa95ebb42ef042bd53c7976ae410c496 (diff)
core: initial commit for logger (#228)
* Replace "log.*" with logger.
* Add simple logger to log with log package.
* Add debug logs to all calls to these interfaces:
  - core.Application
  - core.Governance
  - core.Network
* Add Stringer to these types:
  - types.DKGComplaint
  - types.AgreementResult
  - types.DKGMasterPublicKey
  - types.DKGFinalize
-rw-r--r--  common/logger.go                  |  87
-rw-r--r--  core/configuration-chain.go       |  36
-rw-r--r--  core/configuration-chain_test.go  |   3
-rw-r--r--  core/consensus.go                 | 143
-rw-r--r--  core/consensus_test.go            |   4
-rw-r--r--  core/lattice.go                   |  12
-rw-r--r--  core/lattice_test.go              |   3
-rw-r--r--  core/types/block-randomness.go    |   7
-rw-r--r--  core/types/dkg.go                 |  17
-rw-r--r--  core/types/position.go            |   9
-rw-r--r--  integration_test/node.go          |   3
-rw-r--r--  simulation/node.go                |   8
12 files changed, 280 insertions(+), 52 deletions(-)
diff --git a/common/logger.go b/common/logger.go
new file mode 100644
index 0000000..9dc5d23
--- /dev/null
+++ b/common/logger.go
@@ -0,0 +1,87 @@
+// Copyright 2018 The dexon-consensus-core Authors
+// This file is part of the dexon-consensus-core library.
+//
+// The dexon-consensus-core library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus-core library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package common
+
+import "log"
+
+// Logger defines the way to receive logs from a Consensus instance.
+// NOTE: parameters in 'ctx' should be paired as key-value mappings. For
+// example, to log an error with a message:
+// logger.Error("some message", "error", err)
+// which is similar to loggers with context:
+// logger.Error("some message", map[string]interface{}{
+// "error": err,
+// })
+type Logger interface {
+ // Debug logs debug level logs.
+ Debug(msg string, ctx ...interface{})
+ Info(msg string, ctx ...interface{})
+ Warn(msg string, ctx ...interface{})
+ Error(msg string, ctx ...interface{})
+}
+
+// NullLogger logs nothing.
+type NullLogger struct{}
+
+// Debug implements Logger interface.
+func (logger *NullLogger) Debug(msg string, ctx ...interface{}) {
+}
+
+// Info implements Logger interface.
+func (logger *NullLogger) Info(msg string, ctx ...interface{}) {
+}
+
+// Warn implements Logger interface.
+func (logger *NullLogger) Warn(msg string, ctx ...interface{}) {
+}
+
+// Error implements Logger interface.
+func (logger *NullLogger) Error(msg string, ctx ...interface{}) {
+}
+
+// SimpleLogger logs everything.
+type SimpleLogger struct{}
+
+// composeVargs merges msg and ctx into one slice that can be passed to log.Println.
+func composeVargs(msg string, ctxs []interface{}) []interface{} {
+ args := []interface{}{msg}
+ for _, c := range ctxs {
+ args = append(args, c)
+ }
+ return args
+}
+
+// Debug implements Logger interface.
+func (logger *SimpleLogger) Debug(msg string, ctx ...interface{}) {
+ log.Println(composeVargs(msg, ctx)...)
+}
+
+// Info implements Logger interface.
+func (logger *SimpleLogger) Info(msg string, ctx ...interface{}) {
+ log.Println(composeVargs(msg, ctx)...)
+}
+
+// Warn implements Logger interface.
+func (logger *SimpleLogger) Warn(msg string, ctx ...interface{}) {
+ log.Println(composeVargs(msg, ctx)...)
+}
+
+// Error implements Logger interface.
+func (logger *SimpleLogger) Error(msg string, ctx ...interface{}) {
+ log.Println(composeVargs(msg, ctx)...)
+}
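Since the interface is only four methods, hooking an existing
structured logger into Consensus is cheap. A hypothetical adapter
sketch (prefixLogger is not part of this commit; it only shows the
shape an integration could take):

package main

import (
	"log"

	"github.com/dexon-foundation/dexon-consensus-core/common"
)

// prefixLogger tags each line with its severity before delegating to
// the standard log package.
type prefixLogger struct{}

func (prefixLogger) Debug(msg string, ctx ...interface{}) { emit("DEBUG", msg, ctx) }
func (prefixLogger) Info(msg string, ctx ...interface{})  { emit("INFO", msg, ctx) }
func (prefixLogger) Warn(msg string, ctx ...interface{})  { emit("WARN", msg, ctx) }
func (prefixLogger) Error(msg string, ctx ...interface{}) { emit("ERROR", msg, ctx) }

// emit prepends the level, then hands the flat argument list to log.Println.
func emit(level, msg string, ctx []interface{}) {
	log.Println(append([]interface{}{level, msg}, ctx...)...)
}

// Compile-time check that prefixLogger satisfies common.Logger.
var _ common.Logger = prefixLogger{}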
diff --git a/core/configuration-chain.go b/core/configuration-chain.go
index c40d788..8834e0d 100644
--- a/core/configuration-chain.go
+++ b/core/configuration-chain.go
@@ -19,7 +19,6 @@ package core
import (
"fmt"
- "log"
"sync"
"time"
@@ -43,6 +42,7 @@ type configurationChain struct {
recv dkgReceiver
gov Governance
dkg *dkgProtocol
+ logger common.Logger
dkgLock sync.RWMutex
dkgSigner map[uint64]*dkgShareSecret
gpk map[uint64]*DKGGroupPublicKey
@@ -58,11 +58,13 @@ type configurationChain struct {
func newConfigurationChain(
ID types.NodeID,
recv dkgReceiver,
- gov Governance) *configurationChain {
+ gov Governance,
+ logger common.Logger) *configurationChain {
return &configurationChain{
ID: ID,
recv: recv,
gov: gov,
+ logger: logger,
dkgSigner: make(map[uint64]*dkgShareSecret),
gpk: make(map[uint64]*DKGGroupPublicKey),
tsig: make(map[common.Hash]*tsigProtocol),
@@ -101,6 +103,7 @@ func (cc *configurationChain) runDKG(round uint64) error {
<-ticker.Tick()
cc.dkgLock.Lock()
// Phase 2(T = 0): Exchange DKG secret key share.
+ cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
cc.dkg.processMasterPublicKeys(cc.gov.DKGMasterPublicKeys(round))
// Phase 3(T = 0~λ): Propose complaint.
// Propose complaint is done in `processMasterPublicKeys`.
@@ -113,6 +116,7 @@ func (cc *configurationChain) runDKG(round uint64) error {
<-ticker.Tick()
cc.dkgLock.Lock()
// Phase 5(T = 2λ): Propose Anti nack complaint.
+ cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
cc.dkg.processNackComplaints(cc.gov.DKGComplaints(round))
cc.dkgLock.Unlock()
<-ticker.Tick()
@@ -123,6 +127,7 @@ func (cc *configurationChain) runDKG(round uint64) error {
<-ticker.Tick()
cc.dkgLock.Lock()
// Phase 7(T = 4λ): Enforce complaints and nack complaints.
+ cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
cc.dkg.enforceNackComplaints(cc.gov.DKGComplaints(round))
// Enforce complaint is done in `processPrivateShare`.
// Phase 8(T = 5λ): DKG finalize.
@@ -136,10 +141,14 @@ func (cc *configurationChain) runDKG(round uint64) error {
cc.dkgLock.Lock()
// Normally, IsDKGFinal would return true here. Use this in case of
// unexpected network fluctuation, and to ensure the robustness of the DKG protocol.
+ cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
for !cc.gov.IsDKGFinal(round) {
- log.Printf("[%s] DKG is not ready yet. Try again later...\n", cc.ID)
+ cc.logger.Info("DKG is not ready yet. Try again later...",
+ "nodeID", cc.ID)
time.Sleep(500 * time.Millisecond)
}
+ cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
+ cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
gpk, err := NewDKGGroupPublicKey(round,
cc.gov.DKGMasterPublicKeys(round),
cc.gov.DKGComplaints(round),
@@ -155,8 +164,11 @@ func (cc *configurationChain) runDKG(round uint64) error {
for nID := range gpk.qualifyNodeIDs {
qualifies += fmt.Sprintf("%s ", nID.String()[:6])
}
- log.Printf("[%s] Qualify Nodes(%d): (%d) %s\n",
- cc.ID, round, len(gpk.qualifyIDs), qualifies)
+ cc.logger.Info("Qualify Nodes",
+ "nodeID", cc.ID,
+ "round", round,
+ "count", len(gpk.qualifyIDs),
+ "qualifies", qualifies)
cc.dkgResult.Lock()
defer cc.dkgResult.Unlock()
cc.dkgSigner[round] = signer
@@ -214,7 +226,9 @@ func (cc *configurationChain) runTSig(
go func() {
for _, psig := range pendingPsig {
if err := cc.processPartialSignature(psig); err != nil {
- log.Printf("[%s] %s", cc.ID, err)
+ cc.logger.Error("failed to process partial signature",
+ "nodeID", cc.ID,
+ "error", err)
}
}
}()
@@ -240,14 +254,20 @@ func (cc *configurationChain) runBlockTSig(
if err != nil {
return crypto.Signature{}, err
}
- log.Printf("[%s] Block TSIG(%d): %s\n", cc.ID, round, sig)
+ cc.logger.Info("Block TSIG",
+ "nodeID", cc.ID,
+ "round", round,
+ "signature", sig)
return sig, nil
}
func (cc *configurationChain) runCRSTSig(
round uint64, crs common.Hash) ([]byte, error) {
sig, err := cc.runTSig(round, crs)
- log.Printf("[%s] CRS(%d): %s\n", cc.ID, round+1, sig)
+ cc.logger.Info("CRS",
+ "nodeID", cc.ID,
+ "round", round+1,
+ "signature", sig)
return sig.Signature[:], err
}
diff --git a/core/configuration-chain_test.go b/core/configuration-chain_test.go
index 4d104ae..19d50a3 100644
--- a/core/configuration-chain_test.go
+++ b/core/configuration-chain_test.go
@@ -168,7 +168,8 @@ func (s *ConfigurationChainTestSuite) runDKG(
for _, nID := range s.nIDs {
gov, err := test.NewGovernance(0, 50*time.Millisecond)
s.Require().NoError(err)
- cfgChains[nID] = newConfigurationChain(nID, recv, gov)
+ cfgChains[nID] = newConfigurationChain(
+ nID, recv, gov, &common.NullLogger{})
recv.nodes[nID] = cfgChains[nID]
recv.govs[nID] = gov
}
diff --git a/core/consensus.go b/core/consensus.go
index 05e3301..2f46ec1 100644
--- a/core/consensus.go
+++ b/core/consensus.go
@@ -20,7 +20,6 @@ package core
import (
"context"
"fmt"
- "log"
"sync"
"time"
@@ -42,8 +41,6 @@ var (
"genesis block should be empty")
ErrUnknownBlockProposed = fmt.Errorf(
"unknown block is proposed")
- ErrUnknownBlockConfirmed = fmt.Errorf(
- "unknown block is confirmed")
ErrIncorrectAgreementResultPosition = fmt.Errorf(
"incorrect agreement result position")
ErrNotEnoughVotes = fmt.Errorf(
@@ -67,14 +64,16 @@ type consensusBAReceiver struct {
func (recv *consensusBAReceiver) ProposeVote(vote *types.Vote) {
if err := recv.agreementModule.prepareVote(vote); err != nil {
- log.Println(err)
+ recv.consensus.logger.Error("Failed to prepare vote", "error", err)
return
}
go func() {
if err := recv.agreementModule.processVote(vote); err != nil {
- log.Println(err)
+ recv.consensus.logger.Error("Failed to process vote", "error", err)
return
}
+ recv.consensus.logger.Debug("Calling Network.BroadcastVote",
+ "vote", vote)
recv.consensus.network.BroadcastVote(vote)
}()
}
@@ -83,9 +82,10 @@ func (recv *consensusBAReceiver) ProposeBlock() common.Hash {
block := recv.consensus.proposeBlock(recv.chainID, recv.round)
recv.consensus.baModules[recv.chainID].addCandidateBlock(block)
if err := recv.consensus.preProcessBlock(block); err != nil {
- log.Println(err)
+ recv.consensus.logger.Error("Failed to pre-process block", "error", err)
return common.Hash{}
}
+ recv.consensus.logger.Debug("Calling Network.BroadcastBlock", "block", block)
recv.consensus.network.BroadcastBlock(block)
return block.Hash
}
@@ -95,7 +95,7 @@ func (recv *consensusBAReceiver) ConfirmBlock(
block, exist := recv.consensus.baModules[recv.chainID].
findCandidateBlock(hash)
if !exist {
- log.Println(ErrUnknownBlockConfirmed, hash)
+ recv.consensus.logger.Error("Unknown block confirmed", "hash", hash)
return
}
recv.consensus.ccModule.registerBlock(block)
@@ -103,13 +103,16 @@ func (recv *consensusBAReceiver) ConfirmBlock(
for _, vote := range votes {
voteList = append(voteList, *vote)
}
- recv.consensus.network.BroadcastAgreementResult(&types.AgreementResult{
+ result := &types.AgreementResult{
BlockHash: hash,
Position: block.Position,
Votes: voteList,
- })
+ }
+ recv.consensus.logger.Debug("Calling Network.BroadcastAgreementResult",
+ "result", result)
+ recv.consensus.network.BroadcastAgreementResult(result)
if err := recv.consensus.processBlock(block); err != nil {
- log.Println(err)
+ recv.consensus.logger.Error("Failed to process block", "error", err)
return
}
if block.Timestamp.After(recv.changeNotaryTime) {
@@ -127,15 +130,18 @@ type consensusDKGReceiver struct {
authModule *Authenticator
nodeSetCache *NodeSetCache
network Network
+ logger common.Logger
}
// ProposeDKGComplaint proposes a DKGComplaint.
func (recv *consensusDKGReceiver) ProposeDKGComplaint(
complaint *types.DKGComplaint) {
if err := recv.authModule.SignDKGComplaint(complaint); err != nil {
- log.Println(err)
+ recv.logger.Error("Failed to sign DKG complaint", "error", err)
return
}
+ recv.logger.Debug("Calling Governace.AddDKGComplaint",
+ "complaint", complaint)
recv.gov.AddDKGComplaint(complaint.Round, complaint)
}
@@ -143,9 +149,10 @@ func (recv *consensusDKGReceiver) ProposeDKGComplaint(
func (recv *consensusDKGReceiver) ProposeDKGMasterPublicKey(
mpk *types.DKGMasterPublicKey) {
if err := recv.authModule.SignDKGMasterPublicKey(mpk); err != nil {
- log.Println(err)
+ recv.logger.Error("Failed to sign DKG master public key", "error", err)
return
}
+ recv.logger.Debug("Calling Governance.AddDKGMasterPublicKey", "key", mpk)
recv.gov.AddDKGMasterPublicKey(mpk.Round, mpk)
}
@@ -153,14 +160,17 @@ func (recv *consensusDKGReceiver) ProposeDKGMasterPublicKey(
func (recv *consensusDKGReceiver) ProposeDKGPrivateShare(
prv *types.DKGPrivateShare) {
if err := recv.authModule.SignDKGPrivateShare(prv); err != nil {
- log.Println(err)
+ recv.logger.Error("Failed to sign DKG private share", "error", err)
return
}
receiverPubKey, exists := recv.nodeSetCache.GetPublicKey(prv.ReceiverID)
if !exists {
- log.Println("public key for receiver not found")
+ recv.logger.Error("Public key for receiver not found",
+ "receiver", prv.ReceiverID.String()[:6])
return
}
+ recv.logger.Debug("Calling Network.SendDKGPrivateShare",
+ "receiver", string(receiverPubKey.Bytes()))
recv.network.SendDKGPrivateShare(receiverPubKey, prv)
}
@@ -169,19 +179,21 @@ func (recv *consensusDKGReceiver) ProposeDKGAntiNackComplaint(
prv *types.DKGPrivateShare) {
if prv.ProposerID == recv.ID {
if err := recv.authModule.SignDKGPrivateShare(prv); err != nil {
- log.Println(err)
+ recv.logger.Error("Failed sign DKG private share", "error", err)
return
}
}
+ recv.logger.Debug("Calling Network.BroadcastDKGPrivateShare", "share", prv)
recv.network.BroadcastDKGPrivateShare(prv)
}
// ProposeDKGFinalize proposes a DKGFinalize message.
func (recv *consensusDKGReceiver) ProposeDKGFinalize(final *types.DKGFinalize) {
if err := recv.authModule.SignDKGFinalize(final); err != nil {
- log.Println(err)
+ recv.logger.Error("Faield to sign DKG finalize", "error", err)
return
}
+ recv.logger.Debug("Calling Governance.AddDKGFinalize", "final", final)
recv.gov.AddDKGFinalize(final.Round, final)
}
@@ -222,6 +234,7 @@ type Consensus struct {
ctx context.Context
ctxCancel context.CancelFunc
event *common.Event
+ logger common.Logger
}
// NewConsensus constructs a Consensus instance.
@@ -231,14 +244,17 @@ func NewConsensus(
gov Governance,
db blockdb.BlockDatabase,
network Network,
- prv crypto.PrivateKey) *Consensus {
+ prv crypto.PrivateKey,
+ logger common.Logger) *Consensus {
// TODO(w): load latest blockHeight from DB, and use config at that height.
var (
round uint64
)
+ logger.Debug("Calling Governance.Configuration", "round", round)
config := gov.Configuration(round)
nodeSetCache := NewNodeSetCache(gov)
+ logger.Debug("Calling Governance.CRS", "round", round)
crs := gov.CRS(round)
// Setup acking by information returned from Governance.
nodes, err := nodeSetCache.GetNodeSet(round)
@@ -252,7 +268,8 @@ func NewConsensus(
// Setup nonblocking module.
nbModule := newNonBlocking(app, debugApp)
// Init lattice.
- lattice := NewLattice(dMoment, config, authModule, nbModule, nbModule, db)
+ lattice := NewLattice(
+ dMoment, config, authModule, nbModule, nbModule, db, logger)
// Init configuration chain.
ID := types.NewNodeID(prv.PublicKey())
cfgModule := newConfigurationChain(
@@ -263,8 +280,10 @@ func NewConsensus(
authModule: authModule,
nodeSetCache: nodeSetCache,
network: network,
+ logger: logger,
},
- gov)
+ gov,
+ logger)
// Construct Consensus instance.
con := &Consensus{
ID: ID,
@@ -282,6 +301,7 @@ func NewConsensus(
nodeSetCache: nodeSetCache,
authModule: authModule,
event: common.NewEvent(),
+ logger: logger,
}
con.baModules = make([]*agreement, config.NumChains)
@@ -316,11 +336,13 @@ func (con *Consensus) Run(initBlock *types.Block) {
con.ccModule.init(initBlock)
// TODO(jimmy-dexon): change AppendConfig to add config for specific round.
for i := uint64(0); i < initBlock.Position.Round; i++ {
+ con.logger.Debug("Calling Governance.Configuration", "round", i+1)
cfg := con.gov.Configuration(i + 1)
if err := con.lattice.AppendConfig(i+1, cfg); err != nil {
panic(err)
}
}
+ con.logger.Debug("Calling Network.ReceiveChan")
go con.processMsg(con.network.ReceiveChan())
con.cfgModule.registerDKG(con.round, int(con.currentConfig.DKGSetSize)/3+1)
con.event.RegisterTime(con.dMoment.Add(con.currentConfig.RoundInterval/4),
@@ -328,6 +350,7 @@ func (con *Consensus) Run(initBlock *types.Block) {
con.runDKGTSIG(con.round)
})
round1 := uint64(1)
+ con.logger.Debug("Calling Governance.Configuration", "round", round1)
con.lattice.AppendConfig(round1, con.gov.Configuration(round1))
con.initialRound(con.dMoment)
ticks := make([]chan struct{}, 0, con.currentConfig.NumChains)
@@ -372,6 +395,9 @@ BALoop:
if err != nil {
panic(err)
}
+ con.logger.Debug("Calling Governance.Configuration",
+ "round", recv.round)
+ con.logger.Debug("Calling Governance.CRS", "round", recv.round)
nIDs = nodes.GetSubSet(
int(con.gov.Configuration(recv.round).NotarySetSize),
types.NewNotarySetTarget(con.gov.CRS(recv.round), chainID))
@@ -383,7 +409,9 @@ BALoop:
}
err := agreement.nextState()
if err != nil {
- log.Printf("[%s] %s\n", con.ID.String(), err)
+ con.logger.Error("Failed to proceed to next state",
+ "nodeID", con.ID.String(),
+ "error", err)
break BALoop
}
for i := 0; i < agreement.clocks(); i++ {
@@ -420,8 +448,8 @@ func (con *Consensus) runDKGTSIG(round uint64) {
DKGTime := time.Now().Sub(startTime)
if DKGTime.Nanoseconds() >=
con.currentConfig.RoundInterval.Nanoseconds()/2 {
- log.Printf("[%s] WARNING!!! Your computer cannot finish DKG on time!\n",
- con.ID)
+ con.logger.Warn("Your computer cannot finish DKG on time!",
+ "nodeID", con.ID.String())
}
}()
if err := con.cfgModule.runDKG(round); err != nil {
@@ -431,6 +459,7 @@ func (con *Consensus) runDKGTSIG(round uint64) {
if err != nil {
panic(err)
}
+ con.logger.Debug("Calling Governance.Configuration", "round", round)
hash := HashConfigurationBlock(
nodes.IDs,
con.gov.Configuration(round),
@@ -447,6 +476,10 @@ func (con *Consensus) runDKGTSIG(round uint64) {
if err = con.cfgModule.processPartialSignature(psig); err != nil {
panic(err)
}
+ con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
+ "proposer", psig.ProposerID,
+ "round", psig.Round,
+ "hash", psig.Hash)
con.network.BroadcastDKGPartialSignature(psig)
if _, err = con.cfgModule.runBlockTSig(round, hash); err != nil {
panic(err)
@@ -456,20 +489,29 @@ func (con *Consensus) runDKGTSIG(round uint64) {
func (con *Consensus) runCRS() {
// Start running next round CRS.
+ con.logger.Debug("Calling Governance.CRS", "round", con.round)
psig, err := con.cfgModule.preparePartialSignature(
con.round, con.gov.CRS(con.round))
if err != nil {
- log.Println(err)
+ con.logger.Error("Failed to prepare partial signature", "error", err)
} else if err = con.authModule.SignDKGPartialSignature(psig); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to sign DKG partial signature", "error", err)
} else if err = con.cfgModule.processPartialSignature(psig); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to process partial signature", "error", err)
} else {
+ con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
+ "proposer", psig.ProposerID,
+ "round", psig.Round,
+ "hash", psig.Hash)
con.network.BroadcastDKGPartialSignature(psig)
+ con.logger.Debug("Calling Governance.CRS", "round", con.round)
crs, err := con.cfgModule.runCRSTSig(con.round, con.gov.CRS(con.round))
if err != nil {
- log.Println(err)
+ con.logger.Error("Failed to run CRS Tsig", "error", err)
} else {
+ con.logger.Debug("Calling Governance.ProposeCRS",
+ "round", con.round,
+ "crs", crs)
con.gov.ProposeCRS(crs)
}
}
@@ -481,6 +523,7 @@ func (con *Consensus) initialRound(startTime time.Time) {
return
default:
}
+ con.logger.Debug("Calling Governance.Configuration", "round", con.round)
con.currentConfig = con.gov.Configuration(con.round)
con.event.RegisterTime(startTime.Add(con.currentConfig.RoundInterval/2),
@@ -532,33 +575,40 @@ func (con *Consensus) processMsg(msgChan <-chan interface{}) {
// For sync mode.
if val.IsFinalized() {
if err := con.processFinalizedBlock(val); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to process finalized block",
+ "error", err)
}
} else {
if err := con.preProcessBlock(val); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to pre process block",
+ "error", err)
}
}
case *types.Vote:
if err := con.ProcessVote(val); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to process vote",
+ "error", err)
}
case *types.AgreementResult:
if err := con.ProcessAgreementResult(val); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to process agreement result",
+ "error", err)
}
case *types.BlockRandomnessResult:
if err := con.ProcessBlockRandomnessResult(val); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to process block randomness result",
+ "error", err)
}
case *types.DKGPrivateShare:
if err := con.cfgModule.processPrivateShare(val); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to process private share",
+ "error", err)
}
case *types.DKGPartialSignature:
if err := con.cfgModule.processPartialSignature(val); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to process partial signature",
+ "error", err)
}
}
}
@@ -572,7 +622,7 @@ func (con *Consensus) proposeBlock(chainID uint32, round uint64) *types.Block {
},
}
if err := con.prepareBlock(block, time.Now().UTC()); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to prepare block", "error", err)
return nil
}
return block
@@ -624,6 +674,7 @@ func (con *Consensus) ProcessAgreementResult(
if !con.cfgModule.touchTSigHash(rand.BlockHash) {
return nil
}
+ con.logger.Debug("Calling Network.BroadcastAgreementResult", "result", rand)
con.network.BroadcastAgreementResult(rand)
dkgSet, err := con.nodeSetCache.GetDKGSet(rand.Position.Round)
if err != nil {
@@ -642,12 +693,16 @@ func (con *Consensus) ProcessAgreementResult(
if err = con.cfgModule.processPartialSignature(psig); err != nil {
return err
}
+ con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
+ "proposer", psig.ProposerID,
+ "round", psig.Round,
+ "hash", psig.Hash)
con.network.BroadcastDKGPartialSignature(psig)
go func() {
tsig, err := con.cfgModule.runTSig(rand.Position.Round, rand.BlockHash)
if err != nil {
if err != ErrTSigAlreadyRunning {
- log.Println(err)
+ con.logger.Error("Faield to run TSIG", "error", err)
}
return
}
@@ -657,9 +712,14 @@ func (con *Consensus) ProcessAgreementResult(
Randomness: tsig.Signature,
}
if err := con.ProcessBlockRandomnessResult(result); err != nil {
- log.Println(err)
+ con.logger.Error("Failed to process randomness result",
+ "error", err)
return
}
+ con.logger.Debug("Calling Network.BroadcastRandomnessResult",
+ "hash", result.BlockHash,
+ "position", result.Position,
+ "randomness", string(result.Randomness))
con.network.BroadcastRandomnessResult(result)
}()
return nil
@@ -676,6 +736,10 @@ func (con *Consensus) ProcessBlockRandomnessResult(
}
// TODO(jimmy-dexon): reuse the GPK.
round := rand.Position.Round
+ con.logger.Debug("Calling Governance.DKGMasterPublicKeys",
+ "round", con.round)
+ con.logger.Debug("Calling Governance.DKGComplaints", "round", con.round)
+ con.logger.Debug("Calling Governance.Configuration", "round", con.round)
gpk, err := NewDKGGroupPublicKey(round,
con.gov.DKGMasterPublicKeys(round),
con.gov.DKGComplaints(round),
@@ -687,6 +751,10 @@ func (con *Consensus) ProcessBlockRandomnessResult(
rand.BlockHash, crypto.Signature{Signature: rand.Randomness}) {
return ErrIncorrectBlockRandomnessResult
}
+ con.logger.Debug("Calling Network.BroadcastRandomnessResult",
+ "hash", rand.BlockHash,
+ "position", rand.Position,
+ "randomness", string(rand.Randomness))
con.network.BroadcastRandomnessResult(rand)
if err := con.ccModule.processBlockRandomnessResult(rand); err != nil {
if err != ErrBlockNotRegistered {
@@ -734,6 +802,8 @@ func (con *Consensus) processBlock(block *types.Block) (err error) {
}
if b.Position.Round > con.round {
con.round++
+ con.logger.Debug("Calling Governance.Configuration",
+ "round", con.round+1)
con.lattice.AppendConfig(con.round+1, con.gov.Configuration(con.round+1))
}
// TODO(mission): clone types.FinalizationResult
@@ -780,6 +850,7 @@ func (con *Consensus) prepareBlock(b *types.Block,
}
// TODO(mission): decide CRS by block's round, which could be determined by
// block's info (ex. position, timestamp).
+ con.logger.Debug("Calling Governance.CRS", "round", 0)
if err = con.authModule.SignCRS(b, con.gov.CRS(0)); err != nil {
return
}
diff --git a/core/consensus_test.go b/core/consensus_test.go
index ed1658f..bfe01da 100644
--- a/core/consensus_test.go
+++ b/core/consensus_test.go
@@ -168,8 +168,8 @@ func (s *ConsensusTestSuite) prepareConsensus(
s.Require().Nil(err)
nID := types.NewNodeID(prvKey.PublicKey())
network := conn.newNetwork(nID)
- con := NewConsensus(dMoment, app, gov, db,
- network, prvKey)
+ con := NewConsensus(
+ dMoment, app, gov, db, network, prvKey, &common.NullLogger{})
con.ccModule.init(&types.Block{})
conn.setCon(nID, con)
return app, con
diff --git a/core/lattice.go b/core/lattice.go
index e6f9848..73fdcb0 100644
--- a/core/lattice.go
+++ b/core/lattice.go
@@ -37,6 +37,7 @@ type Lattice struct {
data *latticeData
toModule *totalOrdering
ctModule *consensusTimestamp
+ logger common.Logger
}
// NewLattice constructs a Lattice instance.
@@ -46,7 +47,8 @@ func NewLattice(
authModule *Authenticator,
app Application,
debug Debug,
- db blockdb.BlockDatabase) (s *Lattice) {
+ db blockdb.BlockDatabase,
+ logger common.Logger) (s *Lattice) {
// Create genesis latticeDataConfig.
dataConfig := newGenesisLatticeDataConfig(dMoment, cfg)
toConfig := newGenesisTotalOrderingConfig(dMoment, cfg)
@@ -59,6 +61,7 @@ func NewLattice(
data: newLatticeData(db, dataConfig),
toModule: newTotalOrdering(toConfig),
ctModule: newConsensusTimestamp(dMoment, 0, cfg.NumChains),
+ logger: logger,
}
return
}
@@ -74,9 +77,12 @@ func (s *Lattice) PrepareBlock(
if err = s.data.prepareBlock(b); err != nil {
return
}
+ s.logger.Debug("Calling Application.PreparePayload", "position", b.Position)
if b.Payload, err = s.app.PreparePayload(b.Position); err != nil {
return
}
+ s.logger.Debug("Calling Application.PrepareWitness",
+ "height", b.Witness.Height)
if b.Witness, err = s.app.PrepareWitness(b.Witness.Height); err != nil {
return
}
@@ -107,6 +113,7 @@ func (s *Lattice) SanityCheck(b *types.Block, checkRelation bool) (err error) {
}
}
// Verify data in application layer.
+ s.logger.Debug("Calling Application.VerifyBlock", "block", b)
if !s.app.VerifyBlock(b) {
err = ErrInvalidBlock
return err
@@ -122,6 +129,7 @@ func (s *Lattice) SanityCheck(b *types.Block, checkRelation bool) (err error) {
if err == ErrAckingBlockNotExists {
s.pool.addBlock(b)
}
+ s.logger.Error("Sanity Check failed", "error", err)
return
}
return
@@ -146,6 +154,7 @@ func (s *Lattice) ProcessBlock(
if inLattice, err = s.data.addBlock(input); err != nil {
// TODO(mission): if sanity check failed with "acking block doesn't
// exists", we should keep it in a pool.
+ s.logger.Error("Sanity Check failed when adding blocks", "error", err)
return
}
// TODO(mission): remove this hack, BA related stuffs should not
@@ -153,6 +162,7 @@ func (s *Lattice) ProcessBlock(
if s.debug != nil {
s.debug.StronglyAcked(input.Hash)
}
+ s.logger.Debug("Calling Application.BlockConfirmed", "block", input)
s.app.BlockConfirmed(*input.Clone())
// Purge blocks in pool with the same chainID and lower height.
s.pool.purgeBlocks(input.Position.ChainID, input.Position.Height)
diff --git a/core/lattice_test.go b/core/lattice_test.go
index 5c06b92..a508f2b 100644
--- a/core/lattice_test.go
+++ b/core/lattice_test.go
@@ -121,7 +121,8 @@ func (s *LatticeTestSuite) newTestLatticeMgr(
NewAuthenticator(prvKey),
app,
app,
- db)}
+ db,
+ &common.NullLogger{})}
}
func (s *LatticeTestSuite) TestBasicUsage() {
diff --git a/core/types/block-randomness.go b/core/types/block-randomness.go
index b101e22..ebffd2b 100644
--- a/core/types/block-randomness.go
+++ b/core/types/block-randomness.go
@@ -18,6 +18,8 @@
package types
import (
+ "fmt"
+
"github.com/dexon-foundation/dexon-consensus-core/common"
)
@@ -28,6 +30,11 @@ type AgreementResult struct {
Votes []Vote `json:"votes"`
}
+func (r *AgreementResult) String() string {
+ return fmt.Sprintf(
+ "agreementResult[%s:%s]", r.BlockHash, &r.Position)
+}
+
// BlockRandomnessResult describes a block randomness result
type BlockRandomnessResult struct {
BlockHash common.Hash `json:"block_hash"`
diff --git a/core/types/dkg.go b/core/types/dkg.go
index c13740b..6018ebd 100644
--- a/core/types/dkg.go
+++ b/core/types/dkg.go
@@ -53,6 +53,12 @@ type DKGMasterPublicKey struct {
Signature crypto.Signature `json:"signature"`
}
+func (d *DKGMasterPublicKey) String() string {
+ return fmt.Sprintf("MasterPublicKey[%s:%d]",
+ d.ProposerID.String()[:6],
+ d.Round)
+}
+
// NewDKGMasterPublicKey returns a new DKGMasterPublicKey instance.
func NewDKGMasterPublicKey() *DKGMasterPublicKey {
return &DKGMasterPublicKey{
@@ -75,6 +81,11 @@ type DKGComplaint struct {
Signature crypto.Signature `json:"signature"`
}
+func (c *DKGComplaint) String() string {
+ return fmt.Sprintf("DKGComplaint[%s:%d]%s",
+ c.ProposerID.String()[:6], c.Round, &c.PrivateShare)
+}
+
// DKGPartialSignature describes a partial signature in the DKG protocol.
type DKGPartialSignature struct {
ProposerID NodeID `json:"proposer_id"`
@@ -91,6 +102,12 @@ type DKGFinalize struct {
Signature crypto.Signature `json:"signature"`
}
+func (final *DKGFinalize) String() string {
+ return fmt.Sprintf("DKGFinal[%s:%d]",
+ final.ProposerID.String()[:6],
+ final.Round)
+}
+
// IsNack returns true if it's a nack complaint in DKG protocol.
func (c *DKGComplaint) IsNack() bool {
return len(c.PrivateShare.Signature.Signature) == 0
diff --git a/core/types/position.go b/core/types/position.go
index 4e254ba..51de405 100644
--- a/core/types/position.go
+++ b/core/types/position.go
@@ -17,7 +17,10 @@
package types
-import "errors"
+import (
+ "errors"
+ "fmt"
+)
// ErrComparePositionOnDifferentChains raised when attempting to
// compare two positions with different chain ID.
@@ -31,6 +34,10 @@ type Position struct {
Height uint64 `json:"height"`
}
+func (pos *Position) String() string {
+ return fmt.Sprintf("pos[%d:%d:%d]", pos.ChainID, pos.Round, pos.Height)
+}
+
// Equal checks if two positions are equal; it panics when their chainIDs
// are different.
func (pos *Position) Equal(other *Position) bool {
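The Stringer implementations added in this commit are what keep the new
debug logs compact: any value passed as logging context renders through
its String method. A small sketch with illustrative values:

package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus-core/core/types"
)

func main() {
	pos := &types.Position{ChainID: 3, Round: 1, Height: 42}
	// The new Stringer prints "pos[3:1:42]".
	fmt.Printf("confirmed at %s\n", pos)
}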
diff --git a/integration_test/node.go b/integration_test/node.go
index 09d1557..b6704e8 100644
--- a/integration_test/node.go
+++ b/integration_test/node.go
@@ -121,7 +121,8 @@ func NewNode(
core.NewAuthenticator(privateKey),
app,
app,
- db),
+ db,
+ &common.NullLogger{}),
}
}
diff --git a/simulation/node.go b/simulation/node.go
index 0a59994..5919b3e 100644
--- a/simulation/node.go
+++ b/simulation/node.go
@@ -100,7 +100,13 @@ func (n *node) run(serverEndpoint interface{}, dMoment time.Time) {
}
}
n.consensus = core.NewConsensus(
- dMoment, n.app, n.gov, n.db, n.netModule, n.prvKey)
+ dMoment,
+ n.app,
+ n.gov,
+ n.db,
+ n.netModule,
+ n.prvKey,
+ &common.SimpleLogger{})
go n.consensus.Run(&types.Block{})
// Blocks forever.