Diffstat (limited to 'consensus/ethash/sealer.go')
-rw-r--r--  consensus/ethash/sealer.go  134
1 file changed, 88 insertions, 46 deletions
diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go
index a458c60f6..06c98a781 100644
--- a/consensus/ethash/sealer.go
+++ b/consensus/ethash/sealer.go
@@ -35,6 +35,11 @@ import (
"github.com/ethereum/go-ethereum/log"
)
+const (
+ // staleThreshold is the maximum depth of the acceptable stale but valid ethash solution.
+ staleThreshold = 7
+)
+
var (
errNoMiningWork = errors.New("no mining work available yet")
errInvalidSealResult = errors.New("invalid or stale proof-of-work solution")
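
The new staleThreshold constant bounds how far a remotely mined solution may lag behind the current pending block and still be accepted. A minimal sketch of the arithmetic, with made-up block numbers for illustration:

// Sketch only: with staleThreshold = 7 a solution is still acceptable while it
// trails the current pending block number by at most 6.
func isWithinStaleWindow(solutionNumber, currentNumber uint64) bool {
	return solutionNumber+staleThreshold > currentNumber
}

// isWithinStaleWindow(100, 106) == true  // accepted
// isWithinStaleWindow(100, 107) == false // dropped as too old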
@@ -42,16 +47,21 @@ var (
// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements.
-func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
+func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
// If we're running a fake PoW, simply return a 0 nonce immediately
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
header := block.Header()
header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
- return block.WithSeal(header), nil
+ select {
+ case results <- block.WithSeal(header):
+ default:
+ log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
+ }
+ return nil
}
// If we're running a shared PoW, delegate sealing to it
if ethash.shared != nil {
- return ethash.shared.Seal(chain, block, stop)
+ return ethash.shared.Seal(chain, block, results, stop)
}
// Create a runner and the multiple search threads it directs
abort := make(chan struct{})
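
With the signature change above, Seal no longer blocks until a nonce is found; the sealed block is delivered on the results channel and the caller owns the stop channel. A rough caller-side sketch (engine, chain, block, quit and handleSealed are assumed names, not code from this change):

results := make(chan *types.Block, 1)
stop := make(chan struct{})

// Seal returns immediately; the nonce search continues in background goroutines.
if err := engine.Seal(chain, block, results, stop); err != nil {
	log.Error("Sealing failed to start", "err", err)
	return
}
select {
case sealed := <-results:
	// A nonce satisfying the difficulty target was found.
	handleSealed(sealed)
case <-quit:
	// Shutting down: abort all mining threads.
	close(stop)
}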
@@ -62,7 +72,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
ethash.lock.Unlock()
- return nil, err
+ return err
}
ethash.rand = rand.New(rand.NewSource(seed.Int64()))
}
@@ -75,34 +85,45 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
}
// Push new work to remote sealer
if ethash.workCh != nil {
- ethash.workCh <- block
+ ethash.workCh <- &sealTask{block: block, results: results}
}
- var pend sync.WaitGroup
+ var (
+ pend sync.WaitGroup
+ locals = make(chan *types.Block)
+ )
for i := 0; i < threads; i++ {
pend.Add(1)
go func(id int, nonce uint64) {
defer pend.Done()
- ethash.mine(block, id, nonce, abort, ethash.resultCh)
+ ethash.mine(block, id, nonce, abort, locals)
}(i, uint64(ethash.rand.Int63()))
}
// Wait until sealing is terminated or a nonce is found
- var result *types.Block
- select {
- case <-stop:
- // Outside abort, stop all miner threads
- close(abort)
- case result = <-ethash.resultCh:
- // One of the threads found a block, abort all others
- close(abort)
- case <-ethash.update:
- // Thread count was changed on user request, restart
- close(abort)
+ go func() {
+ var result *types.Block
+ select {
+ case <-stop:
+ // Outside abort, stop all miner threads
+ close(abort)
+ case result = <-locals:
+ // One of the threads found a block, abort all others
+ select {
+ case results <- result:
+ default:
+ log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
+ }
+ close(abort)
+ case <-ethash.update:
+ // Thread count was changed on user request, restart
+ close(abort)
+ if err := ethash.Seal(chain, block, results, stop); err != nil {
+ log.Error("Failed to restart sealing after update", "err", err)
+ }
+ }
+ // Wait for all miners to terminate and return the block
pend.Wait()
- return ethash.Seal(chain, block, stop)
- }
- // Wait for all miners to terminate and return the block
- pend.Wait()
- return result, nil
+ }()
+ return nil
}
// mine is the actual proof-of-work miner that searches for a nonce starting from
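
The sealTask pushed onto workCh earlier in this hunk is not defined in the lines shown; presumably it is a small wrapper, added elsewhere in the same change, pairing the block with the result channel it should be answered on, roughly:

// Assumed shape of sealTask (defined outside the lines shown here): it lets the
// remote sealer deliver a solution back on the channel belonging to the
// sealing request that produced the work.
type sealTask struct {
	block   *types.Block
	results chan<- *types.Block
}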
@@ -165,11 +186,12 @@ search:
}
// remote is a standalone goroutine to handle remote mining related stuff.
-func (ethash *Ethash) remote(notify []string) {
+func (ethash *Ethash) remote(notify []string, noverify bool) {
var (
works = make(map[common.Hash]*types.Block)
rates = make(map[common.Hash]hashrate)
+ results chan<- *types.Block
currentBlock *types.Block
currentWork [3]string
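
currentWork follows the eth_getWork convention: seal hash, DAG seed hash and boundary target, hex encoded. makeWork itself lies outside the lines shown, so the following is only a hedged sketch of how those three entries are presumably populated (two256 stands for the package's 2^256 big.Int constant):

// Assumed sketch of makeWork: expose the three getWork fields to remote miners
// and remember the pending block under its seal hash.
makeWork := func(block *types.Block) {
	hash := ethash.SealHash(block.Header())

	currentWork[0] = hash.Hex()                                            // seal hash to mine on
	currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex() // DAG seed hash
	currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex() // boundary target

	currentBlock = block
	works[hash] = block
}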
@@ -226,11 +248,15 @@ func (ethash *Ethash) remote(notify []string) {
// submitWork verifies the submitted pow solution, returning
// whether the solution was accepted or not (a rejection can mean a bad pow as well as
// any other error, like no pending work or a stale mining result).
- submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, hash common.Hash) bool {
+ submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
+ if currentBlock == nil {
+ log.Error("Pending work without block", "sealhash", sealhash)
+ return false
+ }
// Make sure the work submitted is present
- block := works[hash]
+ block := works[sealhash]
if block == nil {
- log.Info("Work submitted but none pending", "hash", hash)
+ log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", currentBlock.NumberU64())
return false
}
// Verify the correctness of submitted result.
@@ -239,26 +265,36 @@ func (ethash *Ethash) remote(notify []string) {
header.MixDigest = mixDigest
start := time.Now()
- if err := ethash.verifySeal(nil, header, true); err != nil {
- log.Warn("Invalid proof-of-work submitted", "hash", hash, "elapsed", time.Since(start), "err", err)
- return false
+ if !noverify {
+ if err := ethash.verifySeal(nil, header, true); err != nil {
+ log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", time.Since(start), "err", err)
+ return false
+ }
}
- // Make sure the result channel is created.
- if ethash.resultCh == nil {
+ // Make sure the result channel is assigned.
+ if results == nil {
log.Warn("Ethash result channel is empty, submitted mining result is rejected")
return false
}
- log.Trace("Verified correct proof-of-work", "hash", hash, "elapsed", time.Since(start))
+ log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", time.Since(start))
// Solution seems to be valid, return to the miner and notify acceptance.
- select {
- case ethash.resultCh <- block.WithSeal(header):
- delete(works, hash)
- return true
- default:
- log.Info("Work submitted is stale", "hash", hash)
- return false
+ solution := block.WithSeal(header)
+
+ // The submitted solution is within the scope of acceptance.
+ if solution.NumberU64()+staleThreshold > currentBlock.NumberU64() {
+ select {
+ case results <- solution:
+ log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
+ return true
+ default:
+ log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
+ return false
+ }
}
+ // The submitted block is too old to accept, drop it.
+ log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
+ return false
}
ticker := time.NewTicker(5 * time.Second)
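
The three values submitWork consumes map directly onto the eth_submitWork RPC parameters (nonce, seal hash, mix digest). A purely hypothetical forwarding helper, not taken from this diff, to make the mapping concrete:

// Hypothetical glue: decode eth_submitWork style hex parameters and forward them
// to a submitWork-shaped function. Names here are illustrative only.
func forwardSubmitWork(nonceHex, sealhashHex, mixHex string,
	submit func(types.BlockNonce, common.Hash, common.Hash) bool) bool {

	var nonce types.BlockNonce
	copy(nonce[:], common.FromHex(nonceHex))
	// submitWork expects (nonce, mixDigest, sealhash) in that order.
	return submit(nonce, common.HexToHash(mixHex), common.HexToHash(sealhashHex))
}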
@@ -266,14 +302,12 @@ func (ethash *Ethash) remote(notify []string) {
for {
select {
- case block := <-ethash.workCh:
- if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() {
- // Start new round mining, throw out all previous work.
- works = make(map[common.Hash]*types.Block)
- }
+ case work := <-ethash.workCh:
// Update current work with newly received block.
// Note same work can be passed twice, happens when changing CPU threads.
- makeWork(block)
+ results = work.results
+
+ makeWork(work.block)
// Notify all requested URLs of the new work availability
notifyWork()
@@ -315,6 +349,14 @@ func (ethash *Ethash) remote(notify []string) {
delete(rates, id)
}
}
+ // Clear stale pending blocks
+ if currentBlock != nil {
+ for hash, block := range works {
+ if block.NumberU64()+staleThreshold <= currentBlock.NumberU64() {
+ delete(works, hash)
+ }
+ }
+ }
case errc := <-ethash.exitCh:
// Exit remote loop if ethash is closed and return relevant error.