Diffstat (limited to 'consensus/ethash/sealer.go')
-rw-r--r--  consensus/ethash/sealer.go  88
1 file changed, 59 insertions, 29 deletions
diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go
index a9449d406..03d848473 100644
--- a/consensus/ethash/sealer.go
+++ b/consensus/ethash/sealer.go
@@ -17,11 +17,14 @@
package ethash
import (
+ "bytes"
crand "crypto/rand"
+ "encoding/json"
"errors"
"math"
"math/big"
"math/rand"
+ "net/http"
"runtime"
"sync"
"time"
@@ -109,7 +112,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
var (
header = block.Header()
hash = header.HashNoNonce().Bytes()
- target = new(big.Int).Div(maxUint256, header.Difficulty)
+ target = new(big.Int).Div(two256, header.Difficulty)
number = header.Number.Uint64()
dataset = ethash.dataset(number)
)
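
For reference, the target swapped in above is the standard ethash boundary: a nonce is accepted when the 32-byte PoW digest, read as a big-endian integer, is at most 2^256 divided by the block difficulty. A minimal, self-contained sketch of that check (not code from this package; two256 is assumed here to be the 2^256 constant referenced in the diff):

package main

import (
	"fmt"
	"math/big"
)

// two256 is assumed to be 2^256, matching the constant used in the diff above.
var two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)

// meetsTarget reports whether a PoW digest (32 bytes, big-endian) satisfies
// the difficulty, i.e. digest <= 2^256 / difficulty.
func meetsTarget(digest []byte, difficulty *big.Int) bool {
	target := new(big.Int).Div(two256, difficulty)
	return new(big.Int).SetBytes(digest).Cmp(target) <= 0
}

func main() {
	// An all-zero digest trivially satisfies any difficulty.
	fmt.Println(meetsTarget(make([]byte, 32), big.NewInt(1000000)))
}
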
@@ -161,40 +164,65 @@ search:
runtime.KeepAlive(dataset)
}
-// remote starts a standalone goroutine to handle remote mining related stuff.
-func (ethash *Ethash) remote() {
+// remote is a standalone goroutine to handle remote mining related stuff.
+func (ethash *Ethash) remote(notify []string) {
var (
- works = make(map[common.Hash]*types.Block)
- rates = make(map[common.Hash]hashrate)
- currentWork *types.Block
+ works = make(map[common.Hash]*types.Block)
+ rates = make(map[common.Hash]hashrate)
+
+ currentBlock *types.Block
+ currentWork [3]string
+
+ notifyTransport = &http.Transport{}
+ notifyClient = &http.Client{
+ Transport: notifyTransport,
+ Timeout: time.Second,
+ }
+ notifyReqs = make([]*http.Request, len(notify))
)
+ // notifyWork notifies all the specified mining endpoints of the availability of
+ // new work to be processed.
+ notifyWork := func() {
+ work := currentWork
+ blob, _ := json.Marshal(work)
- // getWork returns a work package for external miner.
+ for i, url := range notify {
+ // Terminate any previously pending request and create the new work
+ if notifyReqs[i] != nil {
+ notifyTransport.CancelRequest(notifyReqs[i])
+ }
+ notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
+ notifyReqs[i].Header.Set("Content-Type", "application/json")
+
+ // Push the new work concurrently to all the remote nodes
+ go func(req *http.Request, url string) {
+ res, err := notifyClient.Do(req)
+ if err != nil {
+ log.Warn("Failed to notify remote miner", "err", err)
+ } else {
+ log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
+ res.Body.Close()
+ }
+ }(notifyReqs[i], url)
+ }
+ }
+ // makeWork creates a work package for external miner.
//
// The work package consists of 3 strings:
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
- getWork := func() ([3]string, error) {
- var res [3]string
- if currentWork == nil {
- return res, errNoMiningWork
- }
- res[0] = currentWork.HashNoNonce().Hex()
- res[1] = common.BytesToHash(SeedHash(currentWork.NumberU64())).Hex()
+ makeWork := func(block *types.Block) {
+ hash := block.HashNoNonce()
- // Calculate the "target" to be returned to the external sealer.
- n := big.NewInt(1)
- n.Lsh(n, 255)
- n.Div(n, currentWork.Difficulty())
- n.Lsh(n, 1)
- res[2] = common.BytesToHash(n.Bytes()).Hex()
+ currentWork[0] = hash.Hex()
+ currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
+ currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
// Trace the seal work fetched by remote sealer.
- works[currentWork.HashNoNonce()] = currentWork
- return res, nil
+ currentBlock = block
+ works[hash] = block
}
-
// submitWork verifies the submitted pow solution, returning
// whether the solution was accepted or not (a rejection can mean a bad pow
// as well as any other error, like no pending work or a stale mining result).
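
The notifyWork closure above POSTs the current [3]string work package as JSON to every configured notify URL. A minimal sketch of a receiver such a URL could point at, decoding that payload (the listen address and handler path are illustrative assumptions, not part of this change):

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	// Handler for the JSON body pushed by notifyWork: a [3]string of
	// pow-hash, seed hash and boundary target, all hex encoded.
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		var work [3]string
		if err := json.NewDecoder(r.Body).Decode(&work); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("new work: hash=%s seed=%s target=%s", work[0], work[1], work[2])
	})
	// The listen address is an assumption for this sketch.
	log.Fatal(http.ListenAndServe(":8550", nil))
}
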
@@ -238,21 +266,23 @@ func (ethash *Ethash) remote() {
for {
select {
case block := <-ethash.workCh:
- if currentWork != nil && block.ParentHash() != currentWork.ParentHash() {
+ if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() {
// Start new round mining, throw out all previous work.
works = make(map[common.Hash]*types.Block)
}
// Update the current work with the newly received block.
// Note the same work can be passed in twice; this happens when changing CPU threads.
- currentWork = block
+ makeWork(block)
+
+ // Notify any requested URLs of the new work availability
+ notifyWork()
case work := <-ethash.fetchWorkCh:
// Return current mining work to remote miner.
- miningWork, err := getWork()
- if err != nil {
- work.errc <- err
+ if currentBlock == nil {
+ work.errc <- errNoMiningWork
} else {
- work.res <- miningWork
+ work.res <- currentWork
}
case result := <-ethash.submitWorkCh:
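
The fetchWorkCh exchange above implies a request object carrying a result channel and an error channel. A minimal sketch of that request/response pattern (the sealWork name and field layout are assumptions inferred from how work.res and work.errc are used in this diff):

package main

import (
	"errors"
	"fmt"
)

var errNoMiningWork = errors.New("no mining work available yet")

// sealWork is a hypothetical fetch-work request, mirroring the work.errc /
// work.res usage in the diff.
type sealWork struct {
	errc chan error
	res  chan [3]string
}

func main() {
	fetchWorkCh := make(chan *sealWork)

	// Serving goroutine, standing in for the remote() loop.
	go func() {
		var currentWork [3]string
		haveWork := false
		for req := range fetchWorkCh {
			if !haveWork {
				req.errc <- errNoMiningWork
			} else {
				req.res <- currentWork
			}
		}
	}()

	// Caller side, standing in for an external getWork request.
	req := &sealWork{errc: make(chan error, 1), res: make(chan [3]string, 1)}
	fetchWorkCh <- req
	select {
	case work := <-req.res:
		fmt.Println("work:", work)
	case err := <-req.errc:
		fmt.Println("error:", err)
	}
}
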