aboutsummaryrefslogtreecommitdiffstats
path: root/consensus/ethash/sealer.go
blob: 03d84847392e71a2c312fb169484b7585877e7b4 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethash

import (
    "bytes"
    crand "crypto/rand"
    "encoding/json"
    "errors"
    "math"
    "math/big"
    "math/rand"
    "net/http"
    "runtime"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/log"
)

var (
    // errNoMiningWork is returned from the remote work-fetching API when no
    // mining work has been pushed to the remote sealer yet.
    errNoMiningWork      = errors.New("no mining work available yet")

    // errInvalidSealResult is returned when a remotely submitted solution
    // fails PoW verification, or refers to stale or unknown work.
    errInvalidSealResult = errors.New("invalid or stale proof-of-work solution")
)

// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements. It spawns `threads` local search
// goroutines (and notifies the remote sealer if configured), returning the
// first sealed block found, or nil if sealing was aborted via `stop`.
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
    // If we're running a fake PoW, simply return a 0 nonce immediately
    if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
        header := block.Header()
        header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
        return block.WithSeal(header), nil
    }
    // If we're running a shared PoW, delegate sealing to it
    if ethash.shared != nil {
        return ethash.shared.Seal(chain, block, stop)
    }
    // Create a runner and the multiple search threads it directs
    abort := make(chan struct{})

    // Resolve the thread count and draw every per-thread start nonce while
    // holding the lock: math/rand.Rand is not safe for concurrent use, and
    // Seal may be invoked from multiple goroutines at once, so the PRNG must
    // not be touched outside the critical section.
    ethash.lock.Lock()
    threads := ethash.threads
    if ethash.rand == nil {
        // Lazily seed the nonce PRNG from a cryptographically random source.
        seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
        if err != nil {
            ethash.lock.Unlock()
            return nil, err
        }
        ethash.rand = rand.New(rand.NewSource(seed.Int64()))
    }
    if threads == 0 {
        threads = runtime.NumCPU()
    }
    if threads < 0 {
        threads = 0 // Allows disabling local mining without extra logic around local/remote
    }
    seeds := make([]uint64, threads)
    for i := range seeds {
        seeds[i] = uint64(ethash.rand.Int63())
    }
    ethash.lock.Unlock()

    // Push new work to remote sealer
    if ethash.workCh != nil {
        ethash.workCh <- block
    }
    var pend sync.WaitGroup
    for i := 0; i < threads; i++ {
        pend.Add(1)
        go func(id int, nonce uint64) {
            defer pend.Done()
            ethash.mine(block, id, nonce, abort, ethash.resultCh)
        }(i, seeds[i])
    }
    // Wait until sealing is terminated or a nonce is found
    var result *types.Block
    select {
    case <-stop:
        // Outside abort, stop all miner threads
        close(abort)
    case result = <-ethash.resultCh:
        // One of the threads found a block, abort all others
        close(abort)
    case <-ethash.update:
        // Thread count was changed on user request, restart
        close(abort)
        pend.Wait()
        return ethash.Seal(chain, block, stop)
    }
    // Wait for all miners to terminate and return the block
    pend.Wait()
    return result, nil
}

// mine is the actual proof-of-work miner that searches for a nonce starting from
// seed that results in correct final block difficulty. Found blocks are sent on
// `found`; the search stops when `abort` is closed or a solution is reported.
func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
    // Extract some data from the header
    var (
        header  = block.Header()
        hash    = header.HashNoNonce().Bytes()
        target  = new(big.Int).Div(two256, header.Difficulty) // PoW boundary: 2^256 / difficulty
        number  = header.Number.Uint64()
        dataset = ethash.dataset(number)
    )
    // Start generating random nonces until we abort or find a good one
    var (
        attempts = int64(0) // hashes tried since the last hashrate mark
        nonce    = seed     // nonces are probed sequentially upward from the random seed
    )
    logger := log.New("miner", id)
    logger.Trace("Started ethash search for new nonces", "seed", seed)
search:
    for {
        select {
        case <-abort:
            // Mining terminated, flush the residual attempt count into the
            // hashrate meter and abort.
            logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
            ethash.hashrate.Mark(attempts)
            break search

        default:
            // We don't have to update hash rate on every nonce, so update after 2^15 nonces
            attempts++
            if (attempts % (1 << 15)) == 0 {
                ethash.hashrate.Mark(attempts)
                attempts = 0
            }
            // Compute the PoW value of this nonce
            digest, result := hashimotoFull(dataset.dataset, hash, nonce)
            if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
                // Correct nonce found, create a new header with it. The header
                // is copied so the shared block's header is never mutated.
                header = types.CopyHeader(header)
                header.Nonce = types.EncodeNonce(nonce)
                header.MixDigest = common.BytesToHash(digest)

                // Seal and return a block (if still needed). If another thread
                // already won and abort was closed, the solution is discarded.
                select {
                case found <- block.WithSeal(header):
                    logger.Trace("Ethash nonce found and reported", "attempts", nonce-seed, "nonce", nonce)
                case <-abort:
                    logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
                }
                break search
            }
            nonce++
        }
    }
    // Datasets are unmapped in a finalizer. Ensure that the dataset stays live
    // during sealing so it's not unmapped while being read.
    runtime.KeepAlive(dataset)
}

// remote is a standalone goroutine to handle remote mining related stuff.
// It owns all remote-sealing state (pending works, submitted hashrates) and
// serializes access to it by servicing the various ethash channels in a single
// select loop, so no extra locking is needed. `notify` lists HTTP endpoints to
// push new work packages to.
func (ethash *Ethash) remote(notify []string) {
    var (
        works = make(map[common.Hash]*types.Block) // pending blocks, keyed by no-nonce header hash
        rates = make(map[common.Hash]hashrate)     // hashrates reported by remote miners, keyed by miner id

        currentBlock *types.Block // most recent block received for sealing
        currentWork  [3]string    // work package derived from currentBlock

        notifyTransport = &http.Transport{}
        notifyClient    = &http.Client{
            Transport: notifyTransport,
            Timeout:   time.Second,
        }
        notifyReqs = make([]*http.Request, len(notify)) // in-flight notification per endpoint
    )
    // notifyWork notifies all the specified mining endpoints of the availability of
    // new work to be processed.
    notifyWork := func() {
        work := currentWork
        blob, _ := json.Marshal(work)

        for i, url := range notify {
            // Terminate any previously pending request and create the new work
            if notifyReqs[i] != nil {
                notifyTransport.CancelRequest(notifyReqs[i])
            }
            notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
            notifyReqs[i].Header.Set("Content-Type", "application/json")

            // Push the new work concurrently to all the remote nodes
            go func(req *http.Request, url string) {
                res, err := notifyClient.Do(req)
                if err != nil {
                    log.Warn("Failed to notify remote miner", "err", err)
                } else {
                    log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
                    res.Body.Close()
                }
            }(notifyReqs[i], url)
        }
    }
    // makeWork creates a work package for external miner.
    //
    // The work package consists of 3 strings:
    //   result[0], 32 bytes hex encoded current block header pow-hash
    //   result[1], 32 bytes hex encoded seed hash used for DAG
    //   result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
    makeWork := func(block *types.Block) {
        hash := block.HashNoNonce()

        currentWork[0] = hash.Hex()
        currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
        currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()

        // Trace the seal work fetched by remote sealer.
        currentBlock = block
        works[hash] = block
    }
    // submitWork verifies the submitted pow solution, returning
    // whether the solution was accepted or not (not can be both a bad pow as well as
    // any other error, like no pending work or stale mining result).
    submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, hash common.Hash) bool {
        // Make sure the work submitted is present
        block := works[hash]
        if block == nil {
            log.Info("Work submitted but none pending", "hash", hash)
            return false
        }

        // Verify the correctness of submitted result.
        header := block.Header()
        header.Nonce = nonce
        header.MixDigest = mixDigest
        if err := ethash.VerifySeal(nil, header); err != nil {
            log.Warn("Invalid proof-of-work submitted", "hash", hash, "err", err)
            return false
        }

        // Make sure the result channel is created.
        if ethash.resultCh == nil {
            log.Warn("Ethash result channel is empty, submitted mining result is rejected")
            return false
        }

        // Solutions seems to be valid, return to the miner and notify acceptance.
        // The send is non-blocking: if nobody is waiting on resultCh anymore,
        // the solution is considered stale rather than blocking this loop.
        select {
        case ethash.resultCh <- block.WithSeal(header):
            delete(works, hash)
            return true
        default:
            log.Info("Work submitted is stale", "hash", hash)
            return false
        }
    }

    // Periodically prune hashrate entries from miners that stopped reporting.
    ticker := time.NewTicker(5 * time.Second)
    defer ticker.Stop()

    for {
        select {
        case block := <-ethash.workCh:
            if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() {
                // Start new round mining, throw out all previous work.
                works = make(map[common.Hash]*types.Block)
            }
            // Update current work with new received block.
            // Note same work can be past twice, happens when changing CPU threads.
            makeWork(block)

            // Notify and requested URLs of the new work availability
            notifyWork()

        case work := <-ethash.fetchWorkCh:
            // Return current mining work to remote miner.
            if currentBlock == nil {
                work.errc <- errNoMiningWork
            } else {
                work.res <- currentWork
            }

        case result := <-ethash.submitWorkCh:
            // Verify submitted PoW solution based on maintained mining blocks.
            if submitWork(result.nonce, result.mixDigest, result.hash) {
                result.errc <- nil
            } else {
                result.errc <- errInvalidSealResult
            }

        case result := <-ethash.submitRateCh:
            // Trace remote sealer's hash rate by submitted value.
            rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
            close(result.done)

        case req := <-ethash.fetchRateCh:
            // Gather all hash rate submitted by remote sealer.
            var total uint64
            for _, rate := range rates {
                // this could overflow
                total += rate.rate
            }
            req <- total

        case <-ticker.C:
            // Clear stale submitted hash rate.
            for id, rate := range rates {
                if time.Since(rate.ping) > 10*time.Second {
                    delete(rates, id)
                }
            }

        case errc := <-ethash.exitCh:
            // Exit remote loop if ethash is closed and return relevant error.
            errc <- nil
            log.Trace("Ethash remote sealer is exiting")
            return
        }
    }
}