path: root/Godeps/_workspace/src/github.com/ethereum/ethash/ethash.go
package ethash

/*
#include "src/libethash/internal.h"

int ethashGoCallback_cgo(unsigned);
*/
import "C"

import (
    "errors"
    "fmt"
    "io/ioutil"
    "math/big"
    "math/rand"
    "os"
    "os/user"
    "path/filepath"
    "runtime"
    "sync"
    "sync/atomic"
    "time"
    "unsafe"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/logger"
    "github.com/ethereum/go-ethereum/logger/glog"
    "github.com/ethereum/go-ethereum/pow"
)

var (
    minDifficulty = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
    sharedLight   = new(Light)
)

const (
    epochLength         uint64     = 30000
    cacheSizeForTesting C.uint64_t = 1024
    dagSizeForTesting   C.uint64_t = 1024 * 32
)

// DefaultDir is the default directory used to store generated DAG files.
var DefaultDir = defaultDir()

func defaultDir() string {
    home := os.Getenv("HOME")
    if user, err := user.Current(); err == nil {
        home = user.HomeDir
    }
    if runtime.GOOS == "windows" {
        return filepath.Join(home, "AppData", "Ethash")
    }
    return filepath.Join(home, ".ethash")
}

// cache wraps an ethash_light_t with some metadata
// and automatic memory management.
type cache struct {
    epoch uint64
    test  bool

    gen sync.Once // ensures cache is only generated once.
    ptr *C.struct_ethash_light
}

// generate creates the actual cache. It can be called from multiple
// goroutines. The first call will generate the cache; subsequent
// calls wait until it is generated.
func (cache *cache) generate() {
    cache.gen.Do(func() {
        started := time.Now()
        seedHash := makeSeedHash(cache.epoch)
        glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash)
        size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
        if cache.test {
            size = cacheSizeForTesting
        }
        cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
        runtime.SetFinalizer(cache, freeCache)
        glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started))
    })
}

func freeCache(cache *cache) {
    C.ethash_light_delete(cache.ptr)
    cache.ptr = nil
}

// Light implements the Verify half of the proof of work.
// It uses a small in-memory cache to verify the nonces
// found by Full.
type Light struct {
    test    bool       // if set use a smaller cache size
    mu      sync.Mutex // protects current
    current *cache     // last cache which was generated.
    // TODO: keep multiple caches.
}

// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
    // TODO: do ethash_quick_verify before getCache in order
    // to prevent DOS attacks.
    blockNum := block.NumberU64()
    if blockNum >= epochLength*2048 {
        glog.V(logger.Debug).Infof("block number %d too high, limit is %d", epochLength*2048)
        return false
    }

    difficulty := block.Difficulty()
    // A zero difficulty cannot happen if the block header difficulty is
    // validated before the PoW check, but it can occur when the PoW is checked
    // first due to parallel PoW checking. We could enforce the minimum valid
    // difficulty here, but for separation of concerns we avoid duplicating
    // Ethereum protocol consensus rules that are out of scope for ethash.
    if difficulty.Cmp(common.Big0) == 0 {
        glog.V(logger.Debug).Infof("invalid block difficulty")
        return false
    }

    cache := l.getCache(blockNum)
    dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))

    if l.test {
        dagSize = dagSizeForTesting
    }
    // Recompute the hash using the cache.
    hash := hashToH256(block.HashNoNonce())
    ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce()))
    if !ret.success {
        return false
    }

    // Avoid mix digest malleability, as it is not included in a block's "HashNoNonce".
    if block.MixDigest() != h256ToHash(ret.mix_hash) {
        return false
    }

    // Make sure the cache stays reachable until the C call above has returned;
    // otherwise the GC could run the finalizer and free the cache memory while
    // ethash_light_compute_internal is still using it.
    runtime.KeepAlive(cache)
    // The actual check.
    target := new(big.Int).Div(minDifficulty, difficulty)
    return h256ToHash(ret.result).Big().Cmp(target) <= 0
}
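
// The final comparison above is the canonical ethash check result <= 2^256 / difficulty
// (minDifficulty is 2^256). A minimal stand-alone sketch of that check, using a
// hypothetical helper name that is not part of this file:
//
//    func meetsTarget(result common.Hash, difficulty *big.Int) bool {
//        target := new(big.Int).Div(minDifficulty, difficulty) // 2^256 / difficulty
//        return result.Big().Cmp(target) <= 0
//    }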

// h256ToHash converts a C ethash_h256_t into a common.Hash by reinterpreting
// its 32-byte payload.
func h256ToHash(in C.ethash_h256_t) common.Hash {
    return *(*common.Hash)(unsafe.Pointer(&in.b))
}

// hashToH256 converts a common.Hash into a C ethash_h256_t by reinterpreting
// its 32 bytes.
func hashToH256(in common.Hash) C.ethash_h256_t {
    return C.ethash_h256_t{b: *(*[32]C.uint8_t)(unsafe.Pointer(&in[0]))}
}

func (l *Light) getCache(blockNum uint64) *cache {
    var c *cache
    epoch := blockNum / epochLength
    // Update or reuse the last cache.
    l.mu.Lock()
    if l.current != nil && l.current.epoch == epoch {
        c = l.current
    } else {
        c = &cache{epoch: epoch, test: l.test}
        l.current = c
    }
    l.mu.Unlock()
    // Wait for the cache to finish generating.
    c.generate()
    return c
}
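
// Note that concurrent Verify calls for blocks in the same epoch share a single
// cache: the first caller allocates it under l.mu, and the sync.Once inside
// generate blocks the other callers until generation has finished.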

// dag wraps an ethash_full_t with some metadata
// and automatic memory management.
type dag struct {
    epoch uint64
    test  bool
    dir   string

    gen sync.Once // ensures DAG is only generated once.
    ptr *C.struct_ethash_full
}

// generate creates the actual DAG. It can be called from multiple
// goroutines. The first call will generate the DAG; subsequent
// calls wait until it is generated.
func (d *dag) generate() {
    d.gen.Do(func() {
        var (
            started   = time.Now()
            seedHash  = makeSeedHash(d.epoch)
            blockNum  = C.uint64_t(d.epoch * epochLength)
            cacheSize = C.ethash_get_cachesize(blockNum)
            dagSize   = C.ethash_get_datasize(blockNum)
        )
        if d.test {
            cacheSize = cacheSizeForTesting
            dagSize = dagSizeForTesting
        }
        if d.dir == "" {
            d.dir = DefaultDir
        }
        glog.V(logger.Info).Infof("Generating DAG for epoch %d (%x)", d.epoch, seedHash)
        // Generate a temporary cache.
        // TODO: this could share the cache with Light
        cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
        defer C.ethash_light_delete(cache)
        // Generate the actual DAG.
        d.ptr = C.ethash_full_new_internal(
            C.CString(d.dir),
            hashToH256(seedHash),
            dagSize,
            cache,
            (C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)),
        )
        if d.ptr == nil {
            panic("ethash_full_new IO or memory error")
        }
        runtime.SetFinalizer(d, freeDAG)
        glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started))
    })
}

func freeDAG(h *dag) {
    C.ethash_full_delete(h.ptr)
    h.ptr = nil
}

//export ethashGoCallback
func ethashGoCallback(percent C.unsigned) C.int {
    glog.V(logger.Info).Infof("Still generating DAG: %d%%", percent)
    return 0
}

// MakeDAG pre-generates a DAG file for the given block number in the
// given directory. If dir is the empty string, the default directory
// is used.
func MakeDAG(blockNum uint64, dir string) error {
    d := &dag{epoch: blockNum / epochLength, dir: dir}
    if blockNum >= epochLength*2048 {
        return fmt.Errorf("block number too high, limit is %d", epochLength*2048)
    }
    d.generate()
    if d.ptr == nil {
        return errors.New("failed to generate DAG")
    }
    return nil
}
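
// A usage sketch for MakeDAG; the block number and the empty directory argument
// below are illustrative assumptions, not values taken from this file:
//
//    // Pre-generate the DAG for the epoch containing block 1,000,000 into the
//    // default directory.
//    if err := MakeDAG(1000000, ""); err != nil {
//        // handle the IO or memory error
//    }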

// Full implements the Search half of the proof of work.
type Full struct {
    Dir string // use this to specify a non-default DAG directory

    test     bool // if set use a smaller DAG size
    turbo    bool
    hashRate int32

    mu      sync.Mutex // protects dag
    current *dag       // current full DAG
}

func (pow *Full) getDAG(blockNum uint64) (d *dag) {
    epoch := blockNum / epochLength
    pow.mu.Lock()
    if pow.current != nil && pow.current.epoch == epoch {
        d = pow.current
    } else {
        d = &dag{epoch: epoch, test: pow.test, dir: pow.Dir}
        pow.current = d
    }
    pow.mu.Unlock()
    // wait for it to finish generating.
    d.generate()
    return d
}

func (pow *Full) Search(block pow.Block, stop <-chan struct{}) (nonce uint64, mixDigest []byte) {
    dag := pow.getDAG(block.NumberU64())

    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    diff := block.Difficulty()

    i := int64(0)
    starti := i
    start := time.Now().UnixNano()
    previousHashrate := int32(0)

    nonce = uint64(r.Int63())
    hash := hashToH256(block.HashNoNonce())
    target := new(big.Int).Div(minDifficulty, diff)
    for {
        select {
        case <-stop:
            atomic.AddInt32(&pow.hashRate, -previousHashrate)
            return 0, nil
        default:
            i++

            // We don't have to update the hash rate on every nonce, so update
            // after the first nonce check and then after every 2^16 nonces.
            if i == 2 || ((i % (1 << 16)) == 0) {
                elapsed := time.Now().UnixNano() - start
                hashes := (float64(1e9) / float64(elapsed)) * float64(i-starti)
                hashrateDiff := int32(hashes) - previousHashrate
                previousHashrate = int32(hashes)
                atomic.AddInt32(&pow.hashRate, hashrateDiff)
            }

            ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce))
            result := h256ToHash(ret.result).Big()

            // TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
            if ret.success && result.Cmp(target) <= 0 {
                mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
                atomic.AddInt32(&pow.hashRate, -previousHashrate)
                return nonce, mixDigest
            }
            nonce++
        }

        if !pow.turbo {
            time.Sleep(20 * time.Microsecond)
        }
    }
}
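
// A usage sketch for Search; blk is assumed to implement pow.Block and the
// surrounding code is illustrative only:
//
//    e := New()
//    stop := make(chan struct{})
//    nonce, mixDigest := e.Search(blk, stop) // blocks until a nonce is found or stop is closed
//    if mixDigest != nil {
//        // a valid nonce was found; seal the block header with nonce and mixDigest
//    }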

func (pow *Full) GetHashrate() int64 {
    return int64(atomic.LoadInt32(&pow.hashRate))
}

func (pow *Full) Turbo(on bool) {
    // TODO: this needs to use an atomic operation.
    pow.turbo = on
}

// Ethash combines block verification with Light and
// nonce searching with Full into a single proof of work.
type Ethash struct {
    *Light
    *Full
}

// New creates an instance of the proof of work.
// A single instance of Light is shared across all instances
// created with New.
func New() *Ethash {
    return &Ethash{sharedLight, &Full{turbo: true}}
}

// NewForTesting creates a proof of work for use in unit tests.
// It uses a smaller DAG and cache size to keep test times low.
// DAG files are stored in a temporary directory.
//
// Nonces found by a testing instance are not verifiable with a
// regular-size cache.
func NewForTesting() (*Ethash, error) {
    dir, err := ioutil.TempDir("", "ethash-test")
    if err != nil {
        return nil, err
    }
    return &Ethash{&Light{test: true}, &Full{Dir: dir, test: true}}, nil
}
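
// A test-oriented sketch, assuming blk implements pow.Block; because of the
// reduced cache and DAG sizes, nonces found here will not verify against a
// regular-size cache:
//
//    e, err := NewForTesting()
//    if err != nil {
//        // handle temporary-directory creation failure
//    }
//    ok := e.Verify(blk) // uses the reduced-size cache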

// GetSeedHash returns the seed hash of the epoch that contains blockNum.
func GetSeedHash(blockNum uint64) ([]byte, error) {
    if blockNum >= epochLength*2048 {
        return nil, fmt.Errorf("block number too high, limit is %d", epochLength*2048)
    }
    sh := makeSeedHash(blockNum / epochLength)
    return sh[:], nil
}

// makeSeedHash computes the ethash seed for the given epoch: starting from
// 32 zero bytes, Keccak-256 is applied once per epoch.
func makeSeedHash(epoch uint64) (sh common.Hash) {
    for ; epoch > 0; epoch-- {
        sh = crypto.Sha3Hash(sh[:])
    }
    return sh
}
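
// Worked example of the seed chain above (illustrative only): the seed for
// epoch 2, i.e. blocks 60000-89999, is Keccak-256 applied twice to 32 zero bytes.
//
//    var zero common.Hash
//    h1 := crypto.Sha3Hash(zero[:])
//    seed2 := crypto.Sha3Hash(h1[:]) // equals makeSeedHash(2)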