// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build go1.8

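// This file is gated to Go 1.8 and newer because, as of Go 1.8,
// big.Int.ProbablyPrime additionally runs a Baillie-PSW test, making its
// result guaranteed to be accurate for all inputs below 2^64.
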
package ethash

import "math/big"

// cacheSize calculates and returns the size of the ethash verification cache
// that belongs to a certain block number. The cache size grows linearly;
// however, we always take the highest prime below the linearly growing
// threshold in order to reduce the risk of accidental regularities leading
// to cyclic behavior.
func cacheSize(block uint64) uint64 {
    // If we have a pre-generated value, use that
    epoch := int(block / epochLength)
    if epoch < len(cacheSizes) {
        return cacheSizes[epoch]
    }
    // No known cache size, calculate manually (sanity branch only; the
    // pre-generated cacheSizes table covers every epoch expected in practice)
    size := uint64(cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes)
    for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64 with Go 1.8's Baillie-PSW test
        size -= 2 * hashBytes
    }
    return size
}
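
// A worked example of the sanity branch above, assuming the canonical ethash
// constants defined elsewhere in this package (cacheInitBytes = 1<<24,
// cacheGrowthBytes = 1<<17, hashBytes = 64): for epoch 0 the linear threshold
// is 1<<24 - 64 = 16777152 bytes, but its quotient 16777152/64 = 262143 =
// 3^3*7*19*73 is composite, as is the next candidate 262141 = 11*23831, so
// the loop settles on 16776896, whose quotient 262139 is prime. That value
// matches the first entry of the pre-generated cacheSizes table:
//
//	cacheSize(0) // 16776896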

// datasetSize calculates and returns the size of the ethash mining dataset
// that belongs to a certain block number. The dataset size grows linearly;
// however, we always take the highest prime below the linearly growing
// threshold in order to reduce the risk of accidental regularities leading
// to cyclic behavior.
func datasetSize(block uint64) uint64 {
    // If we have a pre-generated value, use that
    epoch := int(block / epochLength)
    if epoch < len(datasetSizes) {
        return datasetSizes[epoch]
    }
    // No known dataset size, calculate manually (sanity branch only; the
    // pre-generated datasetSizes table covers every epoch expected in practice)
    size := uint64(datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes)
    for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64 with Go 1.8's Baillie-PSW test
        size -= 2 * mixBytes
    }
    return size
}
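
// A worked example for the dataset, under the same assumptions (canonical
// constants datasetInitBytes = 1<<30, datasetGrowthBytes = 1<<23,
// mixBytes = 128): for epoch 0 the linear threshold is 1<<30 - 128 =
// 1073741696 bytes, but its quotient 8388607 = 2^23-1 = 47*178481 is
// composite, and so are the next six candidates, so the loop settles on
// 1073739904, whose quotient 8388593 is prime. That value matches the first
// entry of the pre-generated datasetSizes table:
//
//	datasetSize(0) // 1073739904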