author     Felix Lange <fjl@twurst.com>   2017-03-18 07:55:37 +0800
committer  Felix Lange <fjl@twurst.com>   2017-03-18 08:05:28 +0800
commit     24dd0355a34a40b1798c9b8bd97a7332a77e2556 (patch)
tree       6eb22f45d051a3ba7aeeade24614dc2808b1b60e
parent     61ede86737d57f62cb09de013191fc430d1dd3a2 (diff)
pow: fix Search with ethash test mode
The cache/dataset methods crashed with a nil pointer error if
cachesinmem/dagsinmem were zero. Fix it by skipping the eviction logic
if there are no caches/datasets.
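
For illustration, a minimal self-contained sketch of the failure mode and the guard; the cache struct and the evictIfFull helper below are simplified stand-ins for the real ethash types, not the actual code:

    package main

    import (
    	"fmt"
    	"time"
    )

    // cache is a simplified stand-in for the ethash in-memory cache entry.
    type cache struct {
    	epoch uint64
    	used  time.Time
    }

    // evictIfFull drops least-recently-used entries until the map fits the limit.
    func evictIfFull(caches map[uint64]*cache, limit int) {
    	// With limit == 0 the old condition len(caches) >= limit was always
    	// true, so on an empty map the inner loop never ran, evict stayed nil,
    	// and evict.epoch dereferenced a nil pointer. The added
    	// len(caches) > 0 guard skips eviction when there is nothing to evict.
    	for len(caches) > 0 && len(caches) >= limit {
    		var evict *cache
    		for _, c := range caches {
    			if evict == nil || evict.used.After(c.used) {
    				evict = c
    			}
    		}
    		delete(caches, evict.epoch)
    	}
    }

    func main() {
    	evictIfFull(map[uint64]*cache{}, 0) // previously a nil pointer panic
    	fmt.Println("eviction with a zero limit no longer panics")
    }

With a zero limit and a non-empty map the loop still evicts everything, which is the intended behaviour when nothing should be kept in memory.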
Search always used the regular dataset size regardless of test mode. Fix
it by removing the redundant size parameter of hashimotoFull.
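
The new size expression is worth spelling out: the dataset is a []uint32, so its size in bytes, which is what hashimoto expects, is len(dataset)*4. Deriving it from the slice itself means a shrunken test-mode dataset is hashed with its own size rather than the production datasetSize value. A small sketch of just that derivation (datasetBytes is an illustrative helper, not part of the ethash API):

    package main

    import "fmt"

    // datasetBytes derives the byte size hashimoto needs from the dataset
    // itself: each element is a uint32, i.e. 4 bytes.
    func datasetBytes(dataset []uint32) uint64 {
    	return uint64(len(dataset)) * 4
    }

    func main() {
    	// A test-mode sized dataset (32 KiB) instead of the ~1 GiB production DAG.
    	testDataset := make([]uint32, 32*1024/4)
    	fmt.Println(datasetBytes(testDataset)) // 32768
    }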
Fixes #3784
-rw-r--r--  pow/ethash.go           | 26
-rw-r--r--  pow/ethash_algo.go      |  4
-rw-r--r--  pow/ethash_algo_test.go | 15
3 files changed, 25 insertions, 20 deletions
diff --git a/pow/ethash.go b/pow/ethash.go
index 1e577a587..9adc38540 100644
--- a/pow/ethash.go
+++ b/pow/ethash.go
@@ -428,7 +428,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
 	current, future := ethash.caches[epoch], (*cache)(nil)
 	if current == nil {
 		// No in-memory cache, evict the oldest if the cache limit was reached
-		for len(ethash.caches) >= ethash.cachesinmem {
+		for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.cachesinmem {
 			var evict *cache
 			for _, cache := range ethash.caches {
 				if evict == nil || evict.used.After(cache.used) {
@@ -480,22 +480,16 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
 // Search implements PoW, attempting to find a nonce that satisfies the block's
 // difficulty requirements.
 func (ethash *Ethash) Search(block Block, stop <-chan struct{}) (uint64, []byte) {
-	// Extract some data from the block
-	var (
-		hash   = block.HashNoNonce().Bytes()
-		diff   = block.Difficulty()
-		target = new(big.Int).Div(maxUint256, diff)
-	)
-	// Retrieve the mining dataset
-	dataset, size := ethash.dataset(block.NumberU64()), datasetSize(block.NumberU64())
-
-	// Start generating random nonces until we abort or find a good one
 	var (
+		hash     = block.HashNoNonce().Bytes()
+		diff     = block.Difficulty()
+		target   = new(big.Int).Div(maxUint256, diff)
+		dataset  = ethash.dataset(block.NumberU64())
+		rand     = rand.New(rand.NewSource(time.Now().UnixNano()))
+		nonce    = uint64(rand.Int63())
 		attempts int64
 	)
+	// Start generating random nonces until we abort or find a good one
 	for {
 		select {
 		case <-stop:
@@ -511,7 +505,7 @@ func (ethash *Ethash) Search(block Block, stop <-chan struct{}) (uint64, []byte)
 			attempts = 0
 		}
 		// Compute the PoW value of this nonce
-		digest, result := hashimotoFull(size, dataset, hash, nonce)
+		digest, result := hashimotoFull(dataset, hash, nonce)
 		if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
 			return nonce, digest
 		}
@@ -532,7 +526,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
 	current, future := ethash.datasets[epoch], (*dataset)(nil)
 	if current == nil {
 		// No in-memory dataset, evict the oldest if the dataset limit was reached
-		for len(ethash.datasets) >= ethash.dagsinmem {
+		for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.dagsinmem {
 			var evict *dataset
 			for _, dataset := range ethash.datasets {
 				if evict == nil || evict.used.After(dataset.used) {
diff --git a/pow/ethash_algo.go b/pow/ethash_algo.go
index 3737cc5d7..1e996785f 100644
--- a/pow/ethash_algo.go
+++ b/pow/ethash_algo.go
@@ -349,12 +349,12 @@ func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]b
 // hashimotoFull aggregates data from the full dataset (using the full in-memory
 // dataset) in order to produce our final value for a particular header hash and
 // nonce.
-func hashimotoFull(size uint64, dataset []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
+func hashimotoFull(dataset []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
 	lookup := func(index uint32) []uint32 {
 		offset := index * hashWords
 		return dataset[offset : offset+hashWords]
 	}
-	return hashimoto(hash, nonce, size, lookup)
+	return hashimoto(hash, nonce, uint64(len(dataset))*4, lookup)
 }
 
 // datasetSizes is a lookup table for the ethash dataset size for the first 2048
diff --git a/pow/ethash_algo_test.go b/pow/ethash_algo_test.go
index c881874ff..0605d70ad 100644
--- a/pow/ethash_algo_test.go
+++ b/pow/ethash_algo_test.go
@@ -660,7 +660,7 @@ func TestHashimoto(t *testing.T) {
 	if !bytes.Equal(result, wantResult) {
 		t.Errorf("light hashimoto result mismatch: have %x, want %x", result, wantResult)
 	}
-	digest, result = hashimotoFull(32*1024, dataset, hash, nonce)
+	digest, result = hashimotoFull(dataset, hash, nonce)
 	if !bytes.Equal(digest, wantDigest) {
 		t.Errorf("full hashimoto digest mismatch: have %x, want %x", digest, wantDigest)
 	}
@@ -713,6 +713,17 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
 	pend.Wait()
 }
 
+func TestTestMode(t *testing.T) {
+	head := &types.Header{Difficulty: big.NewInt(100)}
+	ethash := NewTestEthash()
+	nonce, mix := ethash.Search(types.NewBlockWithHeader(head), nil)
+	head.Nonce = types.EncodeNonce(nonce)
+	copy(head.MixDigest[:], mix)
+	if err := ethash.Verify(types.NewBlockWithHeader(head)); err != nil {
+		t.Error("unexpected Verify error:", err)
+	}
+}
+
 // Benchmarks the cache generation performance.
 func BenchmarkCacheGeneration(b *testing.B) {
 	for i := 0; i < b.N; i++ {
@@ -758,6 +769,6 @@ func BenchmarkHashimotoFullSmall(b *testing.B) {
 	b.ResetTimer()
 
 	for i := 0; i < b.N; i++ {
-		hashimotoFull(32*65536, dataset, hash, 0)
+		hashimotoFull(dataset, hash, 0)
 	}
 }