-rw-r--r--  Godeps/Godeps.json | 12
-rw-r--r-- [-rwxr-xr-x]  Godeps/_workspace/src/github.com/ethereum/ethash/setup.py | 0
-rw-r--r--  Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h | 4
-rw-r--r-- [-rwxr-xr-x]  Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.sh | 0
-rw-r--r-- [-rwxr-xr-x]  Godeps/_workspace/src/github.com/ethereum/ethash/test/python/test.sh | 0
-rw-r--r-- [-rwxr-xr-x]  Godeps/_workspace/src/github.com/ethereum/ethash/test/test.sh | 0
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/CONTRIBUTORS | 1
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/LICENSE (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE) | 2
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/dce.go (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go) | 0
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/doc.go (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go) | 0
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/hash.go (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go) | 0
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/json.go | 30
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/json_test.go | 32
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/node.go (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go) | 0
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/seq_test.go | 66
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/sql.go | 40
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/sql_test.go | 53
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/time.go (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go) | 10
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/util.go (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go) | 0
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/uuid.go (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go) | 0
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/uuid_test.go (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go) | 0
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/version1.go (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go) | 4
-rw-r--r--  Godeps/_workspace/src/github.com/pborman/uuid/version4.go (renamed from Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go) | 0
-rw-r--r--  cmd/geth/main.go | 7
-rw-r--r--  core/block_cache.go | 120
-rw-r--r--  core/block_cache_test.go | 76
-rw-r--r--  core/chain_makers.go | 1
-rw-r--r--  core/chain_manager.go | 352
-rw-r--r--  core/chain_manager_test.go | 19
-rw-r--r--  core/chain_util.go | 258
-rw-r--r--  core/chain_util_test.go | 243
-rw-r--r--  core/genesis.go | 20
-rw-r--r--  core/state/state_object.go | 43
-rw-r--r--  core/state/statedb.go | 21
-rw-r--r--  core/types/block.go | 37
-rw-r--r--  core/types/transaction.go | 4
-rw-r--r--  core/vm/errors.go | 17
-rw-r--r--  crypto/crypto.go | 2
-rw-r--r--  crypto/key.go | 2
-rw-r--r--  crypto/key_store_passphrase.go | 2
-rw-r--r--  eth/backend.go | 122
-rw-r--r--  eth/downloader/downloader.go | 11
-rw-r--r--  eth/downloader/downloader_test.go | 44
-rw-r--r--  eth/handler.go | 52
-rw-r--r--  eth/peer.go | 7
-rw-r--r--  miner/worker.go | 4
-rw-r--r--  rpc/api/eth.go | 72
-rw-r--r--  rpc/api/parsing.go | 4
-rw-r--r--  tests/block_test_util.go | 5
-rw-r--r--  xeth/xeth.go | 4
50 files changed, 1122 insertions(+), 681 deletions(-)
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index c939ae670..e4b37a12e 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -6,11 +6,6 @@
],
"Deps": [
{
- "ImportPath": "code.google.com/p/go-uuid/uuid",
- "Comment": "null-12",
- "Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
- },
- {
"ImportPath": "github.com/codegangsta/cli",
"Comment": "1.2.0-95-g9b2bd2b",
"Rev": "9b2bd2b3489748d4d0a204fa4eb2ee9e89e0ebc6"
@@ -21,7 +16,8 @@
},
{
"ImportPath": "github.com/ethereum/ethash",
- "Rev": "227ec953eae56f4f6c7f5e7dc93b4bbebf0cda2e"
+ "Comment": "v23.1-234-g062e40a",
+ "Rev": "062e40a1a1671f5a5102862b56e4c56f68a732f5"
},
{
"ImportPath": "github.com/fatih/color",
@@ -62,6 +58,10 @@
"Rev": "675ffd907b7401b8a709a5ef2249978af5616bb2"
},
{
+ "ImportPath": "github.com/pborman/uuid",
+ "Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
+ },
+ {
"ImportPath": "github.com/peterh/liner",
"Rev": "29f6a646557d83e2b6e9ba05c45fbea9c006dbe8"
},
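The Godeps change above retires the code.google.com/p/go-uuid dependency in favour of its github.com/pborman/uuid successor. A minimal sketch of what the move looks like for importing code (the package is still named uuid, so only the import path changes; this snippet is illustrative and not part of the diff):

package main

import (
	"fmt"

	// previously: "code.google.com/p/go-uuid/uuid"
	"github.com/pborman/uuid"
)

func main() {
	id := uuid.NewRandom() // random (Version 4) UUID; the API is unchanged by the move
	fmt.Println(id.String())
}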
diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/setup.py b/Godeps/_workspace/src/github.com/ethereum/ethash/setup.py
index 18aa20f6d..18aa20f6d 100755..100644
--- a/Godeps/_workspace/src/github.com/ethereum/ethash/setup.py
+++ b/Godeps/_workspace/src/github.com/ethereum/ethash/setup.py
diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h b/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h
index e32b1c539..849325a59 100644
--- a/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h
+++ b/Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h
@@ -41,8 +41,8 @@
#define ethash_swap_u64(input_) swap64(input_)
#else // posix
#include <byteswap.h>
-#define ethash_swap_u32(input_) __bswap_32(input_)
-#define ethash_swap_u64(input_) __bswap_64(input_)
+#define ethash_swap_u32(input_) bswap_32(input_)
+#define ethash_swap_u64(input_) bswap_64(input_)
#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.sh b/Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.sh
index 92b6b8b66..92b6b8b66 100755..100644
--- a/Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.sh
+++ b/Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.sh
diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/test/python/test.sh b/Godeps/_workspace/src/github.com/ethereum/ethash/test/python/test.sh
index 05c66b550..05c66b550 100755..100644
--- a/Godeps/_workspace/src/github.com/ethereum/ethash/test/python/test.sh
+++ b/Godeps/_workspace/src/github.com/ethereum/ethash/test/python/test.sh
diff --git a/Godeps/_workspace/src/github.com/ethereum/ethash/test/test.sh b/Godeps/_workspace/src/github.com/ethereum/ethash/test/test.sh
index aaeaa878c..aaeaa878c 100755..100644
--- a/Godeps/_workspace/src/github.com/ethereum/ethash/test/test.sh
+++ b/Godeps/_workspace/src/github.com/ethereum/ethash/test/test.sh
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/CONTRIBUTORS b/Godeps/_workspace/src/github.com/pborman/uuid/CONTRIBUTORS
new file mode 100644
index 000000000..b382a04ed
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/CONTRIBUTORS
@@ -0,0 +1 @@
+Paul Borman <borman@google.com>
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE b/Godeps/_workspace/src/github.com/pborman/uuid/LICENSE
index ab6b011a1..5dc68268d 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/LICENSE
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 Google Inc. All rights reserved.
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go b/Godeps/_workspace/src/github.com/pborman/uuid/dce.go
index 50a0f2d09..50a0f2d09 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/dce.go
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/dce.go
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go b/Godeps/_workspace/src/github.com/pborman/uuid/doc.go
index d8bd013e6..d8bd013e6 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/doc.go
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/doc.go
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go b/Godeps/_workspace/src/github.com/pborman/uuid/hash.go
index cdd4192fd..cdd4192fd 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/hash.go
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/hash.go
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/json.go b/Godeps/_workspace/src/github.com/pborman/uuid/json.go
new file mode 100644
index 000000000..760580a50
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/json.go
@@ -0,0 +1,30 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "errors"
+
+func (u UUID) MarshalJSON() ([]byte, error) {
+ if len(u) == 0 {
+ return []byte(`""`), nil
+ }
+ return []byte(`"` + u.String() + `"`), nil
+}
+
+func (u *UUID) UnmarshalJSON(data []byte) error {
+ if len(data) == 0 || string(data) == `""` {
+ return nil
+ }
+ if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
+ return errors.New("invalid UUID format")
+ }
+ data = data[1 : len(data)-1]
+ uu := Parse(string(data))
+ if uu == nil {
+ return errors.New("invalid UUID format")
+ }
+ *u = uu
+ return nil
+}
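A rough usage sketch of the MarshalJSON/UnmarshalJSON methods added above; the Record type and its field are hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/pborman/uuid"
)

type Record struct {
	ID uuid.UUID `json:"id"`
}

func main() {
	r := Record{ID: uuid.NewRandom()}

	data, err := json.Marshal(r) // the ID is emitted as a quoted UUID string
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))

	var decoded Record
	if err := json.Unmarshal(data, &decoded); err != nil { // round-trips via Parse
		panic(err)
	}
	fmt.Println(decoded.ID.String())
}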
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/json_test.go b/Godeps/_workspace/src/github.com/pborman/uuid/json_test.go
new file mode 100644
index 000000000..b5eae0924
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/json_test.go
@@ -0,0 +1,32 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
+var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
+
+func TestJSON(t *testing.T) {
+ type S struct {
+ ID1 UUID
+ ID2 UUID
+ }
+ s1 := S{ID1: testUUID}
+ data, err := json.Marshal(&s1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var s2 S
+ if err := json.Unmarshal(data, &s2); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(&s1, &s2) {
+ t.Errorf("got %#v, want %#v", s2, s1)
+ }
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go b/Godeps/_workspace/src/github.com/pborman/uuid/node.go
index dd0a8ac18..dd0a8ac18 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/node.go
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/node.go
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/seq_test.go b/Godeps/_workspace/src/github.com/pborman/uuid/seq_test.go
new file mode 100644
index 000000000..3b3d1430d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/seq_test.go
@@ -0,0 +1,66 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "flag"
+ "runtime"
+ "testing"
+ "time"
+)
+
+// This test is only run when --regressions is passed on the go test line.
+var regressions = flag.Bool("regressions", false, "run uuid regression tests")
+
+// TestClockSeqRace tests for a particular race condition of returning two
+// identical Version1 UUIDs. The duration of 1 minute was chosen because the race
+// condition, before being fixed, nearly always occurred in under 30 seconds.
+func TestClockSeqRace(t *testing.T) {
+ if !*regressions {
+ t.Skip("skipping regression tests")
+ }
+ duration := time.Minute
+
+ done := make(chan struct{})
+ defer close(done)
+
+ ch := make(chan UUID, 10000)
+ ncpu := runtime.NumCPU()
+ switch ncpu {
+ case 0, 1:
+ // We can't run the test effectively.
+ t.Skip("skipping race test, only one CPU detected")
+ return
+ default:
+ runtime.GOMAXPROCS(ncpu)
+ }
+ for i := 0; i < ncpu; i++ {
+ go func() {
+ for {
+ select {
+ case <-done:
+ return
+ case ch <- NewUUID():
+ }
+ }
+ }()
+ }
+
+ uuids := make(map[string]bool)
+ cnt := 0
+ start := time.Now()
+ for u := range ch {
+ s := u.String()
+ if uuids[s] {
+ t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s)
+ return
+ }
+ uuids[s] = true
+ if time.Since(start) > duration {
+ return
+ }
+ cnt++
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/sql.go b/Godeps/_workspace/src/github.com/pborman/uuid/sql.go
new file mode 100644
index 000000000..2d7679e2a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/sql.go
@@ -0,0 +1,40 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+ switch src.(type) {
+ case string:
+ // see uuid.Parse for required string format
+ parsed := Parse(src.(string))
+
+ if parsed == nil {
+ return errors.New("Scan: invalid UUID format")
+ }
+
+ *uuid = parsed
+ case []byte:
+ // assumes a simple slice of bytes, just check validity and store
+ u := UUID(src.([]byte))
+
+ if u.Variant() == Invalid {
+ return errors.New("Scan: invalid UUID format")
+ }
+
+ *uuid = u
+ default:
+ return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+ }
+
+ return nil
+}
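A sketch of how the Scan method above is typically exercised through database/sql; the table, column, and surrounding setup are hypothetical, and any driver that returns the column as a string or []byte will do:

package example

import (
	"database/sql"

	"github.com/pborman/uuid"
)

// loadID reads a UUID column straight into a uuid.UUID; database/sql notices
// that *uuid.UUID implements sql.Scanner and calls the Scan method above.
func loadID(db *sql.DB) (uuid.UUID, error) {
	var id uuid.UUID
	err := db.QueryRow("SELECT id FROM accounts LIMIT 1").Scan(&id)
	return id, err
}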
diff --git a/Godeps/_workspace/src/github.com/pborman/uuid/sql_test.go b/Godeps/_workspace/src/github.com/pborman/uuid/sql_test.go
new file mode 100644
index 000000000..d643567ee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/sql_test.go
@@ -0,0 +1,53 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestScan(t *testing.T) {
+ var stringTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d479"
+ var byteTest []byte = Parse(stringTest)
+ var badTypeTest int = 6
+ var invalidTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d4"
+ var invalidByteTest []byte = Parse(invalidTest)
+
+ var uuid UUID
+ err := (&uuid).Scan(stringTest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = (&uuid).Scan(byteTest)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = (&uuid).Scan(badTypeTest)
+ if err == nil {
+ t.Error("int correctly parsed and shouldn't have")
+ }
+ if !strings.Contains(err.Error(), "unable to scan type") {
+ t.Error("attempting to parse an int returned an incorrect error message")
+ }
+
+ err = (&uuid).Scan(invalidTest)
+ if err == nil {
+ t.Error("invalid uuid was parsed without error")
+ }
+ if !strings.Contains(err.Error(), "invalid UUID") {
+ t.Error("attempting to parse an invalid UUID returned an incorrect error message")
+ }
+
+ err = (&uuid).Scan(invalidByteTest)
+ if err == nil {
+ t.Error("invalid byte uuid was parsed without error")
+ }
+ if !strings.Contains(err.Error(), "invalid UUID") {
+ t.Error("attempting to parse an invalid byte UUID returned an incorrect error message")
+ }
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go b/Godeps/_workspace/src/github.com/pborman/uuid/time.go
index b9369c200..7ebc9bef1 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/time.go
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/time.go
@@ -40,15 +40,15 @@ func (t Time) UnixTime() (sec, nsec int64) {
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
-// adjusts the clock sequence as needed. An error is returned if the current
-// time cannot be determined.
-func GetTime() (Time, error) {
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
defer mu.Unlock()
mu.Lock()
return getTime()
}
-func getTime() (Time, error) {
+func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
@@ -63,7 +63,7 @@ func getTime() (Time, error) {
clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
}
lasttime = now
- return Time(now), nil
+ return Time(now), clock_seq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go b/Godeps/_workspace/src/github.com/pborman/uuid/util.go
index de40b102c..de40b102c 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/util.go
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/util.go
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go b/Godeps/_workspace/src/github.com/pborman/uuid/uuid.go
index 2920fae63..2920fae63 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid.go
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/uuid.go
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go b/Godeps/_workspace/src/github.com/pborman/uuid/uuid_test.go
index 417ebeb26..417ebeb26 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/uuid_test.go
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go b/Godeps/_workspace/src/github.com/pborman/uuid/version1.go
index 63580044b..0127eacfa 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version1.go
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/version1.go
@@ -19,7 +19,7 @@ func NewUUID() UUID {
SetNodeInterface("")
}
- now, err := GetTime()
+ now, seq, err := GetTime()
if err != nil {
return nil
}
@@ -34,7 +34,7 @@ func NewUUID() UUID {
binary.BigEndian.PutUint32(uuid[0:], time_low)
binary.BigEndian.PutUint16(uuid[4:], time_mid)
binary.BigEndian.PutUint16(uuid[6:], time_hi)
- binary.BigEndian.PutUint16(uuid[8:], clock_seq)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
copy(uuid[10:], nodeID)
return uuid
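GetTime now returns the clock sequence captured under the same lock as the timestamp, which is what lets NewUUID avoid the duplicate-UUID race exercised by TestClockSeqRace above. A minimal sketch of a caller under the new three-value signature:

package example

import (
	"fmt"

	"github.com/pborman/uuid"
)

func showTime() error {
	// Timestamp and clock sequence are captured atomically with respect to
	// each other, so concurrent Version 1 generators cannot reuse the pair.
	t, seq, err := uuid.GetTime()
	if err != nil {
		return err
	}
	sec, nsec := t.UnixTime()
	fmt.Printf("time=%d.%09d clockseq=%d\n", sec, nsec, seq)
	return nil
}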
diff --git a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go b/Godeps/_workspace/src/github.com/pborman/uuid/version4.go
index b3d4a368d..b3d4a368d 100644
--- a/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/version4.go
+++ b/Godeps/_workspace/src/github.com/pborman/uuid/version4.go
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index f72f69791..ba753a493 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -528,17 +528,16 @@ func blockRecovery(ctx *cli.Context) {
var block *types.Block
if arg[0] == '#' {
- block = core.GetBlockByNumber(blockDb, common.String2Big(arg[1:]).Uint64())
+ block = core.GetBlock(blockDb, core.GetCanonicalHash(blockDb, common.String2Big(arg[1:]).Uint64()))
} else {
- block = core.GetBlockByHash(blockDb, common.HexToHash(arg))
+ block = core.GetBlock(blockDb, common.HexToHash(arg))
}
if block == nil {
glog.Fatalln("block not found. Recovery failed")
}
- err = core.WriteHead(blockDb, block)
- if err != nil {
+ if err = core.WriteHeadBlockHash(blockDb, block.Hash()); err != nil {
glog.Fatalln("block write err", err)
}
glog.Infof("Recovery succesful. New HEAD %x\n", block.Hash())
diff --git a/core/block_cache.go b/core/block_cache.go
deleted file mode 100644
index 0fd711448..000000000
--- a/core/block_cache.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "sync"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
-)
-
-// BlockCache implements a caching mechanism specifically for blocks and uses FILO to pop
-type BlockCache struct {
- size int
-
- hashes []common.Hash
- blocks map[common.Hash]*types.Block
-
- mu sync.RWMutex
-}
-
-// Creates and returns a `BlockCache` with `size`. If `size` is smaller than 1 it will panic
-func NewBlockCache(size int) *BlockCache {
- if size < 1 {
- panic("block cache size not allowed to be smaller than 1")
- }
-
- bc := &BlockCache{size: size}
- bc.Clear()
- return bc
-}
-
-func (bc *BlockCache) Clear() {
- bc.blocks = make(map[common.Hash]*types.Block)
- bc.hashes = nil
-
-}
-
-func (bc *BlockCache) Push(block *types.Block) {
- bc.mu.Lock()
- defer bc.mu.Unlock()
-
- if len(bc.hashes) == bc.size {
- delete(bc.blocks, bc.hashes[0])
-
- // XXX There are a few other options on solving this
- // 1) use a poller / GC like mechanism to clean up untracked objects
- // 2) copy as below
- // re-use the slice and remove the reference to bc.hashes[0]
- // this will allow the element to be garbage collected.
- copy(bc.hashes, bc.hashes[1:])
- } else {
- bc.hashes = append(bc.hashes, common.Hash{})
- }
-
- hash := block.Hash()
- bc.blocks[hash] = block
- bc.hashes[len(bc.hashes)-1] = hash
-}
-
-func (bc *BlockCache) Delete(hash common.Hash) {
- bc.mu.Lock()
- defer bc.mu.Unlock()
-
- if _, ok := bc.blocks[hash]; ok {
- delete(bc.blocks, hash)
- for i, h := range bc.hashes {
- if hash == h {
- bc.hashes = bc.hashes[:i+copy(bc.hashes[i:], bc.hashes[i+1:])]
- // or ? => bc.hashes = append(bc.hashes[:i], bc.hashes[i+1]...)
-
- break
- }
- }
- }
-}
-
-func (bc *BlockCache) Get(hash common.Hash) *types.Block {
- bc.mu.RLock()
- defer bc.mu.RUnlock()
-
- if block, haz := bc.blocks[hash]; haz {
- return block
- }
-
- return nil
-}
-
-func (bc *BlockCache) Has(hash common.Hash) bool {
- bc.mu.RLock()
- defer bc.mu.RUnlock()
-
- _, ok := bc.blocks[hash]
- return ok
-}
-
-func (bc *BlockCache) Each(cb func(int, *types.Block)) {
- bc.mu.Lock()
- defer bc.mu.Unlock()
-
- i := 0
- for _, block := range bc.blocks {
- cb(i, block)
- i++
- }
-}
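The hand-rolled block cache deleted above is replaced by hashicorp/golang-lru caches wired into ChainManager further down in this diff. A minimal sketch of the replacement pattern, assuming the same block and hash types:

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/hashicorp/golang-lru"
)

// newBlockCache mirrors the blockCache field ChainManager now carries.
func newBlockCache(limit int) *lru.Cache {
	cache, _ := lru.New(limit) // evicts the least-recently-used entry once full
	return cache
}

func cacheBlock(cache *lru.Cache, block *types.Block) {
	cache.Add(block.Hash(), block)
}

func cachedBlock(cache *lru.Cache, hash common.Hash) *types.Block {
	if cached, ok := cache.Get(hash); ok {
		return cached.(*types.Block) // values come back as interface{}, as in ChainManager.GetBlock
	}
	return nil
}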
diff --git a/core/block_cache_test.go b/core/block_cache_test.go
deleted file mode 100644
index ef826d5bd..000000000
--- a/core/block_cache_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
-)
-
-func newChain(size int) (chain []*types.Block) {
- var parentHash common.Hash
- for i := 0; i < size; i++ {
- head := &types.Header{ParentHash: parentHash, Number: big.NewInt(int64(i))}
- block := types.NewBlock(head, nil, nil, nil)
- chain = append(chain, block)
- parentHash = block.Hash()
- }
- return chain
-}
-
-func insertChainCache(cache *BlockCache, chain []*types.Block) {
- for _, block := range chain {
- cache.Push(block)
- }
-}
-
-func TestNewBlockCache(t *testing.T) {
- chain := newChain(3)
- cache := NewBlockCache(2)
- insertChainCache(cache, chain)
-
- if cache.hashes[0] != chain[1].Hash() {
- t.Error("oldest block incorrect")
- }
-}
-
-func TestInclusion(t *testing.T) {
- chain := newChain(3)
- cache := NewBlockCache(3)
- insertChainCache(cache, chain)
-
- for _, block := range chain {
- if b := cache.Get(block.Hash()); b == nil {
- t.Errorf("getting %x failed", block.Hash())
- }
- }
-}
-
-func TestDeletion(t *testing.T) {
- chain := newChain(3)
- cache := NewBlockCache(3)
- insertChainCache(cache, chain)
-
- cache.Delete(chain[1].Hash())
-
- if cache.Has(chain[1].Hash()) {
- t.Errorf("expected %x not to be included")
- }
-}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index b009e0c28..f89218f82 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -158,7 +158,6 @@ func GenerateChain(parent *types.Block, db common.Database, n int, gen func(int,
for i := 0; i < n; i++ {
header := makeHeader(parent, statedb)
block := genblock(i, header)
- block.Td = CalcTD(block, parent)
blocks[i] = block
parent = block
}
diff --git a/core/chain_manager.go b/core/chain_manager.go
index c8127951e..407945f8e 100644
--- a/core/chain_manager.go
+++ b/core/chain_manager.go
@@ -35,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/pow"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/hashicorp/golang-lru"
)
@@ -48,6 +49,9 @@ var (
)
const (
+ headerCacheLimit = 512
+ bodyCacheLimit = 256
+ tdCacheLimit = 1024
blockCacheLimit = 256
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
@@ -68,10 +72,13 @@ type ChainManager struct {
checkpoint int // checkpoint counts towards the new checkpoint
td *big.Int
currentBlock *types.Block
- lastBlockHash common.Hash
currentGasLimit *big.Int
- cache *lru.Cache // cache is the LRU caching
+ headerCache *lru.Cache // Cache for the most recent block headers
+ bodyCache *lru.Cache // Cache for the most recent block bodies
+ bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
+ tdCache *lru.Cache // Cache for the most recent block total difficulties
+ blockCache *lru.Cache // Cache for the most recent entire blocks
futureBlocks *lru.Cache // future blocks are blocks added for later processing
quit chan struct{}
@@ -84,13 +91,24 @@ type ChainManager struct {
}
func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
- cache, _ := lru.New(blockCacheLimit)
+ headerCache, _ := lru.New(headerCacheLimit)
+ bodyCache, _ := lru.New(bodyCacheLimit)
+ bodyRLPCache, _ := lru.New(bodyCacheLimit)
+ tdCache, _ := lru.New(tdCacheLimit)
+ blockCache, _ := lru.New(blockCacheLimit)
+ futureBlocks, _ := lru.New(maxFutureBlocks)
+
bc := &ChainManager{
- chainDb: chainDb,
- eventMux: mux,
- quit: make(chan struct{}),
- cache: cache,
- pow: pow,
+ chainDb: chainDb,
+ eventMux: mux,
+ quit: make(chan struct{}),
+ headerCache: headerCache,
+ bodyCache: bodyCache,
+ bodyRLPCache: bodyRLPCache,
+ tdCache: tdCache,
+ blockCache: blockCache,
+ futureBlocks: futureBlocks,
+ pow: pow,
}
bc.genesisBlock = bc.GetBlockByNumber(0)
@@ -105,11 +123,9 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (
}
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
}
-
if err := bc.setLastState(); err != nil {
return nil, err
}
-
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash, _ := range BadHashes {
if block := bc.GetBlock(hash); block != nil {
@@ -123,14 +139,8 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (
glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
}
}
-
// Take ownership of this particular state
-
- bc.futureBlocks, _ = lru.New(maxFutureBlocks)
- bc.makeCache()
-
go bc.update()
-
return bc, nil
}
@@ -139,14 +149,16 @@ func (bc *ChainManager) SetHead(head *types.Block) {
defer bc.mu.Unlock()
for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
- bc.removeBlock(block)
+ DeleteBlock(bc.chainDb, block.Hash())
}
+ bc.headerCache.Purge()
+ bc.bodyCache.Purge()
+ bc.bodyRLPCache.Purge()
+ bc.blockCache.Purge()
+ bc.futureBlocks.Purge()
- bc.cache, _ = lru.New(blockCacheLimit)
bc.currentBlock = head
- bc.makeCache()
-
- bc.setTotalDifficulty(head.Td)
+ bc.setTotalDifficulty(bc.GetTd(head.Hash()))
bc.insert(head)
bc.setLastState()
}
@@ -169,7 +181,7 @@ func (self *ChainManager) LastBlockHash() common.Hash {
self.mu.RLock()
defer self.mu.RUnlock()
- return self.lastBlockHash
+ return self.currentBlock.Hash()
}
func (self *ChainManager) CurrentBlock() *types.Block {
@@ -199,13 +211,13 @@ func (bc *ChainManager) recover() bool {
if len(data) != 0 {
block := bc.GetBlock(common.BytesToHash(data))
if block != nil {
- err := bc.chainDb.Put([]byte("LastBlock"), block.Hash().Bytes())
- if err != nil {
- glog.Fatalln("db write err:", err)
+ if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
+ glog.Fatalf("failed to write database head number: %v", err)
+ }
+ if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
+ glog.Fatalf("failed to write database head hash: %v", err)
}
-
bc.currentBlock = block
- bc.lastBlockHash = block.Hash()
return true
}
}
@@ -213,14 +225,13 @@ func (bc *ChainManager) recover() bool {
}
func (bc *ChainManager) setLastState() error {
- data, _ := bc.chainDb.Get([]byte("LastBlock"))
- if len(data) != 0 {
- block := bc.GetBlock(common.BytesToHash(data))
+ head := GetHeadBlockHash(bc.chainDb)
+ if head != (common.Hash{}) {
+ block := bc.GetBlock(head)
if block != nil {
bc.currentBlock = block
- bc.lastBlockHash = block.Hash()
} else {
- glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
+ glog.Infof("LastBlock (%x) not found. Recovering...\n", head)
if bc.recover() {
glog.Infof("Recover successful")
} else {
@@ -230,7 +241,7 @@ func (bc *ChainManager) setLastState() error {
} else {
bc.Reset()
}
- bc.td = bc.currentBlock.Td
+ bc.td = bc.GetTd(bc.currentBlock.Hash())
bc.currentGasLimit = CalcGasLimit(bc.currentBlock)
if glog.V(logger.Info) {
@@ -240,63 +251,37 @@ func (bc *ChainManager) setLastState() error {
return nil
}
-func (bc *ChainManager) makeCache() {
- bc.cache, _ = lru.New(blockCacheLimit)
- // load in last `blockCacheLimit` - 1 blocks. Last block is the current.
- bc.cache.Add(bc.genesisBlock.Hash(), bc.genesisBlock)
- for _, block := range bc.GetBlocksFromHash(bc.currentBlock.Hash(), blockCacheLimit) {
- bc.cache.Add(block.Hash(), block)
- }
-}
-
+// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *ChainManager) Reset() {
- bc.mu.Lock()
- defer bc.mu.Unlock()
-
- for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
- bc.removeBlock(block)
- }
-
- bc.cache, _ = lru.New(blockCacheLimit)
-
- // Prepare the genesis block
- err := WriteBlock(bc.chainDb, bc.genesisBlock)
- if err != nil {
- glog.Fatalln("db err:", err)
- }
-
- bc.insert(bc.genesisBlock)
- bc.currentBlock = bc.genesisBlock
- bc.makeCache()
-
- bc.setTotalDifficulty(common.Big("0"))
+ bc.ResetWithGenesisBlock(bc.genesisBlock)
}
-func (bc *ChainManager) removeBlock(block *types.Block) {
- bc.chainDb.Delete(append(blockHashPre, block.Hash().Bytes()...))
-}
-
-func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) {
+// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
+// specified genesis state.
+func (bc *ChainManager) ResetWithGenesisBlock(genesis *types.Block) {
bc.mu.Lock()
defer bc.mu.Unlock()
+ // Dump the entire block chain and purge the caches
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
- bc.removeBlock(block)
+ DeleteBlock(bc.chainDb, block.Hash())
}
+ bc.headerCache.Purge()
+ bc.bodyCache.Purge()
+ bc.bodyRLPCache.Purge()
+ bc.blockCache.Purge()
+ bc.futureBlocks.Purge()
- // Prepare the genesis block
- gb.Td = gb.Difficulty()
- bc.genesisBlock = gb
-
- err := WriteBlock(bc.chainDb, bc.genesisBlock)
- if err != nil {
- glog.Fatalln("db err:", err)
+ // Prepare the genesis block and reinitialize the chain
+ if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil {
+ glog.Fatalf("failed to write genesis block TD: %v", err)
+ }
+ if err := WriteBlock(bc.chainDb, genesis); err != nil {
+ glog.Fatalf("failed to write genesis block: %v", err)
}
-
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
- bc.makeCache()
- bc.td = gb.Difficulty()
+ bc.setTotalDifficulty(genesis.Difficulty())
}
// Export writes the active chain to the given writer.
@@ -335,23 +320,23 @@ func (self *ChainManager) ExportN(w io.Writer, first uint64, last uint64) error
// insert injects a block into the current chain block chain. Note, this function
// assumes that the `mu` mutex is held!
func (bc *ChainManager) insert(block *types.Block) {
- err := WriteHead(bc.chainDb, block)
- if err != nil {
- glog.Fatal("db write fail:", err)
+ // Add the block to the canonical chain number scheme and mark as the head
+ if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
+ glog.Fatalf("failed to insert block number: %v", err)
}
-
+ if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
+ glog.Fatalf("failed to insert block number: %v", err)
+ }
+ // Add a new restore point if we reached some limit
bc.checkpoint++
if bc.checkpoint > checkpointLimit {
- err = bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes())
- if err != nil {
- glog.Fatal("db write fail:", err)
+ if err := bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes()); err != nil {
+ glog.Fatalf("failed to create checkpoint: %v", err)
}
-
bc.checkpoint = 0
}
-
+ // Update the internal state with the head block
bc.currentBlock = block
- bc.lastBlockHash = block.Hash()
}
// Accessors
@@ -359,61 +344,141 @@ func (bc *ChainManager) Genesis() *types.Block {
return bc.genesisBlock
}
-// Block fetching methods
-func (bc *ChainManager) HasBlock(hash common.Hash) bool {
- if bc.cache.Contains(hash) {
- return true
+// HasHeader checks if a block header is present in the database or not, caching
+// it if present.
+func (bc *ChainManager) HasHeader(hash common.Hash) bool {
+ return bc.GetHeader(hash) != nil
+}
+
+// GetHeader retrieves a block header from the database by hash, caching it if
+// found.
+func (self *ChainManager) GetHeader(hash common.Hash) *types.Header {
+ // Short circuit if the header's already in the cache, retrieve otherwise
+ if header, ok := self.headerCache.Get(hash); ok {
+ return header.(*types.Header)
+ }
+ header := GetHeader(self.chainDb, hash)
+ if header == nil {
+ return nil
}
+ // Cache the found header for next time and return
+ self.headerCache.Add(header.Hash(), header)
+ return header
+}
- data, _ := bc.chainDb.Get(append(blockHashPre, hash[:]...))
- return len(data) != 0
+// GetHeaderByNumber retrieves a block header from the database by number,
+// caching it (associated with its hash) if found.
+func (self *ChainManager) GetHeaderByNumber(number uint64) *types.Header {
+ hash := GetCanonicalHash(self.chainDb, number)
+ if hash == (common.Hash{}) {
+ return nil
+ }
+ return self.GetHeader(hash)
}
-func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) (chain []common.Hash) {
- block := self.GetBlock(hash)
- if block == nil {
- return
+// GetBody retrieves a block body (transactions and uncles) from the database by
+// hash, caching it if found.
+func (self *ChainManager) GetBody(hash common.Hash) *types.Body {
+ // Short circuit if the body's already in the cache, retrieve otherwise
+ if cached, ok := self.bodyCache.Get(hash); ok {
+ body := cached.(*types.Body)
+ return body
}
- // XXX Could be optimised by using a different database which only holds hashes (i.e., linked list)
- for i := uint64(0); i < max; i++ {
- block = self.GetBlock(block.ParentHash())
- if block == nil {
- break
- }
+ body := GetBody(self.chainDb, hash)
+ if body == nil {
+ return nil
+ }
+ // Cache the found body for next time and return
+ self.bodyCache.Add(hash, body)
+ return body
+}
- chain = append(chain, block.Hash())
- if block.Number().Cmp(common.Big0) <= 0 {
- break
- }
+// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
+// caching it if found.
+func (self *ChainManager) GetBodyRLP(hash common.Hash) rlp.RawValue {
+ // Short circuit if the body's already in the cache, retrieve otherwise
+ if cached, ok := self.bodyRLPCache.Get(hash); ok {
+ return cached.(rlp.RawValue)
}
+ body := GetBodyRLP(self.chainDb, hash)
+ if len(body) == 0 {
+ return nil
+ }
+ // Cache the found body for next time and return
+ self.bodyRLPCache.Add(hash, body)
+ return body
+}
- return
+// GetTd retrieves a block's total difficulty in the canonical chain from the
+// database by hash, caching it if found.
+func (self *ChainManager) GetTd(hash common.Hash) *big.Int {
+ // Short circuit if the td's already in the cache, retrieve otherwise
+ if cached, ok := self.tdCache.Get(hash); ok {
+ return cached.(*big.Int)
+ }
+ td := GetTd(self.chainDb, hash)
+ if td == nil {
+ return nil
+ }
+ // Cache the found td for next time and return
+ self.tdCache.Add(hash, td)
+ return td
+}
+
+// HasBlock checks if a block is fully present in the database or not, caching
+// it if present.
+func (bc *ChainManager) HasBlock(hash common.Hash) bool {
+ return bc.GetBlock(hash) != nil
}
+// GetBlock retrieves a block from the database by hash, caching it if found.
func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
- if block, ok := self.cache.Get(hash); ok {
+ // Short circuit if the block's already in the cache, retrieve otherwise
+ if block, ok := self.blockCache.Get(hash); ok {
return block.(*types.Block)
}
-
- block := GetBlockByHash(self.chainDb, hash)
+ block := GetBlock(self.chainDb, hash)
if block == nil {
return nil
}
-
- // Add the block to the cache
- self.cache.Add(hash, (*types.Block)(block))
-
- return (*types.Block)(block)
+ // Cache the found block for next time and return
+ self.blockCache.Add(block.Hash(), block)
+ return block
}
-func (self *ChainManager) GetBlockByNumber(num uint64) *types.Block {
- self.mu.RLock()
- defer self.mu.RUnlock()
-
- return self.getBlockByNumber(num)
+// GetBlockByNumber retrieves a block from the database by number, caching it
+// (associated with its hash) if found.
+func (self *ChainManager) GetBlockByNumber(number uint64) *types.Block {
+ hash := GetCanonicalHash(self.chainDb, number)
+ if hash == (common.Hash{}) {
+ return nil
+ }
+ return self.GetBlock(hash)
+}
+// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
+// hash, fetching towards the genesis block.
+func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
+ // Get the origin header from which to fetch
+ header := self.GetHeader(hash)
+ if header == nil {
+ return nil
+ }
+ // Iterate the headers until enough is collected or the genesis reached
+ chain := make([]common.Hash, 0, max)
+ for i := uint64(0); i < max; i++ {
+ if header = self.GetHeader(header.ParentHash); header == nil {
+ break
+ }
+ chain = append(chain, header.Hash())
+ if header.Number.Cmp(common.Big0) == 0 {
+ break
+ }
+ }
+ return chain
}
+// [deprecated by eth/62]
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
for i := 0; i < n; i++ {
@@ -427,11 +492,6 @@ func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*
return
}
-// non blocking version
-func (self *ChainManager) getBlockByNumber(num uint64) *types.Block {
- return GetBlockByNumber(self.chainDb, num)
-}
-
func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
@@ -487,15 +547,25 @@ const (
SideStatTy
)
-// WriteBlock writes the block to the chain (or pending queue)
-func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status writeStatus, err error) {
+// WriteBlock writes the block to the chain.
+func (self *ChainManager) WriteBlock(block *types.Block) (status writeStatus, err error) {
self.wg.Add(1)
defer self.wg.Done()
+ // Calculate the total difficulty of the block
+ ptd := self.GetTd(block.ParentHash())
+ if ptd == nil {
+ return NonStatTy, ParentError(block.ParentHash())
+ }
+ td := new(big.Int).Add(block.Difficulty(), ptd)
+
+ self.mu.RLock()
cblock := self.currentBlock
+ self.mu.RUnlock()
+
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
- if block.Td.Cmp(self.Td()) > 0 {
+ if td.Cmp(self.Td()) > 0 {
// chain fork
if block.ParentHash() != cblock.Hash() {
// during split we merge two different chains and create the new canonical chain
@@ -503,12 +573,10 @@ func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status wr
if err != nil {
return NonStatTy, err
}
-
status = SplitStatTy
}
-
self.mu.Lock()
- self.setTotalDifficulty(block.Td)
+ self.setTotalDifficulty(td)
self.insert(block)
self.mu.Unlock()
@@ -517,9 +585,11 @@ func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status wr
status = SideStatTy
}
- err = WriteBlock(self.chainDb, block)
- if err != nil {
- glog.Fatalln("db err:", err)
+ if err := WriteTd(self.chainDb, block.Hash(), td); err != nil {
+ glog.Fatalf("failed to write block total difficulty: %v", err)
+ }
+ if err := WriteBlock(self.chainDb, block); err != nil {
+ glog.Fatalf("filed to write block contents: %v", err)
}
// Delete from future blocks
self.futureBlocks.Remove(block.Hash())
@@ -578,11 +648,6 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
blockErr(block, err)
return i, err
}
-
- // Setting block.Td regardless of error (known for example) prevents errors down the line
- // in the protocol handler
- block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash())))
-
// Call in to the block processor and check for errors. It's likely that if one block fails
// all others will fail too (unless a known block is returned).
logs, receipts, err := self.processor.Process(block)
@@ -622,7 +687,7 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
txcount += len(block.Transactions())
// write the block to the chain and get the status
- status, err := self.WriteBlock(block, true)
+ status, err := self.WriteBlock(block)
if err != nil {
return i, err
}
@@ -755,12 +820,11 @@ out:
case ChainEvent:
// We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long
// and in most cases isn't even necessary.
- if self.lastBlockHash == event.Hash {
+ if self.currentBlock.Hash() == event.Hash {
self.currentGasLimit = CalcGasLimit(event.Block)
self.eventMux.Post(ChainHeadEvent{event.Block})
}
}
-
self.eventMux.Post(event)
}
}
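With block.Td and CalcTD gone, WriteBlock above recomputes the total difficulty from persisted state: the parent's stored TD plus the block's own difficulty, written back with WriteTd, and the block only becomes the new head when that sum exceeds the current chain's TD. The arithmetic itself reduces to a one-liner; a self-contained sketch:

package example

import "math/big"

// totalDifficulty mirrors the computation WriteBlock now performs in place of
// the removed CalcTD/block.Td pair: td(block) = td(parent) + difficulty(block).
func totalDifficulty(parentTd, blockDifficulty *big.Int) *big.Int {
	return new(big.Int).Add(blockDifficulty, parentTd)
}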
diff --git a/core/chain_manager_test.go b/core/chain_manager_test.go
index 002dcbe44..a20480de8 100644
--- a/core/chain_manager_test.go
+++ b/core/chain_manager_test.go
@@ -77,6 +77,7 @@ func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big
bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash()
bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash()
if bi1 != bi2 {
+ fmt.Printf("%+v\n%+v\n\n", bi1, bi2)
t.Fatal("chains do not have the same hash at height", i)
}
bman2.bc.SetProcessor(bman2)
@@ -110,7 +111,6 @@ func printChain(bc *ChainManager) {
// process blocks against a chain
func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
- td := new(big.Int)
for _, block := range chainB {
_, _, err := bman.bc.processor.Process(block)
if err != nil {
@@ -119,17 +119,12 @@ func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
}
return nil, err
}
- parent := bman.bc.GetBlock(block.ParentHash())
- block.Td = CalcTD(block, parent)
- td = block.Td
-
bman.bc.mu.Lock()
- {
- WriteBlock(bman.bc.chainDb, block)
- }
+ WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash())))
+ WriteBlock(bman.bc.chainDb, block)
bman.bc.mu.Unlock()
}
- return td, nil
+ return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil
}
func loadChain(fn string, t *testing.T) (types.Blocks, error) {
@@ -388,7 +383,11 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block
func chm(genesis *types.Block, db common.Database) *ChainManager {
var eventMux event.TypeMux
bc := &ChainManager{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
- bc.cache, _ = lru.New(100)
+ bc.headerCache, _ = lru.New(100)
+ bc.bodyCache, _ = lru.New(100)
+ bc.bodyRLPCache, _ = lru.New(100)
+ bc.tdCache, _ = lru.New(100)
+ bc.blockCache, _ = lru.New(100)
bc.futureBlocks, _ = lru.New(100)
bc.processor = bproc{}
bc.ResetWithGenesisBlock(genesis)
diff --git a/core/chain_util.go b/core/chain_util.go
index 84b462ce3..0e3fa31f9 100644
--- a/core/chain_util.go
+++ b/core/chain_util.go
@@ -19,7 +19,6 @@ package core
import (
"bytes"
"math/big"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -30,9 +29,18 @@ import (
)
var (
- blockHashPre = []byte("block-hash-")
- blockNumPre = []byte("block-num-")
+ headHeaderKey = []byte("LastHeader")
+ headBlockKey = []byte("LastBlock")
+
+ blockPrefix = []byte("block-")
+ blockNumPrefix = []byte("block-num-")
+
+ headerSuffix = []byte("-header")
+ bodySuffix = []byte("-body")
+ tdSuffix = []byte("-td")
+
ExpDiffPeriod = big.NewInt(100000)
+ blockHashPre = []byte("block-hash-") // [deprecated by eth/63]
)
// CalcDifficulty is the difficulty adjustment algorithm. It returns
@@ -69,16 +77,6 @@ func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int)
return diff
}
-// CalcTD computes the total difficulty of block.
-func CalcTD(block, parent *types.Block) *big.Int {
- if parent == nil {
- return block.Difficulty()
- }
- d := block.Difficulty()
- d.Add(d, parent.Td)
- return d
-}
-
// CalcGasLimit computes the gas limit of the next block after parent.
// The result may be modified by the caller.
// This is miner strategy, not consensus protocol.
@@ -112,68 +110,238 @@ func CalcGasLimit(parent *types.Block) *big.Int {
return gl
}
-// GetBlockByHash returns the block corresponding to the hash or nil if not found
-func GetBlockByHash(db common.Database, hash common.Hash) *types.Block {
- data, _ := db.Get(append(blockHashPre, hash[:]...))
+// GetCanonicalHash retrieves a hash assigned to a canonical block number.
+func GetCanonicalHash(db common.Database, number uint64) common.Hash {
+ data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// GetHeadHeaderHash retrieves the hash of the current canonical head block's
+// header. The difference between this and GetHeadBlockHash is that whereas the
+// last block hash is only updated upon a full block import, the last header
+// hash is updated already at header import, allowing head tracking for the
+// fast synchronization mechanism.
+func GetHeadHeaderHash(db common.Database) common.Hash {
+ data, _ := db.Get(headHeaderKey)
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// GetHeadBlockHash retrieves the hash of the current canonical head block.
+func GetHeadBlockHash(db common.Database) common.Hash {
+ data, _ := db.Get(headBlockKey)
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
+// if the header's not found.
+func GetHeaderRLP(db common.Database, hash common.Hash) rlp.RawValue {
+ data, _ := db.Get(append(append(blockPrefix, hash[:]...), headerSuffix...))
+ return data
+}
+
+// GetHeader retrieves the block header corresponding to the hash, nil if none
+// found.
+func GetHeader(db common.Database, hash common.Hash) *types.Header {
+ data := GetHeaderRLP(db, hash)
if len(data) == 0 {
return nil
}
- var block types.StorageBlock
- if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
- glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
+ header := new(types.Header)
+ if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
+ glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err)
return nil
}
- return (*types.Block)(&block)
+ return header
+}
+
+// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
+func GetBodyRLP(db common.Database, hash common.Hash) rlp.RawValue {
+ data, _ := db.Get(append(append(blockPrefix, hash[:]...), bodySuffix...))
+ return data
}
-// GetBlockByHash returns the canonical block by number or nil if not found
-func GetBlockByNumber(db common.Database, number uint64) *types.Block {
- key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...))
- if len(key) == 0 {
+// GetBody retrieves the block body (transactions, uncles) corresponding to the
+// hash, nil if none found.
+func GetBody(db common.Database, hash common.Hash) *types.Body {
+ data := GetBodyRLP(db, hash)
+ if len(data) == 0 {
+ return nil
+ }
+ body := new(types.Body)
+ if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
+ glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
return nil
}
+ return body
+}
- return GetBlockByHash(db, common.BytesToHash(key))
+// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
+// none found.
+func GetTd(db common.Database, hash common.Hash) *big.Int {
+ data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
+ if len(data) == 0 {
+ return nil
+ }
+ td := new(big.Int)
+ if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
+ glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err)
+ return nil
+ }
+ return td
}
-// WriteCanonNumber writes the canonical hash for the given block
-func WriteCanonNumber(db common.Database, block *types.Block) error {
- key := append(blockNumPre, block.Number().Bytes()...)
- err := db.Put(key, block.Hash().Bytes())
- if err != nil {
+// GetBlock retrieves an entire block corresponding to the hash, assembling it
+// back from the stored header and body.
+func GetBlock(db common.Database, hash common.Hash) *types.Block {
+ // Retrieve the block header and body contents
+ header := GetHeader(db, hash)
+ if header == nil {
+ return nil
+ }
+ body := GetBody(db, hash)
+ if body == nil {
+ return nil
+ }
+ // Reassemble the block and return
+ return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
+}
+
+// WriteCanonicalHash stores the canonical hash for the given block number.
+func WriteCanonicalHash(db common.Database, hash common.Hash, number uint64) error {
+ key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
+ if err := db.Put(key, hash.Bytes()); err != nil {
+ glog.Fatalf("failed to store number to hash mapping into database: %v", err)
return err
}
return nil
}
-// WriteHead force writes the current head
-func WriteHead(db common.Database, block *types.Block) error {
- err := WriteCanonNumber(db, block)
- if err != nil {
+// WriteHeadHeaderHash stores the head header's hash.
+func WriteHeadHeaderHash(db common.Database, hash common.Hash) error {
+ if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
+ glog.Fatalf("failed to store last header's hash into database: %v", err)
return err
}
- err = db.Put([]byte("LastBlock"), block.Hash().Bytes())
- if err != nil {
+ return nil
+}
+
+// WriteHeadBlockHash stores the head block's hash.
+func WriteHeadBlockHash(db common.Database, hash common.Hash) error {
+ if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
+ glog.Fatalf("failed to store last block's hash into database: %v", err)
return err
}
return nil
}
-// WriteBlock writes a block to the database
-func WriteBlock(db common.Database, block *types.Block) error {
- tstart := time.Now()
+// WriteHeader serializes a block header into the database.
+func WriteHeader(db common.Database, header *types.Header) error {
+ data, err := rlp.EncodeToBytes(header)
+ if err != nil {
+ return err
+ }
+ key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...)
+ if err := db.Put(key, data); err != nil {
+ glog.Fatalf("failed to store header into database: %v", err)
+ return err
+ }
+ glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4])
+ return nil
+}
- enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
- key := append(blockHashPre, block.Hash().Bytes()...)
- err := db.Put(key, enc)
+// WriteBody serializes the body of a block into the database.
+func WriteBody(db common.Database, hash common.Hash, body *types.Body) error {
+ data, err := rlp.EncodeToBytes(body)
if err != nil {
- glog.Fatal("db write fail:", err)
return err
}
+ key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...)
+ if err := db.Put(key, data); err != nil {
+ glog.Fatalf("failed to store block body into database: %v", err)
+ return err
+ }
+ glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4])
+ return nil
+}
- if glog.V(logger.Debug) {
- glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
+// WriteTd serializes the total difficulty of a block into the database.
+func WriteTd(db common.Database, hash common.Hash, td *big.Int) error {
+ data, err := rlp.EncodeToBytes(td)
+ if err != nil {
+ return err
}
+ key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...)
+ if err := db.Put(key, data); err != nil {
+ glog.Fatalf("failed to store block total difficulty into database: %v", err)
+ return err
+ }
+ glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)
+ return nil
+}
+// WriteBlock serializes a block into the database, header and body separately.
+func WriteBlock(db common.Database, block *types.Block) error {
+ // Store the body first to retain database consistency
+ if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
+ return err
+ }
+ // Store the header too, signaling full block ownership
+ if err := WriteHeader(db, block.Header()); err != nil {
+ return err
+ }
return nil
}
+
+// DeleteCanonicalHash removes the number to hash canonical mapping.
+func DeleteCanonicalHash(db common.Database, number uint64) {
+ db.Delete(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...))
+}
+
+// DeleteHeader removes all block header data associated with a hash.
+func DeleteHeader(db common.Database, hash common.Hash) {
+ db.Delete(append(append(blockPrefix, hash.Bytes()...), headerSuffix...))
+}
+
+// DeleteBody removes all block body data associated with a hash.
+func DeleteBody(db common.Database, hash common.Hash) {
+ db.Delete(append(append(blockPrefix, hash.Bytes()...), bodySuffix...))
+}
+
+// DeleteTd removes all block total difficulty data associated with a hash.
+func DeleteTd(db common.Database, hash common.Hash) {
+ db.Delete(append(append(blockPrefix, hash.Bytes()...), tdSuffix...))
+}
+
+// DeleteBlock removes all block data associated with a hash.
+func DeleteBlock(db common.Database, hash common.Hash) {
+ DeleteHeader(db, hash)
+ DeleteBody(db, hash)
+ DeleteTd(db, hash)
+}
+
+// [deprecated by eth/63]
+// GetBlockByHashOld returns the old combined block corresponding to the hash
+// or nil if not found. This method is only used by the upgrade mechanism to
+// access the old combined block representation. It will be dropped after the
+// network transitions to eth/63.
+func GetBlockByHashOld(db common.Database, hash common.Hash) *types.Block {
+ data, _ := db.Get(append(blockHashPre, hash[:]...))
+ if len(data) == 0 {
+ return nil
+ }
+ var block types.StorageBlock
+ if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
+ glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
+ return nil
+ }
+ return (*types.Block)(&block)
+}
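The split schema above stores each block component under its own key. Roughly: a block with hash H lives at block-H-header, block-H-body and block-H-td, the canonical index maps block-num-N to H, and LastHeader/LastBlock track the two heads. A minimal sketch of the key construction, mirroring the prefixes and suffixes defined above:

package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

var (
	blockPrefix    = []byte("block-")
	blockNumPrefix = []byte("block-num-")
	headerSuffix   = []byte("-header")
	bodySuffix     = []byte("-body")
	tdSuffix       = []byte("-td")
)

// headerKey returns the database key holding a block's RLP-encoded header.
func headerKey(hash common.Hash) []byte {
	return append(append(blockPrefix, hash.Bytes()...), headerSuffix...)
}

// bodyKey returns the key holding a block's RLP-encoded body.
func bodyKey(hash common.Hash) []byte {
	return append(append(blockPrefix, hash.Bytes()...), bodySuffix...)
}

// tdKey returns the key holding a block's total difficulty.
func tdKey(hash common.Hash) []byte {
	return append(append(blockPrefix, hash.Bytes()...), tdSuffix...)
}

// canonicalKey returns the key mapping a canonical block number to its hash.
func canonicalKey(number uint64) []byte {
	return append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)
}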
diff --git a/core/chain_util_test.go b/core/chain_util_test.go
index 4bbe81194..3f0446715 100644
--- a/core/chain_util_test.go
+++ b/core/chain_util_test.go
@@ -23,6 +23,10 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto/sha3"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
)
type diffTest struct {
@@ -75,3 +79,242 @@ func TestDifficulty(t *testing.T) {
}
}
}
+
+// Tests block header storage and retrieval operations.
+func TestHeaderStorage(t *testing.T) {
+ db, _ := ethdb.NewMemDatabase()
+
+ // Create a test header to move around the database and make sure it's really new
+ header := &types.Header{Extra: []byte("test header")}
+ if entry := GetHeader(db, header.Hash()); entry != nil {
+ t.Fatalf("Non existent header returned: %v", entry)
+ }
+ // Write and verify the header in the database
+ if err := WriteHeader(db, header); err != nil {
+ t.Fatalf("Failed to write header into database: %v", err)
+ }
+ if entry := GetHeader(db, header.Hash()); entry == nil {
+ t.Fatalf("Stored header not found")
+ } else if entry.Hash() != header.Hash() {
+ t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
+ }
+ if entry := GetHeaderRLP(db, header.Hash()); entry == nil {
+ t.Fatalf("Stored header RLP not found")
+ } else {
+ hasher := sha3.NewKeccak256()
+ hasher.Write(entry)
+
+ if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
+ t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
+ }
+ }
+ // Delete the header and verify the execution
+ DeleteHeader(db, header.Hash())
+ if entry := GetHeader(db, header.Hash()); entry != nil {
+ t.Fatalf("Deleted header returned: %v", entry)
+ }
+}
+
+// Tests block body storage and retrieval operations.
+func TestBodyStorage(t *testing.T) {
+ db, _ := ethdb.NewMemDatabase()
+
+ // Create a test body to move around the database and make sure it's really new
+ body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
+
+ hasher := sha3.NewKeccak256()
+ rlp.Encode(hasher, body)
+ hash := common.BytesToHash(hasher.Sum(nil))
+
+ if entry := GetBody(db, hash); entry != nil {
+ t.Fatalf("Non existent body returned: %v", entry)
+ }
+ // Write and verify the body in the database
+ if err := WriteBody(db, hash, body); err != nil {
+ t.Fatalf("Failed to write body into database: %v", err)
+ }
+ if entry := GetBody(db, hash); entry == nil {
+ t.Fatalf("Stored body not found")
+ } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
+ t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
+ }
+ if entry := GetBodyRLP(db, hash); entry == nil {
+ t.Fatalf("Stored body RLP not found")
+ } else {
+ hasher := sha3.NewKeccak256()
+ hasher.Write(entry)
+
+ if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
+ t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
+ }
+ }
+ // Delete the body and verify the execution
+ DeleteBody(db, hash)
+ if entry := GetBody(db, hash); entry != nil {
+ t.Fatalf("Deleted body returned: %v", entry)
+ }
+}
+
+// Tests block storage and retrieval operations.
+func TestBlockStorage(t *testing.T) {
+ db, _ := ethdb.NewMemDatabase()
+
+ // Create a test block to move around the database and make sure it's really new
+ block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
+ if entry := GetBlock(db, block.Hash()); entry != nil {
+ t.Fatalf("Non existent block returned: %v", entry)
+ }
+ if entry := GetHeader(db, block.Hash()); entry != nil {
+ t.Fatalf("Non existent header returned: %v", entry)
+ }
+ if entry := GetBody(db, block.Hash()); entry != nil {
+ t.Fatalf("Non existent body returned: %v", entry)
+ }
+ // Write and verify the block in the database
+ if err := WriteBlock(db, block); err != nil {
+ t.Fatalf("Failed to write block into database: %v", err)
+ }
+ if entry := GetBlock(db, block.Hash()); entry == nil {
+ t.Fatalf("Stored block not found")
+ } else if entry.Hash() != block.Hash() {
+ t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
+ }
+ if entry := GetHeader(db, block.Hash()); entry == nil {
+ t.Fatalf("Stored header not found")
+ } else if entry.Hash() != block.Header().Hash() {
+ t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
+ }
+ if entry := GetBody(db, block.Hash()); entry == nil {
+ t.Fatalf("Stored body not found")
+ } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
+ t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, &types.Body{block.Transactions(), block.Uncles()})
+ }
+ // Delete the block and verify the execution
+ DeleteBlock(db, block.Hash())
+ if entry := GetBlock(db, block.Hash()); entry != nil {
+ t.Fatalf("Deleted block returned: %v", entry)
+ }
+ if entry := GetHeader(db, block.Hash()); entry != nil {
+ t.Fatalf("Deleted header returned: %v", entry)
+ }
+ if entry := GetBody(db, block.Hash()); entry != nil {
+ t.Fatalf("Deleted body returned: %v", entry)
+ }
+}
+
+// Tests that partial block contents don't get reassembled into full blocks.
+func TestPartialBlockStorage(t *testing.T) {
+ db, _ := ethdb.NewMemDatabase()
+ block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
+
+ // Store a header and check that it's not recognized as a block
+ if err := WriteHeader(db, block.Header()); err != nil {
+ t.Fatalf("Failed to write header into database: %v", err)
+ }
+ if entry := GetBlock(db, block.Hash()); entry != nil {
+ t.Fatalf("Non existent block returned: %v", entry)
+ }
+ DeleteHeader(db, block.Hash())
+
+ // Store a body and check that it's not recognized as a block
+ if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
+ t.Fatalf("Failed to write body into database: %v", err)
+ }
+ if entry := GetBlock(db, block.Hash()); entry != nil {
+ t.Fatalf("Non existent block returned: %v", entry)
+ }
+ DeleteBody(db, block.Hash())
+
+ // Store a header and a body separately and check reassembly
+ if err := WriteHeader(db, block.Header()); err != nil {
+ t.Fatalf("Failed to write header into database: %v", err)
+ }
+ if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
+ t.Fatalf("Failed to write body into database: %v", err)
+ }
+ if entry := GetBlock(db, block.Hash()); entry == nil {
+ t.Fatalf("Stored block not found")
+ } else if entry.Hash() != block.Hash() {
+ t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
+ }
+}
+
+// Tests block total difficulty storage and retrieval operations.
+func TestTdStorage(t *testing.T) {
+ db, _ := ethdb.NewMemDatabase()
+
+ // Create a test TD to move around the database and make sure it's really new
+ hash, td := common.Hash{}, big.NewInt(314)
+ if entry := GetTd(db, hash); entry != nil {
+ t.Fatalf("Non existent TD returned: %v", entry)
+ }
+ // Write and verify the TD in the database
+ if err := WriteTd(db, hash, td); err != nil {
+ t.Fatalf("Failed to write TD into database: %v", err)
+ }
+ if entry := GetTd(db, hash); entry == nil {
+ t.Fatalf("Stored TD not found")
+ } else if entry.Cmp(td) != 0 {
+ t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
+ }
+ // Delete the TD and verify the execution
+ DeleteTd(db, hash)
+ if entry := GetTd(db, hash); entry != nil {
+ t.Fatalf("Deleted TD returned: %v", entry)
+ }
+}
+
+// Tests that canonical numbers can be mapped to hashes and retrieved.
+func TestCanonicalMappingStorage(t *testing.T) {
+ db, _ := ethdb.NewMemDatabase()
+
+	// Create a test canonical number and assigned hash to move around
+ hash, number := common.Hash{0: 0xff}, uint64(314)
+ if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
+ t.Fatalf("Non existent canonical mapping returned: %v", entry)
+ }
+	// Write and verify the canonical mapping in the database
+ if err := WriteCanonicalHash(db, hash, number); err != nil {
+ t.Fatalf("Failed to write canonical mapping into database: %v", err)
+ }
+ if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) {
+ t.Fatalf("Stored canonical mapping not found")
+ } else if entry != hash {
+ t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
+ }
+	// Delete the canonical mapping and verify the execution
+ DeleteCanonicalHash(db, number)
+ if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
+ t.Fatalf("Deleted canonical mapping returned: %v", entry)
+ }
+}
+
+// Tests that head headers and head blocks can be assigned, individually.
+func TestHeadStorage(t *testing.T) {
+ db, _ := ethdb.NewMemDatabase()
+
+ blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
+ blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
+
+ // Check that no head entries are in a pristine database
+ if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
+		t.Fatalf("Non existent head header entry returned: %v", entry)
+ }
+ if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
+		t.Fatalf("Non existent head block entry returned: %v", entry)
+ }
+ // Assign separate entries for the head header and block
+ if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
+ t.Fatalf("Failed to write head header hash: %v", err)
+ }
+ if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
+ t.Fatalf("Failed to write head block hash: %v", err)
+ }
+ // Check that both heads are present, and different (i.e. two heads maintained)
+ if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
+ t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
+ }
+ if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
+ t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
+ }
+}
diff --git a/core/genesis.go b/core/genesis.go
index 7d4e03c99..3a8f0af0c 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -82,28 +82,29 @@ func WriteGenesisBlock(chainDb common.Database, reader io.Reader) (*types.Block,
Coinbase: common.HexToAddress(genesis.Coinbase),
Root: statedb.Root(),
}, nil, nil, nil)
- block.Td = difficulty
- if block := GetBlockByHash(chainDb, block.Hash()); block != nil {
+ if block := GetBlock(chainDb, block.Hash()); block != nil {
glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number")
- err := WriteCanonNumber(chainDb, block)
+ err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
if err != nil {
return nil, err
}
return block, nil
}
-
statedb.Sync()
- err = WriteBlock(chainDb, block)
- if err != nil {
+ if err := WriteTd(chainDb, block.Hash(), difficulty); err != nil {
return nil, err
}
- err = WriteHead(chainDb, block)
- if err != nil {
+ if err := WriteBlock(chainDb, block); err != nil {
+ return nil, err
+ }
+ if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {
+ return nil, err
+ }
+ if err := WriteHeadBlockHash(chainDb, block.Hash()); err != nil {
return nil, err
}
-
return block, nil
}
@@ -120,7 +121,6 @@ func GenesisBlockForTesting(db common.Database, addr common.Address, balance *bi
GasLimit: params.GenesisGasLimit,
Root: statedb.Root(),
}, nil, nil, nil)
- block.Td = params.GenesisDifficulty
return block
}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 0af0fbd5a..69c64ae40 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -87,10 +87,6 @@ type StateObject struct {
dirty bool
}
-func (self *StateObject) Reset() {
- self.storage = make(Storage)
-}
-
func NewStateObject(address common.Address, db common.Database) *StateObject {
object := &StateObject{db: db, address: address, balance: new(big.Int), gasPool: new(big.Int), dirty: true}
object.trie = trie.NewSecure((common.Hash{}).Bytes(), db)
@@ -184,14 +180,6 @@ func (self *StateObject) Update() {
}
}
-func (c *StateObject) GetInstr(pc *big.Int) *common.Value {
- if int64(len(c.code)-1) < pc.Int64() {
- return common.NewValue(0)
- }
-
- return common.NewValueFromBytes([]byte{c.code[pc.Int64()]})
-}
-
func (c *StateObject) AddBalance(amount *big.Int) {
c.SetBalance(new(big.Int).Add(c.balance, amount))
@@ -268,10 +256,6 @@ func (self *StateObject) Copy() *StateObject {
return stateObject
}
-func (self *StateObject) Set(stateObject *StateObject) {
- *self = *stateObject
-}
-
//
// Attribute accessors
//
@@ -280,20 +264,11 @@ func (self *StateObject) Balance() *big.Int {
return self.balance
}
-func (c *StateObject) N() *big.Int {
- return big.NewInt(int64(c.nonce))
-}
-
// Returns the address of the contract/account
func (c *StateObject) Address() common.Address {
return c.address
}
-// Returns the initialization Code
-func (c *StateObject) Init() Code {
- return c.initCode
-}
-
func (self *StateObject) Trie() *trie.SecureTrie {
return self.trie
}
@@ -311,11 +286,6 @@ func (self *StateObject) SetCode(code []byte) {
self.dirty = true
}
-func (self *StateObject) SetInitCode(code []byte) {
- self.initCode = code
- self.dirty = true
-}
-
func (self *StateObject) SetNonce(nonce uint64) {
self.nonce = nonce
self.dirty = true
@@ -354,19 +324,6 @@ func (c *StateObject) CodeHash() common.Bytes {
return crypto.Sha3(c.code)
}
-func (c *StateObject) RlpDecode(data []byte) {
- decoder := common.NewValueFromBytes(data)
- c.nonce = decoder.Get(0).Uint()
- c.balance = decoder.Get(1).BigInt()
- c.trie = trie.NewSecure(decoder.Get(2).Bytes(), c.db)
- c.storage = make(map[string]common.Hash)
- c.gasPool = new(big.Int)
-
- c.codeHash = decoder.Get(3).Bytes()
-
- c.code, _ = c.db.Get(c.codeHash)
-}
-
// Storage change object. Used by the manifest for notifying changes to
// the sub channels.
type StorageState struct {
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 577f7162e..b754f0887 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -18,7 +18,6 @@
package state
import (
- "bytes"
"math/big"
"github.com/ethereum/go-ethereum/common"
@@ -276,10 +275,6 @@ func (self *StateDB) CreateAccount(addr common.Address) *StateObject {
// Setting, copying of the state methods
//
-func (s *StateDB) Cmp(other *StateDB) bool {
- return bytes.Equal(s.trie.Root(), other.trie.Root())
-}
-
func (self *StateDB) Copy() *StateDB {
state := New(common.Hash{}, self.db)
state.trie = self.trie
@@ -311,22 +306,6 @@ func (s *StateDB) Root() common.Hash {
return common.BytesToHash(s.trie.Root())
}
-func (s *StateDB) Trie() *trie.SecureTrie {
- return s.trie
-}
-
-// Resets the trie and all siblings
-func (s *StateDB) Reset() {
- s.trie.Reset()
-
- // Reset all nested states
- for _, stateObject := range s.stateObjects {
- stateObject.Reset()
- }
-
- s.Empty()
-}
-
// Syncs the trie and all siblings
func (s *StateDB) Sync() {
// Sync all nested states
diff --git a/core/types/block.go b/core/types/block.go
index fd81db04c..7a84045a6 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -117,6 +117,13 @@ func rlpHash(x interface{}) (h common.Hash) {
return h
}
+// Body is a simple (mutable, non-safe) data container for storing and moving
+// a block's data contents (transactions and uncles) together.
+type Body struct {
+ Transactions []*Transaction
+ Uncles []*Header
+}
+
type Block struct {
header *Header
uncles []*Header
@@ -129,12 +136,20 @@ type Block struct {
// Td is used by package core to store the total difficulty
// of the chain up to and including the block.
- Td *big.Int
+ td *big.Int
// ReceivedAt is used by package eth to track block propagation time.
ReceivedAt time.Time
}
+// DeprecatedTd is an old relic for extracting the TD of a block. It is in the
+// code solely to facilitate upgrading the database from the old format to the
+// new, after which it should be deleted. Do not use!
+func (b *Block) DeprecatedTd() *big.Int {
+ return b.td
+}
+
+// [deprecated by eth/63]
// StorageBlock defines the RLP encoding of a Block stored in the
// state database. The StorageBlock encoding contains fields that
// would otherwise need to be recomputed.
@@ -147,6 +162,7 @@ type extblock struct {
Uncles []*Header
}
+// [deprecated by eth/63]
// "storage" block encoding. used for database.
type storageblock struct {
Header *Header
@@ -168,7 +184,7 @@ var (
// are ignored and set to values derived from the given txs, uncles
// and receipts.
func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
- b := &Block{header: copyHeader(header), Td: new(big.Int)}
+ b := &Block{header: copyHeader(header), td: new(big.Int)}
// TODO: panic if len(txs) != len(receipts)
if len(txs) == 0 {
@@ -268,24 +284,16 @@ func (b *Block) EncodeRLP(w io.Writer) error {
})
}
+// [deprecated by eth/63]
func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
var sb storageblock
if err := s.Decode(&sb); err != nil {
return err
}
- b.header, b.uncles, b.transactions, b.Td = sb.Header, sb.Uncles, sb.Txs, sb.TD
+ b.header, b.uncles, b.transactions, b.td = sb.Header, sb.Uncles, sb.Txs, sb.TD
return nil
}
-func (b *StorageBlock) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, storageblock{
- Header: b.header,
- Txs: b.transactions,
- Uncles: b.uncles,
- TD: b.Td,
- })
-}
-
// TODO: copies
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
@@ -356,7 +364,6 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
transactions: b.transactions,
receipts: b.receipts,
uncles: b.uncles,
- Td: b.Td,
}
}
@@ -386,7 +393,7 @@ func (b *Block) Hash() common.Hash {
}
func (b *Block) String() string {
- str := fmt.Sprintf(`Block(#%v): Size: %v TD: %v {
+ str := fmt.Sprintf(`Block(#%v): Size: %v {
MinerHash: %x
%v
Transactions:
@@ -394,7 +401,7 @@ Transactions:
Uncles:
%v
}
-`, b.Number(), b.Size(), b.Td, b.header.HashNoNonce(), b.header, b.transactions, b.uncles)
+`, b.Number(), b.Size(), b.header.HashNoNonce(), b.header, b.transactions, b.uncles)
return str
}
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 28a7e02b3..8260d7423 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -33,10 +33,6 @@ import (
var ErrInvalidSig = errors.New("invalid v, r, s values")
-func IsContractAddr(addr []byte) bool {
- return len(addr) == 0
-}
-
type Transaction struct {
data txdata
// caches
diff --git a/core/vm/errors.go b/core/vm/errors.go
index 24567e9a1..e2fc84065 100644
--- a/core/vm/errors.go
+++ b/core/vm/errors.go
@@ -25,20 +25,3 @@ import (
var OutOfGasError = errors.New("Out of gas")
var DepthError = fmt.Errorf("Max call depth exceeded (%d)", params.CallCreateDepth)
-
-type StackError struct {
- req, has int
-}
-
-func StackErr(req, has int) StackError {
- return StackError{req, has}
-}
-
-func (self StackError) Error() string {
- return fmt.Sprintf("stack error! require %v, have %v", self.req, self.has)
-}
-
-func IsStackErr(err error) bool {
- _, ok := err.(StackError)
- return ok
-}
diff --git a/crypto/crypto.go b/crypto/crypto.go
index a474d6f13..b3a8d730b 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -33,12 +33,12 @@ import (
"encoding/json"
"errors"
- "code.google.com/p/go-uuid/uuid"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/pborman/uuid"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/ripemd160"
)
diff --git a/crypto/key.go b/crypto/key.go
index d80b99759..35139b67f 100644
--- a/crypto/key.go
+++ b/crypto/key.go
@@ -23,8 +23,8 @@ import (
"encoding/json"
"io"
- "code.google.com/p/go-uuid/uuid"
"github.com/ethereum/go-ethereum/common"
+ "github.com/pborman/uuid"
)
const (
diff --git a/crypto/key_store_passphrase.go b/crypto/key_store_passphrase.go
index f21af8dd9..c7ee00987 100644
--- a/crypto/key_store_passphrase.go
+++ b/crypto/key_store_passphrase.go
@@ -36,9 +36,9 @@ import (
"io"
"reflect"
- "code.google.com/p/go-uuid/uuid"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/randentropy"
+ "github.com/pborman/uuid"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt"
)
diff --git a/eth/backend.go b/eth/backend.go
index 639aaaaec..deb6d3d0f 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -18,6 +18,7 @@
package eth
import (
+ "bytes"
"crypto/ecdsa"
"encoding/json"
"fmt"
@@ -269,11 +270,7 @@ func New(config *Config) (*Ethereum, error) {
newdb = func(path string) (common.Database, error) { return ethdb.NewLDBDatabase(path, config.DatabaseCache) }
}
- // attempt to merge database together, upgrading from an old version
- if err := mergeDatabases(config.DataDir, newdb); err != nil {
- return nil, err
- }
-
+ // Open the chain database and perform any upgrades needed
chainDb, err := newdb(filepath.Join(config.DataDir, "chaindata"))
if err != nil {
return nil, fmt.Errorf("blockchain db err: %v", err)
@@ -281,6 +278,10 @@ func New(config *Config) (*Ethereum, error) {
if db, ok := chainDb.(*ethdb.LDBDatabase); ok {
db.Meter("eth/db/chaindata/")
}
+ if err := upgradeChainDatabase(chainDb); err != nil {
+ return nil, err
+ }
+
dappDb, err := newdb(filepath.Join(config.DataDir, "dapp"))
if err != nil {
return nil, fmt.Errorf("dapp db err: %v", err)
@@ -315,9 +316,13 @@ func New(config *Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
- case config.GenesisBlock != nil: // This is for testing only.
+ }
+ // This is for testing only.
+ if config.GenesisBlock != nil {
+ core.WriteTd(chainDb, config.GenesisBlock.Hash(), config.GenesisBlock.Difficulty())
core.WriteBlock(chainDb, config.GenesisBlock)
- core.WriteHead(chainDb, config.GenesisBlock)
+ core.WriteCanonicalHash(chainDb, config.GenesisBlock.Hash(), config.GenesisBlock.NumberU64())
+ core.WriteHeadBlockHash(chainDb, config.GenesisBlock.Hash())
}
if !config.SkipBcVersionCheck {
@@ -721,74 +726,61 @@ func saveBlockchainVersion(db common.Database, bcVersion int) {
}
}
-// mergeDatabases when required merge old database layout to one single database
-func mergeDatabases(datadir string, newdb func(path string) (common.Database, error)) error {
- // Check if already upgraded
- data := filepath.Join(datadir, "chaindata")
- if _, err := os.Stat(data); !os.IsNotExist(err) {
- return nil
- }
- // make sure it's not just a clean path
- chainPath := filepath.Join(datadir, "blockchain")
- if _, err := os.Stat(chainPath); os.IsNotExist(err) {
+// upgradeChainDatabase ensures that the chain database stores blocks split
+// into separate header and body entries.
+func upgradeChainDatabase(db common.Database) error {
+	// Short circuit if the head block is already stored as separate header and body entries
+ data, err := db.Get([]byte("LastBlock"))
+ if err != nil {
return nil
}
- glog.Infoln("Database upgrade required. Upgrading...")
+ head := common.BytesToHash(data)
- database, err := newdb(data)
- if err != nil {
- return fmt.Errorf("creating data db err: %v", err)
+ if block := core.GetBlockByHashOld(db, head); block == nil {
+ return nil
}
- defer database.Close()
+	// At least some of the database is still in the old format, upgrade (skip the head block!)
+ glog.V(logger.Info).Info("Old database detected, upgrading...")
- // Migrate blocks
- chainDb, err := newdb(chainPath)
- if err != nil {
- return fmt.Errorf("state db err: %v", err)
- }
- defer chainDb.Close()
+ if db, ok := db.(*ethdb.LDBDatabase); ok {
+ blockPrefix := []byte("block-hash-")
+ for it := db.NewIterator(); it.Next(); {
+ // Skip anything other than a combined block
+ if !bytes.HasPrefix(it.Key(), blockPrefix) {
+ continue
+ }
+			// Skip the head block (it is migrated last to signal upgrade completion)
+ if bytes.HasSuffix(it.Key(), head.Bytes()) {
+ continue
+ }
+ // Load the block, split and serialize (order!)
+ block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix)))
- if chain, ok := chainDb.(*ethdb.LDBDatabase); ok {
- glog.Infoln("Merging blockchain database...")
- it := chain.NewIterator()
- for it.Next() {
- database.Put(it.Key(), it.Value())
+ if err := core.WriteTd(db, block.Hash(), block.DeprecatedTd()); err != nil {
+ return err
+ }
+ if err := core.WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
+ return err
+ }
+ if err := core.WriteHeader(db, block.Header()); err != nil {
+ return err
+ }
+ if err := db.Delete(it.Key()); err != nil {
+ return err
+ }
}
- it.Release()
- }
-
- // Migrate state
- stateDb, err := newdb(filepath.Join(datadir, "state"))
- if err != nil {
- return fmt.Errorf("state db err: %v", err)
- }
- defer stateDb.Close()
+ // Lastly, upgrade the head block, disabling the upgrade mechanism
+ current := core.GetBlockByHashOld(db, head)
- if state, ok := stateDb.(*ethdb.LDBDatabase); ok {
- glog.Infoln("Merging state database...")
- it := state.NewIterator()
- for it.Next() {
- database.Put(it.Key(), it.Value())
+ if err := core.WriteTd(db, current.Hash(), current.DeprecatedTd()); err != nil {
+ return err
}
- it.Release()
- }
-
- // Migrate transaction / receipts
- extraDb, err := newdb(filepath.Join(datadir, "extra"))
- if err != nil {
- return fmt.Errorf("state db err: %v", err)
- }
- defer extraDb.Close()
-
- if extra, ok := extraDb.(*ethdb.LDBDatabase); ok {
- glog.Infoln("Merging transaction database...")
-
- it := extra.NewIterator()
- for it.Next() {
- database.Put(it.Key(), it.Value())
+ if err := core.WriteBody(db, current.Hash(), &types.Body{current.Transactions(), current.Uncles()}); err != nil {
+ return err
+ }
+ if err := core.WriteHeader(db, current.Header()); err != nil {
+ return err
}
- it.Release()
}
-
return nil
}
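
As a reading aid, a hypothetical helper (the name migrateBlock is not in the diff) condensing the per-block rewrite step of upgradeChainDatabase; the write order follows the code above: TD, then body, then header, and only afterwards is the old combined entry removed.

package upgrade // hypothetical package name, for illustration only

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// migrateBlock rewrites one old combined block into the new split entries and
// deletes the legacy record once the new entries are safely in place.
func migrateBlock(db common.Database, oldKey []byte, block *types.Block) error {
	if err := core.WriteTd(db, block.Hash(), block.DeprecatedTd()); err != nil {
		return err
	}
	if err := core.WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
		return err
	}
	if err := core.WriteHeader(db, block.Header()); err != nil {
		return err
	}
	// Remove the old combined entry only after the split entries exist.
	db.Delete(oldKey)
	return nil
}
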
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 73f95bf64..d28985b3e 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -87,6 +87,9 @@ type blockRetrievalFn func(common.Hash) *types.Block
// headRetrievalFn is a callback type for retrieving the head block from the local chain.
type headRetrievalFn func() *types.Block
+// tdRetrievalFn is a callback type for retrieving the total difficulty of a local block.
+type tdRetrievalFn func(common.Hash) *big.Int
+
// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) (int, error)
@@ -136,6 +139,7 @@ type Downloader struct {
hasBlock hashCheckFn // Checks if a block is present in the chain
getBlock blockRetrievalFn // Retrieves a block from the chain
headBlock headRetrievalFn // Retrieves the head block from the chain
+ getTd tdRetrievalFn // Retrieves the TD of a block from the chain
insertChain chainInsertFn // Injects a batch of blocks into the chain
dropPeer peerDropFn // Drops a peer for misbehaving
@@ -168,7 +172,7 @@ type Block struct {
}
// New creates a new downloader to fetch hashes and blocks from remote peers.
-func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, headBlock headRetrievalFn, insertChain chainInsertFn, dropPeer peerDropFn) *Downloader {
+func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, headBlock headRetrievalFn, getTd tdRetrievalFn, insertChain chainInsertFn, dropPeer peerDropFn) *Downloader {
return &Downloader{
mux: mux,
queue: newQueue(),
@@ -176,6 +180,7 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, he
hasBlock: hasBlock,
getBlock: getBlock,
headBlock: headBlock,
+ getTd: getTd,
insertChain: insertChain,
dropPeer: dropPeer,
newPeerCh: make(chan *peer, 1),
@@ -582,7 +587,7 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
// L: Sync begins, and finds common ancestor at 11
// L: Request new hashes up from 11 (R's TD was higher, it must have something)
// R: Nothing to give
- if !gotHashes && td.Cmp(d.headBlock().Td) > 0 {
+ if !gotHashes && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
return errStallingPeer
}
return nil
@@ -958,7 +963,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
// L: Sync begins, and finds common ancestor at 11
// L: Request new headers up from 11 (R's TD was higher, it must have something)
// R: Nothing to give
- if !gotHeaders && td.Cmp(d.headBlock().Td) > 0 {
+ if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
return errStallingPeer
}
return nil
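
A minimal sketch of the widened constructor: the downloader now receives a TD lookup callback instead of reading block.Td. The stub callbacks below assume the callback signatures implied by the surrounding code (hashCheckFn returning a bool, peerDropFn taking a peer id); in eth/handler.go the real wiring passes chainman.GetTd as the new argument.

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/event"
)

func main() {
	// Stub callbacks standing in for the chain manager.
	hasBlock := func(common.Hash) bool { return false }
	getBlock := func(common.Hash) *types.Block { return nil }
	headBlock := func() *types.Block { return nil }
	getTd := func(common.Hash) *big.Int { return new(big.Int) } // new tdRetrievalFn
	insertChain := func(types.Blocks) (int, error) { return 0, nil }
	dropPeer := func(string) {}

	_ = downloader.New(new(event.TypeMux), hasBlock, getBlock, headBlock, getTd, insertChain, dropPeer)
}
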
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 8d009b671..dbcf93607 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -93,21 +93,25 @@ func makeChainFork(n, f int, parent *types.Block) (h1, h2 []common.Hash, b1, b2
type downloadTester struct {
downloader *Downloader
- ownHashes []common.Hash // Hash chain belonging to the tester
- ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester
- peerHashes map[string][]common.Hash // Hash chain belonging to different test peers
- peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers
+ ownHashes []common.Hash // Hash chain belonging to the tester
+ ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester
+ ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain
+ peerHashes map[string][]common.Hash // Hash chain belonging to different test peers
+ peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers
+ peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains
}
// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
tester := &downloadTester{
- ownHashes: []common.Hash{genesis.Hash()},
- ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
- peerHashes: make(map[string][]common.Hash),
- peerBlocks: make(map[string]map[common.Hash]*types.Block),
+ ownHashes: []common.Hash{genesis.Hash()},
+ ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
+ ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
+ peerHashes: make(map[string][]common.Hash),
+ peerBlocks: make(map[string]map[common.Hash]*types.Block),
+ peerChainTds: make(map[string]map[common.Hash]*big.Int),
}
- tester.downloader = New(new(event.TypeMux), tester.hasBlock, tester.getBlock, tester.headBlock, tester.insertChain, tester.dropPeer)
+ tester.downloader = New(new(event.TypeMux), tester.hasBlock, tester.getBlock, tester.headBlock, tester.getTd, tester.insertChain, tester.dropPeer)
return tester
}
@@ -119,8 +123,8 @@ func (dl *downloadTester) sync(id string, td *big.Int) error {
// If no particular TD was requested, load from the peer's blockchain
if td == nil {
td = big.NewInt(1)
- if block, ok := dl.peerBlocks[id][hash]; ok {
- td = block.Td
+ if diff, ok := dl.peerChainTds[id][hash]; ok {
+ td = diff
}
}
err := dl.downloader.synchronise(id, hash, td)
@@ -152,6 +156,11 @@ func (dl *downloadTester) headBlock() *types.Block {
return dl.getBlock(dl.ownHashes[len(dl.ownHashes)-1])
}
+// getTd retrieves the block's total difficulty from the canonical chain.
+func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
+ return dl.ownChainTd[hash]
+}
+
// insertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) {
for i, block := range blocks {
@@ -160,6 +169,7 @@ func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) {
}
dl.ownHashes = append(dl.ownHashes, block.Hash())
dl.ownBlocks[block.Hash()] = block
+ dl.ownChainTd[block.Hash()] = dl.ownChainTd[block.ParentHash()]
}
return len(blocks), nil
}
@@ -180,9 +190,16 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
// Assign the owned hashes and blocks to the peer (deep copy)
dl.peerHashes[id] = make([]common.Hash, len(hashes))
copy(dl.peerHashes[id], hashes)
+
dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
- for hash, block := range blocks {
- dl.peerBlocks[id][hash] = block
+ dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
+ for _, hash := range hashes {
+ if block, ok := blocks[hash]; ok {
+ dl.peerBlocks[id][hash] = block
+ if parent, ok := dl.peerBlocks[id][block.ParentHash()]; ok {
+ dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][parent.Hash()])
+ }
+ }
}
}
return err
@@ -192,6 +209,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
func (dl *downloadTester) dropPeer(id string) {
delete(dl.peerHashes, id)
delete(dl.peerBlocks, id)
+ delete(dl.peerChainTds, id)
dl.downloader.UnregisterPeer(id)
}
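
The test harness now tracks total difficulties explicitly per peer. A standalone sketch of the same accumulation rule it applies when registering a peer (a block's TD is its parent's TD plus the block's own difficulty), on a hand-made three-block chain:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Hand-make a tiny three-block chain linked through ParentHash.
	difficulty := big.NewInt(131072)
	genesis := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(0), Difficulty: difficulty})

	chain := []*types.Block{genesis}
	for i := 1; i <= 2; i++ {
		header := &types.Header{
			ParentHash: chain[i-1].Hash(),
			Number:     big.NewInt(int64(i)),
			Difficulty: difficulty,
		}
		chain = append(chain, types.NewBlockWithHeader(header))
	}
	// Accumulate total difficulties the way the tester does for each peer.
	tds := map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()}
	for _, block := range chain[1:] {
		tds[block.Hash()] = new(big.Int).Add(block.Difficulty(), tds[block.ParentHash()])
	}
	fmt.Println(tds[chain[len(chain)-1].Hash()]) // 393216
}
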
diff --git a/eth/handler.go b/eth/handler.go
index f22afecb7..4aef69043 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -36,8 +36,10 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
-// This is the target maximum size of returned blocks, headers or node data.
-const softResponseLimit = 2 * 1024 * 1024
+const (
+ softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
+ estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header
+)
func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
@@ -113,7 +115,7 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po
}
}
// Construct the different synchronisation mechanisms
- manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.InsertChain, manager.removePeer)
+ manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.GetTd, manager.chainman.InsertChain, manager.removePeer)
validator := func(block *types.Block, parent *types.Block) error {
return core.ValidateHeader(pow, block.Header(), parent, true, false)
@@ -345,33 +347,33 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if err := msg.Decode(&query); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
- // Gather blocks until the fetch or network limits is reached
+		// Gather headers until the fetch or network limits are reached
var (
bytes common.StorageSize
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
- // Retrieve the next block satisfying the query
- var origin *types.Block
+ // Retrieve the next header satisfying the query
+ var origin *types.Header
if query.Origin.Hash != (common.Hash{}) {
- origin = pm.chainman.GetBlock(query.Origin.Hash)
+ origin = pm.chainman.GetHeader(query.Origin.Hash)
} else {
- origin = pm.chainman.GetBlockByNumber(query.Origin.Number)
+ origin = pm.chainman.GetHeaderByNumber(query.Origin.Number)
}
if origin == nil {
break
}
- headers = append(headers, origin.Header())
- bytes += origin.Size()
+ headers = append(headers, origin)
+ bytes += estHeaderRlpSize
- // Advance to the next block of the query
+ // Advance to the next header of the query
switch {
case query.Origin.Hash != (common.Hash{}) && query.Reverse:
// Hash based traversal towards the genesis block
for i := 0; i < int(query.Skip)+1; i++ {
- if block := pm.chainman.GetBlock(query.Origin.Hash); block != nil {
- query.Origin.Hash = block.ParentHash()
+ if header := pm.chainman.GetHeader(query.Origin.Hash); header != nil {
+ query.Origin.Hash = header.ParentHash
} else {
unknown = true
break
@@ -379,9 +381,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
// Hash based traversal towards the leaf block
- if block := pm.chainman.GetBlockByNumber(origin.NumberU64() + query.Skip + 1); block != nil {
- if pm.chainman.GetBlockHashesFromHash(block.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
- query.Origin.Hash = block.Hash()
+ if header := pm.chainman.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil {
+ if pm.chainman.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
+ query.Origin.Hash = header.Hash()
} else {
unknown = true
}
@@ -452,23 +454,23 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Gather blocks until the fetch or network limits is reached
var (
hash common.Hash
- bytes common.StorageSize
- bodies []*blockBody
+ bytes int
+ bodies []rlp.RawValue
)
for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
- //Retrieve the hash of the next block
+ // Retrieve the hash of the next block
if err := msgStream.Decode(&hash); err == rlp.EOL {
break
} else if err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
- // Retrieve the requested block, stopping if enough was found
- if block := pm.chainman.GetBlock(hash); block != nil {
- bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})
- bytes += block.Size()
+ // Retrieve the requested block body, stopping if enough was found
+ if data := pm.chainman.GetBodyRLP(hash); len(data) != 0 {
+ bodies = append(bodies, data)
+ bytes += len(data)
}
}
- return p.SendBlockBodies(bodies)
+ return p.SendBlockBodiesRLP(bodies)
case p.version >= eth63 && msg.Code == GetNodeDataMsg:
// Decode the retrieval message
@@ -643,7 +645,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
var td *big.Int
if parent := pm.chainman.GetBlock(block.ParentHash()); parent != nil {
- td = new(big.Int).Add(parent.Td, block.Difficulty())
+ td = new(big.Int).Add(block.Difficulty(), pm.chainman.GetTd(block.ParentHash()))
} else {
glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4])
return
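
A sketch of the zero-decode body path introduced above, assuming core.GetBodyRLP returns the stored entry as a raw byte slice (as the chain_util tests suggest): bodies are collected straight from the database, and the accumulated byte count is what the softResponseLimit check in the handler caps.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	db, _ := ethdb.NewMemDatabase()

	// Store one body so there is something to serve.
	block := types.NewBlockWithHeader(&types.Header{Extra: []byte("body demo")})
	core.WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()})

	// Answer a (hypothetical) body request without decoding anything.
	var (
		bytes  int
		bodies []rlp.RawValue
	)
	for _, hash := range []common.Hash{block.Hash()} {
		if data := core.GetBodyRLP(db, hash); len(data) != 0 {
			bodies = append(bodies, rlp.RawValue(data))
			bytes += len(data)
		}
	}
	fmt.Println(len(bodies), "bodies,", bytes, "bytes")
}
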
diff --git a/eth/peer.go b/eth/peer.go
index 8d7c48885..603b49b88 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/rlp"
"gopkg.in/fatih/set.v0"
)
@@ -184,6 +185,12 @@ func (p *peer) SendBlockBodies(bodies []*blockBody) error {
return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesData(bodies))
}
+// SendBlockBodiesRLP sends a batch of block contents to the remote peer in an
+// already RLP-encoded format.
+func (p *peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
+ return p2p.Send(p.rw, BlockBodiesMsg, bodies)
+}
+
// SendNodeData sends a batch of arbitrary internal data, corresponding to the
// hashes requested.
func (p *peer) SendNodeData(data [][]byte) error {
diff --git a/miner/worker.go b/miner/worker.go
index 16a16931d..2f43b110f 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -283,7 +283,7 @@ func (self *worker) wait() {
continue
}
- stat, err := self.chain.WriteBlock(block, false)
+ stat, err := self.chain.WriteBlock(block)
if err != nil {
glog.V(logger.Error).Infoln("error writing block to chain", err)
continue
@@ -533,14 +533,12 @@ func (self *worker) commitNewWork() {
// create the new block whose nonce will be mined.
work.Block = types.NewBlock(header, work.txs, uncles, work.receipts)
- work.Block.Td = new(big.Int).Set(core.CalcTD(work.Block, self.chain.GetBlock(work.Block.ParentHash())))
// We only care about logging if we're actually mining.
if atomic.LoadInt32(&self.mining) == 1 {
glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles. Took %v\n", work.Block.Number(), work.tcount, len(uncles), time.Since(tstart))
self.logLocalMinedBlocks(work, previous)
}
-
self.push(work)
}
diff --git a/rpc/api/eth.go b/rpc/api/eth.go
index ba87e86c6..a93e41157 100644
--- a/rpc/api/eth.go
+++ b/rpc/api/eth.go
@@ -204,7 +204,8 @@ func (self *ethApi) GetBlockTransactionCountByHash(req *shared.Request) (interfa
return nil, shared.NewDecodeParamError(err.Error())
}
- block := NewBlockRes(self.xeth.EthBlockByHash(args.Hash), false)
+ raw := self.xeth.EthBlockByHash(args.Hash)
+ block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), false)
if block == nil {
return nil, nil
} else {
@@ -218,7 +219,8 @@ func (self *ethApi) GetBlockTransactionCountByNumber(req *shared.Request) (inter
return nil, shared.NewDecodeParamError(err.Error())
}
- block := NewBlockRes(self.xeth.EthBlockByNumber(args.BlockNumber), false)
+ raw := self.xeth.EthBlockByNumber(args.BlockNumber)
+ block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), false)
if block == nil {
return nil, nil
} else {
@@ -232,12 +234,12 @@ func (self *ethApi) GetUncleCountByBlockHash(req *shared.Request) (interface{},
return nil, shared.NewDecodeParamError(err.Error())
}
- block := self.xeth.EthBlockByHash(args.Hash)
- br := NewBlockRes(block, false)
- if br == nil {
+ raw := self.xeth.EthBlockByHash(args.Hash)
+ block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), false)
+ if block == nil {
return nil, nil
}
- return newHexNum(big.NewInt(int64(len(br.Uncles))).Bytes()), nil
+ return newHexNum(big.NewInt(int64(len(block.Uncles))).Bytes()), nil
}
func (self *ethApi) GetUncleCountByBlockNumber(req *shared.Request) (interface{}, error) {
@@ -246,12 +248,12 @@ func (self *ethApi) GetUncleCountByBlockNumber(req *shared.Request) (interface{}
return nil, shared.NewDecodeParamError(err.Error())
}
- block := self.xeth.EthBlockByNumber(args.BlockNumber)
- br := NewBlockRes(block, false)
- if br == nil {
+ raw := self.xeth.EthBlockByNumber(args.BlockNumber)
+ block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), false)
+ if block == nil {
return nil, nil
}
- return newHexNum(big.NewInt(int64(len(br.Uncles))).Bytes()), nil
+ return newHexNum(big.NewInt(int64(len(block.Uncles))).Bytes()), nil
}
func (self *ethApi) GetData(req *shared.Request) (interface{}, error) {
@@ -362,7 +364,7 @@ func (self *ethApi) GetBlockByHash(req *shared.Request) (interface{}, error) {
}
block := self.xeth.EthBlockByHash(args.BlockHash)
- return NewBlockRes(block, args.IncludeTxs), nil
+ return NewBlockRes(block, self.xeth.Td(block.Hash()), args.IncludeTxs), nil
}
func (self *ethApi) GetBlockByNumber(req *shared.Request) (interface{}, error) {
@@ -372,8 +374,7 @@ func (self *ethApi) GetBlockByNumber(req *shared.Request) (interface{}, error) {
}
block := self.xeth.EthBlockByNumber(args.BlockNumber)
- br := NewBlockRes(block, args.IncludeTxs)
- return br, nil
+ return NewBlockRes(block, self.xeth.Td(block.Hash()), args.IncludeTxs), nil
}
func (self *ethApi) GetTransactionByHash(req *shared.Request) (interface{}, error) {
@@ -402,16 +403,15 @@ func (self *ethApi) GetTransactionByBlockHashAndIndex(req *shared.Request) (inte
return nil, shared.NewDecodeParamError(err.Error())
}
- block := self.xeth.EthBlockByHash(args.Hash)
- br := NewBlockRes(block, true)
- if br == nil {
+ raw := self.xeth.EthBlockByHash(args.Hash)
+ block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), true)
+ if block == nil {
return nil, nil
}
-
- if args.Index >= int64(len(br.Transactions)) || args.Index < 0 {
+ if args.Index >= int64(len(block.Transactions)) || args.Index < 0 {
return nil, nil
} else {
- return br.Transactions[args.Index], nil
+ return block.Transactions[args.Index], nil
}
}
@@ -421,17 +421,16 @@ func (self *ethApi) GetTransactionByBlockNumberAndIndex(req *shared.Request) (in
return nil, shared.NewDecodeParamError(err.Error())
}
- block := self.xeth.EthBlockByNumber(args.BlockNumber)
- v := NewBlockRes(block, true)
- if v == nil {
+ raw := self.xeth.EthBlockByNumber(args.BlockNumber)
+ block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), true)
+ if block == nil {
return nil, nil
}
-
- if args.Index >= int64(len(v.Transactions)) || args.Index < 0 {
+ if args.Index >= int64(len(block.Transactions)) || args.Index < 0 {
// return NewValidationError("Index", "does not exist")
return nil, nil
}
- return v.Transactions[args.Index], nil
+ return block.Transactions[args.Index], nil
}
func (self *ethApi) GetUncleByBlockHashAndIndex(req *shared.Request) (interface{}, error) {
@@ -440,17 +439,16 @@ func (self *ethApi) GetUncleByBlockHashAndIndex(req *shared.Request) (interface{
return nil, shared.NewDecodeParamError(err.Error())
}
- br := NewBlockRes(self.xeth.EthBlockByHash(args.Hash), false)
- if br == nil {
+ raw := self.xeth.EthBlockByHash(args.Hash)
+ block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), false)
+ if block == nil {
return nil, nil
}
-
- if args.Index >= int64(len(br.Uncles)) || args.Index < 0 {
+ if args.Index >= int64(len(block.Uncles)) || args.Index < 0 {
// return NewValidationError("Index", "does not exist")
return nil, nil
}
-
- return br.Uncles[args.Index], nil
+ return block.Uncles[args.Index], nil
}
func (self *ethApi) GetUncleByBlockNumberAndIndex(req *shared.Request) (interface{}, error) {
@@ -459,17 +457,15 @@ func (self *ethApi) GetUncleByBlockNumberAndIndex(req *shared.Request) (interfac
return nil, shared.NewDecodeParamError(err.Error())
}
- block := self.xeth.EthBlockByNumber(args.BlockNumber)
- v := NewBlockRes(block, true)
-
- if v == nil {
+ raw := self.xeth.EthBlockByNumber(args.BlockNumber)
+ block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), true)
+ if block == nil {
return nil, nil
}
-
- if args.Index >= int64(len(v.Uncles)) || args.Index < 0 {
+ if args.Index >= int64(len(block.Uncles)) || args.Index < 0 {
return nil, nil
} else {
- return v.Uncles[args.Index], nil
+ return block.Uncles[args.Index], nil
}
}
diff --git a/rpc/api/parsing.go b/rpc/api/parsing.go
index 5858bc136..cdfaa0ed1 100644
--- a/rpc/api/parsing.go
+++ b/rpc/api/parsing.go
@@ -281,7 +281,7 @@ func (b *BlockRes) MarshalJSON() ([]byte, error) {
}
}
-func NewBlockRes(block *types.Block, fullTx bool) *BlockRes {
+func NewBlockRes(block *types.Block, td *big.Int, fullTx bool) *BlockRes {
if block == nil {
return nil
}
@@ -299,7 +299,7 @@ func NewBlockRes(block *types.Block, fullTx bool) *BlockRes {
res.ReceiptRoot = newHexData(block.ReceiptHash())
res.Miner = newHexData(block.Coinbase())
res.Difficulty = newHexNum(block.Difficulty())
- res.TotalDifficulty = newHexNum(block.Td)
+ res.TotalDifficulty = newHexNum(td)
res.Size = newHexNum(block.Size().Int64())
res.ExtraData = newHexData(block.Extra())
res.GasLimit = newHexNum(block.GasLimit())
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index 2090afce7..30488951d 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -440,9 +440,8 @@ func convertBlockTest(in *btJSON) (out *BlockTest, err error) {
func mustConvertGenesis(testGenesis btHeader) *types.Block {
hdr := mustConvertHeader(testGenesis)
hdr.Number = big.NewInt(0)
- b := types.NewBlockWithHeader(hdr)
- b.Td = new(big.Int)
- return b
+
+ return types.NewBlockWithHeader(hdr)
}
func mustConvertHeader(in btHeader) *types.Header {
diff --git a/xeth/xeth.go b/xeth/xeth.go
index 8bd45998f..00b70da6c 100644
--- a/xeth/xeth.go
+++ b/xeth/xeth.go
@@ -355,6 +355,10 @@ func (self *XEth) EthBlockByNumber(num int64) *types.Block {
return self.getBlockByHeight(num)
}
+func (self *XEth) Td(hash common.Hash) *big.Int {
+ return self.backend.ChainManager().GetTd(hash)
+}
+
func (self *XEth) CurrentBlock() *types.Block {
return self.backend.ChainManager().CurrentBlock()
}
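
Finally, a hypothetical helper (package and function names are illustrative, not part of the diff) showing the call pattern the RPC changes above converge on: the total difficulty is looked up through the new XEth.Td and handed to NewBlockRes explicitly, since blocks no longer expose a Td field.

package rpcutil // hypothetical package, for illustration only

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc/api"
	"github.com/ethereum/go-ethereum/xeth"
)

// blockResponse builds a block response the way the updated eth API methods do:
// resolve the TD for the block's hash and pass it alongside the block itself.
func blockResponse(x *xeth.XEth, block *types.Block, fullTx bool) *api.BlockRes {
	if block == nil {
		return nil // no block, nothing to look a TD up for
	}
	return api.NewBlockRes(block, x.Td(block.Hash()), fullTx)
}
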