aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.github/CONTRIBUTING.md (renamed from CONTRIBUTING.md)0
-rw-r--r--.github/ISSUE_TEMPLATE.md20
-rw-r--r--Godeps/Godeps.json24
-rw-r--r--Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go6
-rw-r--r--Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go21
-rw-r--r--Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go33
-rw-r--r--accounts/abi/abi.go37
-rw-r--r--accounts/abi/abi_test.go128
-rw-r--r--accounts/abi/method.go7
-rw-r--r--accounts/abi/numbers.go58
-rw-r--r--accounts/abi/type.go15
-rw-r--r--cmd/utils/cmd.go3
-rw-r--r--core/block_validator.go6
-rw-r--r--core/blockchain.go533
-rw-r--r--core/blockchain_test.go105
-rw-r--r--core/chain_makers.go4
-rw-r--r--core/headerchain.go464
-rw-r--r--core/state/managed_state.go2
-rw-r--r--core/state/state_test.go2
-rw-r--r--core/state_processor.go2
-rw-r--r--core/tx_pool.go6
-rw-r--r--core/tx_pool_test.go4
-rw-r--r--core/types.go14
-rw-r--r--core/types/block.go2
-rw-r--r--core/types/transaction.go4
-rw-r--r--core/vm/asm.go2
-rw-r--r--core/vm/doc.go2
-rw-r--r--core/vm/environment.go6
-rw-r--r--core/vm/jit.go6
-rw-r--r--core/vm/jit_test.go2
-rw-r--r--core/vm/jit_util.go2
-rw-r--r--core/vm/runtime/runtime.go2
-rw-r--r--core/vm/vm.go8
-rw-r--r--core/vm/vm_jit.go6
-rw-r--r--core/vm_env.go2
-rw-r--r--eth/api.go26
-rw-r--r--eth/downloader/api.go2
-rw-r--r--eth/downloader/downloader.go14
-rw-r--r--eth/downloader/downloader_test.go6
-rw-r--r--eth/downloader/peer.go4
-rw-r--r--eth/downloader/queue.go2
-rw-r--r--eth/fetcher/fetcher.go6
-rw-r--r--eth/filters/api.go2
-rw-r--r--eth/filters/filter_system_test.go2
-rw-r--r--eth/handler.go2
-rw-r--r--eth/handler_test.go4
-rw-r--r--eth/metrics.go2
-rw-r--r--internal/debug/flags.go1
-rw-r--r--internal/debug/loudpanic.go27
-rw-r--r--internal/debug/loudpanic_fallback.go24
-rw-r--r--miner/miner.go9
-rw-r--r--miner/worker.go12
-rw-r--r--p2p/nat/natupnp.go1
53 files changed, 1110 insertions, 574 deletions
diff --git a/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 829bf5d43..829bf5d43 100644
--- a/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..6c1cb9f9a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,20 @@
+#### System information
+
+Geth version: `geth version`
+OS & Version: Windows/Linux/OSX
+Commit hash : (if `develop`)
+
+#### Expected behaviour
+
+
+#### Actual behaviour
+
+
+#### Steps to reproduce the behaviour
+
+
+#### Backtrace
+
+````
+[backtrace]
+````
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index e02f15882..e1c07407d 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -144,51 +144,51 @@
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/cache",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/comparer",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/errors",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/filter",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/iterator",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/journal",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/memdb",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/opt",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/storage",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/table",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb/util",
- "Rev": "ad0d8b2ab58a55ed5c58073aa46451d5e1ca1280"
+ "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
},
{
"ImportPath": "golang.org/x/crypto/pbkdf2",
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
index fb7896139..5576761fe 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
@@ -166,15 +166,15 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
merged := 0
danglingMerge := false
defer func() {
+ for i := 0; i < merged; i++ {
+ db.writeAckC <- err
+ }
if danglingMerge {
// Only one dangling merge at most, so this is safe.
db.writeMergedC <- false
} else {
<-db.writeLockC
}
- for i := 0; i < merged; i++ {
- db.writeAckC <- err
- }
}()
mdb, mdbFree, err := db.flush(b.size())
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go
deleted file mode 100644
index 1f7fdd41f..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build go1.3
-
-package util
-
-import (
- "sync"
-)
-
-type Pool struct {
- sync.Pool
-}
-
-func NewPool(cap int) *Pool {
- return &Pool{}
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
deleted file mode 100644
index 27b8d03be..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !go1.3
-
-package util
-
-type Pool struct {
- pool chan interface{}
-}
-
-func (p *Pool) Get() interface{} {
- select {
- case x := <-p.pool:
- return x
- default:
- return nil
- }
-}
-
-func (p *Pool) Put(x interface{}) {
- select {
- case p.pool <- x:
- default:
- }
-}
-
-func NewPool(cap int) *Pool {
- return &Pool{pool: make(chan interface{}, cap)}
-}
diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go
index 324d3c76f..673088f60 100644
--- a/accounts/abi/abi.go
+++ b/accounts/abi/abi.go
@@ -56,17 +56,36 @@ func JSON(reader io.Reader) (ABI, error) {
func (abi ABI) pack(name string, args ...interface{}) ([]byte, error) {
method := abi.Methods[name]
+ // variable input is the output appended at the end of packed
+ // output. This is used for strings and bytes types input.
+ var variableInput []byte
+
var ret []byte
for i, a := range args {
input := method.Inputs[i]
-
+ // pack the input
packed, err := input.Type.pack(a)
if err != nil {
return nil, fmt.Errorf("`%s` %v", name, err)
}
- ret = append(ret, packed...)
+ // check for a string or bytes input type
+ switch input.Type.T {
+ case StringTy, BytesTy:
+ // calculate the offset
+ offset := len(method.Inputs)*32 + len(variableInput)
+ // set the offset
+ ret = append(ret, packNum(reflect.ValueOf(offset), UintTy)...)
+ // Append the packed output to the variable input. The variable input
+ // will be appended at the end of the input.
+ variableInput = append(variableInput, packed...)
+ default:
+ // append the packed value to the input
+ ret = append(ret, packed...)
+ }
}
+ // append the variable input at the end of the packed input
+ ret = append(ret, variableInput...)
return ret, nil
}
@@ -268,12 +287,12 @@ func set(dst, src reflect.Value, output Argument) error {
func (abi *ABI) UnmarshalJSON(data []byte) error {
var fields []struct {
- Type string
- Name string
- Const bool
- Indexed bool
- Inputs []Argument
- Outputs []Argument
+ Type string
+ Name string
+ Constant bool
+ Indexed bool
+ Inputs []Argument
+ Outputs []Argument
}
if err := json.Unmarshal(data, &fields); err != nil {
@@ -288,7 +307,7 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
case "function", "":
abi.Methods[field.Name] = Method{
Name: field.Name,
- Const: field.Const,
+ Const: field.Constant,
Inputs: field.Inputs,
Outputs: field.Outputs,
}
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index d1b8330e3..170f3f74b 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -365,6 +365,134 @@ func ExampleJSON() {
// 1f2c40920000000000000000000000000000000000000000000000000000000000000001
}
+func TestInputVariableInputLength(t *testing.T) {
+ const definition = `[
+ { "type" : "function", "name" : "strOne", "const" : true, "inputs" : [ { "name" : "str", "type" : "string" } ] },
+ { "type" : "function", "name" : "bytesOne", "const" : true, "inputs" : [ { "name" : "str", "type" : "bytes" } ] },
+ { "type" : "function", "name" : "strTwo", "const" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "str1", "type" : "string" } ] }
+]`
+
+ abi, err := JSON(strings.NewReader(definition))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test one string
+ strin := "hello world"
+ strpack, err := abi.Pack("strOne", strin)
+ if err != nil {
+ t.Error(err)
+ }
+
+ offset := make([]byte, 32)
+ offset[31] = 32
+ length := make([]byte, 32)
+ length[31] = byte(len(strin))
+ value := common.RightPadBytes([]byte(strin), 32)
+ exp := append(offset, append(length, value...)...)
+
+ // ignore first 4 bytes of the output. This is the function identifier
+ strpack = strpack[4:]
+ if !bytes.Equal(strpack, exp) {
+ t.Errorf("expected %x, got %x\n", exp, strpack)
+ }
+
+ // test one bytes
+ btspack, err := abi.Pack("bytesOne", []byte(strin))
+ if err != nil {
+ t.Error(err)
+ }
+ // ignore first 4 bytes of the output. This is the function identifier
+ btspack = btspack[4:]
+ if !bytes.Equal(btspack, exp) {
+ t.Errorf("expected %x, got %x\n", exp, btspack)
+ }
+
+ // test two strings
+ str1 := "hello"
+ str2 := "world"
+ str2pack, err := abi.Pack("strTwo", str1, str2)
+ if err != nil {
+ t.Error(err)
+ }
+
+ offset1 := make([]byte, 32)
+ offset1[31] = 64
+ length1 := make([]byte, 32)
+ length1[31] = byte(len(str1))
+ value1 := common.RightPadBytes([]byte(str1), 32)
+
+ offset2 := make([]byte, 32)
+ offset2[31] = 128
+ length2 := make([]byte, 32)
+ length2[31] = byte(len(str2))
+ value2 := common.RightPadBytes([]byte(str2), 32)
+
+ exp2 := append(offset1, offset2...)
+ exp2 = append(exp2, append(length1, value1...)...)
+ exp2 = append(exp2, append(length2, value2...)...)
+
+ // ignore first 4 bytes of the output. This is the function identifier
+ str2pack = str2pack[4:]
+ if !bytes.Equal(str2pack, exp2) {
+ t.Errorf("expected %x, got %x\n", exp, str2pack)
+ }
+
+ // test two strings, first > 32, second < 32
+ str1 = strings.Repeat("a", 33)
+ str2pack, err = abi.Pack("strTwo", str1, str2)
+ if err != nil {
+ t.Error(err)
+ }
+
+ offset1 = make([]byte, 32)
+ offset1[31] = 64
+ length1 = make([]byte, 32)
+ length1[31] = byte(len(str1))
+ value1 = common.RightPadBytes([]byte(str1), 64)
+ offset2[31] = 160
+
+ exp2 = append(offset1, offset2...)
+ exp2 = append(exp2, append(length1, value1...)...)
+ exp2 = append(exp2, append(length2, value2...)...)
+
+ // ignore first 4 bytes of the output. This is the function identifier
+ str2pack = str2pack[4:]
+ if !bytes.Equal(str2pack, exp2) {
+ t.Errorf("expected %x, got %x\n", exp, str2pack)
+ }
+
+ // test two strings, first > 32, second >32
+ str1 = strings.Repeat("a", 33)
+ str2 = strings.Repeat("a", 33)
+ str2pack, err = abi.Pack("strTwo", str1, str2)
+ if err != nil {
+ t.Error(err)
+ }
+
+ offset1 = make([]byte, 32)
+ offset1[31] = 64
+ length1 = make([]byte, 32)
+ length1[31] = byte(len(str1))
+ value1 = common.RightPadBytes([]byte(str1), 64)
+
+ offset2 = make([]byte, 32)
+ offset2[31] = 160
+ length2 = make([]byte, 32)
+ length2[31] = byte(len(str2))
+ value2 = common.RightPadBytes([]byte(str2), 64)
+
+ exp2 = append(offset1, offset2...)
+ exp2 = append(exp2, append(length1, value1...)...)
+ exp2 = append(exp2, append(length2, value2...)...)
+
+ // ignore first 4 bytes of the output. This is the function identifier
+ str2pack = str2pack[4:]
+ if !bytes.Equal(str2pack, exp2) {
+ t.Errorf("expected %x, got %x\n", exp, str2pack)
+ }
+}
+
func TestBytes(t *testing.T) {
const definition = `[
{ "type" : "function", "name" : "balance", "const" : true, "inputs" : [ { "name" : "address", "type" : "bytes20" } ] },
diff --git a/accounts/abi/method.go b/accounts/abi/method.go
index e259c09aa..206c7d408 100644
--- a/accounts/abi/method.go
+++ b/accounts/abi/method.go
@@ -67,8 +67,11 @@ func (m Method) String() string {
}
outputs[i] += output.Type.String()
}
-
- return fmt.Sprintf("function %v(%v) returns(%v)", m.Name, strings.Join(inputs, ", "), strings.Join(outputs, ", "))
+ constant := ""
+ if m.Const {
+ constant = "constant "
+ }
+ return fmt.Sprintf("function %v(%v) %sreturns(%v)", m.Name, strings.Join(inputs, ", "), constant, strings.Join(outputs, ", "))
}
func (m Method) Id() []byte {
diff --git a/accounts/abi/numbers.go b/accounts/abi/numbers.go
index c37cd5f68..02609d567 100644
--- a/accounts/abi/numbers.go
+++ b/accounts/abi/numbers.go
@@ -23,36 +23,38 @@ import (
"github.com/ethereum/go-ethereum/common"
)
-var big_t = reflect.TypeOf(&big.Int{})
-var ubig_t = reflect.TypeOf(&big.Int{})
-var byte_t = reflect.TypeOf(byte(0))
-var byte_ts = reflect.TypeOf([]byte(nil))
-var uint_t = reflect.TypeOf(uint(0))
-var uint8_t = reflect.TypeOf(uint8(0))
-var uint16_t = reflect.TypeOf(uint16(0))
-var uint32_t = reflect.TypeOf(uint32(0))
-var uint64_t = reflect.TypeOf(uint64(0))
-var int_t = reflect.TypeOf(int(0))
-var int8_t = reflect.TypeOf(int8(0))
-var int16_t = reflect.TypeOf(int16(0))
-var int32_t = reflect.TypeOf(int32(0))
-var int64_t = reflect.TypeOf(int64(0))
-var hash_t = reflect.TypeOf(common.Hash{})
-var address_t = reflect.TypeOf(common.Address{})
+var (
+ big_t = reflect.TypeOf(&big.Int{})
+ ubig_t = reflect.TypeOf(&big.Int{})
+ byte_t = reflect.TypeOf(byte(0))
+ byte_ts = reflect.TypeOf([]byte(nil))
+ uint_t = reflect.TypeOf(uint(0))
+ uint8_t = reflect.TypeOf(uint8(0))
+ uint16_t = reflect.TypeOf(uint16(0))
+ uint32_t = reflect.TypeOf(uint32(0))
+ uint64_t = reflect.TypeOf(uint64(0))
+ int_t = reflect.TypeOf(int(0))
+ int8_t = reflect.TypeOf(int8(0))
+ int16_t = reflect.TypeOf(int16(0))
+ int32_t = reflect.TypeOf(int32(0))
+ int64_t = reflect.TypeOf(int64(0))
+ hash_t = reflect.TypeOf(common.Hash{})
+ address_t = reflect.TypeOf(common.Address{})
-var uint_ts = reflect.TypeOf([]uint(nil))
-var uint8_ts = reflect.TypeOf([]uint8(nil))
-var uint16_ts = reflect.TypeOf([]uint16(nil))
-var uint32_ts = reflect.TypeOf([]uint32(nil))
-var uint64_ts = reflect.TypeOf([]uint64(nil))
-var ubig_ts = reflect.TypeOf([]*big.Int(nil))
+ uint_ts = reflect.TypeOf([]uint(nil))
+ uint8_ts = reflect.TypeOf([]uint8(nil))
+ uint16_ts = reflect.TypeOf([]uint16(nil))
+ uint32_ts = reflect.TypeOf([]uint32(nil))
+ uint64_ts = reflect.TypeOf([]uint64(nil))
+ ubig_ts = reflect.TypeOf([]*big.Int(nil))
-var int_ts = reflect.TypeOf([]int(nil))
-var int8_ts = reflect.TypeOf([]int8(nil))
-var int16_ts = reflect.TypeOf([]int16(nil))
-var int32_ts = reflect.TypeOf([]int32(nil))
-var int64_ts = reflect.TypeOf([]int64(nil))
-var big_ts = reflect.TypeOf([]*big.Int(nil))
+ int_ts = reflect.TypeOf([]int(nil))
+ int8_ts = reflect.TypeOf([]int8(nil))
+ int16_ts = reflect.TypeOf([]int16(nil))
+ int32_ts = reflect.TypeOf([]int32(nil))
+ int64_ts = reflect.TypeOf([]int64(nil))
+ big_ts = reflect.TypeOf([]*big.Int(nil))
+)
// U256 will ensure unsigned 256bit on big nums
func U256(n *big.Int) []byte {
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index 6fb2950ba..c08b744f7 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -163,6 +163,13 @@ func (t Type) String() (out string) {
return t.stringKind
}
+// packBytesSlice packs the given bytes as [L, V] as the canonical representation
+// bytes slice
+func packBytesSlice(bytes []byte, l int) []byte {
+ len := packNum(reflect.ValueOf(l), UintTy)
+ return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...)
+}
+
// Test the given input parameter `v` and checks if it matches certain
// criteria
// * Big integers are checks for ptr types and if the given value is
@@ -193,8 +200,14 @@ func (t Type) pack(v interface{}) ([]byte, error) {
if t.Size > -1 && value.Len() > t.Size {
return nil, fmt.Errorf("%v out of bound. %d for %d", value.Kind(), value.Len(), t.Size)
}
- return []byte(common.LeftPadString(t.String(), 32)), nil
+
+ return packBytesSlice([]byte(value.String()), value.Len()), nil
case reflect.Slice:
+ // if the param is a bytes type, pack the slice up as a string
+ if t.T == BytesTy {
+ return packBytesSlice(value.Bytes(), value.Len()), nil
+ }
+
if t.Size > -1 && value.Len() > t.Size {
return nil, fmt.Errorf("%v out of bound. %d for %d", value.Kind(), value.Len(), t.Size)
}
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
index a0d60a583..7d299026b 100644
--- a/cmd/utils/cmd.go
+++ b/cmd/utils/cmd.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/node"
@@ -130,7 +131,7 @@ func StartNode(stack *node.Node) {
}
}
glog.V(logger.Error).Infof("Force quitting: this might not end so well.")
- panic("boom")
+ debug.LoudPanic("boom")
}()
}
diff --git a/core/block_validator.go b/core/block_validator.go
index 73c33d8dd..4d710ae3f 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -58,7 +58,7 @@ func NewBlockValidator(blockchain *BlockChain, pow pow.PoW) *BlockValidator {
// the block header's transaction and uncle roots.
//
// ValidateBlock does not validate the header's pow. The pow work validated
-// seperately so we can process them in paralel.
+// separately so we can process them in parallel.
//
// ValidateBlock also validates and makes sure that any previous state (or present)
// state that might or might not be present is checked to make sure that fast
@@ -106,7 +106,7 @@ func (v *BlockValidator) ValidateBlock(block *types.Block) error {
// ValidateState validates the various changes that happen after a state
// transition, such as amount of used gas, the receipt roots and the state root
-// itself. ValidateState returns a database batch if the validation was a succes
+// itself. ValidateState returns a database batch if the validation was a success
// otherwise nil and an error is returned.
func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas *big.Int) (err error) {
header := block.Header()
@@ -297,7 +297,7 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
periodCount := new(big.Int).Add(parentNumber, common.Big1)
periodCount.Div(periodCount, ExpDiffPeriod)
- // the exponential factor, commonly refered to as "the bomb"
+ // the exponential factor, commonly referred to as "the bomb"
// diff = diff + 2^(periodCount - 2)
if periodCount.Cmp(common.Big1) > 0 {
y.Sub(periodCount, common.Big2)
diff --git a/core/blockchain.go b/core/blockchain.go
index 2c6ff24f9..534318ecd 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -18,11 +18,9 @@
package core
import (
- crand "crypto/rand"
"errors"
"fmt"
"io"
- "math"
"math/big"
mrand "math/rand"
"runtime"
@@ -82,47 +80,41 @@ const (
// included in the canonical one where as GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
+ hc *HeaderChain
chainDb ethdb.Database
eventMux *event.TypeMux
genesisBlock *types.Block
- // Last known total difficulty
- mu sync.RWMutex
- chainmu sync.RWMutex
- tsmu sync.RWMutex
- procmu sync.RWMutex
-
- checkpoint int // checkpoint counts towards the new checkpoint
- currentHeader *types.Header // Current head of the header chain (may be above the block chain!)
- currentBlock *types.Block // Current head of the block chain
- currentFastBlock *types.Block // Current head of the fast-sync chain (may be above the block chain!)
-
- headerCache *lru.Cache // Cache for the most recent block headers
+
+ mu sync.RWMutex // global mutex for locking chain operations
+ chainmu sync.RWMutex // blockchain insertion lock
+ procmu sync.RWMutex // block processor lock
+
+ checkpoint int // checkpoint counts towards the new checkpoint
+ currentBlock *types.Block // Current head of the block chain
+ currentFastBlock *types.Block // Current head of the fast-sync chain (may be above the block chain!)
+
bodyCache *lru.Cache // Cache for the most recent block bodies
bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
- tdCache *lru.Cache // Cache for the most recent block total difficulties
blockCache *lru.Cache // Cache for the most recent entire blocks
futureBlocks *lru.Cache // future blocks are blocks added for later processing
- quit chan struct{}
- running int32 // running must be called automically
+ quit chan struct{} // blockchain quit channel
+ running int32 // running must be called atomically
// procInterrupt must be atomically called
- procInterrupt int32 // interrupt signaler for block processing
- wg sync.WaitGroup
+ procInterrupt int32 // interrupt signaler for block processing
+ wg sync.WaitGroup // chain processing wait group for shutting down
pow pow.PoW
- rand *mrand.Rand
- processor Processor
- validator Validator
+ processor Processor // block processor interface
+ validator Validator // block and state validator interface
}
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialiser the default Ethereum Validator and
// Processor.
func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*BlockChain, error) {
- headerCache, _ := lru.New(headerCacheLimit)
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
- tdCache, _ := lru.New(tdCacheLimit)
blockCache, _ := lru.New(blockCacheLimit)
futureBlocks, _ := lru.New(maxFutureBlocks)
@@ -130,22 +122,21 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
chainDb: chainDb,
eventMux: mux,
quit: make(chan struct{}),
- headerCache: headerCache,
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
- tdCache: tdCache,
blockCache: blockCache,
futureBlocks: futureBlocks,
pow: pow,
}
- // Seed a fast but crypto originating random generator
- seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+ bc.SetValidator(NewBlockValidator(bc, pow))
+ bc.SetProcessor(NewStateProcessor(bc))
+
+ gv := func() HeaderValidator { return bc.Validator() }
+ var err error
+ bc.hc, err = NewHeaderChain(chainDb, gv, bc.getProcInterrupt)
if err != nil {
return nil, err
}
- bc.rand = mrand.New(mrand.NewSource(seed.Int64()))
- bc.SetValidator(NewBlockValidator(bc, pow))
- bc.SetProcessor(NewStateProcessor(bc))
bc.genesisBlock = bc.GetBlockByNumber(0)
if bc.genesisBlock == nil {
@@ -171,6 +162,10 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
return bc, nil
}
+func (self *BlockChain) getProcInterrupt() bool {
+ return atomic.LoadInt32(&self.procInterrupt) == 1
+}
+
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (self *BlockChain) loadLastState() error {
@@ -189,12 +184,13 @@ func (self *BlockChain) loadLastState() error {
}
}
// Restore the last known head header
- self.currentHeader = self.currentBlock.Header()
+ currentHeader := self.currentBlock.Header()
if head := GetHeadHeaderHash(self.chainDb); head != (common.Hash{}) {
if header := self.GetHeader(head); header != nil {
- self.currentHeader = header
+ currentHeader = header
}
}
+ self.hc.SetCurrentHeader(currentHeader)
// Restore the last known head fast block
self.currentFastBlock = self.currentBlock
if head := GetHeadFastBlockHash(self.chainDb); head != (common.Hash{}) {
@@ -203,11 +199,11 @@ func (self *BlockChain) loadLastState() error {
}
}
// Issue a status log and return
- headerTd := self.GetTd(self.currentHeader.Hash())
+ headerTd := self.GetTd(self.hc.CurrentHeader().Hash())
blockTd := self.GetTd(self.currentBlock.Hash())
fastTd := self.GetTd(self.currentFastBlock.Hash())
- glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.currentHeader.Number, self.currentHeader.Hash().Bytes()[:4], headerTd)
+ glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd)
glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd)
glog.V(logger.Info).Infof("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd)
@@ -222,71 +218,35 @@ func (bc *BlockChain) SetHead(head uint64) {
bc.mu.Lock()
defer bc.mu.Unlock()
- // Figure out the highest known canonical headers and/or blocks
- height := uint64(0)
- if bc.currentHeader != nil {
- if hh := bc.currentHeader.Number.Uint64(); hh > height {
- height = hh
- }
- }
- if bc.currentBlock != nil {
- if bh := bc.currentBlock.NumberU64(); bh > height {
- height = bh
- }
- }
- if bc.currentFastBlock != nil {
- if fbh := bc.currentFastBlock.NumberU64(); fbh > height {
- height = fbh
- }
- }
- // Gather all the hashes that need deletion
- drop := make(map[common.Hash]struct{})
-
- for bc.currentHeader != nil && bc.currentHeader.Number.Uint64() > head {
- drop[bc.currentHeader.Hash()] = struct{}{}
- bc.currentHeader = bc.GetHeader(bc.currentHeader.ParentHash)
- }
- for bc.currentBlock != nil && bc.currentBlock.NumberU64() > head {
- drop[bc.currentBlock.Hash()] = struct{}{}
- bc.currentBlock = bc.GetBlock(bc.currentBlock.ParentHash())
- }
- for bc.currentFastBlock != nil && bc.currentFastBlock.NumberU64() > head {
- drop[bc.currentFastBlock.Hash()] = struct{}{}
- bc.currentFastBlock = bc.GetBlock(bc.currentFastBlock.ParentHash())
- }
- // Roll back the canonical chain numbering
- for i := height; i > head; i-- {
- DeleteCanonicalHash(bc.chainDb, i)
- }
- // Delete everything found by the above rewind
- for hash, _ := range drop {
- DeleteHeader(bc.chainDb, hash)
+ delFn := func(hash common.Hash) {
DeleteBody(bc.chainDb, hash)
- DeleteTd(bc.chainDb, hash)
}
+ bc.hc.SetHead(head, delFn)
+
// Clear out any stale content from the caches
- bc.headerCache.Purge()
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
bc.blockCache.Purge()
bc.futureBlocks.Purge()
// Update all computed fields to the new head
+ if bc.currentBlock != nil && bc.hc.CurrentHeader().Number.Uint64() < bc.currentBlock.NumberU64() {
+ bc.currentBlock = bc.GetBlock(bc.hc.CurrentHeader().Hash())
+ }
+ if bc.currentFastBlock != nil && bc.hc.CurrentHeader().Number.Uint64() < bc.currentFastBlock.NumberU64() {
+ bc.currentFastBlock = bc.GetBlock(bc.hc.CurrentHeader().Hash())
+ }
+
if bc.currentBlock == nil {
bc.currentBlock = bc.genesisBlock
}
- if bc.currentHeader == nil {
- bc.currentHeader = bc.genesisBlock.Header()
- }
if bc.currentFastBlock == nil {
bc.currentFastBlock = bc.genesisBlock
}
+
if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil {
glog.Fatalf("failed to reset head block hash: %v", err)
}
- if err := WriteHeadHeaderHash(bc.chainDb, bc.currentHeader.Hash()); err != nil {
- glog.Fatalf("failed to reset head header hash: %v", err)
- }
if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil {
glog.Fatalf("failed to reset head fast block hash: %v", err)
}
@@ -329,15 +289,6 @@ func (self *BlockChain) LastBlockHash() common.Hash {
return self.currentBlock.Hash()
}
-// CurrentHeader retrieves the current head header of the canonical chain. The
-// header is retrieved from the blockchain's internal cache.
-func (self *BlockChain) CurrentHeader() *types.Header {
- self.mu.RLock()
- defer self.mu.RUnlock()
-
- return self.currentHeader
-}
-
// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (self *BlockChain) CurrentBlock() *types.Block {
@@ -416,7 +367,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
defer bc.mu.Unlock()
// Prepare the genesis block and reinitialise the chain
- if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil {
+ if err := bc.hc.WriteTd(genesis.Hash(), genesis.Difficulty()); err != nil {
glog.Fatalf("failed to write genesis block TD: %v", err)
}
if err := WriteBlock(bc.chainDb, genesis); err != nil {
@@ -425,7 +376,8 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
bc.genesisBlock = genesis
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
- bc.currentHeader = bc.genesisBlock.Header()
+ bc.hc.SetGenesis(bc.genesisBlock.Header())
+ bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
bc.currentFastBlock = bc.genesisBlock
}
@@ -483,10 +435,7 @@ func (bc *BlockChain) insert(block *types.Block) {
// If the block is better than out head or is on a different chain, force update heads
if updateHeads {
- if err := WriteHeadHeaderHash(bc.chainDb, block.Hash()); err != nil {
- glog.Fatalf("failed to insert head header hash: %v", err)
- }
- bc.currentHeader = block.Header()
+ bc.hc.SetCurrentHeader(block.Header())
if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
glog.Fatalf("failed to insert head fast block hash: %v", err)
@@ -500,38 +449,6 @@ func (bc *BlockChain) Genesis() *types.Block {
return bc.genesisBlock
}
-// HasHeader checks if a block header is present in the database or not, caching
-// it if present.
-func (bc *BlockChain) HasHeader(hash common.Hash) bool {
- return bc.GetHeader(hash) != nil
-}
-
-// GetHeader retrieves a block header from the database by hash, caching it if
-// found.
-func (self *BlockChain) GetHeader(hash common.Hash) *types.Header {
- // Short circuit if the header's already in the cache, retrieve otherwise
- if header, ok := self.headerCache.Get(hash); ok {
- return header.(*types.Header)
- }
- header := GetHeader(self.chainDb, hash)
- if header == nil {
- return nil
- }
- // Cache the found header for next time and return
- self.headerCache.Add(header.Hash(), header)
- return header
-}
-
-// GetHeaderByNumber retrieves a block header from the database by number,
-// caching it (associated with its hash) if found.
-func (self *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
- hash := GetCanonicalHash(self.chainDb, number)
- if hash == (common.Hash{}) {
- return nil
- }
- return self.GetHeader(hash)
-}
-
// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (self *BlockChain) GetBody(hash common.Hash) *types.Body {
@@ -565,22 +482,6 @@ func (self *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
return body
}
-// GetTd retrieves a block's total difficulty in the canonical chain from the
-// database by hash, caching it if found.
-func (self *BlockChain) GetTd(hash common.Hash) *big.Int {
- // Short circuit if the td's already in the cache, retrieve otherwise
- if cached, ok := self.tdCache.Get(hash); ok {
- return cached.(*big.Int)
- }
- td := GetTd(self.chainDb, hash)
- if td == nil {
- return nil
- }
- // Cache the found body for next time and return
- self.tdCache.Add(hash, td)
- return td
-}
-
// HasBlock checks if a block is fully present in the database or not, caching
// it if present.
func (bc *BlockChain) HasBlock(hash common.Hash) bool {
@@ -625,28 +526,6 @@ func (self *BlockChain) GetBlockByNumber(number uint64) *types.Block {
return self.GetBlock(hash)
}
-// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
-// hash, fetching towards the genesis block.
-func (self *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
- // Get the origin header from which to fetch
- header := self.GetHeader(hash)
- if header == nil {
- return nil
- }
- // Iterate the headers until enough is collected or the genesis reached
- chain := make([]common.Hash, 0, max)
- for i := uint64(0); i < max; i++ {
- if header = self.GetHeader(header.ParentHash); header == nil {
- break
- }
- chain = append(chain, header.Hash())
- if header.Number.Cmp(common.Big0) == 0 {
- break
- }
- }
- return chain
-}
-
// [deprecated by eth/62]
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
func (self *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
@@ -687,10 +566,11 @@ func (bc *BlockChain) Stop() {
}
func (self *BlockChain) procFutureBlocks() {
- blocks := make([]*types.Block, self.futureBlocks.Len())
- for i, hash := range self.futureBlocks.Keys() {
- block, _ := self.futureBlocks.Get(hash)
- blocks[i] = block.(*types.Block)
+ blocks := make([]*types.Block, 0, self.futureBlocks.Len())
+ for _, hash := range self.futureBlocks.Keys() {
+ if block, exist := self.futureBlocks.Get(hash); exist {
+ blocks = append(blocks, block.(*types.Block))
+ }
}
if len(blocks) > 0 {
types.BlockBy(types.Number).Sort(blocks)
@@ -698,195 +578,15 @@ func (self *BlockChain) procFutureBlocks() {
}
}
-type writeStatus byte
+type WriteStatus byte
const (
- NonStatTy writeStatus = iota
+ NonStatTy WriteStatus = iota
CanonStatTy
SplitStatTy
SideStatTy
)
-// writeHeader writes a header into the local chain, given that its parent is
-// already known. If the total difficulty of the newly inserted header becomes
-// greater than the current known TD, the canonical chain is re-routed.
-//
-// Note: This method is not concurrent-safe with inserting blocks simultaneously
-// into the chain, as side effects caused by reorganisations cannot be emulated
-// without the real blocks. Hence, writing headers directly should only be done
-// in two scenarios: pure-header mode of operation (light clients), or properly
-// separated header/block phases (non-archive clients).
-func (self *BlockChain) writeHeader(header *types.Header) error {
- self.wg.Add(1)
- defer self.wg.Done()
-
- // Calculate the total difficulty of the header
- ptd := self.GetTd(header.ParentHash)
- if ptd == nil {
- return ParentError(header.ParentHash)
- }
-
- localTd := self.GetTd(self.currentHeader.Hash())
- externTd := new(big.Int).Add(header.Difficulty, ptd)
-
- // Make sure no inconsistent state is leaked during insertion
- self.mu.Lock()
- defer self.mu.Unlock()
-
- // If the total difficulty is higher than our known, add it to the canonical chain
- // Second clause in the if statement reduces the vulnerability to selfish mining.
- // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
- if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
- // Delete any canonical number assignments above the new head
- for i := header.Number.Uint64() + 1; GetCanonicalHash(self.chainDb, i) != (common.Hash{}); i++ {
- DeleteCanonicalHash(self.chainDb, i)
- }
- // Overwrite any stale canonical number assignments
- head := self.GetHeader(header.ParentHash)
- for GetCanonicalHash(self.chainDb, head.Number.Uint64()) != head.Hash() {
- WriteCanonicalHash(self.chainDb, head.Hash(), head.Number.Uint64())
- head = self.GetHeader(head.ParentHash)
- }
- // Extend the canonical chain with the new header
- if err := WriteCanonicalHash(self.chainDb, header.Hash(), header.Number.Uint64()); err != nil {
- glog.Fatalf("failed to insert header number: %v", err)
- }
- if err := WriteHeadHeaderHash(self.chainDb, header.Hash()); err != nil {
- glog.Fatalf("failed to insert head header hash: %v", err)
- }
- self.currentHeader = types.CopyHeader(header)
- }
- // Irrelevant of the canonical status, write the header itself to the database
- if err := WriteTd(self.chainDb, header.Hash(), externTd); err != nil {
- glog.Fatalf("failed to write header total difficulty: %v", err)
- }
- if err := WriteHeader(self.chainDb, header); err != nil {
- glog.Fatalf("filed to write header contents: %v", err)
- }
- return nil
-}
-
-// InsertHeaderChain attempts to insert the given header chain in to the local
-// chain, possibly creating a reorg. If an error is returned, it will return the
-// index number of the failing header as well an error describing what went wrong.
-//
-// The verify parameter can be used to fine tune whether nonce verification
-// should be done or not. The reason behind the optional check is because some
-// of the header retrieval mechanisms already need to verfy nonces, as well as
-// because nonces can be verified sparsely, not needing to check each.
-func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
- self.wg.Add(1)
- defer self.wg.Done()
-
- // Make sure only one thread manipulates the chain at once
- self.chainmu.Lock()
- defer self.chainmu.Unlock()
-
- // Collect some import statistics to report on
- stats := struct{ processed, ignored int }{}
- start := time.Now()
-
- // Generate the list of headers that should be POW verified
- verify := make([]bool, len(chain))
- for i := 0; i < len(verify)/checkFreq; i++ {
- index := i*checkFreq + self.rand.Intn(checkFreq)
- if index >= len(verify) {
- index = len(verify) - 1
- }
- verify[index] = true
- }
- verify[len(verify)-1] = true // Last should always be verified to avoid junk
-
- // Create the header verification task queue and worker functions
- tasks := make(chan int, len(chain))
- for i := 0; i < len(chain); i++ {
- tasks <- i
- }
- close(tasks)
-
- errs, failed := make([]error, len(tasks)), int32(0)
- process := func(worker int) {
- for index := range tasks {
- header, hash := chain[index], chain[index].Hash()
-
- // Short circuit insertion if shutting down or processing failed
- if atomic.LoadInt32(&self.procInterrupt) == 1 {
- return
- }
- if atomic.LoadInt32(&failed) > 0 {
- return
- }
- // Short circuit if the header is bad or already known
- if BadHashes[hash] {
- errs[index] = BadHashError(hash)
- atomic.AddInt32(&failed, 1)
- return
- }
- if self.HasHeader(hash) {
- continue
- }
- // Verify that the header honors the chain parameters
- checkPow := verify[index]
-
- var err error
- if index == 0 {
- err = self.Validator().ValidateHeader(header, self.GetHeader(header.ParentHash), checkPow)
- } else {
- err = self.Validator().ValidateHeader(header, chain[index-1], checkPow)
- }
- if err != nil {
- errs[index] = err
- atomic.AddInt32(&failed, 1)
- return
- }
- }
- }
- // Start as many worker threads as goroutines allowed
- pending := new(sync.WaitGroup)
- for i := 0; i < runtime.GOMAXPROCS(0); i++ {
- pending.Add(1)
- go func(id int) {
- defer pending.Done()
- process(id)
- }(i)
- }
- pending.Wait()
-
- // If anything failed, report
- if failed > 0 {
- for i, err := range errs {
- if err != nil {
- return i, err
- }
- }
- }
- // All headers passed verification, import them into the database
- for i, header := range chain {
- // Short circuit insertion if shutting down
- if atomic.LoadInt32(&self.procInterrupt) == 1 {
- glog.V(logger.Debug).Infoln("premature abort during header chain processing")
- break
- }
- hash := header.Hash()
-
- // If the header's already known, skip it, otherwise store
- if self.HasHeader(hash) {
- stats.ignored++
- continue
- }
- if err := self.writeHeader(header); err != nil {
- return i, err
- }
- stats.processed++
- }
- // Report some public statistics so the user has a clue what's going on
- first, last := chain[0], chain[len(chain)-1]
- glog.V(logger.Info).Infof("imported %d header(s) (%d ignored) in %v. #%v [%x… / %x…]", stats.processed, stats.ignored,
- time.Since(start), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
-
- return 0, nil
-}
-
// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (self *BlockChain) Rollback(chain []common.Hash) {
@@ -896,9 +596,8 @@ func (self *BlockChain) Rollback(chain []common.Hash) {
for i := len(chain) - 1; i >= 0; i-- {
hash := chain[i]
- if self.currentHeader.Hash() == hash {
- self.currentHeader = self.GetHeader(self.currentHeader.ParentHash)
- WriteHeadHeaderHash(self.chainDb, self.currentHeader.Hash())
+ if self.hc.CurrentHeader().Hash() == hash {
+ self.hc.SetCurrentHeader(self.GetHeader(self.hc.CurrentHeader().ParentHash))
}
if self.currentFastBlock.Hash() == hash {
self.currentFastBlock = self.GetBlock(self.currentFastBlock.ParentHash())
@@ -1055,7 +754,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
}
// WriteBlock writes the block to the chain.
-func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err error) {
+func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err error) {
self.wg.Add(1)
defer self.wg.Done()
@@ -1076,7 +775,7 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
- // Reorganize the chain if the parent is not the head block
+ // Reorganise the chain if the parent is not the head block
if block.ParentHash() != self.currentBlock.Hash() {
if err := self.reorg(self.currentBlock, block); err != nil {
return NonStatTy, err
@@ -1089,12 +788,13 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err
status = SideStatTy
}
// Irrelevant of the canonical status, write the block itself to the database
- if err := WriteTd(self.chainDb, block.Hash(), externTd); err != nil {
+ if err := self.hc.WriteTd(block.Hash(), externTd); err != nil {
glog.Fatalf("failed to write block total difficulty: %v", err)
}
if err := WriteBlock(self.chainDb, block); err != nil {
- glog.Fatalf("filed to write block contents: %v", err)
+ glog.Fatalf("failed to write block contents: %v", err)
}
+
self.futureBlocks.Remove(block.Hash())
return
@@ -1268,12 +968,14 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// event about them
func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
var (
- newChain types.Blocks
- commonBlock *types.Block
- oldStart = oldBlock
- newStart = newBlock
- deletedTxs types.Transactions
- deletedLogs vm.Logs
+ newChain types.Blocks
+ oldChain types.Blocks
+ commonBlock *types.Block
+ oldStart = oldBlock
+ newStart = newBlock
+ deletedTxs types.Transactions
+ deletedLogs vm.Logs
+ deletedLogsByHash = make(map[common.Hash]vm.Logs)
// collectLogs collects the logs that were generated during the
// processing of the block that corresponds with the given hash.
// These logs are later announced as deleted.
@@ -1282,6 +984,8 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
receipts := GetBlockReceipts(self.chainDb, h)
for _, receipt := range receipts {
deletedLogs = append(deletedLogs, receipt.Logs...)
+
+ deletedLogsByHash[h] = receipt.Logs
}
}
)
@@ -1290,6 +994,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
if oldBlock.NumberU64() > newBlock.NumberU64() {
// reduce old chain
for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
+ oldChain = append(oldChain, oldBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
collectLogs(oldBlock.Hash())
@@ -1313,6 +1018,8 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
commonBlock = oldBlock
break
}
+
+ oldChain = append(oldChain, oldBlock)
newChain = append(newChain, newBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
collectLogs(oldBlock.Hash())
@@ -1369,6 +1076,14 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
go self.eventMux.Post(RemovedLogsEvent{deletedLogs})
}
+ if len(oldChain) > 0 {
+ go func() {
+ for _, block := range oldChain {
+ self.eventMux.Post(ChainSideEvent{Block: block, Logs: deletedLogsByHash[block.Hash()]})
+ }
+ }()
+ }
+
return nil
}
@@ -1412,3 +1127,89 @@ func reportBlock(block *types.Block, err error) {
}
go ReportBlock(block, err)
}
+
+// InsertHeaderChain attempts to insert the given header chain into the local
+// chain, possibly creating a reorg. If an error is returned, it will return the
+// index number of the failing header as well as an error describing what went wrong.
+//
+// The verify parameter can be used to fine tune whether nonce verification
+// should be done or not. The reason behind the optional check is because some
+// of the header retrieval mechanisms already need to verify nonces, as well as
+// because nonces can be verified sparsely, not needing to check each.
+func (self *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+ // Make sure only one thread manipulates the chain at once
+ self.chainmu.Lock()
+ defer self.chainmu.Unlock()
+
+ self.wg.Add(1)
+ defer self.wg.Done()
+
+ whFunc := func(header *types.Header) error {
+ self.mu.Lock()
+ defer self.mu.Unlock()
+
+ _, err := self.hc.WriteHeader(header)
+ return err
+ }
+
+ return self.hc.InsertHeaderChain(chain, checkFreq, whFunc)
+}
+
+// writeHeader writes a header into the local chain, given that its parent is
+// already known. If the total difficulty of the newly inserted header becomes
+// greater than the current known TD, the canonical chain is re-routed.
+//
+// Note: This method is not concurrent-safe with inserting blocks simultaneously
+// into the chain, as side effects caused by reorganisations cannot be emulated
+// without the real blocks. Hence, writing headers directly should only be done
+// in two scenarios: pure-header mode of operation (light clients), or properly
+// separated header/block phases (non-archive clients).
+func (self *BlockChain) writeHeader(header *types.Header) error {
+ self.wg.Add(1)
+ defer self.wg.Done()
+
+ self.mu.Lock()
+ defer self.mu.Unlock()
+
+ _, err := self.hc.WriteHeader(header)
+ return err
+}
+
+// CurrentHeader retrieves the current head header of the canonical chain. The
+// header is retrieved from the HeaderChain's internal cache.
+func (self *BlockChain) CurrentHeader() *types.Header {
+ self.mu.RLock()
+ defer self.mu.RUnlock()
+
+ return self.hc.CurrentHeader()
+}
+
+// GetTd retrieves a block's total difficulty in the canonical chain from the
+// database by hash, caching it if found.
+func (self *BlockChain) GetTd(hash common.Hash) *big.Int {
+ return self.hc.GetTd(hash)
+}
+
+// GetHeader retrieves a block header from the database by hash, caching it if
+// found.
+func (self *BlockChain) GetHeader(hash common.Hash) *types.Header {
+ return self.hc.GetHeader(hash)
+}
+
+// HasHeader checks if a block header is present in the database or not, caching
+// it if present.
+func (bc *BlockChain) HasHeader(hash common.Hash) bool {
+ return bc.hc.HasHeader(hash)
+}
+
+// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
+// hash, fetching towards the genesis block.
+func (self *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
+ return self.hc.GetBlockHashesFromHash(hash, max)
+}
+
+// GetHeaderByNumber retrieves a block header from the database by number,
+// caching it (associated with its hash) if found.
+func (self *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
+ return self.hc.GetHeaderByNumber(number)
+}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 1bb5f646d..df979578e 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -25,6 +25,7 @@ import (
"runtime"
"strconv"
"testing"
+ "time"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/common"
@@ -167,7 +168,7 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
if err := blockchain.Validator().ValidateHeader(header, blockchain.GetHeader(header.ParentHash), false); err != nil {
return err
}
- // Manually insert the header into the database, but don't reorganize (allows subsequent testing)
+ // Manually insert the header into the database, but don't reorganise (allows subsequent testing)
blockchain.mu.Lock()
WriteTd(blockchain.chainDb, header.Hash(), new(big.Int).Add(header.Difficulty, blockchain.GetTd(header.ParentHash)))
WriteHeader(blockchain.chainDb, header)
@@ -471,11 +472,16 @@ func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.B
func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
var eventMux event.TypeMux
- bc := &BlockChain{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}, rand: rand.New(rand.NewSource(0))}
- bc.headerCache, _ = lru.New(100)
+ bc := &BlockChain{
+ chainDb: db,
+ genesisBlock: genesis,
+ eventMux: &eventMux,
+ pow: FakePow{},
+ }
+ valFn := func() HeaderValidator { return bc.Validator() }
+ bc.hc, _ = NewHeaderChain(db, valFn, bc.getProcInterrupt)
bc.bodyCache, _ = lru.New(100)
bc.bodyRLPCache, _ = lru.New(100)
- bc.tdCache, _ = lru.New(100)
bc.blockCache, _ = lru.New(100)
bc.futureBlocks, _ = lru.New(100)
bc.SetValidator(bproc{})
@@ -485,7 +491,7 @@ func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
return bc
}
-// Tests that reorganizing a long difficult chain after a short easy one
+// Tests that reorganising a long difficult chain after a short easy one
// overwrites the canonical numbers and links in the database.
func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
@@ -494,7 +500,7 @@ func testReorgLong(t *testing.T, full bool) {
testReorg(t, []int{1, 2, 4}, []int{1, 2, 3, 4}, 10, full)
}
-// Tests that reorganizing a short difficult chain after a long easy one
+// Tests that reorganising a short difficult chain after a long easy one
// overwrites the canonical numbers and links in the database.
func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true) }
@@ -572,7 +578,7 @@ func testBadHashes(t *testing.T, full bool) {
}
}
-// Tests that bad hashes are detected on boot, and the chan rolled back to a
+// Tests that bad hashes are detected on boot, and the chain rolled back to a
// good state prior to the bad hash.
func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
@@ -583,7 +589,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
genesis, _ := WriteTestNetGenesisBlock(db)
bc := chm(genesis, db)
- // Create a chain, import and ban aferwards
+ // Create a chain, import and ban afterwards
headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
blocks := makeBlockChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
@@ -852,7 +858,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
assert(t, "light", light, height/2, 0, 0)
}
-// Tests that chain reorganizations handle transaction removals and reinsertions.
+// Tests that chain reorganisations handle transaction removals and reinsertions.
func TestChainTxReorgs(t *testing.T) {
params.MinGasLimit = big.NewInt(125000) // Minimum the gas limit may ever be.
params.GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block.
@@ -883,7 +889,7 @@ func TestChainTxReorgs(t *testing.T) {
var pastDrop, freshDrop *types.Transaction
// Create three transactions that will be added in the forked chain:
- // - pastAdd: transaction added before the reorganiztion is detected
+ // - pastAdd: transaction added before the reorganization is detected
// - freshAdd: transaction added at the exact block the reorg is detected
// - futureAdd: transaction added after the reorg has already finished
var pastAdd, freshAdd, futureAdd *types.Transaction
@@ -1006,3 +1012,82 @@ func TestLogReorgs(t *testing.T) {
t.Error("expected logs")
}
}
+
+func TestReorgSideEvent(t *testing.T) {
+ var (
+ db, _ = ethdb.NewMemDatabase()
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ genesis = WriteGenesisBlockForTesting(db, GenesisAccount{addr1, big.NewInt(10000000000000)})
+ )
+
+ evmux := &event.TypeMux{}
+ blockchain, _ := NewBlockChain(db, FakePow{}, evmux)
+
+ chain, _ := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
+ if i == 2 {
+ gen.OffsetTime(9)
+ }
+ })
+ if _, err := blockchain.InsertChain(chain); err != nil {
+ t.Fatalf("failed to insert chain: %v", err)
+ }
+
+ replacementBlocks, _ := GenerateChain(genesis, db, 4, func(i int, gen *BlockGen) {
+ tx, err := types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), big.NewInt(1000000), new(big.Int), nil).SignECDSA(key1)
+ if err != nil {
+ t.Fatalf("failed to create tx: %v", err)
+ }
+ gen.AddTx(tx)
+ })
+
+ subs := evmux.Subscribe(ChainSideEvent{})
+ if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
+ t.Fatalf("failed to insert chain: %v", err)
+ }
+
+ // first two blocks of the secondary chain are for a brief moment considered
+ // side chains because up to that point the first one is considered the
+ // heavier chain.
+ expectedSideHashes := map[common.Hash]bool{
+ replacementBlocks[0].Hash(): true,
+ replacementBlocks[1].Hash(): true,
+ chain[0].Hash(): true,
+ chain[1].Hash(): true,
+ chain[2].Hash(): true,
+ }
+
+ i := 0
+
+ const timeoutDura = 10 * time.Second
+ timeout := time.NewTimer(timeoutDura)
+done:
+ for {
+ select {
+ case ev := <-subs.Chan():
+ block := ev.Data.(ChainSideEvent).Block
+ if _, ok := expectedSideHashes[block.Hash()]; !ok {
+ t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
+ }
+ i++
+
+ if i == len(expectedSideHashes) {
+ timeout.Stop()
+
+ break done
+ }
+ timeout.Reset(timeoutDura)
+
+ case <-timeout.C:
+ t.Fatal("Timeout. Possibly not all blocks were triggered for sideevent")
+ }
+ }
+
+ // make sure no more events are fired
+ select {
+ case e := <-subs.Chan():
+ t.Errorf("unexpected event fired: %v", e)
+ case <-time.After(250 * time.Millisecond):
+ }
+
+}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index c62618e6c..0e1ca5fff 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -108,7 +108,7 @@ func (b *BlockGen) Number() *big.Int {
// backing transaction.
//
// AddUncheckedReceipts will cause consensus failures when used during real
-// chain processing. This is best used in conjuction with raw block insertion.
+// chain processing. This is best used in conjunction with raw block insertion.
func (b *BlockGen) AddUncheckedReceipt(receipt *types.Receipt) {
b.receipts = append(b.receipts, receipt)
}
@@ -215,7 +215,7 @@ func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
// chain. Depending on the full flag, if creates either a full block chain or a
// header only chain.
func newCanonical(n int, full bool) (ethdb.Database, *BlockChain, error) {
- // Create te new chain database
+ // Create the new chain database
db, _ := ethdb.NewMemDatabase()
evmux := &event.TypeMux{}
diff --git a/core/headerchain.go b/core/headerchain.go
new file mode 100644
index 000000000..255139dde
--- /dev/null
+++ b/core/headerchain.go
@@ -0,0 +1,464 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+package core
+
+import (
+ crand "crypto/rand"
+ "math"
+ "math/big"
+ mrand "math/rand"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/logger"
+ "github.com/ethereum/go-ethereum/logger/glog"
+ "github.com/ethereum/go-ethereum/pow"
+ "github.com/hashicorp/golang-lru"
+)
+
+// HeaderChain implements the basic block header chain logic that is shared by
+// core.BlockChain and light.LightChain. It is not usable in itself, only as
+// a part of either structure.
+// It is not thread safe either; the encapsulating chain structures should do
+// the necessary mutex locking/unlocking.
+type HeaderChain struct {
+ chainDb ethdb.Database
+ genesisHeader *types.Header
+
+ currentHeader *types.Header // Current head of the header chain (may be above the block chain!)
+ currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time)
+
+ headerCache *lru.Cache // Cache for the most recent block headers
+ tdCache *lru.Cache // Cache for the most recent block total difficulties
+
+ procInterrupt func() bool
+
+ rand *mrand.Rand
+ getValidator getHeaderValidatorFn
+}
+
+// getHeaderValidatorFn returns a HeaderValidator interface
+type getHeaderValidatorFn func() HeaderValidator
+
+// NewHeaderChain creates a new HeaderChain structure.
+// getValidator should return the parent's validator
+// procInterrupt points to the parent's interrupt semaphore
+// NOTE(review): no wg parameter is taken — the parent chain owns shutdown waiting
+func NewHeaderChain(chainDb ethdb.Database, getValidator getHeaderValidatorFn, procInterrupt func() bool) (*HeaderChain, error) {
+ headerCache, _ := lru.New(headerCacheLimit)
+ tdCache, _ := lru.New(tdCacheLimit)
+
+ // Seed a fast but crypto originating random generator
+ seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+ if err != nil {
+ return nil, err
+ }
+
+ hc := &HeaderChain{
+ chainDb: chainDb,
+ headerCache: headerCache,
+ tdCache: tdCache,
+ procInterrupt: procInterrupt,
+ rand: mrand.New(mrand.NewSource(seed.Int64())),
+ getValidator: getValidator,
+ }
+
+ hc.genesisHeader = hc.GetHeaderByNumber(0)
+ if hc.genesisHeader == nil {
+ genesisBlock, err := WriteDefaultGenesisBlock(chainDb)
+ if err != nil {
+ return nil, err
+ }
+ glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
+ hc.genesisHeader = genesisBlock.Header()
+ }
+
+ hc.currentHeader = hc.genesisHeader
+ if head := GetHeadBlockHash(chainDb); head != (common.Hash{}) {
+ if chead := hc.GetHeader(head); chead != nil {
+ hc.currentHeader = chead
+ }
+ }
+ hc.currentHeaderHash = hc.currentHeader.Hash()
+
+ return hc, nil
+}
+
+// WriteHeader writes a header into the local chain, given that its parent is
+// already known. If the total difficulty of the newly inserted header becomes
+// greater than the current known TD, the canonical chain is re-routed.
+//
+// Note: This method is not concurrent-safe with inserting blocks simultaneously
+// into the chain, as side effects caused by reorganisations cannot be emulated
+// without the real blocks. Hence, writing headers directly should only be done
+// in two scenarios: pure-header mode of operation (light clients), or properly
+// separated header/block phases (non-archive clients).
+func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, err error) {
+ // Cache some values to prevent constant recalculation
+ var (
+ hash = header.Hash()
+ number = header.Number.Uint64()
+ )
+ // Calculate the total difficulty of the header
+ ptd := hc.GetTd(header.ParentHash)
+ if ptd == nil {
+ return NonStatTy, ParentError(header.ParentHash)
+ }
+ localTd := hc.GetTd(hc.currentHeaderHash)
+ externTd := new(big.Int).Add(header.Difficulty, ptd)
+
+ // If the total difficulty is higher than our known, add it to the canonical chain
+ // Second clause in the if statement reduces the vulnerability to selfish mining.
+ // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
+ if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
+ // Delete any canonical number assignments above the new head
+ for i := number + 1; GetCanonicalHash(hc.chainDb, i) != (common.Hash{}); i++ {
+ DeleteCanonicalHash(hc.chainDb, i)
+ }
+ // Overwrite any stale canonical number assignments
+ var (
+ headHash = header.ParentHash
+ headHeader = hc.GetHeader(headHash)
+ headNumber = headHeader.Number.Uint64()
+ )
+ for GetCanonicalHash(hc.chainDb, headNumber) != headHash {
+ WriteCanonicalHash(hc.chainDb, headHash, headNumber)
+
+ headHash = headHeader.ParentHash
+ headHeader = hc.GetHeader(headHash)
+ headNumber = headHeader.Number.Uint64()
+ }
+ // Extend the canonical chain with the new header
+ if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
+ glog.Fatalf("failed to insert header number: %v", err)
+ }
+ if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
+ glog.Fatalf("failed to insert head header hash: %v", err)
+ }
+ hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)
+
+ status = CanonStatTy
+ } else {
+ status = SideStatTy
+ }
+ // Irrelevant of the canonical status, write the header itself to the database
+ if err := hc.WriteTd(hash, externTd); err != nil {
+ glog.Fatalf("failed to write header total difficulty: %v", err)
+ }
+ if err := WriteHeader(hc.chainDb, header); err != nil {
+ glog.Fatalf("failed to write header contents: %v", err)
+ }
+ hc.headerCache.Add(hash, header)
+
+ return
+}
+
+// WhCallback is a callback function for inserting individual headers.
+// A callback is used for two reasons: first, in a LightChain, status should be
+// processed and light chain events sent, while in a BlockChain this is not
+// necessary since chain events are sent after inserting blocks. Second, the
+// header writes should be protected by the parent chain mutex individually.
+type WhCallback func(*types.Header) error
+
+// InsertHeaderChain attempts to insert the given header chain into the local
+// chain, possibly creating a reorg. If an error is returned, it will return the
+// index number of the failing header as well as an error describing what went wrong.
+//
+// The verify parameter can be used to fine tune whether nonce verification
+// should be done or not. The reason behind the optional check is because some
+// of the header retrieval mechanisms already need to verify nonces, as well as
+// because nonces can be verified sparsely, not needing to check each.
+func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, writeHeader WhCallback) (int, error) {
+ // Collect some import statistics to report on
+ stats := struct{ processed, ignored int }{}
+ start := time.Now()
+
+ // Generate the list of headers that should be POW verified
+ verify := make([]bool, len(chain))
+ for i := 0; i < len(verify)/checkFreq; i++ {
+ index := i*checkFreq + hc.rand.Intn(checkFreq)
+ if index >= len(verify) {
+ index = len(verify) - 1
+ }
+ verify[index] = true
+ }
+ verify[len(verify)-1] = true // Last should always be verified to avoid junk
+
+ // Create the header verification task queue and worker functions
+ tasks := make(chan int, len(chain))
+ for i := 0; i < len(chain); i++ {
+ tasks <- i
+ }
+ close(tasks)
+
+ errs, failed := make([]error, len(tasks)), int32(0)
+ process := func(worker int) {
+ for index := range tasks {
+ header, hash := chain[index], chain[index].Hash()
+
+ // Short circuit insertion if shutting down or processing failed
+ if hc.procInterrupt() {
+ return
+ }
+ if atomic.LoadInt32(&failed) > 0 {
+ return
+ }
+ // Short circuit if the header is bad or already known
+ if BadHashes[hash] {
+ errs[index] = BadHashError(hash)
+ atomic.AddInt32(&failed, 1)
+ return
+ }
+ if hc.HasHeader(hash) {
+ continue
+ }
+ // Verify that the header honors the chain parameters
+ checkPow := verify[index]
+
+ var err error
+ if index == 0 {
+ err = hc.getValidator().ValidateHeader(header, hc.GetHeader(header.ParentHash), checkPow)
+ } else {
+ err = hc.getValidator().ValidateHeader(header, chain[index-1], checkPow)
+ }
+ if err != nil {
+ errs[index] = err
+ atomic.AddInt32(&failed, 1)
+ return
+ }
+ }
+ }
+ // Start as many worker threads as goroutines allowed
+ pending := new(sync.WaitGroup)
+ for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+ pending.Add(1)
+ go func(id int) {
+ defer pending.Done()
+ process(id)
+ }(i)
+ }
+ pending.Wait()
+
+ // If anything failed, report
+ if failed > 0 {
+ for i, err := range errs {
+ if err != nil {
+ return i, err
+ }
+ }
+ }
+ // All headers passed verification, import them into the database
+ for i, header := range chain {
+ // Short circuit insertion if shutting down
+ if hc.procInterrupt() {
+ glog.V(logger.Debug).Infoln("premature abort during header chain processing")
+ break
+ }
+ hash := header.Hash()
+
+ // If the header's already known, skip it, otherwise store
+ if hc.HasHeader(hash) {
+ stats.ignored++
+ continue
+ }
+ if err := writeHeader(header); err != nil {
+ return i, err
+ }
+ stats.processed++
+ }
+ // Report some public statistics so the user has a clue what's going on
+ first, last := chain[0], chain[len(chain)-1]
+ glog.V(logger.Info).Infof("imported %d header(s) (%d ignored) in %v. #%v [%x… / %x…]", stats.processed, stats.ignored,
+ time.Since(start), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
+
+ return 0, nil
+}
+
+// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
+// hash, fetching towards the genesis block.
+func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
+ // Get the origin header from which to fetch
+ header := hc.GetHeader(hash)
+ if header == nil {
+ return nil
+ }
+ // Iterate the headers until enough is collected or the genesis reached
+ chain := make([]common.Hash, 0, max)
+ for i := uint64(0); i < max; i++ {
+ next := header.ParentHash
+ if header = hc.GetHeader(next); header == nil {
+ break
+ }
+ chain = append(chain, next)
+ if header.Number.Cmp(common.Big0) == 0 {
+ break
+ }
+ }
+ return chain
+}
+
+// GetTd retrieves a block's total difficulty in the canonical chain from the
+// database by hash, caching it if found.
+func (hc *HeaderChain) GetTd(hash common.Hash) *big.Int {
+ // Short circuit if the td's already in the cache, retrieve otherwise
+ if cached, ok := hc.tdCache.Get(hash); ok {
+ return cached.(*big.Int)
+ }
+ td := GetTd(hc.chainDb, hash)
+ if td == nil {
+ return nil
+ }
+ // Cache the found body for next time and return
+ hc.tdCache.Add(hash, td)
+ return td
+}
+
+// WriteTd stores a block's total difficulty into the database, also caching it
+// along the way.
+func (hc *HeaderChain) WriteTd(hash common.Hash, td *big.Int) error {
+ if err := WriteTd(hc.chainDb, hash, td); err != nil {
+ return err
+ }
+ hc.tdCache.Add(hash, new(big.Int).Set(td))
+ return nil
+}
+
+// GetHeader retrieves a block header from the database by hash, caching it if
+// found.
+func (hc *HeaderChain) GetHeader(hash common.Hash) *types.Header {
+ // Short circuit if the header's already in the cache, retrieve otherwise
+ if header, ok := hc.headerCache.Get(hash); ok {
+ return header.(*types.Header)
+ }
+ header := GetHeader(hc.chainDb, hash)
+ if header == nil {
+ return nil
+ }
+ // Cache the found header for next time and return
+ hc.headerCache.Add(hash, header)
+ return header
+}
+
+// HasHeader checks if a block header is present in the database or not, caching
+// it if present.
+func (hc *HeaderChain) HasHeader(hash common.Hash) bool {
+ return hc.GetHeader(hash) != nil
+}
+
+// GetHeaderByNumber retrieves a block header from the database by number,
+// caching it (associated with its hash) if found.
+func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
+ hash := GetCanonicalHash(hc.chainDb, number)
+ if hash == (common.Hash{}) {
+ return nil
+ }
+ return hc.GetHeader(hash)
+}
+
+// CurrentHeader retrieves the current head header of the canonical chain. The
+// header is retrieved from the HeaderChain's internal cache.
+func (hc *HeaderChain) CurrentHeader() *types.Header {
+ return hc.currentHeader
+}
+
+// SetCurrentHeader sets the current head header of the canonical chain.
+func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
+ if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
+ glog.Fatalf("failed to insert head header hash: %v", err)
+ }
+ hc.currentHeader = head
+ hc.currentHeaderHash = head.Hash()
+}
+
+// DeleteCallback is a callback function that is called by SetHead before
+// each header is deleted.
+type DeleteCallback func(common.Hash)
+
+// SetHead rewinds the local chain to a new head. Everything above the new head
+// will be deleted and the new one set.
+func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
+ height := uint64(0)
+ if hc.currentHeader != nil {
+ height = hc.currentHeader.Number.Uint64()
+ }
+
+ for hc.currentHeader != nil && hc.currentHeader.Number.Uint64() > head {
+ hash := hc.currentHeader.Hash()
+ if delFn != nil {
+ delFn(hash)
+ }
+ DeleteHeader(hc.chainDb, hash)
+ DeleteTd(hc.chainDb, hash)
+ hc.currentHeader = hc.GetHeader(hc.currentHeader.ParentHash)
+ }
+ // Roll back the canonical chain numbering
+ for i := height; i > head; i-- {
+ DeleteCanonicalHash(hc.chainDb, i)
+ }
+ // Clear out any stale content from the caches
+ hc.headerCache.Purge()
+ hc.tdCache.Purge()
+
+ if hc.currentHeader == nil {
+ hc.currentHeader = hc.genesisHeader
+ }
+ hc.currentHeaderHash = hc.currentHeader.Hash()
+
+ if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil {
+ glog.Fatalf("failed to reset head header hash: %v", err)
+ }
+}
+
+// SetGenesis sets a new genesis block header for the chain
+func (hc *HeaderChain) SetGenesis(head *types.Header) {
+ hc.genesisHeader = head
+}
+
+// headerValidator is responsible for validating block headers
+//
+// headerValidator implements HeaderValidator.
+type headerValidator struct {
+ hc *HeaderChain // Canonical header chain
+ Pow pow.PoW // Proof of work used for validating
+}
+
+// NewBlockValidator returns a new block validator which is safe for re-use
+func NewHeaderValidator(chain *HeaderChain, pow pow.PoW) HeaderValidator {
+ return &headerValidator{
+ Pow: pow,
+ hc: chain,
+ }
+}
+
+// ValidateHeader validates the given header and, depending on the pow arg,
+// checks the proof of work of the given header. Returns an error if the
+// validation failed.
+func (v *headerValidator) ValidateHeader(header, parent *types.Header, checkPow bool) error {
+ // Short circuit if the parent is missing.
+ if parent == nil {
+ return ParentError(header.ParentHash)
+ }
+ // Short circuit if the header's already known or its parent missing
+ if v.hc.HasHeader(header.Hash()) {
+ return nil
+ }
+ return ValidateHeader(v.Pow, header, parent, checkPow, false)
+}
diff --git a/core/state/managed_state.go b/core/state/managed_state.go
index 4df047979..f8e2f2b87 100644
--- a/core/state/managed_state.go
+++ b/core/state/managed_state.go
@@ -82,7 +82,7 @@ func (ms *ManagedState) NewNonce(addr common.Address) uint64 {
return uint64(len(account.nonces)-1) + account.nstart
}
-// GetNonce returns the canonical nonce for the managed or unmanged account
+// GetNonce returns the canonical nonce for the managed or unmanaged account
func (ms *ManagedState) GetNonce(addr common.Address) uint64 {
ms.mu.RLock()
defer ms.mu.RUnlock()
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 7ce341c36..a45eddd0d 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -102,7 +102,7 @@ func (s *StateSuite) TestSnapshot(c *checker.C) {
data1 := common.BytesToHash([]byte{42})
data2 := common.BytesToHash([]byte{43})
- // set inital state object value
+ // set initial state object value
s.state.SetState(stateobjaddr, storageaddr, data1)
// get snapshot of current state
snapshot := s.state.Copy()
diff --git a/core/state_processor.go b/core/state_processor.go
index b9793b157..3ca36a43a 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -55,7 +55,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB) (ty
return receipts, allLogs, totalUsedGas, err
}
-// ApplyTransaction attemps to apply a transaction to the given state database
+// ApplyTransaction attempts to apply a transaction to the given state database
// and uses the input parameters for its environment.
//
// ApplyTransactions returns the generated receipts and vm logs during the
diff --git a/core/tx_pool.go b/core/tx_pool.go
index b8fb4cd35..f4e964bf7 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -60,8 +60,8 @@ type stateFn func() (*state.StateDB, error)
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
- quit chan bool // Quiting channel
- currentState stateFn // The state function which will allow us to do some pre checkes
+ quit chan bool // Quitting channel
+ currentState stateFn // The state function which will allow us to do some pre checks
pendingState *state.ManagedState
gasLimit func() *big.Int // The current gas limit function callback
minGasPrice *big.Int
@@ -357,7 +357,7 @@ func (self *TxPool) AddTransactions(txs []*types.Transaction) {
}
}
- // check and validate the queueue
+ // check and validate the queue
self.checkQueue()
}
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index 811e40111..fa1a740dc 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -331,7 +331,7 @@ func TestTransactionDropping(t *testing.T) {
// Tests that if a transaction is dropped from the current pending pool (e.g. out
// of fund), all consecutive (still valid, but not executable) transactions are
-// postponed back into the future queue to prevent broadcating them.
+// postponed back into the future queue to prevent broadcasting them.
func TestTransactionPostponing(t *testing.T) {
// Create a test account and fund it
pool, key := setupTxPool()
@@ -366,7 +366,7 @@ func TestTransactionPostponing(t *testing.T) {
if len(pool.queue[account]) != 0 {
t.Errorf("queued transaction mismatch: have %d, want %d", len(pool.queue), 0)
}
- // Reduce the balance of the account, and check that transactions are reorganized
+ // Reduce the balance of the account, and check that transactions are reorganised
state.AddBalance(account, big.NewInt(-750))
pool.resetState()
diff --git a/core/types.go b/core/types.go
index 027f628b1..022528374 100644
--- a/core/types.go
+++ b/core/types.go
@@ -38,14 +38,22 @@ import (
// ValidateHeader validates the given header and parent and returns an error
// if it failed to do so.
//
-// ValidateStack validates the given statedb and optionally the receipts and
-// gas used. The implementor should decide what to do with the given input.
+// ValidateState validates the given statedb and optionally the receipts and
+// gas used. The implementer should decide what to do with the given input.
type Validator interface {
+ HeaderValidator
ValidateBlock(block *types.Block) error
- ValidateHeader(header, parent *types.Header, checkPow bool) error
ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error
}
+// HeaderValidator is an interface for validating headers only
+//
+// ValidateHeader validates the given header and parent and returns an error
+// if it failed to do so.
+type HeaderValidator interface {
+ ValidateHeader(header, parent *types.Header, checkPow bool) error
+}
+
// Processor is an interface for processing blocks using a given initial state.
//
// Process takes the block to be processed and the statedb upon which the
diff --git a/core/types/block.go b/core/types/block.go
index 5536e0ea8..5e6a9019d 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -34,7 +34,7 @@ import (
)
// A BlockNonce is a 64-bit hash which proves (combined with the
-// mix-hash) that a suffcient amount of computation has been carried
+// mix-hash) that a sufficient amount of computation has been carried
// out on a block.
type BlockNonce [8]byte
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 37715ee53..b99d3a716 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -158,7 +158,7 @@ func (tx *Transaction) Size() common.StorageSize {
}
// From returns the address derived from the signature (V, R, S) using secp256k1
-// eliptic curve and an error if it failed deriving or upon an incorrect
+// elliptic curve and an error if it failed deriving or upon an incorrect
// signature.
//
// From Uses the homestead consensus rules to determine whether the signature is
@@ -176,7 +176,7 @@ func (tx *Transaction) From() (common.Address, error) {
}
// FromFrontier returns the address derived from the signature (V, R, S) using
-// secp256k1 eliptic curve and an error if it failed deriving or upon an
+// secp256k1 elliptic curve and an error if it failed deriving or upon an
// incorrect signature.
//
// FromFrantier uses the frontier consensus rules to determine whether the
diff --git a/core/vm/asm.go b/core/vm/asm.go
index 065d3eb97..b248838a7 100644
--- a/core/vm/asm.go
+++ b/core/vm/asm.go
@@ -23,7 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
)
-// Dissassemble dissassembles the byte code and returns the string
+// Disassemble disassembles the byte code and returns the string
// representation (human readable opcodes).
func Disassemble(script []byte) (asm []string) {
pc := new(big.Int)
diff --git a/core/vm/doc.go b/core/vm/doc.go
index debbdb35e..de7fa6021 100644
--- a/core/vm/doc.go
+++ b/core/vm/doc.go
@@ -20,7 +20,7 @@ Package vm implements the Ethereum Virtual Machine.
The vm package implements two EVMs, a byte code VM and a JIT VM. The BC
(Byte Code) VM loops over a set of bytes and executes them according to the set
of rules defined in the Ethereum yellow paper. When the BC VM is invoked it
-invokes the JIT VM in a seperate goroutine and compiles the byte code in JIT
+invokes the JIT VM in a separate goroutine and compiles the byte code in JIT
instructions.
The JIT VM, when invoked, loops around a set of pre-defined instructions until
diff --git a/core/vm/environment.go b/core/vm/environment.go
index a58e3ba2b..d5d21a45b 100644
--- a/core/vm/environment.go
+++ b/core/vm/environment.go
@@ -34,9 +34,9 @@ type Environment interface {
MakeSnapshot() Database
// Set database to previous snapshot
SetSnapshot(Database)
- // Address of the original invoker (first occurance of the VM invoker)
+ // Address of the original invoker (first occurrence of the VM invoker)
Origin() common.Address
- // The block number this VM is invoken on
+ // The block number this VM is invoked on
BlockNumber() *big.Int
// The n'th hash ago from this block number
GetHash(uint64) common.Hash
@@ -101,7 +101,7 @@ type Database interface {
IsDeleted(common.Address) bool
}
-// StructLog is emited to the Environment each cycle and lists information about the curent internal state
+// StructLog is emitted to the Environment each cycle and lists information about the current internal state
// prior to the execution of the statement.
type StructLog struct {
Pc uint64
diff --git a/core/vm/jit.go b/core/vm/jit.go
index 5404730c1..71ffcf0f6 100644
--- a/core/vm/jit.go
+++ b/core/vm/jit.go
@@ -300,7 +300,7 @@ func CompileProgram(program *Program) (err error) {
return nil
}
-// RunProgram runs the program given the enviroment and contract and returns an
+// RunProgram runs the program given the environment and contract and returns an
// error if the execution failed (non-consensus)
func RunProgram(program *Program, env Environment, contract *Contract, input []byte) ([]byte, error) {
return runProgram(program, 0, NewMemory(), newstack(), env, contract, input)
@@ -346,7 +346,7 @@ func runProgram(program *Program, pcstart uint64, mem *Memory, stack *stack, env
return nil, nil
}
-// validDest checks if the given distination is a valid one given the
+// validDest checks if the given destination is a valid one given the
// destination table of the program
func validDest(dests map[uint64]struct{}, dest *big.Int) bool {
// PC cannot go beyond len(code) and certainly can't be bigger than 64bits.
@@ -416,7 +416,7 @@ func jitCalculateGasAndSize(env Environment, contract *Contract, instr instructi
// This checks for 3 scenario's and calculates gas accordingly
// 1. From a zero-value address to a non-zero value (NEW VALUE)
// 2. From a non-zero value address to a zero-value address (DELETE)
- // 3. From a nen-zero to a non-zero (CHANGE)
+ // 3. From a non-zero to a non-zero (CHANGE)
if common.EmptyHash(val) && !common.EmptyHash(common.BigToHash(y)) {
g = params.SstoreSetGas
} else if !common.EmptyHash(val) && common.EmptyHash(common.BigToHash(y)) {
diff --git a/core/vm/jit_test.go b/core/vm/jit_test.go
index 4174c666f..19261827b 100644
--- a/core/vm/jit_test.go
+++ b/core/vm/jit_test.go
@@ -77,7 +77,7 @@ func TestCompiling(t *testing.T) {
}
if len(prog.instructions) != 1 {
- t.Error("exected 1 compiled instruction, got", len(prog.instructions))
+ t.Error("expected 1 compiled instruction, got", len(prog.instructions))
}
}
diff --git a/core/vm/jit_util.go b/core/vm/jit_util.go
index 0d3d6d701..72e9ccf8f 100644
--- a/core/vm/jit_util.go
+++ b/core/vm/jit_util.go
@@ -41,7 +41,7 @@ func Parse(code []byte) (opcodes []OpCode) {
// MatchFn searcher for match in the given input and calls matcheFn if it finds
// an appropriate match. matcherFn yields the starting position in the input.
-// MatchFn will continue to search for a match until it reacher the end of the
+// MatchFn will continue to search for a match until it reaches the end of the
// buffer or if matcherFn return false.
func MatchFn(input, match []OpCode, matcherFn func(int) bool) {
// short circuit if either input or match is empty or if the match is
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 565ce7b73..3e6057142 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -27,7 +27,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
)
-// Config is a basic type specifing certain configuration flags for running
+// Config is a basic type specifying certain configuration flags for running
// the EVM.
type Config struct {
Difficulty *big.Int
diff --git a/core/vm/vm.go b/core/vm/vm.go
index d45d136b5..95d27c64c 100644
--- a/core/vm/vm.go
+++ b/core/vm/vm.go
@@ -63,7 +63,7 @@ func (self *Vm) Run(contract *Contract, input []byte) (ret []byte, err error) {
)
if EnableJit {
// If the JIT is enabled check the status of the JIT program,
- // if it doesn't exist compile a new program in a seperate
+ // if it doesn't exist compile a new program in a separate
// goroutine or wait for compilation to finish if the JIT is
// forced.
switch GetProgramStatus(codehash) {
@@ -80,7 +80,7 @@ func (self *Vm) Run(contract *Contract, input []byte) (ret []byte, err error) {
glog.V(logger.Info).Infoln("error compiling program", err)
} else {
// create and compile the program. Compilation
- // is done in a seperate goroutine
+ // is done in a separate goroutine
program = NewProgram(contract.Code)
go func() {
err := CompileProgram(program)
@@ -103,7 +103,7 @@ func (self *Vm) Run(contract *Contract, input []byte) (ret []byte, err error) {
stack = newstack() // local stack
statedb = self.env.Db() // current state
// For optimisation reason we're using uint64 as the program counter.
- // It's theoretically possible to go above 2^64. The YP defines the PC to be uint256. Pratically much less so feasible.
+ // It's theoretically possible to go above 2^64. The YP defines the PC to be uint256. Practically much less so feasible.
pc = uint64(0) // program counter
// jump evaluates and checks whether the given jump destination is a valid one
@@ -271,7 +271,7 @@ func calculateGasAndSize(env Environment, contract *Contract, caller ContractRef
// This checks for 3 scenario's and calculates gas accordingly
// 1. From a zero-value address to a non-zero value (NEW VALUE)
// 2. From a non-zero value address to a zero-value address (DELETE)
- // 3. From a nen-zero to a non-zero (CHANGE)
+ // 3. From a non-zero to a non-zero (CHANGE)
if common.EmptyHash(val) && !common.EmptyHash(common.BigToHash(y)) {
// 0 => non 0
g = params.SstoreSetGas
diff --git a/core/vm/vm_jit.go b/core/vm/vm_jit.go
index 589c30fa8..f6e4a515b 100644
--- a/core/vm/vm_jit.go
+++ b/core/vm/vm_jit.go
@@ -138,7 +138,7 @@ func llvm2big(m *i256) *big.Int {
}
// llvm2bytesRef creates a []byte slice that references byte buffer on LLVM side (as of that not controller by GC)
-// User must asure that referenced memory is available to Go until the data is copied or not needed any more
+// User must ensure that referenced memory is available to Go until the data is copied or not needed any more
func llvm2bytesRef(data *byte, length uint64) []byte {
if length == 0 {
return nil
@@ -171,7 +171,7 @@ func (self *JitVm) Run(me, caller ContextRef, code []byte, value, gas, price *bi
// TODO: Move it to Env.Call() or sth
if Precompiled[string(me.Address())] != nil {
- // if it's address of precopiled contract
+ // if it's address of precompiled contract
// fallback to standard VM
stdVm := New(self.env)
return stdVm.Run(me, caller, code, value, gas, price, callData)
@@ -348,7 +348,7 @@ func env_create(_vm unsafe.Pointer, _gas *int64, _value unsafe.Pointer, initData
gas := big.NewInt(*_gas)
ret, suberr, ref := vm.env.Create(vm.me, nil, initData, gas, vm.price, value)
if suberr == nil {
- dataGas := big.NewInt(int64(len(ret))) // TODO: Nto the best design. env.Create can do it, it has the reference to gas counter
+ dataGas := big.NewInt(int64(len(ret))) // TODO: Not the best design. env.Create can do it, it has the reference to gas counter
dataGas.Mul(dataGas, params.CreateDataGas)
gas.Sub(gas, dataGas)
*result = hash2llvm(ref.Address())
diff --git a/core/vm_env.go b/core/vm_env.go
index db29cc32c..7b9a1a0f9 100644
--- a/core/vm_env.go
+++ b/core/vm_env.go
@@ -25,7 +25,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
)
-// GetHashFn returns a function for which the VM env can query block hashes thru
+// GetHashFn returns a function for which the VM env can query block hashes through
// up to the limit defined by the Yellow Paper and uses the given block chain
// to query for information.
func GetHashFn(ref common.Hash, chain *BlockChain) func(n uint64) common.Hash {
diff --git a/eth/api.go b/eth/api.go
index 38b67a07a..c16c3d142 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -51,12 +51,13 @@ const defaultGas = uint64(90000)
// blockByNumber is a commonly used helper function which retrieves and returns
// the block for the given block number, capable of handling two special blocks:
-// rpc.LatestBlockNumber adn rpc.PendingBlockNumber. It returns nil when no block
+// rpc.LatestBlockNumber and rpc.PendingBlockNumber. It returns nil when no block
// could be found.
func blockByNumber(m *miner.Miner, bc *core.BlockChain, blockNr rpc.BlockNumber) *types.Block {
// Pending block is only known by the miner
if blockNr == rpc.PendingBlockNumber {
- return m.PendingBlock()
+ block, _ := m.Pending()
+ return block
}
// Otherwise resolve and return the block
if blockNr == rpc.LatestBlockNumber {
@@ -67,12 +68,13 @@ func blockByNumber(m *miner.Miner, bc *core.BlockChain, blockNr rpc.BlockNumber)
// stateAndBlockByNumber is a commonly used helper function which retrieves and
// returns the state and containing block for the given block number, capable of
-// handling two special states: rpc.LatestBlockNumber adn rpc.PendingBlockNumber.
+// handling two special states: rpc.LatestBlockNumber and rpc.PendingBlockNumber.
// It returns nil when no block or state could be found.
func stateAndBlockByNumber(m *miner.Miner, bc *core.BlockChain, blockNr rpc.BlockNumber, chainDb ethdb.Database) (*state.StateDB, *types.Block, error) {
// Pending state is only known by the miner
if blockNr == rpc.PendingBlockNumber {
- return m.PendingState(), m.PendingBlock(), nil
+ block, state := m.Pending()
+ return state, block, nil
}
// Otherwise resolve the block number and return its state
block := blockByNumber(m, bc, blockNr)
@@ -90,7 +92,7 @@ type PublicEthereumAPI struct {
gpo *GasPriceOracle
}
-// NewPublicEthereumAPI creates a new Etheruem protocol API.
+// NewPublicEthereumAPI creates a new Ethereum protocol API.
func NewPublicEthereumAPI(e *Ethereum) *PublicEthereumAPI {
return &PublicEthereumAPI{e, NewGasPriceOracle(e)}
}
@@ -148,7 +150,7 @@ func (s *PublicEthereumAPI) Hashrate() *rpc.HexNumber {
return rpc.NewHexNumber(s.e.Miner().HashRate())
}
-// Syncing returns false in case the node is currently not synching with the network. It can be up to date or has not
+// Syncing returns false in case the node is currently not syncing with the network. It can be up to date or has not
// yet received the latest block headers from its pears. In case it is synchronizing:
// - startingBlock: block number this node started to synchronise from
// - currentBlock: block number this node is currently importing
@@ -600,7 +602,7 @@ func (s *PublicBlockChainAPI) GetStorageAt(address common.Address, key string, b
return state.GetState(address, common.HexToHash(key)).Hex(), nil
}
-// callmsg is the message type used for call transations.
+// callmsg is the message type used for call transactions.
type callmsg struct {
from *state.StateObject
to *common.Address
@@ -678,7 +680,7 @@ func (s *PublicBlockChainAPI) doCall(args CallArgs, blockNr rpc.BlockNumber) (st
}
// Call executes the given transaction on the state for the given block number.
-// It doesn't make and changes in the state/blockchain and is usefull to execute and retrieve values.
+// It doesn't make any changes in the state/blockchain and is useful to execute and retrieve values.
func (s *PublicBlockChainAPI) Call(args CallArgs, blockNr rpc.BlockNumber) (string, error) {
result, _, err := s.doCall(args, blockNr)
return result, err
@@ -1545,7 +1547,7 @@ func (api *PrivateDebugAPI) SetHead(number uint64) {
api.eth.BlockChain().SetHead(number)
}
-// StructLogRes stores a structured log emitted by the evm while replaying a
+// StructLogRes stores a structured log emitted by the EVM while replaying a
// transaction in debug mode
type structLogRes struct {
Pc uint64 `json:"pc"`
@@ -1558,7 +1560,7 @@ type structLogRes struct {
Storage map[string]string `json:"storage"`
}
-// TransactionExecutionRes groups all structured logs emitted by the evm
+// TransactionExecutionRes groups all structured logs emitted by the EVM
// while replaying a transaction in debug mode as well as the amount of
// gas used and the return value
type TransactionExecutionResult struct {
@@ -1614,7 +1616,7 @@ func (s *PrivateDebugAPI) doReplayTransaction(txHash common.Hash) ([]vm.StructLo
return vmenv.StructLogs(), ret, gas, nil
}
-// Executes a transaction and returns the structured logs of the evm
+// Executes a transaction and returns the structured logs of the EVM
// gathered during the execution
func (s *PrivateDebugAPI) ReplayTransaction(txHash common.Hash, stackDepth int, memorySize int, storageSize int) (*TransactionExecutionResult, error) {
@@ -1690,7 +1692,7 @@ type PublicNetAPI struct {
networkVersion int
}
-// NewPublicNetAPI creates a new net api instance.
+// NewPublicNetAPI creates a new net API instance.
func NewPublicNetAPI(net *p2p.Server, networkVersion int) *PublicNetAPI {
return &PublicNetAPI{net, networkVersion}
}
diff --git a/eth/downloader/api.go b/eth/downloader/api.go
index 6df911fee..13d0ed46e 100644
--- a/eth/downloader/api.go
+++ b/eth/downloader/api.go
@@ -20,7 +20,7 @@ import (
"github.com/ethereum/go-ethereum/rpc"
)
-// PublicDownloaderAPI provides an API which gives informatoin about the current synchronisation status.
+// PublicDownloaderAPI provides an API which gives information about the current synchronisation status.
// It offers only methods that operates on data that can be available to anyone without security risks.
type PublicDownloaderAPI struct {
d *Downloader
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 143d8bde7..f50a71cf1 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -197,7 +197,7 @@ func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, ha
// block where synchronisation started at (may have failed/suspended); the block
// or header sync is currently at; and the latest known block which the sync targets.
//
-// In addition, during the state download phase of fast synchonisation the number
+// In addition, during the state download phase of fast synchronisation the number
// of processed and the total number of known states are also returned. Otherwise
// these are zero.
func (d *Downloader) Progress() (uint64, uint64, uint64, uint64, uint64) {
@@ -280,7 +280,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
// it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
- // Mock out the synchonisation if testing
+ // Mock out the synchronisation if testing
if d.synchroniseMock != nil {
return d.synchroniseMock(id, hash)
}
@@ -534,7 +534,7 @@ func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
// findAncestor61 tries to locate the common ancestor block of the local chain and
// a remote peers blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N blocks should already get us a match.
-// In the rare scenario when we ended up on a long reorganization (i.e. none of
+// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head blocks match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
@@ -709,7 +709,7 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
}
// If no hashes were retrieved at all, the peer violated it's TD promise that it had a
// better chain compared to ours. The only exception is if it's promised blocks were
- // already imported by other means (e.g. fecher):
+ // already imported by other means (e.g. fetcher):
//
// R <remote peer>, L <local node>: Both at block 10
// R: Mine block 11, and propagate it to L
@@ -960,7 +960,7 @@ func (d *Downloader) fetchHeight(p *peer) (uint64, error) {
// findAncestor tries to locate the common ancestor link of the local chain and
// a remote peers blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N links should already get us a match.
-// In the rare scenario when we ended up on a long reorganization (i.e. none of
+// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peer) (uint64, error) {
glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
@@ -1180,7 +1180,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
}
// If no headers were retrieved at all, the peer violated it's TD promise that it had a
// better chain compared to ours. The only exception is if it's promised blocks were
- // already imported by other means (e.g. fecher):
+ // already imported by other means (e.g. fetcher):
//
// R <remote peer>, L <local node>: Both at block 10
// R: Mine block 11, and propagate it to L
@@ -1621,7 +1621,7 @@ func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) (err error)
return d.deliver(id, d.blockCh, &blockPack{id, blocks}, blockInMeter, blockDropMeter)
}
-// DeliverHeaders injects a new batch of blck headers received from a remote
+// DeliverHeaders injects a new batch of block headers received from a remote
// node into the download schedule.
func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index ff57fe167..e66a90264 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -277,7 +277,7 @@ func (dl *downloadTester) insertHeaders(headers []*types.Header, checkFreq int)
dl.lock.Lock()
defer dl.lock.Unlock()
- // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anthing in case of errors
+ // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
return 0, errors.New("unknown parent")
}
@@ -958,7 +958,7 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
}
// Tests that synchronisations behave well in multi-version protocol environments
-// and not wreak havok on other nodes in the network.
+// and not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation61(t *testing.T) { testMultiProtoSync(t, 61, FullSync) }
func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) }
func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) }
@@ -1188,7 +1188,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
// Synchronise with the valid peer and make sure sync succeeds. Since the last
// rollback should also disable fast syncing for this process, verify that we
// did a fresh full sync. Note, we can't assert anything about the receipts
- // since we won't purge the database of them, hence we can't use asserOwnChain.
+ // since we won't purge the database of them, hence we can't use assertOwnChain.
tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
if err := tester.sync("valid", nil, mode); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 80f08b68f..c4846194b 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -251,8 +251,8 @@ func (p *peer) setIdle(started time.Time, delivered int, throughput *float64, id
// Irrelevant of the scaling, make sure the peer ends up idle
defer atomic.StoreInt32(idle, 0)
- p.lock.RLock()
- defer p.lock.RUnlock()
+ p.lock.Lock()
+ defer p.lock.Unlock()
// If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum
if delivered == 0 {
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index bc9428ecf..f86bae144 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -976,7 +976,7 @@ func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(error, i
accepted, errs := 0, make([]error, 0)
process := []trie.SyncResult{}
for _, blob := range data {
- // Skip any state trie entires that were not requested
+ // Skip any state trie entries that were not requested
hash := common.BytesToHash(crypto.Keccak256(blob))
if _, ok := request.Hashes[hash]; !ok {
errs = append(errs, fmt.Errorf("non-requested state data %x", hash))
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go
index d88d91982..9300717c3 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/fetcher.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-// Package fetcher contains the block announcement based synchonisation.
+// Package fetcher contains the block announcement based synchronisation.
package fetcher
import (
@@ -34,7 +34,7 @@ import (
const (
arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
- fetchTimeout = 5 * time.Second // Maximum alloted time to return an explicitly requested block
+ fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block
maxUncleDist = 7 // Maximum allowed backward distance from the chain head
maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
hashLimit = 256 // Maximum number of unique blocks a peer may have announced
@@ -176,7 +176,7 @@ func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlo
}
}
-// Start boots up the announcement based synchoniser, accepting and processing
+// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination requested.
func (f *Fetcher) Start() {
go f.loop()
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 6cd184b80..e6a1ce3ab 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -47,7 +47,7 @@ const (
logFilterTy
)
-// PublicFilterAPI offers support to create and manage filters. This will allow externa clients to retrieve various
+// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such als blocks, transactions and logs.
type PublicFilterAPI struct {
mux *event.TypeMux
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 3ad7dd9cb..7904d7d33 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -96,6 +96,6 @@ func TestCallbacks(t *testing.T) {
select {
case <-pendingLogDone:
case <-failTimer.C:
- t.Error("pending log filter failed to trigger (timout)")
+ t.Error("pending log filter failed to trigger (timeout)")
}
}
diff --git a/eth/handler.go b/eth/handler.go
index f11a69550..2c5cae479 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -590,7 +590,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
case msg.Code == NewBlockHashesMsg:
- // Retrieve and deseralize the remote new block hashes notification
+ // Retrieve and deserialize the remote new block hashes notification
type announce struct {
Hash common.Hash
Number uint64
diff --git a/eth/handler_test.go b/eth/handler_test.go
index e5974c23c..8a0dd21b7 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -427,7 +427,7 @@ func testGetNodeData(t *testing.T, protocol int) {
acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
- // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makerts_test)
+	// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
generator := func(i int, block *core.BlockGen) {
switch i {
case 0:
@@ -518,7 +518,7 @@ func testGetReceipt(t *testing.T, protocol int) {
acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
- // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makerts_test)
+	// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
generator := func(i int, block *core.BlockGen) {
switch i {
case 0:
diff --git a/eth/metrics.go b/eth/metrics.go
index 8231a06ff..e1a89d3a9 100644
--- a/eth/metrics.go
+++ b/eth/metrics.go
@@ -72,7 +72,7 @@ type meteredMsgReadWriter struct {
}
// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the
-// metrics system is disabled, this fucntion returns the original object.
+// metrics system is disabled, this function returns the original object.
func newMeteredMsgWriter(rw p2p.MsgReadWriter) p2p.MsgReadWriter {
if !metrics.Enabled {
return rw
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index 22e524cd6..76f32561a 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -55,6 +55,7 @@ var (
memprofilerateFlag = cli.IntFlag{
Name: "memprofilerate",
Usage: "Turn on memory profiling with the given rate",
+ Value: runtime.MemProfileRate,
}
blockprofilerateFlag = cli.IntFlag{
Name: "blockprofilerate",
diff --git a/internal/debug/loudpanic.go b/internal/debug/loudpanic.go
new file mode 100644
index 000000000..572ebcefa
--- /dev/null
+++ b/internal/debug/loudpanic.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// +build go1.6
+
+package debug
+
+import "runtime/debug"
+
+// LoudPanic panics in a way that gets all goroutine stacks printed on stderr.
+func LoudPanic(x interface{}) {
+ debug.SetTraceback("all")
+ panic(x)
+}
diff --git a/internal/debug/loudpanic_fallback.go b/internal/debug/loudpanic_fallback.go
new file mode 100644
index 000000000..4ce4985da
--- /dev/null
+++ b/internal/debug/loudpanic_fallback.go
@@ -0,0 +1,24 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// +build !go1.6
+
+package debug
+
+// LoudPanic panics in a way that gets all goroutine stacks printed on stderr.
+func LoudPanic(x interface{}) {
+ panic(x)
+}
diff --git a/miner/miner.go b/miner/miner.go
index 6d4a84f1a..e52cefaab 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -164,12 +164,9 @@ func (self *Miner) SetExtra(extra []byte) error {
return nil
}
-func (self *Miner) PendingState() *state.StateDB {
- return self.worker.pendingState()
-}
-
-func (self *Miner) PendingBlock() *types.Block {
- return self.worker.pendingBlock()
+// Pending returns the currently pending block and associated state.
+func (self *Miner) Pending() (*types.Block, *state.StateDB) {
+ return self.worker.pending()
}
func (self *Miner) SetEtherbase(addr common.Address) {
diff --git a/miner/worker.go b/miner/worker.go
index f3e95cb5f..108b2d6b5 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -152,13 +152,7 @@ func (self *worker) setEtherbase(addr common.Address) {
self.coinbase = addr
}
-func (self *worker) pendingState() *state.StateDB {
- self.currentMu.Lock()
- defer self.currentMu.Unlock()
- return self.current.state
-}
-
-func (self *worker) pendingBlock() *types.Block {
+func (self *worker) pending() (*types.Block, *state.StateDB) {
self.currentMu.Lock()
defer self.currentMu.Unlock()
@@ -168,9 +162,9 @@ func (self *worker) pendingBlock() *types.Block {
self.current.txs,
nil,
self.current.receipts,
- )
+ ), self.current.state
}
- return self.current.Block
+ return self.current.Block, self.current.state
}
func (self *worker) start() {
diff --git a/p2p/nat/natupnp.go b/p2p/nat/natupnp.go
index 890a35043..804396e94 100644
--- a/p2p/nat/natupnp.go
+++ b/p2p/nat/natupnp.go
@@ -139,6 +139,7 @@ func discoverUPnP() Interface {
func discover(out chan<- *upnp, target string, matcher func(*goupnp.RootDevice, goupnp.ServiceClient) *upnp) {
devs, err := goupnp.DiscoverDevices(target)
if err != nil {
+ out <- nil
return
}
found := false