-rw-r--r--  accounts/abi/abi.go | 48
-rw-r--r--  accounts/abi/abi_test.go | 52
-rw-r--r--  accounts/abi/argument.go | 4
-rw-r--r--  accounts/abi/bind/bind.go | 20
-rw-r--r--  accounts/abi/bind/util_test.go | 2
-rw-r--r--  accounts/abi/type.go | 16
-rw-r--r--  accounts/abi/type_test.go | 24
-rw-r--r--  build/ci.go | 2
-rw-r--r--  build/update-license.go | 2
-rw-r--r--  cmd/disasm/main.go | 14
-rw-r--r--  cmd/ethtest/main.go | 7
-rw-r--r--  cmd/geth/accountcmd_test.go | 8
-rw-r--r--  cmd/geth/main.go | 1
-rw-r--r--  cmd/swarm/main.go | 8
-rw-r--r--  cmd/swarm/upload.go | 82
-rw-r--r--  cmd/utils/cmd.go | 3
-rw-r--r--  cmd/utils/flags.go | 31
-rw-r--r--  common/big_test.go | 6
-rw-r--r--  common/bytes.go | 2
-rw-r--r--  common/bytes_test.go | 4
-rw-r--r--  common/compiler/solidity.go | 32
-rw-r--r--  common/compiler/solidity_test.go | 28
-rw-r--r--  common/format.go | 2
-rw-r--r--  common/math/dist_test.go | 36
-rw-r--r--  compression/rle/read_write.go | 4
-rw-r--r--  console/bridge.go | 25
-rw-r--r--  console/console.go | 11
-rw-r--r--  console/prompter.go | 4
-rw-r--r--  contracts/chequebook/cheque.go | 4
-rw-r--r--  contracts/release/contract.sol | 4
-rw-r--r--  core/bench_test.go | 4
-rw-r--r--  core/block_validator_test.go | 9
-rw-r--r--  core/blockchain.go | 16
-rw-r--r--  core/blockchain_test.go | 34
-rw-r--r--  core/chain_makers_test.go | 6
-rw-r--r--  core/dao.go | 4
-rw-r--r--  core/database_util.go | 6
-rw-r--r--  core/database_util_test.go | 58
-rw-r--r--  core/events.go | 11
-rw-r--r--  core/state/iterator.go | 2
-rw-r--r--  core/state/iterator_test.go | 2
-rw-r--r--  core/state/managed_state_test.go | 6
-rw-r--r--  core/state/statedb.go | 29
-rw-r--r--  core/state/statedb_test.go | 4
-rw-r--r--  core/state/sync.go | 7
-rw-r--r--  core/state/sync_test.go | 18
-rw-r--r--  core/state_processor.go | 4
-rw-r--r--  core/tx_list.go | 4
-rw-r--r--  core/tx_pool.go | 4
-rw-r--r--  core/tx_pool_test.go | 10
-rw-r--r--  core/types.go | 2
-rw-r--r--  core/types/bloom9.go | 11
-rw-r--r--  core/types/log.go (renamed from core/vm/log.go) | 8
-rw-r--r--  core/types/log_test.go (renamed from core/vm/log_test.go) | 2
-rw-r--r--  core/types/receipt.go | 21
-rw-r--r--  core/types/transaction.go | 9
-rw-r--r--  core/types/transaction_signing.go | 40
-rw-r--r--  core/types/transaction_signing_test.go | 8
-rw-r--r--  core/types/transaction_test.go | 2
-rw-r--r--  core/vm/environment.go | 8
-rw-r--r--  core/vm/instructions.go | 11
-rw-r--r--  core/vm/interface.go | 3
-rw-r--r--  core/vm/jump_table.go | 260
-rw-r--r--  core/vm/noop.go | 3
-rw-r--r--  crypto/crypto.go | 4
-rw-r--r--  crypto/crypto_test.go | 2
-rw-r--r--  crypto/ecies/asn1.go | 12
-rw-r--r--  crypto/ecies/ecies.go | 7
-rw-r--r--  crypto/ecies/ecies_test.go | 6
-rw-r--r--  crypto/secp256k1/libsecp256k1/include/secp256k1_schnorr.h | 4
-rw-r--r--  crypto/secp256k1/notes.go | 2
-rw-r--r--  crypto/secp256k1/secp256.go | 2
-rw-r--r--  crypto/sha3/sha3_test.go | 2
-rw-r--r--  errs/errors.go | 22
-rw-r--r--  errs/errors_test.go | 21
-rw-r--r--  eth/api.go | 2
-rw-r--r--  eth/api_backend.go | 2
-rw-r--r--  eth/backend_test.go | 5
-rw-r--r--  eth/downloader/downloader.go | 2
-rw-r--r--  eth/downloader/downloader_test.go | 2
-rw-r--r--  eth/downloader/peer.go | 4
-rw-r--r--  eth/downloader/queue.go | 19
-rw-r--r--  eth/fetcher/fetcher_test.go | 2
-rw-r--r--  eth/filters/api.go | 19
-rw-r--r--  eth/filters/filter.go | 17
-rw-r--r--  eth/filters/filter_system.go | 25
-rw-r--r--  eth/filters/filter_system_test.go | 79
-rw-r--r--  eth/filters/filter_test.go | 35
-rw-r--r--  eth/handler.go | 30
-rw-r--r--  eth/handler_test.go | 26
-rw-r--r--  eth/helper_test.go | 2
-rw-r--r--  eth/protocol_test.go | 4
-rw-r--r--  ethclient/ethclient.go | 7
-rw-r--r--  ethdb/memory_database.go | 2
-rw-r--r--  ethstats/ethstats.go | 43
-rw-r--r--  event/event_test.go | 4
-rw-r--r--  event/filter/generic_filter.go | 2
-rw-r--r--  interfaces.go | 5
-rw-r--r--  internal/ethapi/api.go | 8
-rw-r--r--  internal/jsre/ethereum_js.go | 22
-rw-r--r--  les/api_backend.go | 2
-rw-r--r--  les/fetcher.go | 2
-rw-r--r--  les/flowcontrol/manager.go | 6
-rw-r--r--  les/handler_test.go | 14
-rw-r--r--  les/helper_test.go | 14
-rw-r--r--  les/metrics.go | 2
-rw-r--r--  les/peer.go | 2
-rw-r--r--  les/randselect_test.go | 2
-rw-r--r--  les/serverpool.go | 2
-rw-r--r--  les/sync.go | 12
-rw-r--r--  les/txrelay.go | 2
-rw-r--r--  light/lightchain.go | 2
-rw-r--r--  light/odr_test.go | 12
-rw-r--r--  light/txpool_test.go | 4
-rw-r--r--  light/vm_env.go | 3
-rw-r--r--  logger/example_test.go | 37
-rw-r--r--  logger/glog/glog.go | 2
-rw-r--r--  logger/log.go | 65
-rw-r--r--  logger/loggers.go | 149
-rw-r--r--  logger/loggers_test.go | 192
-rw-r--r--  logger/logsystem.go | 76
-rw-r--r--  logger/sys.go | 142
-rw-r--r--  logger/types.go | 381
-rw-r--r--  miner/worker.go | 20
-rw-r--r--  mobile/accounts.go | 6
-rw-r--r--  mobile/android_test.go | 2
-rw-r--r--  mobile/bind.go | 29
-rw-r--r--  mobile/ethclient.go | 5
-rw-r--r--  mobile/p2p.go | 2
-rw-r--r--  mobile/params.go | 4
-rw-r--r--  mobile/vm.go | 6
-rw-r--r--  node/api.go | 2
-rw-r--r--  node/config_test.go | 2
-rw-r--r--  node/node_example_test.go | 2
-rw-r--r--  node/node_test.go | 16
-rw-r--r--  p2p/discover/database.go | 2
-rw-r--r--  p2p/discover/database_test.go | 4
-rw-r--r--  p2p/discover/node.go | 5
-rw-r--r--  p2p/discover/table.go | 2
-rw-r--r--  p2p/discover/table_test.go | 26
-rw-r--r--  p2p/discover/udp_test.go | 2
-rw-r--r--  p2p/discv5/database.go | 2
-rw-r--r--  p2p/discv5/database_test.go | 4
-rw-r--r--  p2p/discv5/net_test.go | 26
-rw-r--r--  p2p/discv5/node.go | 5
-rw-r--r--  p2p/discv5/sim_test.go | 6
-rw-r--r--  p2p/discv5/ticket.go | 2
-rw-r--r--  p2p/discv5/topic.go | 2
-rw-r--r--  p2p/discv5/udp_test.go | 2
-rw-r--r--  p2p/nat/natpmp.go | 2
-rw-r--r--  p2p/peer_test.go | 2
-rw-r--r--  p2p/server.go | 12
-rw-r--r--  params/bootnodes.go | 31
-rw-r--r--  params/protocol_params.go | 2
-rw-r--r--  pow/dagger/dagger.go | 176
-rw-r--r--  pow/dagger/dagger_test.go | 35
-rw-r--r--  pow/ezp/pow.go | 113
-rw-r--r--  rpc/json.go | 8
-rw-r--r--  rpc/subscription.go | 2
-rw-r--r--  rpc/subscription_test.go | 2
-rw-r--r--  rpc/types.go | 2
-rw-r--r--  swarm/network/protocol.go | 7
-rw-r--r--  swarm/storage/dbstore.go | 2
-rw-r--r--  swarm/storage/dpa_test.go | 4
-rw-r--r--  swarm/storage/netstore.go | 2
-rw-r--r--  swarm/storage/types.go | 4
-rw-r--r--  tests/block_test_util.go | 4
-rw-r--r--  tests/files/BlockchainTests/TestNetwork/bcTheDaoTest.json | 6
-rw-r--r--  tests/files/ansible/test-files/docker-cpp/Dockerfile | 2
-rw-r--r--  tests/files/ansible/test-files/docker-cppjit/Dockerfile | 2
-rw-r--r--  tests/init.go | 6
-rw-r--r--  tests/state_test_util.go | 8
-rw-r--r--  tests/util.go | 4
-rw-r--r--  tests/vm_test_util.go | 9
-rw-r--r--  trie/encoding.go | 2
-rw-r--r--  trie/hasher.go | 2
-rw-r--r--  trie/iterator_test.go | 2
-rw-r--r--  trie/sync.go | 24
-rw-r--r--  trie/sync_test.go | 18
-rw-r--r--  trie/trie.go | 6
-rw-r--r--  whisper/shhapi/api.go | 6
-rw-r--r--  whisper/shhapi/api_test.go | 2
-rw-r--r--  whisper/whisperv2/envelope_test.go | 12
-rw-r--r--  whisper/whisperv2/filter.go | 7
-rw-r--r--  whisper/whisperv2/filter_test.go | 8
-rw-r--r--  whisper/whisperv2/message_test.go | 4
-rw-r--r--  whisper/whisperv2/peer.go | 2
-rw-r--r--  whisper/whisperv2/peer_test.go | 2
-rw-r--r--  whisper/whisperv2/topic_test.go | 60
-rw-r--r--  whisper/whisperv5/doc.go | 2
-rw-r--r--  whisper/whisperv5/message_test.go | 8
-rw-r--r--  whisper/whisperv5/peer.go | 2
-rw-r--r--  whisper/whisperv5/peer_test.go | 2
-rw-r--r--  whisper/whisperv5/whisper.go | 2
-rw-r--r--  whisper/whisperv5/whisper_test.go | 6
195 files changed, 1000 insertions, 2501 deletions
diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go
index c3d49da66..627a2a0c4 100644
--- a/accounts/abi/abi.go
+++ b/accounts/abi/abi.go
@@ -91,8 +91,30 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
// first we need to create a slice of the type
var refSlice reflect.Value
switch elem.T {
- case IntTy, UintTy, BoolTy: // int, uint, bool can all be of type big int.
- refSlice = reflect.ValueOf([]*big.Int(nil))
+ case IntTy, UintTy, BoolTy:
+ // create a new reference slice matching the element type
+ switch t.Type.Kind {
+ case reflect.Bool:
+ refSlice = reflect.ValueOf([]bool(nil))
+ case reflect.Uint8:
+ refSlice = reflect.ValueOf([]uint8(nil))
+ case reflect.Uint16:
+ refSlice = reflect.ValueOf([]uint16(nil))
+ case reflect.Uint32:
+ refSlice = reflect.ValueOf([]uint32(nil))
+ case reflect.Uint64:
+ refSlice = reflect.ValueOf([]uint64(nil))
+ case reflect.Int8:
+ refSlice = reflect.ValueOf([]int8(nil))
+ case reflect.Int16:
+ refSlice = reflect.ValueOf([]int16(nil))
+ case reflect.Int32:
+ refSlice = reflect.ValueOf([]int32(nil))
+ case reflect.Int64:
+ refSlice = reflect.ValueOf([]int64(nil))
+ default:
+ refSlice = reflect.ValueOf([]*big.Int(nil))
+ }
case AddressTy: // address must be of slice Address
refSlice = reflect.ValueOf([]common.Address(nil))
case HashTy: // hash must be of slice hash
@@ -147,7 +169,27 @@ func toGoSlice(i int, t Argument, output []byte) (interface{}, error) {
// set inter to the correct type (cast)
switch elem.T {
case IntTy, UintTy:
- inter = common.BytesToBig(returnOutput)
+ bigNum := common.BytesToBig(returnOutput)
+ switch t.Type.Kind {
+ case reflect.Uint8:
+ inter = uint8(bigNum.Uint64())
+ case reflect.Uint16:
+ inter = uint16(bigNum.Uint64())
+ case reflect.Uint32:
+ inter = uint32(bigNum.Uint64())
+ case reflect.Uint64:
+ inter = bigNum.Uint64()
+ case reflect.Int8:
+ inter = int8(bigNum.Int64())
+ case reflect.Int16:
+ inter = int16(bigNum.Int64())
+ case reflect.Int32:
+ inter = int32(bigNum.Int64())
+ case reflect.Int64:
+ inter = bigNum.Int64()
+ default:
+ inter = common.BytesToBig(returnOutput)
+ }
case BoolTy:
inter = common.BytesToBig(returnOutput).Uint64() > 0
case AddressTy:
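
Aside on the hunk above: the new switch narrows the decoded *big.Int into the statically sized Go integer that the ABI type's reflect.Kind calls for, falling back to *big.Int for 256-bit values. A minimal sketch of that narrowing idea (the helper name is illustrative, not part of the package):

    package main

    import (
        "fmt"
        "math/big"
        "reflect"
    )

    // narrow converts a decoded big integer into the Go value matching kind,
    // keeping *big.Int for anything wider than 64 bits (hypothetical helper).
    func narrow(n *big.Int, kind reflect.Kind) interface{} {
        switch kind {
        case reflect.Uint8:
            return uint8(n.Uint64())
        case reflect.Uint64:
            return n.Uint64()
        case reflect.Int32:
            return int32(n.Int64())
        default:
            return n
        }
    }

    func main() {
        fmt.Println(narrow(big.NewInt(42), reflect.Uint8)) // uint8(42)
    }
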
diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go
index b1bfcb012..a45bd6cc0 100644
--- a/accounts/abi/abi_test.go
+++ b/accounts/abi/abi_test.go
@@ -67,10 +67,10 @@ func TestTypeCheck(t *testing.T) {
{"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
{"uint16[3]", []uint16{1, 2, 3}, ""},
{"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
- {"address[]", []common.Address{common.Address{1}}, ""},
- {"address[1]", []common.Address{common.Address{1}}, ""},
- {"address[1]", [1]common.Address{common.Address{1}}, ""},
- {"address[2]", [1]common.Address{common.Address{1}}, "abi: cannot use [1]array as type [2]array as argument"},
+ {"address[]", []common.Address{{1}}, ""},
+ {"address[1]", []common.Address{{1}}, ""},
+ {"address[1]", [1]common.Address{{1}}, ""},
+ {"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
{"bytes32", [32]byte{}, ""},
{"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
{"bytes32", common.Hash{1}, ""},
@@ -80,7 +80,7 @@ func TestTypeCheck(t *testing.T) {
{"bytes", [2]byte{0, 1}, ""},
{"bytes", common.Hash{1}, ""},
{"string", "hello world", ""},
- {"bytes32[]", [][32]byte{[32]byte{}}, ""},
+ {"bytes32[]", [][32]byte{{}}, ""},
{"function", [24]byte{}, ""},
} {
typ, err := NewType(test.typ)
@@ -332,6 +332,30 @@ func TestUnpackSetInterfaceSlice(t *testing.T) {
}
}
+func TestUnpackSetInterfaceArrayOutput(t *testing.T) {
+ var (
+ var1 = new([1]uint32)
+ var2 = new([1]uint32)
+ )
+ out := []interface{}{var1, var2}
+ abi, err := JSON(strings.NewReader(`[{"type":"function", "name":"ints", "outputs":[{"type":"uint32[1]"}, {"type":"uint32[1]"}]}]`))
+ if err != nil {
+ t.Fatal(err)
+ }
+ marshalledReturn := append(pad([]byte{1}, 32, true), pad([]byte{2}, 32, true)...)
+ err = abi.Unpack(&out, "ints", marshalledReturn)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if *var1 != [1]uint32{1} {
+ t.Error("expected var1 to be [1], got", *var1)
+ }
+ if *var2 != [1]uint32{2} {
+ t.Error("expected var2 to be [2], got", *var2)
+ }
+}
+
func TestPack(t *testing.T) {
for i, test := range []struct {
typ string
@@ -343,8 +367,8 @@ func TestPack(t *testing.T) {
{"uint16[]", []uint16{1, 2}, formatSliceOutput([]byte{1}, []byte{2})},
{"bytes20", [20]byte{1}, pad([]byte{1}, 32, false)},
{"uint256[]", []*big.Int{big.NewInt(1), big.NewInt(2)}, formatSliceOutput([]byte{1}, []byte{2})},
- {"address[]", []common.Address{common.Address{1}, common.Address{2}}, formatSliceOutput(pad([]byte{1}, 20, false), pad([]byte{2}, 20, false))},
- {"bytes32[]", []common.Hash{common.Hash{1}, common.Hash{2}}, formatSliceOutput(pad([]byte{1}, 32, false), pad([]byte{2}, 32, false))},
+ {"address[]", []common.Address{{1}, {2}}, formatSliceOutput(pad([]byte{1}, 20, false), pad([]byte{2}, 20, false))},
+ {"bytes32[]", []common.Hash{{1}, {2}}, formatSliceOutput(pad([]byte{1}, 32, false), pad([]byte{2}, 32, false))},
{"function", [24]byte{1}, pad([]byte{1}, 32, false)},
} {
typ, err := NewType(test.typ)
@@ -458,12 +482,12 @@ func TestReader(t *testing.T) {
Uint256, _ := NewType("uint256")
exp := ABI{
Methods: map[string]Method{
- "balance": Method{
+ "balance": {
"balance", true, nil, nil,
},
- "send": Method{
+ "send": {
"send", false, []Argument{
- Argument{"amount", Uint256, false},
+ {"amount", Uint256, false},
}, nil,
},
},
@@ -562,7 +586,7 @@ func TestTestSlice(t *testing.T) {
func TestMethodSignature(t *testing.T) {
String, _ := NewType("string")
- m := Method{"foo", false, []Argument{Argument{"bar", String, false}, Argument{"baz", String, false}}, nil}
+ m := Method{"foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
exp := "foo(string,string)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
@@ -574,7 +598,7 @@ func TestMethodSignature(t *testing.T) {
}
uintt, _ := NewType("uint")
- m = Method{"foo", false, []Argument{Argument{"bar", uintt, false}}, nil}
+ m = Method{"foo", false, []Argument{{"bar", uintt, false}}, nil}
exp = "foo(uint256)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
@@ -779,8 +803,8 @@ func TestBareEvents(t *testing.T) {
"balance": {false, nil},
"anon": {true, nil},
"args": {false, []Argument{
- Argument{Name: "arg0", Type: arg0, Indexed: false},
- Argument{Name: "arg1", Type: arg1, Indexed: true},
+ {Name: "arg0", Type: arg0, Indexed: false},
+ {Name: "arg1", Type: arg1, Indexed: true},
}},
}
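
Note on the test edits above: they rely on Go's composite literal elision, where the element type inside a slice, array, or map literal may be omitted; gofmt -s prefers the shorter form. A tiny standalone illustration:

    package main

    import "fmt"

    type Point struct{ X, Y int }

    func main() {
        // Both literals are equivalent; the second elides the repeated
        // element type, which is the form gofmt -s suggests.
        verbose := []Point{Point{1, 2}, Point{3, 4}}
        elided := []Point{{1, 2}, {3, 4}}
        fmt.Println(verbose, elided)
    }
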
diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go
index 4faafdd3b..4691318ce 100644
--- a/accounts/abi/argument.go
+++ b/accounts/abi/argument.go
@@ -31,8 +31,8 @@ type Argument struct {
func (a *Argument) UnmarshalJSON(data []byte) error {
var extarg struct {
- Name string
- Type string
+ Name string
+ Type string
Indexed bool
}
err := json.Unmarshal(data, &extarg)
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index 84cf22e3c..73e95e02a 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -147,21 +147,21 @@ func bindTypeGo(kind abi.Type) string {
switch {
case strings.HasPrefix(stringKind, "address"):
- parts := regexp.MustCompile("address(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ parts := regexp.MustCompile(`address(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 2 {
return stringKind
}
return fmt.Sprintf("%scommon.Address", parts[1])
case strings.HasPrefix(stringKind, "bytes"):
- parts := regexp.MustCompile("bytes([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ parts := regexp.MustCompile(`bytes([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 3 {
return stringKind
}
return fmt.Sprintf("%s[%s]byte", parts[2], parts[1])
case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"):
- parts := regexp.MustCompile("(u)?int([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 4 {
return stringKind
}
@@ -172,7 +172,7 @@ func bindTypeGo(kind abi.Type) string {
return fmt.Sprintf("%s*big.Int", parts[3])
case strings.HasPrefix(stringKind, "bool") || strings.HasPrefix(stringKind, "string"):
- parts := regexp.MustCompile("([a-z]+)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ parts := regexp.MustCompile(`([a-z]+)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 3 {
return stringKind
}
@@ -191,7 +191,7 @@ func bindTypeJava(kind abi.Type) string {
switch {
case strings.HasPrefix(stringKind, "address"):
- parts := regexp.MustCompile("address(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ parts := regexp.MustCompile(`address(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 2 {
return stringKind
}
@@ -201,7 +201,7 @@ func bindTypeJava(kind abi.Type) string {
return fmt.Sprintf("Addresses")
case strings.HasPrefix(stringKind, "bytes"):
- parts := regexp.MustCompile("bytes([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ parts := regexp.MustCompile(`bytes([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 3 {
return stringKind
}
@@ -211,7 +211,7 @@ func bindTypeJava(kind abi.Type) string {
return "byte[]"
case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"):
- parts := regexp.MustCompile("(u)?int([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 4 {
return stringKind
}
@@ -230,7 +230,7 @@ func bindTypeJava(kind abi.Type) string {
return fmt.Sprintf("BigInts")
case strings.HasPrefix(stringKind, "bool"):
- parts := regexp.MustCompile("bool(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ parts := regexp.MustCompile(`bool(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 2 {
return stringKind
}
@@ -240,7 +240,7 @@ func bindTypeJava(kind abi.Type) string {
return fmt.Sprintf("bool[]")
case strings.HasPrefix(stringKind, "string"):
- parts := regexp.MustCompile("string(\\[[0-9]*\\])?").FindStringSubmatch(stringKind)
+ parts := regexp.MustCompile(`string(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 2 {
return stringKind
}
@@ -278,7 +278,7 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
case "bool[]":
return "Bools"
case "BigInt":
- parts := regexp.MustCompile("(u)?int([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(solKind.String())
+ parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(solKind.String())
if len(parts) != 4 {
return javaKind
}
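
The regexp changes in bind.go only switch interpreted string literals to raw (backquoted) ones, so backslashes no longer need doubling; the compiled patterns are unchanged. For example:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        interpreted := regexp.MustCompile("address(\\[[0-9]*\\])?")
        raw := regexp.MustCompile(`address(\[[0-9]*\])?`)
        fmt.Println(interpreted.String() == raw.String()) // true
    }
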
diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go
index d3ed02575..f31dbfc29 100644
--- a/accounts/abi/bind/util_test.go
+++ b/accounts/abi/bind/util_test.go
@@ -60,7 +60,7 @@ func TestWaitDeployed(t *testing.T) {
// Create the transaction.
tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code))
- tx, _ = tx.SignECDSA(types.HomesteadSigner{}, testKey)
+ tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
// Wait for it to get mined in the background.
var (
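
The one-line change above tracks the signing API: transactions are now signed through the package-level types.SignTx helper with an explicit Signer, rather than through a method on the transaction. A sketch of the new call site, assuming the core/types constructors of this revision (gas fields still *big.Int):

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/crypto"
    )

    func main() {
        key, _ := crypto.GenerateKey()
        tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), big.NewInt(21000), big.NewInt(1), nil)
        // SignTx takes the transaction, a Signer implementation and the private key.
        signed, err := types.SignTx(tx, types.HomesteadSigner{}, key)
        fmt.Println(signed.Hash(), err)
    }
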
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index 7af7ff386..ed3e33f39 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -65,7 +65,7 @@ var (
// string int uint fixed
// string32 int8 uint8 uint[]
// address int256 uint256 fixed128x128[2]
- fullTypeRegex = regexp.MustCompile("([a-zA-Z0-9]+)(\\[([0-9]*)\\])?")
+ fullTypeRegex = regexp.MustCompile(`([a-zA-Z0-9]+)(\[([0-9]*)\])?`)
// typeRegex parses the abi sub types
typeRegex = regexp.MustCompile("([a-zA-Z]+)(([0-9]+)(x([0-9]+))?)?")
)
@@ -91,7 +91,12 @@ func NewType(t string) (typ Type, err error) {
}
typ.Elem = &sliceType
typ.stringKind = sliceType.stringKind + t[len(res[1]):]
- return typ, nil
+ // Although we know that this is an array, we cannot return
+ // as we don't know the type of the element; however, if it
+ // is still an array, then don't determine the type.
+ if typ.Elem.IsArray || typ.Elem.IsSlice {
+ return typ, nil
+ }
}
// parse the type and size of the abi-type.
@@ -112,7 +117,12 @@ func NewType(t string) (typ Type, err error) {
varSize = 256
t += "256"
}
- typ.stringKind = t
+
+ // only set stringKind if not array or slice, as for those,
+ // the correct string type has been set
+ if !(typ.IsArray || typ.IsSlice) {
+ typ.stringKind = t
+ }
switch varType {
case "int":
diff --git a/accounts/abi/type_test.go b/accounts/abi/type_test.go
index bf776cf09..1557c2a41 100644
--- a/accounts/abi/type_test.go
+++ b/accounts/abi/type_test.go
@@ -34,17 +34,17 @@ func TestTypeRegexp(t *testing.T) {
{"int", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}},
{"int8", Type{Kind: reflect.Int8, Type: big_t, Size: 8, T: IntTy, stringKind: "int8"}},
{"int256", Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}},
- {"int[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
- {"int[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
- {"int32[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
- {"int32[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
+ {"int[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
+ {"int[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, Elem: &Type{Kind: reflect.Ptr, Type: big_t, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
+ {"int32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
+ {"int32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, Elem: &Type{Kind: reflect.Int32, Type: big_t, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
{"uint", Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}},
{"uint8", Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}},
{"uint256", Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}},
- {"uint[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
- {"uint[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
- {"uint32[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint32, Type: big_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
- {"uint32[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{Kind: reflect.Uint32, Type: big_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
+ {"uint[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
+ {"uint[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, Elem: &Type{Kind: reflect.Ptr, Type: ubig_t, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
+ {"uint32[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.Uint32, Type: ubig_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: big_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
+ {"uint32[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.Uint32, Type: ubig_t, Size: 32, T: UintTy, Elem: &Type{Kind: reflect.Uint32, Type: big_t, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
{"bytes", Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}},
{"bytes32", Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}},
{"bytes[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
@@ -52,11 +52,11 @@ func TestTypeRegexp(t *testing.T) {
{"bytes32[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
{"bytes32[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{IsArray: true, SliceSize: 32, Elem: &Type{Kind: reflect.Uint8, Type: ubig_t, Size: 8, T: UintTy, stringKind: "uint8"}, T: FixedBytesTy, stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
{"string", Type{Kind: reflect.String, Size: -1, T: StringTy, stringKind: "string"}},
- {"string[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.String, Size: -1, T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
- {"string[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{Kind: reflect.String, Size: -1, T: StringTy, stringKind: "string"}, stringKind: "string[2]"}},
+ {"string[]", Type{IsSlice: true, SliceSize: -1, Kind: reflect.String, T: StringTy, Size: -1, Elem: &Type{Kind: reflect.String, T: StringTy, Size: -1, stringKind: "string"}, stringKind: "string[]"}},
+ {"string[2]", Type{IsArray: true, SliceSize: 2, Kind: reflect.String, T: StringTy, Size: -1, Elem: &Type{Kind: reflect.String, T: StringTy, Size: -1, stringKind: "string"}, stringKind: "string[2]"}},
{"address", Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}},
- {"address[]", Type{IsSlice: true, SliceSize: -1, Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
- {"address[2]", Type{IsArray: true, SliceSize: 2, Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
+ {"address[]", Type{IsSlice: true, SliceSize: -1,Kind: reflect.Array, Type:address_t, T: AddressTy, Size:20, Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
+ {"address[2]", Type{IsArray: true, SliceSize: 2,Kind: reflect.Array, Type:address_t, T: AddressTy, Size:20, Elem: &Type{Kind: reflect.Array, Type: address_t, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
// TODO when fixed types are implemented properly
// {"fixed", Type{}},
diff --git a/build/ci.go b/build/ci.go
index 593fcd151..d530c24ca 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -195,7 +195,7 @@ func doInstall(cmdline []string) {
if err != nil {
log.Fatal(err)
}
- for name, _ := range pkgs {
+ for name := range pkgs {
if name == "main" {
gobuild := goToolArch(*arch, "build", buildFlags(env)...)
gobuild.Args = append(gobuild.Args, "-v")
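
Minor cleanup above: when only the key is needed, "for name := range pkgs" replaces the redundant "for name, _ := range pkgs"; gofmt -s makes the same simplification. For instance:

    package main

    import "fmt"

    func main() {
        pkgs := map[string]bool{"main": true, "utils": false}
        // Iterate keys only; the blank identifier for the value is dropped.
        for name := range pkgs {
            fmt.Println(name)
        }
    }
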
diff --git a/build/update-license.go b/build/update-license.go
index e0c273def..f3335b119 100644
--- a/build/update-license.go
+++ b/build/update-license.go
@@ -294,7 +294,7 @@ func getInfo(files <-chan string, out chan<- *info, wg *sync.WaitGroup) {
wg.Done()
}
-// fileInfo finds the lowest year in which the given file was commited.
+// fileInfo finds the lowest year in which the given file was committed.
func fileInfo(file string) (*info, error) {
info := &info{file: file, Year: int64(time.Now().Year())}
cmd := exec.Command("git", "log", "--follow", "--find-renames=80", "--find-copies=80", "--pretty=format:%ai", "--", file)
diff --git a/cmd/disasm/main.go b/cmd/disasm/main.go
index d792e8ee5..e6a9a6676 100644
--- a/cmd/disasm/main.go
+++ b/cmd/disasm/main.go
@@ -18,10 +18,10 @@
package main
import (
+ "encoding/hex"
"fmt"
"io/ioutil"
"os"
- "encoding/hex"
"strings"
"github.com/ethereum/go-ethereum/core/vm"
@@ -42,15 +42,19 @@ func main() {
for pc := uint64(0); pc < uint64(len(code)); pc++ {
op := vm.OpCode(code[pc])
- fmt.Printf("%-5d %v", pc, op)
switch op {
case vm.PUSH1, vm.PUSH2, vm.PUSH3, vm.PUSH4, vm.PUSH5, vm.PUSH6, vm.PUSH7, vm.PUSH8, vm.PUSH9, vm.PUSH10, vm.PUSH11, vm.PUSH12, vm.PUSH13, vm.PUSH14, vm.PUSH15, vm.PUSH16, vm.PUSH17, vm.PUSH18, vm.PUSH19, vm.PUSH20, vm.PUSH21, vm.PUSH22, vm.PUSH23, vm.PUSH24, vm.PUSH25, vm.PUSH26, vm.PUSH27, vm.PUSH28, vm.PUSH29, vm.PUSH30, vm.PUSH31, vm.PUSH32:
a := uint64(op) - uint64(vm.PUSH1) + 1
- fmt.Printf(" => %x", code[pc+1:pc+1+a])
-
+ u := pc + 1 + a
+ if uint64(len(code)) <= pc || uint64(len(code)) < u {
+ fmt.Printf("Error: incomplete push instruction at %v\n", pc)
+ return
+ }
+ fmt.Printf("%-5d %v => %x\n", pc, op, code[pc+1:u])
pc += a
+ default:
+ fmt.Printf("%-5d %v\n", pc, op)
}
- fmt.Println()
}
}
diff --git a/cmd/ethtest/main.go b/cmd/ethtest/main.go
index 7ce663dc0..14b839579 100644
--- a/cmd/ethtest/main.go
+++ b/cmd/ethtest/main.go
@@ -88,12 +88,7 @@ func runTestWithReader(test string, r io.Reader) error {
default:
err = fmt.Errorf("Invalid test type specified: %v", test)
}
-
- if err != nil {
- return err
- }
-
- return nil
+ return err
}
func getFiles(path string) ([]string, error) {
diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go
index b6abde6d8..113df983e 100644
--- a/cmd/geth/accountcmd_test.go
+++ b/cmd/geth/accountcmd_test.go
@@ -148,7 +148,7 @@ Passphrase: {{.InputLine "foobar"}}
"Unlocked account f466859ead1932d743d622cb74fc058882e8648a",
}
for _, m := range wantMessages {
- if strings.Index(geth.stderrText(), m) == -1 {
+ if !strings.Contains(geth.stderrText(), m) {
t.Errorf("stderr text does not contain %q", m)
}
}
@@ -193,7 +193,7 @@ Passphrase: {{.InputLine "foobar"}}
"Unlocked account 289d485d9771714cce91d3393d764e1311907acc",
}
for _, m := range wantMessages {
- if strings.Index(geth.stderrText(), m) == -1 {
+ if !strings.Contains(geth.stderrText(), m) {
t.Errorf("stderr text does not contain %q", m)
}
}
@@ -212,7 +212,7 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
"Unlocked account 289d485d9771714cce91d3393d764e1311907acc",
}
for _, m := range wantMessages {
- if strings.Index(geth.stderrText(), m) == -1 {
+ if !strings.Contains(geth.stderrText(), m) {
t.Errorf("stderr text does not contain %q", m)
}
}
@@ -260,7 +260,7 @@ In order to avoid this warning, you need to remove the following duplicate key f
"Unlocked account f466859ead1932d743d622cb74fc058882e8648a",
}
for _, m := range wantMessages {
- if strings.Index(geth.stderrText(), m) == -1 {
+ if !strings.Contains(geth.stderrText(), m) {
t.Errorf("stderr text does not contain %q", m)
}
}
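
The repeated test change above swaps an index comparison for strings.Contains, which states the intent directly. The two checks are equivalent:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        text := "Unlocked account f466859ead1932d743d622cb74fc058882e8648a"
        fmt.Println(strings.Index(text, "Unlocked") != -1) // old style
        fmt.Println(strings.Contains(text, "Unlocked"))    // new style
    }
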
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 332e1ae8d..766e49f49 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -168,7 +168,6 @@ func init() {
}
app.After = func(ctx *cli.Context) error {
- logger.Flush()
debug.Exit()
console.Stdin.Close() // Resets terminal mode.
return nil
diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go
index 954ad3b13..87e21fb7f 100644
--- a/cmd/swarm/main.go
+++ b/cmd/swarm/main.go
@@ -114,7 +114,7 @@ var (
}
CorsStringFlag = cli.StringFlag{
Name: "corsdomain",
- Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied seperated by a ',')",
+ Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
}
)
@@ -129,7 +129,7 @@ func init() {
app.HideVersion = true // we have a command to print the version
app.Copyright = "Copyright 2013-2016 The go-ethereum Authors"
app.Commands = []cli.Command{
- cli.Command{
+ {
Action: version,
Name: "version",
Usage: "Print version numbers",
@@ -138,7 +138,7 @@ func init() {
The output of this command is supposed to be machine-readable.
`,
},
- cli.Command{
+ {
Action: upload,
Name: "up",
Usage: "upload a file or directory to swarm using the HTTP API",
@@ -147,7 +147,7 @@ The output of this command is supposed to be machine-readable.
"upload a file or directory to swarm using the HTTP API and prints the root hash",
`,
},
- cli.Command{
+ {
Action: hash,
Name: "hash",
Usage: "print the swarm hash of a file or directory",
diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go
index d048bbc40..d8039d45b 100644
--- a/cmd/swarm/upload.go
+++ b/cmd/swarm/upload.go
@@ -50,8 +50,6 @@ func upload(ctx *cli.Context) {
var (
file = args[0]
client = &client{api: bzzapi}
- mroot manifest
- entry manifestEntry
)
fi, err := os.Stat(expandPath(file))
if err != nil {
@@ -61,14 +59,21 @@ func upload(ctx *cli.Context) {
if !recursive {
log.Fatal("argument is a directory and recursive upload is disabled")
}
- mroot, err = client.uploadDirectory(file, defaultPath)
- } else {
- entry, err = client.uploadFile(file, fi)
- mroot = manifest{[]manifestEntry{entry}}
+ if !wantManifest {
+ log.Fatal("manifest is required for directory uploads")
+ }
+ mhash, err := client.uploadDirectory(file, defaultPath)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(mhash)
+ return
}
+ entry, err := client.uploadFile(file, fi)
if err != nil {
log.Fatalln("upload failed:", err)
}
+ mroot := manifest{[]manifestEntry{entry}}
if !wantManifest {
// Print the manifest. This is the only output to stdout.
mrootJSON, _ := json.MarshalIndent(mroot, "", " ")
@@ -123,43 +128,43 @@ type manifest struct {
Entries []manifestEntry `json:"entries,omitempty"`
}
-func (c *client) uploadFile(file string, fi os.FileInfo) (manifestEntry, error) {
- hash, err := c.uploadFileContent(file, fi)
- m := manifestEntry{
- Hash: hash,
- ContentType: mime.TypeByExtension(filepath.Ext(fi.Name())),
+func (c *client) uploadDirectory(dir string, defaultPath string) (string, error) {
+ mhash, err := c.postRaw("application/json", 2, ioutil.NopCloser(bytes.NewReader([]byte("{}"))))
+ if err != nil {
+ return "", fmt.Errorf("failed to upload empty manifest")
}
- return m, err
-}
-
-func (c *client) uploadDirectory(dir string, defaultPath string) (manifest, error) {
- dirm := manifest{}
if len(defaultPath) > 0 {
fi, err := os.Stat(defaultPath)
if err != nil {
- log.Fatal(err)
+ return "", err
}
- entry, err := c.uploadFile(defaultPath, fi)
+ mhash, err = c.uploadToManifest(mhash, "", defaultPath, fi)
if err != nil {
- log.Fatal(err)
+ return "", err
}
- entry.Path = ""
- dirm.Entries = append(dirm.Entries, entry)
}
prefix := filepath.ToSlash(filepath.Clean(dir)) + "/"
- err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
+ err = filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
if err != nil || fi.IsDir() {
return err
}
if !strings.HasPrefix(path, dir) {
return fmt.Errorf("path %s outside directory %s", path, dir)
}
- entry, err := c.uploadFile(path, fi)
- entry.Path = strings.TrimPrefix(filepath.ToSlash(filepath.Clean(path)), prefix)
- dirm.Entries = append(dirm.Entries, entry)
+ uripath := strings.TrimPrefix(filepath.ToSlash(filepath.Clean(path)), prefix)
+ mhash, err = c.uploadToManifest(mhash, uripath, path, fi)
return err
})
- return dirm, err
+ return mhash, err
+}
+
+func (c *client) uploadFile(file string, fi os.FileInfo) (manifestEntry, error) {
+ hash, err := c.uploadFileContent(file, fi)
+ m := manifestEntry{
+ Hash: hash,
+ ContentType: mime.TypeByExtension(filepath.Ext(fi.Name())),
+ }
+ return m, err
}
func (c *client) uploadFileContent(file string, fi os.FileInfo) (string, error) {
@@ -181,6 +186,31 @@ func (c *client) uploadManifest(m manifest) (string, error) {
return c.postRaw("application/json", int64(len(jsm)), ioutil.NopCloser(bytes.NewReader(jsm)))
}
+func (c *client) uploadToManifest(mhash string, path string, fpath string, fi os.FileInfo) (string, error) {
+ fd, err := os.Open(fpath)
+ if err != nil {
+ return "", err
+ }
+ defer fd.Close()
+ log.Printf("uploading file %s (%d bytes) and adding path %v", fpath, fi.Size(), path)
+ req, err := http.NewRequest("PUT", c.api+"/bzz:/"+mhash+"/"+path, fd)
+ if err != nil {
+ return "", err
+ }
+ req.Header.Set("content-type", mime.TypeByExtension(filepath.Ext(fi.Name())))
+ req.ContentLength = fi.Size()
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode >= 400 {
+ return "", fmt.Errorf("bad status: %s", resp.Status)
+ }
+ content, err := ioutil.ReadAll(resp.Body)
+ return string(content), err
+}
+
func (c *client) postRaw(mimetype string, size int64, body io.ReadCloser) (string, error) {
req, err := http.NewRequest("POST", c.api+"/bzzr:/", body)
if err != nil {
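
For context, the new uploadToManifest helper streams a file to the server with an HTTP PUT, taking the content type and length from the file itself. A generic, self-contained sketch of that pattern with net/http (the URL below is only an example, not the swarm API):

    package main

    import (
        "fmt"
        "net/http"
        "os"
    )

    // putFile uploads the contents of path to url via PUT and returns the
    // response status code.
    func putFile(url, path, contentType string) (int, error) {
        fd, err := os.Open(path)
        if err != nil {
            return 0, err
        }
        defer fd.Close()

        fi, err := fd.Stat()
        if err != nil {
            return 0, err
        }
        req, err := http.NewRequest("PUT", url, fd)
        if err != nil {
            return 0, err
        }
        req.Header.Set("Content-Type", contentType)
        req.ContentLength = fi.Size()

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return 0, err
        }
        defer resp.Body.Close()
        return resp.StatusCode, nil
    }

    func main() {
        code, err := putFile("http://localhost:8500/bzz:/manifesthash/index.html", "index.html", "text/html")
        fmt.Println(code, err)
    }
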
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
index a56507e4d..8666f3775 100644
--- a/cmd/utils/cmd.go
+++ b/cmd/utils/cmd.go
@@ -67,7 +67,6 @@ func Fatalf(format string, args ...interface{}) {
}
}
fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
- logger.Flush()
os.Exit(1)
}
@@ -95,7 +94,7 @@ func StartNode(stack *node.Node) {
func FormatTransactionData(data string) []byte {
d := common.StringToByteFunc(data, func(s string) (ret []byte) {
- slice := regexp.MustCompile("\\n|\\s").Split(s, 1000000000)
+ slice := regexp.MustCompile(`\n|\s`).Split(s, 1000000000)
for _, dataItem := range slice {
d := common.FormatData(dataItem)
ret = append(ret, d...)
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index c5f38fe93..18745e557 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -337,10 +337,10 @@ var (
Usage: "Network listening port",
Value: 30303,
}
- BootnodesFlag = cli.StringFlag{
+ BootnodesFlag = cli.StringSliceFlag{
Name: "bootnodes",
Usage: "Comma separated enode URLs for P2P discovery bootstrap",
- Value: "",
+ Value: nil,
}
NodeKeyFileFlag = cli.StringFlag{
Name: "nodekey",
@@ -485,17 +485,15 @@ func makeNodeUserIdent(ctx *cli.Context) string {
// MakeBootstrapNodes creates a list of bootstrap nodes from the command line
// flags, reverting to pre-configured ones if none have been specified.
func MakeBootstrapNodes(ctx *cli.Context) []*discover.Node {
- // Return pre-configured nodes if none were manually requested
- if !ctx.GlobalIsSet(BootnodesFlag.Name) {
- if ctx.GlobalBool(TestNetFlag.Name) {
- return params.TestnetBootnodes
- }
- return params.MainnetBootnodes
+ urls := params.MainnetBootnodes
+ if ctx.GlobalIsSet(BootnodesFlag.Name) {
+ urls = ctx.GlobalStringSlice(BootnodesFlag.Name)
+ } else if ctx.GlobalBool(TestNetFlag.Name) {
+ urls = params.TestnetBootnodes
}
- // Otherwise parse and use the CLI bootstrap nodes
- bootnodes := []*discover.Node{}
- for _, url := range strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",") {
+ bootnodes := make([]*discover.Node, 0, len(urls))
+ for _, url := range urls {
node, err := discover.ParseNode(url)
if err != nil {
glog.V(logger.Error).Infof("Bootstrap URL %s: %v\n", url, err)
@@ -509,14 +507,13 @@ func MakeBootstrapNodes(ctx *cli.Context) []*discover.Node {
// MakeBootstrapNodesV5 creates a list of bootstrap nodes from the command line
// flags, reverting to pre-configured ones if none have been specified.
func MakeBootstrapNodesV5(ctx *cli.Context) []*discv5.Node {
- // Return pre-configured nodes if none were manually requested
- if !ctx.GlobalIsSet(BootnodesFlag.Name) {
- return params.DiscoveryV5Bootnodes
+ urls := params.DiscoveryV5Bootnodes
+ if ctx.GlobalIsSet(BootnodesFlag.Name) {
+ urls = ctx.GlobalStringSlice(BootnodesFlag.Name)
}
- // Otherwise parse and use the CLI bootstrap nodes
- bootnodes := []*discv5.Node{}
- for _, url := range strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",") {
+ bootnodes := make([]*discv5.Node, 0, len(urls))
+ for _, url := range urls {
node, err := discv5.ParseNode(url)
if err != nil {
glog.V(logger.Error).Infof("Bootstrap URL %s: %v\n", url, err)
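
The rewritten helpers take the URL list either from the bootnodes flag or from the built-in defaults, preallocate the result, and keep only the entries that parse, logging the rest. A generic sketch of that filter-while-parsing pattern (net/url stands in for discover.ParseNode here):

    package main

    import (
        "fmt"
        "log"
        "net/url"
    )

    func main() {
        urls := []string{"enode://0123@10.0.0.1:30303", "::not-a-url"}
        nodes := make([]*url.URL, 0, len(urls)) // capacity for the best case
        for _, raw := range urls {
            u, err := url.Parse(raw)
            if err != nil {
                log.Printf("Bootstrap URL %s: %v", raw, err)
                continue
            }
            nodes = append(nodes, u)
        }
        fmt.Println(len(nodes), "valid bootnodes")
    }
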
diff --git a/common/big_test.go b/common/big_test.go
index 1eb0c0c1f..4d04a8db3 100644
--- a/common/big_test.go
+++ b/common/big_test.go
@@ -27,7 +27,7 @@ func TestMisc(t *testing.T) {
c := []byte{1, 2, 3, 4}
z := BitTest(a, 1)
- if z != true {
+ if !z {
t.Error("Expected true got", z)
}
@@ -79,11 +79,11 @@ func TestBigCopy(t *testing.T) {
z := BigToBytes(c, 16)
zbytes := []byte{232, 212, 165, 16, 0}
- if bytes.Compare(y, ybytes) != 0 {
+ if !bytes.Equal(y, ybytes) {
t.Error("Got", ybytes)
}
- if bytes.Compare(z, zbytes) != 0 {
+ if !bytes.Equal(z, zbytes) {
t.Error("Got", zbytes)
}
}
diff --git a/common/bytes.go b/common/bytes.go
index b9fb3b2da..cbceea8b5 100644
--- a/common/bytes.go
+++ b/common/bytes.go
@@ -143,7 +143,7 @@ func Hex2BytesFixed(str string, flen int) []byte {
return h
} else {
if len(h) > flen {
- return h[len(h)-flen : len(h)]
+ return h[len(h)-flen:]
} else {
hh := make([]byte, flen)
copy(hh[flen-len(h):flen], h[:])
diff --git a/common/bytes_test.go b/common/bytes_test.go
index 2e5208477..98d402c48 100644
--- a/common/bytes_test.go
+++ b/common/bytes_test.go
@@ -181,7 +181,7 @@ func TestFromHex(t *testing.T) {
input := "0x01"
expected := []byte{1}
result := FromHex(input)
- if bytes.Compare(expected, result) != 0 {
+ if !bytes.Equal(expected, result) {
t.Errorf("Expected % x got % x", expected, result)
}
}
@@ -190,7 +190,7 @@ func TestFromHexOddLength(t *testing.T) {
input := "0x1"
expected := []byte{1}
result := FromHex(input)
- if bytes.Compare(expected, result) != 0 {
+ if !bytes.Equal(expected, result) {
t.Errorf("Expected % x got % x", expected, result)
}
}
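
These test tweaks replace three-way bytes.Compare calls with bytes.Equal where only equality matters; the results are identical, but the intent is clearer:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        a, b := []byte{1, 2, 3}, []byte{1, 2, 3}
        fmt.Println(bytes.Compare(a, b) == 0) // old style
        fmt.Println(bytes.Equal(a, b))        // new style
    }
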
diff --git a/common/compiler/solidity.go b/common/compiler/solidity.go
index b682107d9..d27bddd9f 100644
--- a/common/compiler/solidity.go
+++ b/common/compiler/solidity.go
@@ -22,9 +22,7 @@ import (
"encoding/json"
"errors"
"fmt"
- "io"
"io/ioutil"
- "os"
"os/exec"
"regexp"
"strings"
@@ -34,7 +32,7 @@ import (
)
var (
- versionRegexp = regexp.MustCompile("[0-9]+\\.[0-9]+\\.[0-9]+")
+ versionRegexp = regexp.MustCompile(`[0-9]+\.[0-9]+\.[0-9]+`)
solcParams = []string{
"--combined-json", "bin,abi,userdoc,devdoc",
"--add-std", // include standard lib contracts
@@ -96,27 +94,16 @@ func CompileSolidityString(solc, source string) (map[string]*Contract, error) {
if solc == "" {
solc = "solc"
}
- // Write source to a temporary file. Compiling stdin used to be supported
- // but seems to produce an exception with solc 0.3.5.
- infile, err := ioutil.TempFile("", "geth-compile-solidity")
- if err != nil {
- return nil, err
- }
- defer os.Remove(infile.Name())
- if _, err := io.WriteString(infile, source); err != nil {
- return nil, err
- }
- if err := infile.Close(); err != nil {
- return nil, err
- }
-
- return CompileSolidity(solc, infile.Name())
+ args := append(solcParams, "--")
+ cmd := exec.Command(solc, append(args, "-")...)
+ cmd.Stdin = strings.NewReader(source)
+ return runsolc(cmd, source)
}
// CompileSolidity compiles all given Solidity source files.
func CompileSolidity(solc string, sourcefiles ...string) (map[string]*Contract, error) {
if len(sourcefiles) == 0 {
- return nil, errors.New("solc: no source ")
+ return nil, errors.New("solc: no source files")
}
source, err := slurpFiles(sourcefiles)
if err != nil {
@@ -125,10 +112,13 @@ func CompileSolidity(solc string, sourcefiles ...string) (map[string]*Contract,
if solc == "" {
solc = "solc"
}
-
- var stderr, stdout bytes.Buffer
args := append(solcParams, "--")
cmd := exec.Command(solc, append(args, sourcefiles...)...)
+ return runsolc(cmd, source)
+}
+
+func runsolc(cmd *exec.Cmd, source string) (map[string]*Contract, error) {
+ var stderr, stdout bytes.Buffer
cmd.Stderr = &stderr
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
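
The compiler change above pipes the source straight into the child process instead of writing it to a temporary file first. A minimal sketch of running an external command with its stdin fed from a string, using cat so the example runs without solc installed:

    package main

    import (
        "bytes"
        "fmt"
        "os/exec"
        "strings"
    )

    func main() {
        cmd := exec.Command("cat", "-")
        cmd.Stdin = strings.NewReader("contract test {}")

        var stdout, stderr bytes.Buffer
        cmd.Stdout = &stdout
        cmd.Stderr = &stderr
        if err := cmd.Run(); err != nil {
            fmt.Println("run failed:", err, stderr.String())
            return
        }
        fmt.Print(stdout.String())
    }
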
diff --git a/common/compiler/solidity_test.go b/common/compiler/solidity_test.go
index e4d96bd01..8ba9e55d0 100644
--- a/common/compiler/solidity_test.go
+++ b/common/compiler/solidity_test.go
@@ -20,6 +20,7 @@ import (
"encoding/json"
"io/ioutil"
"os"
+ "os/exec"
"path"
"testing"
@@ -27,8 +28,7 @@ import (
)
const (
- supportedSolcVersion = "0.3.5"
- testSource = `
+ testSource = `
contract test {
/// @notice Will multiply ` + "`a`" + ` by 7.
function multiply(uint a) returns(uint d) {
@@ -36,23 +36,18 @@ contract test {
}
}
`
- testCode = "0x6060604052602a8060106000396000f3606060405260e060020a6000350463c6888fa18114601a575b005b6007600435026060908152602090f3"
testInfo = `{"source":"\ncontract test {\n /// @notice Will multiply ` + "`a`" + ` by 7.\n function multiply(uint a) returns(uint d) {\n return a * 7;\n }\n}\n","language":"Solidity","languageVersion":"0.1.1","compilerVersion":"0.1.1","compilerOptions":"--binary file --json-abi file --natspec-user file --natspec-dev file --add-std 1","abiDefinition":[{"constant":false,"inputs":[{"name":"a","type":"uint256"}],"name":"multiply","outputs":[{"name":"d","type":"uint256"}],"type":"function"}],"userDoc":{"methods":{"multiply(uint256)":{"notice":"Will multiply ` + "`a`" + ` by 7."}}},"developerDoc":{"methods":{}}}`
)
-func skipUnsupported(t *testing.T) {
- sol, err := SolidityVersion("")
- if err != nil {
+func skipWithoutSolc(t *testing.T) {
+ if _, err := exec.LookPath("solc"); err != nil {
t.Skip(err)
- return
- }
- if sol.Version != supportedSolcVersion {
- t.Skipf("unsupported version of solc found (%v, expect %v)", sol.Version, supportedSolcVersion)
}
}
func TestCompiler(t *testing.T) {
- skipUnsupported(t)
+ skipWithoutSolc(t)
+
contracts, err := CompileSolidityString("", testSource)
if err != nil {
t.Fatalf("error compiling source. result %v: %v", contracts, err)
@@ -64,19 +59,20 @@ func TestCompiler(t *testing.T) {
if !ok {
t.Fatal("info for contract 'test' not present in result")
}
- if c.Code != testCode {
- t.Errorf("wrong code: expected\n%s, got\n%s", testCode, c.Code)
+ if c.Code == "" {
+ t.Error("empty code")
}
if c.Info.Source != testSource {
t.Error("wrong source")
}
- if c.Info.CompilerVersion != supportedSolcVersion {
- t.Errorf("wrong version: expected %q, got %q", supportedSolcVersion, c.Info.CompilerVersion)
+ if c.Info.CompilerVersion == "" {
+ t.Error("empty version")
}
}
func TestCompileError(t *testing.T) {
- skipUnsupported(t)
+ skipWithoutSolc(t)
+
contracts, err := CompileSolidityString("", testSource[4:])
if err == nil {
t.Errorf("error expected compiling source. got none. result %v", contracts)
diff --git a/common/format.go b/common/format.go
index 119637d2e..fccc29962 100644
--- a/common/format.go
+++ b/common/format.go
@@ -27,7 +27,7 @@ import (
// the unnecessary precision off from the formatted textual representation.
type PrettyDuration time.Duration
-var prettyDurationRe = regexp.MustCompile("\\.[0-9]+")
+var prettyDurationRe = regexp.MustCompile(`\.[0-9]+`)
// String implements the Stringer interface, allowing pretty printing of duration
// values rounded to three decimals.
diff --git a/common/math/dist_test.go b/common/math/dist_test.go
index 826faea8b..f5857b6f8 100644
--- a/common/math/dist_test.go
+++ b/common/math/dist_test.go
@@ -41,24 +41,24 @@ func TestSum(t *testing.T) {
func TestDist(t *testing.T) {
var vectors = []Vector{
- Vector{big.NewInt(1000), big.NewInt(1234)},
- Vector{big.NewInt(500), big.NewInt(10023)},
- Vector{big.NewInt(1034), big.NewInt(1987)},
- Vector{big.NewInt(1034), big.NewInt(1987)},
- Vector{big.NewInt(8983), big.NewInt(1977)},
- Vector{big.NewInt(98382), big.NewInt(1887)},
- Vector{big.NewInt(12398), big.NewInt(1287)},
- Vector{big.NewInt(12398), big.NewInt(1487)},
- Vector{big.NewInt(12398), big.NewInt(1987)},
- Vector{big.NewInt(12398), big.NewInt(128)},
- Vector{big.NewInt(12398), big.NewInt(1987)},
- Vector{big.NewInt(1398), big.NewInt(187)},
- Vector{big.NewInt(12328), big.NewInt(1927)},
- Vector{big.NewInt(12398), big.NewInt(1987)},
- Vector{big.NewInt(22398), big.NewInt(1287)},
- Vector{big.NewInt(1370), big.NewInt(1981)},
- Vector{big.NewInt(12398), big.NewInt(1957)},
- Vector{big.NewInt(42198), big.NewInt(1987)},
+ {big.NewInt(1000), big.NewInt(1234)},
+ {big.NewInt(500), big.NewInt(10023)},
+ {big.NewInt(1034), big.NewInt(1987)},
+ {big.NewInt(1034), big.NewInt(1987)},
+ {big.NewInt(8983), big.NewInt(1977)},
+ {big.NewInt(98382), big.NewInt(1887)},
+ {big.NewInt(12398), big.NewInt(1287)},
+ {big.NewInt(12398), big.NewInt(1487)},
+ {big.NewInt(12398), big.NewInt(1987)},
+ {big.NewInt(12398), big.NewInt(128)},
+ {big.NewInt(12398), big.NewInt(1987)},
+ {big.NewInt(1398), big.NewInt(187)},
+ {big.NewInt(12328), big.NewInt(1927)},
+ {big.NewInt(12398), big.NewInt(1987)},
+ {big.NewInt(22398), big.NewInt(1287)},
+ {big.NewInt(1370), big.NewInt(1981)},
+ {big.NewInt(12398), big.NewInt(1957)},
+ {big.NewInt(42198), big.NewInt(1987)},
}
VectorsBy(GasSort).Sort(vectors)
diff --git a/compression/rle/read_write.go b/compression/rle/read_write.go
index 03dffd607..0e7ad90ae 100644
--- a/compression/rle/read_write.go
+++ b/compression/rle/read_write.go
@@ -76,9 +76,9 @@ func compressChunk(dat []byte) (ret []byte, n int) {
}
return []byte{token, byte(j + 2)}, j
case len(dat) >= 32:
- if dat[0] == empty[0] && bytes.Compare(dat[:32], empty) == 0 {
+ if dat[0] == empty[0] && bytes.Equal(dat[:32], empty) {
return []byte{token, emptyShaToken}, 32
- } else if dat[0] == emptyList[0] && bytes.Compare(dat[:32], emptyList) == 0 {
+ } else if dat[0] == emptyList[0] && bytes.Equal(dat[:32], emptyList) {
return []byte{token, emptyListShaToken}, 32
}
fallthrough
diff --git a/console/bridge.go b/console/bridge.go
index 7f7e6feb1..f0c59804b 100644
--- a/console/bridge.go
+++ b/console/bridge.go
@@ -46,7 +46,7 @@ func newBridge(client *rpc.Client, prompter UserPrompter, printer io.Writer) *br
}
// NewAccount is a wrapper around the personal.newAccount RPC method that uses a
-// non-echoing password prompt to aquire the passphrase and executes the original
+// non-echoing password prompt to acquire the passphrase and executes the original
// RPC method (saved in jeth.newAccount) with it to actually execute the RPC call.
func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
var (
@@ -75,7 +75,7 @@ func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
default:
throwJSException("expected 0 or 1 string argument")
}
- // Password aquired, execute the call and return
+ // Password acquired, execute the call and return
ret, err := call.Otto.Call("jeth.newAccount", nil, password)
if err != nil {
throwJSException(err.Error())
@@ -84,7 +84,7 @@ func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
}
// UnlockAccount is a wrapper around the personal.unlockAccount RPC method that
-// uses a non-echoing password prompt to aquire the passphrase and executes the
+// uses a non-echoing password prompt to acquire the passphrase and executes the
// original RPC method (saved in jeth.unlockAccount) with it to actually execute
// the RPC call.
func (b *bridge) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
@@ -127,7 +127,7 @@ func (b *bridge) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
}
// Sign is a wrapper around the personal.sign RPC method that uses a non-echoing password
-// prompt to aquire the passphrase and executes the original RPC method (saved in
+// prompt to acquire the passphrase and executes the original RPC method (saved in
// jeth.sign) with it to actually execute the RPC call.
func (b *bridge) Sign(call otto.FunctionCall) (response otto.Value) {
var (
@@ -270,18 +270,15 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
} else {
resultVal, err := JSON.Call("parse", string(result))
if err != nil {
- resp = newErrorResponse(call, -32603, err.Error(), &req.Id).Object()
+ setError(resp, -32603, err.Error())
} else {
resp.Set("result", resultVal)
}
}
case rpc.Error:
- resp.Set("error", map[string]interface{}{
- "code": err.ErrorCode(),
- "message": err.Error(),
- })
+ setError(resp, err.ErrorCode(), err.Error())
default:
- resp = newErrorResponse(call, -32603, err.Error(), &req.Id).Object()
+ setError(resp, -32603, err.Error())
}
resps.Call("push", resp)
}
@@ -300,12 +297,8 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
return response
}
-func newErrorResponse(call otto.FunctionCall, code int, msg string, id interface{}) otto.Value {
- // Bundle the error into a JSON RPC call response
- m := map[string]interface{}{"version": "2.0", "id": id, "error": map[string]interface{}{"code": code, msg: msg}}
- res, _ := json.Marshal(m)
- val, _ := call.Otto.Run("(" + string(res) + ")")
- return val
+func setError(resp *otto.Object, code int, msg string) {
+ resp.Set("error", map[string]interface{}{"code": code, "message": msg})
}
// throwJSException panics on an otto.Value. The Otto VM will recover from the
diff --git a/console/console.go b/console/console.go
index 6e3d7e43c..8865f5e89 100644
--- a/console/console.go
+++ b/console/console.go
@@ -36,9 +36,9 @@ import (
)
var (
- passwordRegexp = regexp.MustCompile("personal.[nus]")
- onlyWhitespace = regexp.MustCompile("^\\s*$")
- exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$")
+ passwordRegexp = regexp.MustCompile(`personal.[nus]`)
+ onlyWhitespace = regexp.MustCompile(`^\s*$`)
+ exit = regexp.MustCompile(`^\s*exit\s*;*\s*$`)
)
// HistoryFile is the file within the data directory to store input scrollback.
@@ -275,10 +275,7 @@ func (c *Console) Evaluate(statement string) error {
fmt.Fprintf(c.printer, "[native] error: %v\n", r)
}
}()
- if err := c.jsre.Evaluate(statement, c.printer); err != nil {
- return err
- }
- return nil
+ return c.jsre.Evaluate(statement, c.printer)
}
// Interactive starts an interactive user session, where input is propted from
diff --git a/console/prompter.go b/console/prompter.go
index 5946d9ece..6acbfb0e2 100644
--- a/console/prompter.go
+++ b/console/prompter.go
@@ -44,7 +44,7 @@ type UserPrompter interface {
PromptConfirm(prompt string) (bool, error)
// SetHistory sets the the input scrollback history that the prompter will allow
- // the user to scoll back to.
+ // the user to scroll back to.
SetHistory(history []string)
// AppendHistory appends an entry to the scrollback history. It should be called
@@ -147,7 +147,7 @@ func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) {
}
// SetHistory sets the the input scrollback history that the prompter will allow
-// the user to scoll back to.
+// the user to scroll back to.
func (p *terminalPrompter) SetHistory(history []string) {
p.State.ReadHistory(strings.NewReader(strings.Join(history, "\n")))
}
diff --git a/contracts/chequebook/cheque.go b/contracts/chequebook/cheque.go
index 5ece1391b..d49964f91 100644
--- a/contracts/chequebook/cheque.go
+++ b/contracts/chequebook/cheque.go
@@ -252,7 +252,7 @@ func (self *Chequebook) Issue(beneficiary common.Address, amount *big.Int) (ch *
return nil, fmt.Errorf("amount must be greater than zero (%v)", amount)
}
if self.balance.Cmp(amount) < 0 {
- err = fmt.Errorf("insufficent funds to issue cheque for amount: %v. balance: %v", amount, self.balance)
+ err = fmt.Errorf("insufficient funds to issue cheque for amount: %v. balance: %v", amount, self.balance)
} else {
var sig []byte
sent, found := self.sent[beneficiary]
@@ -277,7 +277,7 @@ func (self *Chequebook) Issue(beneficiary common.Address, amount *big.Int) (ch *
}
// auto deposit if threshold is set and balance is less then threshold
- // note this is called even if issueing cheque fails
+ // note this is called even if issuing cheque fails
// so we reattempt depositing
if self.threshold != nil {
if self.balance.Cmp(self.threshold) < 0 {
diff --git a/contracts/release/contract.sol b/contracts/release/contract.sol
index fedf646c0..554cf7290 100644
--- a/contracts/release/contract.sol
+++ b/contracts/release/contract.sol
@@ -78,7 +78,7 @@ contract ReleaseOracle {
}
// signers is an accessor method to retrieve all te signers (public accessor
- // generates an indexed one, not a retreive-all version).
+ // generates an indexed one, not a retrieve-all version).
function signers() constant returns(address[]) {
return voters;
}
@@ -178,7 +178,7 @@ contract ReleaseOracle {
voters[i] = voters[voters.length - 1];
voters.length--;
- delete verProp; // Nuke any version proposal (no suprise releases!)
+ delete verProp; // Nuke any version proposal (no surprise releases!)
break;
}
}
diff --git a/core/bench_test.go b/core/bench_test.go
index a208ea250..5785748a1 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -83,7 +83,7 @@ func genValueTx(nbytes int) func(int, *BlockGen) {
toaddr := common.Address{}
data := make([]byte, nbytes)
gas := IntrinsicGas(data, false, false)
- tx, _ := types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data).SignECDSA(types.HomesteadSigner{}, benchRootKey)
+ tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data), types.HomesteadSigner{}, benchRootKey)
gen.AddTx(tx)
}
}
@@ -123,7 +123,7 @@ func genTxRing(naccounts int) func(int, *BlockGen) {
nil,
nil,
)
- tx, _ = tx.SignECDSA(types.HomesteadSigner{}, ringKeys[from])
+ tx, _ = types.SignTx(tx, types.HomesteadSigner{}, ringKeys[from])
gen.AddTx(tx)
from = to
}
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index 6fcab1e5f..413c3cc8e 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -24,11 +24,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow/ezp"
)
func testChainConfig() *params.ChainConfig {
@@ -49,20 +47,19 @@ func proc() (Validator, *BlockChain) {
}
func TestNumber(t *testing.T) {
- pow := ezp.New()
_, chain := proc()
statedb, _ := state.New(chain.Genesis().Root(), chain.chainDb)
cfg := testChainConfig()
header := makeHeader(cfg, chain.Genesis(), statedb)
header.Number = big.NewInt(3)
- err := ValidateHeader(cfg, pow, header, chain.Genesis().Header(), false, false)
+ err := ValidateHeader(cfg, FakePow{}, header, chain.Genesis().Header(), false, false)
if err != BlockNumberErr {
t.Errorf("expected block number error, got %q", err)
}
header = makeHeader(cfg, chain.Genesis(), statedb)
- err = ValidateHeader(cfg, pow, header, chain.Genesis().Header(), false, false)
+ err = ValidateHeader(cfg, FakePow{}, header, chain.Genesis().Header(), false, false)
if err == BlockNumberErr {
t.Errorf("didn't expect block number error")
}
@@ -77,7 +74,7 @@ func TestPutReceipt(t *testing.T) {
hash[0] = 2
receipt := new(types.Receipt)
- receipt.Logs = vm.Logs{&vm.Log{
+ receipt.Logs = []*types.Log{{
Address: addr,
Topics: []common.Hash{hash},
Data: []byte("hi"),
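
With vm.Logs gone, receipts in the tests above are filled with a plain []*types.Log slice, and Go allows the element type to be elided inside the literal. A small hedged sketch of the pattern (address, topic and payload are placeholders):

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	receipt := new(types.Receipt)
	receipt.Logs = []*types.Log{
		{
			Address: common.BytesToAddress([]byte{0x11}),
			Topics:  []common.Hash{common.BytesToHash([]byte{0x02})},
			Data:    []byte("hi"),
		},
	}
	_ = receipt
}
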
diff --git a/core/blockchain.go b/core/blockchain.go
index 0de529480..c3530b93c 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -46,9 +46,6 @@ import (
)
var (
- chainlogger = logger.NewLogger("CHAIN")
- jsonlogger = logger.NewJsonLogger()
-
blockInsertTimer = metrics.NewTimer("chain/inserts")
ErrNoGenesis = errors.New("Genesis not found in chain")
@@ -150,7 +147,7 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P
return nil, err
}
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
- for hash, _ := range BadHashes {
+ for hash := range BadHashes {
if header := bc.GetHeaderByHash(hash); header != nil {
// get the canonical block corresponding to the offending header's number
headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
@@ -402,10 +399,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
// Export writes the active chain to the given writer.
func (self *BlockChain) Export(w io.Writer) error {
- if err := self.ExportN(w, uint64(0), self.currentBlock.NumberU64()); err != nil {
- return err
- }
- return nil
+ return self.ExportN(w, uint64(0), self.currentBlock.NumberU64())
}
// ExportN writes a subset of the active chain to the given writer.
@@ -883,7 +877,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
var (
stats = insertStats{startTime: time.Now()}
events = make([]interface{}, 0, len(chain))
- coalescedLogs vm.Logs
+ coalescedLogs []*types.Log
nonceChecked = make([]bool, len(chain))
)
@@ -1094,7 +1088,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
oldStart = oldBlock
newStart = newBlock
deletedTxs types.Transactions
- deletedLogs vm.Logs
+ deletedLogs []*types.Log
// collectLogs collects the logs that were generated during the
// processing of the block that corresponds with the given hash.
// These logs are later announced as deleted.
@@ -1210,7 +1204,7 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// postChainEvents iterates over the events generated by a chain insertion and
// posts them into the event mux.
-func (self *BlockChain) postChainEvents(events []interface{}, logs vm.Logs) {
+func (self *BlockChain) postChainEvents(events []interface{}, logs []*types.Log) {
// post event logs for further processing
self.eventMux.Post(logs)
for _, event := range events {
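
One of the hunks above collapses an "if err != nil { return err }; return nil" tail into a direct return of the callee's error. The same simplification in isolation, as a hedged standalone sketch (exportRange is a made-up stand-in for ExportN):

package main

import (
	"fmt"
	"io"
	"os"
)

// exportRange is a hypothetical stand-in for BlockChain.ExportN.
func exportRange(w io.Writer, first, last uint64) error {
	_, err := fmt.Fprintf(w, "blocks %d..%d\n", first, last)
	return err
}

// Before: if err := exportRange(w, 0, head); err != nil { return err }; return nil
// After: forward the error directly.
func export(w io.Writer, head uint64) error {
	return exportRange(w, uint64(0), head)
}

func main() {
	if err := export(os.Stdout, 42); err != nil {
		panic(err)
	}
}
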
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 62d85e2e5..a5a83ba60 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -435,7 +435,7 @@ func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return n
func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error {
return nil
}
-func (bproc) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, vm.Logs, *big.Int, error) {
+func (bproc) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, *big.Int, error) {
return nil, nil, new(big.Int), nil
}
@@ -719,7 +719,7 @@ func TestFastVsFullChains(t *testing.T) {
// If the block number is multiple of 3, send a few bonus transactions to the miner
if i%3 == 2 {
for j := 0; j < i%4+1; j++ {
- tx, err := types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, key)
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, nil, nil), signer, key)
if err != nil {
panic(err)
}
@@ -883,8 +883,8 @@ func TestChainTxReorgs(t *testing.T) {
// Create two transactions shared between the chains:
// - postponed: transaction included at a later block in the forked chain
// - swapped: transaction included at the same block number in the forked chain
- postponed, _ := types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, key1)
- swapped, _ := types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, key1)
+ postponed, _ := types.SignTx(types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, nil, nil), signer, key1)
+ swapped, _ := types.SignTx(types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, nil, nil), signer, key1)
// Create two transactions that will be dropped by the forked chain:
// - pastDrop: transaction dropped retroactively from a past block
@@ -900,13 +900,13 @@ func TestChainTxReorgs(t *testing.T) {
chain, _ := GenerateChain(params.TestChainConfig, genesis, db, 3, func(i int, gen *BlockGen) {
switch i {
case 0:
- pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, key2)
+ pastDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil), signer, key2)
gen.AddTx(pastDrop) // This transaction will be dropped in the fork from below the split point
gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork
case 2:
- freshDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, key2)
+ freshDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil), signer, key2)
gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point
gen.AddTx(swapped) // This transaction will be swapped out at the exact height
@@ -925,18 +925,18 @@ func TestChainTxReorgs(t *testing.T) {
chain, _ = GenerateChain(params.TestChainConfig, genesis, db, 5, func(i int, gen *BlockGen) {
switch i {
case 0:
- pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, key3)
+ pastAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil), signer, key3)
gen.AddTx(pastAdd) // This transaction needs to be injected during reorg
case 2:
gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain
gen.AddTx(swapped) // This transaction was swapped from the exact current spot in the original chain
- freshAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, key3)
+ freshAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil), signer, key3)
gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time
case 3:
- futureAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, key3)
+ futureAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil), signer, key3)
gen.AddTx(futureAdd) // This transaction will be added after a full reorg
}
})
@@ -995,7 +995,7 @@ func TestLogReorgs(t *testing.T) {
subs := evmux.Subscribe(RemovedLogsEvent{})
chain, _ := GenerateChain(params.TestChainConfig, genesis, db, 2, func(i int, gen *BlockGen) {
if i == 1 {
- tx, err := types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), big.NewInt(1000000), new(big.Int), code).SignECDSA(signer, key1)
+ tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), big.NewInt(1000000), new(big.Int), code), signer, key1)
if err != nil {
t.Fatalf("failed to create tx: %v", err)
}
@@ -1035,7 +1035,7 @@ func TestReorgSideEvent(t *testing.T) {
}
replacementBlocks, _ := GenerateChain(params.TestChainConfig, genesis, db, 4, func(i int, gen *BlockGen) {
- tx, err := types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), big.NewInt(1000000), new(big.Int), nil).SignECDSA(signer, key1)
+ tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), big.NewInt(1000000), new(big.Int), nil), signer, key1)
if i == 2 {
gen.OffsetTime(-1)
}
@@ -1107,7 +1107,7 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
chain, _ := GenerateChain(params.TestChainConfig, genesis, db, 10, func(i int, gen *BlockGen) {})
- for i, _ := range chain {
+ for i := range chain {
go func(block *types.Block) {
// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
for {
@@ -1152,7 +1152,7 @@ func TestEIP155Transition(t *testing.T) {
tx *types.Transaction
err error
basicTx = func(signer types.Signer) (*types.Transaction, error) {
- return types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), big.NewInt(21000), new(big.Int), nil).SignECDSA(signer, key)
+ return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), big.NewInt(21000), new(big.Int), nil), signer, key)
}
)
switch i {
@@ -1215,7 +1215,7 @@ func TestEIP155Transition(t *testing.T) {
tx *types.Transaction
err error
basicTx = func(signer types.Signer) (*types.Transaction, error) {
- return types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), big.NewInt(21000), new(big.Int), nil).SignECDSA(signer, key)
+ return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), big.NewInt(21000), new(big.Int), nil), signer, key)
}
)
switch i {
@@ -1260,11 +1260,11 @@ func TestEIP161AccountRemoval(t *testing.T) {
)
switch i {
case 0:
- tx, err = types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), big.NewInt(21000), new(big.Int), nil).SignECDSA(signer, key)
+ tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), big.NewInt(21000), new(big.Int), nil), signer, key)
case 1:
- tx, err = types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), big.NewInt(21000), new(big.Int), nil).SignECDSA(signer, key)
+ tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), big.NewInt(21000), new(big.Int), nil), signer, key)
case 2:
- tx, err = types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), big.NewInt(21000), new(big.Int), nil).SignECDSA(signer, key)
+ tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), big.NewInt(21000), new(big.Int), nil), signer, key)
}
if err != nil {
t.Fatal(err)
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 487cd6e18..942f4ace2 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -55,13 +55,13 @@ func ExampleGenerateChain() {
switch i {
case 0:
// In block 1, addr1 sends addr2 some ether.
- tx, _ := types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(signer, key1)
+ tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1)
gen.AddTx(tx)
case 1:
// In block 2, addr1 sends some more ether to addr2.
// addr2 passes it on to addr3.
- tx1, _ := types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, key1)
- tx2, _ := types.NewTransaction(gen.TxNonce(addr2), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, key2)
+ tx1, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(1000), params.TxGas, nil, nil), signer, key1)
+ tx2, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr3, big.NewInt(1000), params.TxGas, nil, nil), signer, key2)
gen.AddTx(tx1)
gen.AddTx(tx2)
case 2:
diff --git a/core/dao.go b/core/dao.go
index 1260c310a..a7f544c3d 100644
--- a/core/dao.go
+++ b/core/dao.go
@@ -45,11 +45,11 @@ func ValidateDAOHeaderExtraData(config *params.ChainConfig, header *types.Header
}
// Depending whether we support or oppose the fork, validate the extra-data contents
if config.DAOForkSupport {
- if bytes.Compare(header.Extra, params.DAOForkBlockExtra) != 0 {
+ if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
return ValidationError("DAO pro-fork bad block extra-data: 0x%x", header.Extra)
}
} else {
- if bytes.Compare(header.Extra, params.DAOForkBlockExtra) == 0 {
+ if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
return ValidationError("DAO no-fork bad block extra-data: 0x%x", header.Extra)
}
}
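
Several hunks in this diff swap bytes.Compare(a, b) != 0 / == 0 for the more idiomatic bytes.Equal. A tiny hedged sketch of the equivalent checks:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	a := []byte("dao-hard-fork")
	b := []byte("dao-hard-fork")

	// bytes.Compare(a, b) == 0  is equivalent to  bytes.Equal(a, b)
	// bytes.Compare(a, b) != 0  is equivalent to  !bytes.Equal(a, b)
	fmt.Println(bytes.Equal(a, b))  // true
	fmt.Println(!bytes.Equal(a, b)) // false
}
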
diff --git a/core/database_util.go b/core/database_util.go
index 84669de35..2060b8b6a 100644
--- a/core/database_util.go
+++ b/core/database_util.go
@@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"math/big"
+ "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -63,6 +64,8 @@ var (
oldBlockHashPrefix = []byte("block-hash-") // [deprecated by the header/block split, remove eventually]
ChainConfigNotFoundErr = errors.New("ChainConfig not found") // general config not found error
+
+ mipmapBloomMu sync.Mutex // protect against race condition when updating mipmap blooms
)
// encodeBlockNumber encodes a block number as big endian uint64
@@ -564,6 +567,9 @@ func mipmapKey(num, level uint64) []byte {
// WriteMapmapBloom writes each address included in the receipts' logs to the
// MIP bloom bin.
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
+ mipmapBloomMu.Lock()
+ defer mipmapBloomMu.Unlock()
+
batch := db.NewBatch()
for _, level := range MIPMapLevels {
key := mipmapKey(number, level)
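
The new package-level mipmapBloomMu serializes concurrent WriteMipmapBloom calls around the read-modify-write of each bloom bin. A minimal hedged sketch of the lock-around-update pattern only (the store map is a stand-in for the database batch, not the real schema):

package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.Mutex
	store = map[uint64][]byte{} // stand-in for the mipmap bloom bins
)

// writeBloom mimics the guarded read-modify-write in WriteMipmapBloom.
func writeBloom(number uint64, addr byte) {
	mu.Lock()
	defer mu.Unlock()

	bin := store[number]    // read the existing bin
	bin = append(bin, addr) // fold the new address into it
	store[number] = bin     // write it back, atomically w.r.t. other writers
}

func main() {
	var wg sync.WaitGroup
	for i := byte(0); i < 4; i++ {
		wg.Add(1)
		go func(b byte) { defer wg.Done(); writeBloom(1, b) }(i)
	}
	wg.Wait()
	fmt.Println(len(store[1])) // 4
}
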
diff --git a/core/database_util_test.go b/core/database_util_test.go
index 83750aa60..d96aa71ba 100644
--- a/core/database_util_test.go
+++ b/core/database_util_test.go
@@ -26,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb"
@@ -393,9 +392,9 @@ func TestReceiptStorage(t *testing.T) {
receipt1 := &types.Receipt{
PostState: []byte{0x01},
CumulativeGasUsed: big.NewInt(1),
- Logs: vm.Logs{
- &vm.Log{Address: common.BytesToAddress([]byte{0x11})},
- &vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})},
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x11})},
+ {Address: common.BytesToAddress([]byte{0x01, 0x11})},
},
TxHash: common.BytesToHash([]byte{0x11, 0x11}),
ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
@@ -404,9 +403,9 @@ func TestReceiptStorage(t *testing.T) {
receipt2 := &types.Receipt{
PostState: []byte{0x02},
CumulativeGasUsed: big.NewInt(2),
- Logs: vm.Logs{
- &vm.Log{Address: common.BytesToAddress([]byte{0x22})},
- &vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})},
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x22})},
+ {Address: common.BytesToAddress([]byte{0x02, 0x22})},
},
TxHash: common.BytesToHash([]byte{0x22, 0x22}),
ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
@@ -431,7 +430,7 @@ func TestReceiptStorage(t *testing.T) {
rlpHave, _ := rlp.EncodeToBytes(r)
rlpWant, _ := rlp.EncodeToBytes(receipt)
- if bytes.Compare(rlpHave, rlpWant) != 0 {
+ if !bytes.Equal(rlpHave, rlpWant) {
t.Fatalf("receipt #%d [%x]: receipt mismatch: have %v, want %v", i, receipt.TxHash, r, receipt)
}
}
@@ -452,9 +451,9 @@ func TestBlockReceiptStorage(t *testing.T) {
receipt1 := &types.Receipt{
PostState: []byte{0x01},
CumulativeGasUsed: big.NewInt(1),
- Logs: vm.Logs{
- &vm.Log{Address: common.BytesToAddress([]byte{0x11})},
- &vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})},
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x11})},
+ {Address: common.BytesToAddress([]byte{0x01, 0x11})},
},
TxHash: common.BytesToHash([]byte{0x11, 0x11}),
ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
@@ -463,9 +462,9 @@ func TestBlockReceiptStorage(t *testing.T) {
receipt2 := &types.Receipt{
PostState: []byte{0x02},
CumulativeGasUsed: big.NewInt(2),
- Logs: vm.Logs{
- &vm.Log{Address: common.BytesToAddress([]byte{0x22})},
- &vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})},
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x22})},
+ {Address: common.BytesToAddress([]byte{0x02, 0x22})},
},
TxHash: common.BytesToHash([]byte{0x22, 0x22}),
ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
@@ -489,7 +488,7 @@ func TestBlockReceiptStorage(t *testing.T) {
rlpHave, _ := rlp.EncodeToBytes(rs[i])
rlpWant, _ := rlp.EncodeToBytes(receipts[i])
- if bytes.Compare(rlpHave, rlpWant) != 0 {
+ if !bytes.Equal(rlpHave, rlpWant) {
t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i])
}
}
@@ -505,14 +504,14 @@ func TestMipmapBloom(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
receipt1 := new(types.Receipt)
- receipt1.Logs = vm.Logs{
- &vm.Log{Address: common.BytesToAddress([]byte("test"))},
- &vm.Log{Address: common.BytesToAddress([]byte("address"))},
+ receipt1.Logs = []*types.Log{
+ {Address: common.BytesToAddress([]byte("test"))},
+ {Address: common.BytesToAddress([]byte("address"))},
}
receipt2 := new(types.Receipt)
- receipt2.Logs = vm.Logs{
- &vm.Log{Address: common.BytesToAddress([]byte("test"))},
- &vm.Log{Address: common.BytesToAddress([]byte("address1"))},
+ receipt2.Logs = []*types.Log{
+ {Address: common.BytesToAddress([]byte("test"))},
+ {Address: common.BytesToAddress([]byte("address1"))},
}
WriteMipmapBloom(db, 1, types.Receipts{receipt1})
@@ -528,14 +527,14 @@ func TestMipmapBloom(t *testing.T) {
// reset
db, _ = ethdb.NewMemDatabase()
receipt := new(types.Receipt)
- receipt.Logs = vm.Logs{
- &vm.Log{Address: common.BytesToAddress([]byte("test"))},
+ receipt.Logs = []*types.Log{
+ {Address: common.BytesToAddress([]byte("test"))},
}
WriteMipmapBloom(db, 999, types.Receipts{receipt1})
receipt = new(types.Receipt)
- receipt.Logs = vm.Logs{
- &vm.Log{Address: common.BytesToAddress([]byte("test 1"))},
+ receipt.Logs = []*types.Log{
+ {Address: common.BytesToAddress([]byte("test 1"))},
}
WriteMipmapBloom(db, 1000, types.Receipts{receipt})
@@ -568,17 +567,12 @@ func TestMipmapChain(t *testing.T) {
switch i {
case 1:
receipt := types.NewReceipt(nil, new(big.Int))
- receipt.Logs = vm.Logs{
- &vm.Log{
- Address: addr,
- Topics: []common.Hash{hash1},
- },
- }
+ receipt.Logs = []*types.Log{{Address: addr, Topics: []common.Hash{hash1}}}
gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
case 1000:
receipt := types.NewReceipt(nil, new(big.Int))
- receipt.Logs = vm.Logs{&vm.Log{Address: addr2}}
+ receipt.Logs = []*types.Log{{Address: addr2}}
gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
diff --git a/core/events.go b/core/events.go
index 414493fbf..31ad8364b 100644
--- a/core/events.go
+++ b/core/events.go
@@ -21,7 +21,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
)
// TxPreEvent is posted when a transaction enters the transaction pool.
@@ -32,7 +31,7 @@ type TxPostEvent struct{ Tx *types.Transaction }
// PendingLogsEvent is posted pre mining and notifies of pending logs.
type PendingLogsEvent struct {
- Logs vm.Logs
+ Logs []*types.Log
}
// PendingStateEvent is posted pre mining and notifies of pending state changes.
@@ -45,18 +44,18 @@ type NewMinedBlockEvent struct{ Block *types.Block }
type RemovedTransactionEvent struct{ Txs types.Transactions }
// RemovedLogEvent is posted when a reorg happens
-type RemovedLogsEvent struct{ Logs vm.Logs }
+type RemovedLogsEvent struct{ Logs []*types.Log }
// ChainSplit is posted when a new head is detected
type ChainSplitEvent struct {
Block *types.Block
- Logs vm.Logs
+ Logs []*types.Log
}
type ChainEvent struct {
Block *types.Block
Hash common.Hash
- Logs vm.Logs
+ Logs []*types.Log
}
type ChainSideEvent struct {
@@ -65,7 +64,7 @@ type ChainSideEvent struct {
type PendingBlockEvent struct {
Block *types.Block
- Logs vm.Logs
+ Logs []*types.Log
}
type ChainUncleEvent struct {
diff --git a/core/state/iterator.go b/core/state/iterator.go
index 14265b277..a58a15ad3 100644
--- a/core/state/iterator.go
+++ b/core/state/iterator.go
@@ -123,7 +123,7 @@ func (it *NodeIterator) step() error {
if !it.dataIt.Next() {
it.dataIt = nil
}
- if bytes.Compare(account.CodeHash, emptyCodeHash) != 0 {
+ if !bytes.Equal(account.CodeHash, emptyCodeHash) {
it.codeHash = common.BytesToHash(account.CodeHash)
it.code, err = it.state.db.Get(account.CodeHash)
if err != nil {
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
index aa05c5dfe..aa9c5b728 100644
--- a/core/state/iterator_test.go
+++ b/core/state/iterator_test.go
@@ -41,7 +41,7 @@ func TestNodeIteratorCoverage(t *testing.T) {
}
}
// Cross check the hashes and the database itself
- for hash, _ := range hashes {
+ for hash := range hashes {
if _, err := db.Get(hash.Bytes()); err != nil {
t.Errorf("failed to retrieve reported node %x: %v", hash, err)
}
diff --git a/core/state/managed_state_test.go b/core/state/managed_state_test.go
index 3f7bc2aa8..d9c232ebb 100644
--- a/core/state/managed_state_test.go
+++ b/core/state/managed_state_test.go
@@ -52,7 +52,7 @@ func TestRemove(t *testing.T) {
ms, account := create()
nn := make([]bool, 10)
- for i, _ := range nn {
+ for i := range nn {
nn[i] = true
}
account.nonces = append(account.nonces, nn...)
@@ -68,7 +68,7 @@ func TestReuse(t *testing.T) {
ms, account := create()
nn := make([]bool, 10)
- for i, _ := range nn {
+ for i := range nn {
nn[i] = true
}
account.nonces = append(account.nonces, nn...)
@@ -84,7 +84,7 @@ func TestReuse(t *testing.T) {
func TestRemoteNonceChange(t *testing.T) {
ms, account := create()
nn := make([]bool, 10)
- for i, _ := range nn {
+ for i := range nn {
nn[i] = true
}
account.nonces = append(account.nonces, nn...)
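
The "for i, _ := range" loops above drop the redundant blank identifier, the form gofmt -s prefers. A tiny hedged sketch of the two forms over a slice and a map:

package main

import "fmt"

func main() {
	nn := make([]bool, 3)
	for i := range nn { // instead of: for i, _ := range nn
		nn[i] = true
	}

	seen := map[string]struct{}{"a": {}, "b": {}}
	for k := range seen { // instead of: for k, _ := range seen
		fmt.Println(k)
	}
}
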
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 82e2ec7c1..063e2b469 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -24,6 +24,7 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
@@ -71,7 +72,7 @@ type StateDB struct {
thash, bhash common.Hash
txIndex int
- logs map[common.Hash]vm.Logs
+ logs map[common.Hash][]*types.Log
logSize uint
// Journal of state modifications. This is the backbone of
@@ -97,7 +98,7 @@ func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
stateObjects: make(map[common.Address]*StateObject),
stateObjectsDirty: make(map[common.Address]struct{}),
refund: new(big.Int),
- logs: make(map[common.Hash]vm.Logs),
+ logs: make(map[common.Hash][]*types.Log),
}, nil
}
@@ -118,7 +119,7 @@ func (self *StateDB) New(root common.Hash) (*StateDB, error) {
stateObjects: make(map[common.Address]*StateObject),
stateObjectsDirty: make(map[common.Address]struct{}),
refund: new(big.Int),
- logs: make(map[common.Hash]vm.Logs),
+ logs: make(map[common.Hash][]*types.Log),
}, nil
}
@@ -138,7 +139,7 @@ func (self *StateDB) Reset(root common.Hash) error {
self.thash = common.Hash{}
self.bhash = common.Hash{}
self.txIndex = 0
- self.logs = make(map[common.Hash]vm.Logs)
+ self.logs = make(map[common.Hash][]*types.Log)
self.logSize = 0
self.clearJournalAndRefund()
@@ -175,7 +176,7 @@ func (self *StateDB) StartRecord(thash, bhash common.Hash, ti int) {
self.txIndex = ti
}
-func (self *StateDB) AddLog(log *vm.Log) {
+func (self *StateDB) AddLog(log *types.Log) {
self.journal = append(self.journal, addLogChange{txhash: self.thash})
log.TxHash = self.thash
@@ -186,12 +187,12 @@ func (self *StateDB) AddLog(log *vm.Log) {
self.logSize++
}
-func (self *StateDB) GetLogs(hash common.Hash) vm.Logs {
+func (self *StateDB) GetLogs(hash common.Hash) []*types.Log {
return self.logs[hash]
}
-func (self *StateDB) Logs() vm.Logs {
- var logs vm.Logs
+func (self *StateDB) Logs() []*types.Log {
+ var logs []*types.Log
for _, lgs := range self.logs {
logs = append(logs, lgs...)
}
@@ -209,7 +210,7 @@ func (self *StateDB) Exist(addr common.Address) bool {
return self.GetStateObject(addr) != nil
}
-// Empty returns whether the state object is either non-existant
+// Empty returns whether the state object is either non-existent
// or empty according to the EIP161 specification (balance = nonce = code = 0)
func (self *StateDB) Empty(addr common.Address) bool {
so := self.GetStateObject(addr)
@@ -474,16 +475,16 @@ func (self *StateDB) Copy() *StateDB {
stateObjects: make(map[common.Address]*StateObject, len(self.stateObjectsDirty)),
stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
refund: new(big.Int).Set(self.refund),
- logs: make(map[common.Hash]vm.Logs, len(self.logs)),
+ logs: make(map[common.Hash][]*types.Log, len(self.logs)),
logSize: self.logSize,
}
// Copy the dirty states and logs
- for addr, _ := range self.stateObjectsDirty {
+ for addr := range self.stateObjectsDirty {
state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state, state.MarkStateObjectDirty)
state.stateObjectsDirty[addr] = struct{}{}
}
for hash, logs := range self.logs {
- state.logs[hash] = make(vm.Logs, len(logs))
+ state.logs[hash] = make([]*types.Log, len(logs))
copy(state.logs[hash], logs)
}
return state
@@ -529,7 +530,7 @@ func (self *StateDB) GetRefund() *big.Int {
// It is called in between transactions to get the root hash that
// goes into transaction receipts.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
- for addr, _ := range s.stateObjectsDirty {
+ for addr := range s.stateObjectsDirty {
stateObject := s.stateObjects[addr]
if stateObject.suicided || (deleteEmptyObjects && stateObject.empty()) {
s.deleteStateObject(stateObject)
@@ -552,7 +553,7 @@ func (s *StateDB) DeleteSuicides() {
// Reset refund so that any used-gas calculations can use this method.
s.clearJournalAndRefund()
- for addr, _ := range s.stateObjectsDirty {
+ for addr := range s.stateObjectsDirty {
stateObject := s.stateObjects[addr]
// If the object has been removed by a suicide
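
After this change StateDB.AddLog takes a *types.Log and GetLogs returns []*types.Log keyed by transaction hash. A hedged usage sketch against an in-memory database (the empty root and the hash/payload values are placeholders):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db, _ := ethdb.NewMemDatabase()
	statedb, _ := state.New(common.Hash{}, db)

	txHash := common.BytesToHash([]byte{0x01})
	statedb.StartRecord(txHash, common.Hash{}, 0) // associate following logs with txHash
	statedb.AddLog(&types.Log{
		Address: common.BytesToAddress([]byte("contract")),
		Data:    []byte("payload"),
	})

	fmt.Println(len(statedb.GetLogs(txHash))) // 1
}
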
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index f91820378..874317300 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -29,7 +29,7 @@ import (
"testing/quick"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
)
@@ -221,7 +221,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
fn: func(a testAction, s *StateDB) {
data := make([]byte, 2)
binary.BigEndian.PutUint16(data, uint16(a.args[0]))
- s.AddLog(&vm.Log{Address: addr, Data: data})
+ s.AddLog(&types.Log{Address: addr, Data: data})
},
args: make([]int64, 1),
},
diff --git a/core/state/sync.go b/core/state/sync.go
index bab9c8e7e..8456a810b 100644
--- a/core/state/sync.go
+++ b/core/state/sync.go
@@ -21,7 +21,6 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
@@ -32,7 +31,7 @@ import (
type StateSync trie.TrieSync
// NewStateSync create a new state trie download scheduler.
-func NewStateSync(root common.Hash, database ethdb.Database) *StateSync {
+func NewStateSync(root common.Hash, database trie.DatabaseReader) *StateSync {
var syncer *trie.TrieSync
callback := func(leaf []byte, parent common.Hash) error {
@@ -62,8 +61,8 @@ func (s *StateSync) Missing(max int) []common.Hash {
// Process injects a batch of retrieved trie nodes data, returning if something
// was committed to the database and also the index of an entry if processing of
// it failed.
-func (s *StateSync) Process(list []trie.SyncResult) (bool, int, error) {
- return (*trie.TrieSync)(s).Process(list)
+func (s *StateSync) Process(list []trie.SyncResult, dbw trie.DatabaseWriter) (bool, int, error) {
+ return (*trie.TrieSync)(s).Process(list, dbw)
}
// Pending returns the number of state entries currently pending for download.
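
StateSync.Process now takes an explicit trie.DatabaseWriter to commit into, rather than relying on a database captured at construction time. A hedged sketch of the resulting sync loop, modeled on the tests below (srcDb and dstDb are assumed to be populated and empty databases respectively):

package statesyncsketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// syncState drains the scheduler, fetching nodes from srcDb and committing
// them into dstDb via the writer argument added by this change.
func syncState(root common.Hash, srcDb, dstDb ethdb.Database) error {
	sched := state.NewStateSync(root, dstDb) // dstDb satisfies trie.DatabaseReader

	queue := append([]common.Hash{}, sched.Missing(128)...)
	for len(queue) > 0 {
		results := make([]trie.SyncResult, len(queue))
		for i, hash := range queue {
			data, err := srcDb.Get(hash.Bytes())
			if err != nil {
				return err
			}
			results[i] = trie.SyncResult{Hash: hash, Data: data}
		}
		// Process now receives the destination writer explicitly.
		if _, _, err := sched.Process(results, dstDb); err != nil {
			return err
		}
		queue = append(queue[:0], sched.Missing(128)...)
	}
	return nil
}
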
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index 8111320e6..43d146e3a 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -84,7 +84,7 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accou
if nonce := state.GetNonce(acc.address); nonce != acc.nonce {
t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce)
}
- if code := state.GetCode(acc.address); bytes.Compare(code, acc.code) != 0 {
+ if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) {
t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code)
}
}
@@ -138,7 +138,7 @@ func testIterativeStateSync(t *testing.T, batch int) {
}
results[i] = trie.SyncResult{Hash: hash, Data: data}
}
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
queue = append(queue[:0], sched.Missing(batch)...)
@@ -168,7 +168,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
}
results[i] = trie.SyncResult{Hash: hash, Data: data}
}
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
queue = append(queue[len(results):], sched.Missing(0)...)
@@ -198,7 +198,7 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
for len(queue) > 0 {
// Fetch all the queued nodes in a random order
results := make([]trie.SyncResult, 0, len(queue))
- for hash, _ := range queue {
+ for hash := range queue {
data, err := srcDb.Get(hash.Bytes())
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
@@ -206,7 +206,7 @@ func testIterativeRandomStateSync(t *testing.T, batch int) {
results = append(results, trie.SyncResult{Hash: hash, Data: data})
}
// Feed the retrieved results back and queue new tasks
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
queue = make(map[common.Hash]struct{})
@@ -235,7 +235,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
for len(queue) > 0 {
// Sync only half of the scheduled nodes, even those in random order
results := make([]trie.SyncResult, 0, len(queue)/2+1)
- for hash, _ := range queue {
+ for hash := range queue {
delete(queue, hash)
data, err := srcDb.Get(hash.Bytes())
@@ -249,7 +249,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
}
}
// Feed the retrieved results back and queue new tasks
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
for _, hash := range sched.Missing(0) {
@@ -283,7 +283,7 @@ func TestIncompleteStateSync(t *testing.T) {
results[i] = trie.SyncResult{Hash: hash, Data: data}
}
// Process each of the state nodes
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
for _, result := range results {
@@ -294,7 +294,7 @@ func TestIncompleteStateSync(t *testing.T) {
// Skim through the accounts and make sure the root hash is not a code node
codeHash := false
for _, acc := range srcAccounts {
- if bytes.Compare(root.Bytes(), crypto.Sha3(acc.code)) == 0 {
+ if root == crypto.Keccak256Hash(acc.code) {
codeHash = true
break
}
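
The last hunk above compares the root directly against crypto.Keccak256Hash instead of byte-comparing against crypto.Sha3. A tiny hedged sketch of the value comparison (the code bytes are placeholders):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	code := []byte{0x60, 0x60, 0x60} // placeholder contract code
	root := crypto.Keccak256Hash(code)

	// common.Hash is a fixed-size value type, so == replaces bytes.Compare(...) == 0.
	fmt.Println(root == crypto.Keccak256Hash(code)) // true
}
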
diff --git a/core/state_processor.go b/core/state_processor.go
index 82a371a9e..4f6ca651e 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -57,13 +57,13 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain) *StateProcess
// Process returns the receipts and logs accumulated during the process and
// returns the amount of gas that was used in the process. If any of the
// transactions failed to execute due to insufficient gas it will return an error.
-func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, vm.Logs, *big.Int, error) {
+func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, *big.Int, error) {
var (
receipts types.Receipts
totalUsedGas = big.NewInt(0)
err error
header = block.Header()
- allLogs vm.Logs
+ allLogs []*types.Log
gp = new(GasPool).AddGas(block.GasLimit())
)
// Mutate the the block and state according to any hard-fork specs
diff --git a/core/tx_list.go b/core/tx_list.go
index c3ddf3148..535cb9dd6 100644
--- a/core/tx_list.go
+++ b/core/tx_list.go
@@ -110,7 +110,7 @@ func (m *txSortedMap) Filter(filter func(*types.Transaction) bool) types.Transac
// If transactions were removed, the heap and cache are ruined
if len(removed) > 0 {
*m.index = make([]uint64, 0, len(m.items))
- for nonce, _ := range m.items {
+ for nonce := range m.items {
*m.index = append(*m.index, nonce)
}
heap.Init(m.index)
@@ -216,7 +216,7 @@ func (m *txSortedMap) Flatten() types.Transactions {
// txList is a "list" of transactions belonging to an account, sorted by account
// nonce. The same type can be used both for storing contiguous transactions for
// the executable/pending queue; and for storing gapped transactions for the non-
-// executable/future queue, with minor behavoiral changes.
+// executable/future queue, with minor behavioral changes.
type txList struct {
strict bool // Whether nonces are strictly continuous or not
txs *txSortedMap // Heap indexed sorted hash map of the transactions
diff --git a/core/tx_pool.go b/core/tx_pool.go
index c5421fa02..58922f12f 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -321,7 +321,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
// add validates a transaction and inserts it into the non-executable queue for
// later pending promotion and execution.
func (pool *TxPool) add(tx *types.Transaction) error {
- // If the transaction is alreayd known, discard it
+ // If the transaction is already known, discard it
hash := tx.Hash()
if pool.all[hash] != nil {
return fmt.Errorf("Known transaction: %x", hash[:4])
@@ -609,7 +609,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
if queued > maxQueuedInTotal {
// Sort all accounts with queued transactions by heartbeat
addresses := make(addresssByHeartbeat, 0, len(pool.queue))
- for addr, _ := range pool.queue {
+ for addr := range pool.queue {
addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
}
sort.Sort(addresses)
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index f5fcac19f..98a34b757 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -32,7 +32,7 @@ import (
)
func transaction(nonce uint64, gaslimit *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
- tx, _ := types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, big.NewInt(1), nil).SignECDSA(types.HomesteadSigner{}, key)
+ tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, big.NewInt(1), nil), types.HomesteadSigner{}, key)
return tx
}
@@ -238,7 +238,7 @@ func TestRemoveTx(t *testing.T) {
func TestNegativeValue(t *testing.T) {
pool, key := setupTxPool()
- tx, _ := types.NewTransaction(0, common.Address{}, big.NewInt(-1), big.NewInt(100), big.NewInt(1), nil).SignECDSA(types.HomesteadSigner{}, key)
+ tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), big.NewInt(100), big.NewInt(1), nil), types.HomesteadSigner{}, key)
from, _ := deriveSender(tx)
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1))
@@ -287,9 +287,9 @@ func TestTransactionDoubleNonce(t *testing.T) {
resetState()
signer := types.HomesteadSigner{}
- tx1, _ := types.NewTransaction(0, common.Address{}, big.NewInt(100), big.NewInt(100000), big.NewInt(1), nil).SignECDSA(signer, key)
- tx2, _ := types.NewTransaction(0, common.Address{}, big.NewInt(100), big.NewInt(1000000), big.NewInt(2), nil).SignECDSA(signer, key)
- tx3, _ := types.NewTransaction(0, common.Address{}, big.NewInt(100), big.NewInt(1000000), big.NewInt(1), nil).SignECDSA(signer, key)
+ tx1, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), big.NewInt(100000), big.NewInt(1), nil), signer, key)
+ tx2, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), big.NewInt(1000000), big.NewInt(2), nil), signer, key)
+ tx3, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), big.NewInt(1000000), big.NewInt(1), nil), signer, key)
// Add the first two transaction, ensure higher priced stays only
if err := pool.add(tx1); err != nil {
diff --git a/core/types.go b/core/types.go
index d84d0987f..7fd658979 100644
--- a/core/types.go
+++ b/core/types.go
@@ -58,5 +58,5 @@ type HeaderValidator interface {
// of gas used in the process and return an error if any of the internal rules
// failed.
type Processor interface {
- Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, vm.Logs, *big.Int, error)
+ Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, *big.Int, error)
}
diff --git a/core/types/bloom9.go b/core/types/bloom9.go
index a1d13e218..32aa47a41 100644
--- a/core/types/bloom9.go
+++ b/core/types/bloom9.go
@@ -22,7 +22,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
)
@@ -95,17 +94,11 @@ func CreateBloom(receipts Receipts) Bloom {
return BytesToBloom(bin.Bytes())
}
-func LogsBloom(logs vm.Logs) *big.Int {
+func LogsBloom(logs []*Log) *big.Int {
bin := new(big.Int)
for _, log := range logs {
- data := make([]common.Hash, len(log.Topics))
bin.Or(bin, bloom9(log.Address.Bytes()))
-
- for i, topic := range log.Topics {
- data[i] = topic
- }
-
- for _, b := range data {
+ for _, b := range log.Topics {
bin.Or(bin, bloom9(b[:]))
}
}
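
The rewritten LogsBloom drops the intermediate topics copy and ORs each topic's bloom bits straight into the accumulator. A hedged sketch of the same accumulation shape; since bloom9 is unexported, fakeBloom below is an illustrative stand-in, not the real mapping:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// fakeBloom is a made-up stand-in for the unexported bloom9 helper.
func fakeBloom(b []byte) *big.Int {
	return new(big.Int).SetBytes(b)
}

func logsBloomSketch(logs []*types.Log) *big.Int {
	bin := new(big.Int)
	for _, log := range logs {
		bin.Or(bin, fakeBloom(log.Address.Bytes()))
		for _, topic := range log.Topics { // no temporary slice needed
			bin.Or(bin, fakeBloom(topic[:]))
		}
	}
	return bin
}

func main() {
	logs := []*types.Log{{Address: common.BytesToAddress([]byte{0x42})}}
	fmt.Println(logsBloomSketch(logs).BitLen() > 0) // true
}
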
diff --git a/core/vm/log.go b/core/types/log.go
index 347bd6e5d..7efb06b5c 100644
--- a/core/vm/log.go
+++ b/core/types/log.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package vm
+package types
import (
"encoding/json"
@@ -79,10 +79,6 @@ type jsonLog struct {
Removed bool `json:"removed"`
}
-func NewLog(address common.Address, topics []common.Hash, data []byte, number uint64) *Log {
- return &Log{Address: address, Topics: topics, Data: data, BlockNumber: number}
-}
-
// EncodeRLP implements rlp.Encoder.
func (l *Log) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data})
@@ -150,8 +146,6 @@ func (l *Log) UnmarshalJSON(input []byte) error {
return nil
}
-type Logs []*Log
-
// LogForStorage is a wrapper around a Log that flattens and parses the entire content of
// a log including non-consensus fields.
type LogForStorage Log
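
With the type now living in core/types (and the NewLog constructor and the Logs alias removed), callers import core/types and fill in a Log struct literal directly. A hedged sketch of the replacement construction (field values are placeholders):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Previously: vm.NewLog(address, topics, data, blockNumber)
	log := &types.Log{
		Address:     common.BytesToAddress([]byte("contract")),
		Topics:      []common.Hash{common.BytesToHash([]byte("topic0"))},
		Data:        []byte("payload"),
		BlockNumber: 42, // non-consensus field, set by the caller that knows it
	}
	fmt.Println(log.Address.Hex())
}
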
diff --git a/core/vm/log_test.go b/core/types/log_test.go
index 994753c62..bf742ccac 100644
--- a/core/vm/log_test.go
+++ b/core/types/log_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package vm
+package types
import (
"encoding/json"
diff --git a/core/types/receipt.go b/core/types/receipt.go
index 70c10d422..0a6a35e33 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -40,7 +39,7 @@ type Receipt struct {
PostState []byte
CumulativeGasUsed *big.Int
Bloom Bloom
- Logs vm.Logs
+ Logs []*Log
// Implementation fields (don't reorder!)
TxHash common.Hash
@@ -52,7 +51,7 @@ type jsonReceipt struct {
PostState *common.Hash `json:"root"`
CumulativeGasUsed *hexutil.Big `json:"cumulativeGasUsed"`
Bloom *Bloom `json:"logsBloom"`
- Logs *vm.Logs `json:"logs"`
+ Logs []*Log `json:"logs"`
TxHash *common.Hash `json:"transactionHash"`
ContractAddress *common.Address `json:"contractAddress"`
GasUsed *hexutil.Big `json:"gasUsed"`
@@ -76,7 +75,7 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
PostState []byte
CumulativeGasUsed *big.Int
Bloom Bloom
- Logs vm.Logs
+ Logs []*Log
}
if err := s.Decode(&receipt); err != nil {
return err
@@ -93,7 +92,7 @@ func (r *Receipt) MarshalJSON() ([]byte, error) {
PostState: &root,
CumulativeGasUsed: (*hexutil.Big)(r.CumulativeGasUsed),
Bloom: &r.Bloom,
- Logs: &r.Logs,
+ Logs: r.Logs,
TxHash: &r.TxHash,
ContractAddress: &r.ContractAddress,
GasUsed: (*hexutil.Big)(r.GasUsed),
@@ -120,7 +119,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
PostState: (*dec.PostState)[:],
CumulativeGasUsed: (*big.Int)(dec.CumulativeGasUsed),
Bloom: *dec.Bloom,
- Logs: *dec.Logs,
+ Logs: dec.Logs,
TxHash: *dec.TxHash,
GasUsed: (*big.Int)(dec.GasUsed),
}
@@ -142,9 +141,9 @@ type ReceiptForStorage Receipt
// EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt
// into an RLP stream.
func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error {
- logs := make([]*vm.LogForStorage, len(r.Logs))
+ logs := make([]*LogForStorage, len(r.Logs))
for i, log := range r.Logs {
- logs[i] = (*vm.LogForStorage)(log)
+ logs[i] = (*LogForStorage)(log)
}
return rlp.Encode(w, []interface{}{r.PostState, r.CumulativeGasUsed, r.Bloom, r.TxHash, r.ContractAddress, logs, r.GasUsed})
}
@@ -158,7 +157,7 @@ func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error {
Bloom Bloom
TxHash common.Hash
ContractAddress common.Address
- Logs []*vm.LogForStorage
+ Logs []*LogForStorage
GasUsed *big.Int
}
if err := s.Decode(&receipt); err != nil {
@@ -166,9 +165,9 @@ func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error {
}
// Assign the consensus fields
r.PostState, r.CumulativeGasUsed, r.Bloom = receipt.PostState, receipt.CumulativeGasUsed, receipt.Bloom
- r.Logs = make(vm.Logs, len(receipt.Logs))
+ r.Logs = make([]*Log, len(receipt.Logs))
for i, log := range receipt.Logs {
- r.Logs[i] = (*vm.Log)(log)
+ r.Logs[i] = (*Log)(log)
}
// Assign the implementation fields
r.TxHash, r.ContractAddress, r.GasUsed = receipt.TxHash, receipt.ContractAddress, receipt.GasUsed
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 87b54ab30..e610671d3 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -18,7 +18,6 @@ package types
import (
"container/heap"
- "crypto/ecdsa"
"encoding/json"
"errors"
"fmt"
@@ -293,14 +292,6 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) {
return msg, err
}
-// SignECDSA signs the transaction using the given signer and private key
-//
-// XXX This only makes for a nice API: NewTx(...).SignECDSA(signer, prv). Should
-// we keep this?
-func (tx *Transaction) SignECDSA(signer Signer, prv *ecdsa.PrivateKey) (*Transaction, error) {
- return signer.SignECDSA(tx, prv)
-}
-
// WithSignature returns a new transaction with the given signature.
// This signature needs to be formatted as described in the yellow paper (v+27).
func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) {
diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go
index 8952bd574..4ebc789a5 100644
--- a/core/types/transaction_signing.go
+++ b/core/types/transaction_signing.go
@@ -50,8 +50,8 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer {
return signer
}
-// SignECDSA signs the transaction using the given signer and private key
-func SignECDSA(s Signer, tx *Transaction, prv *ecdsa.PrivateKey) (*Transaction, error) {
+// SignTx signs the transaction using the given signer and private key
+func SignTx(tx *Transaction, s Signer, prv *ecdsa.PrivateKey) (*Transaction, error) {
h := s.Hash(tx)
sig, err := crypto.Sign(h[:], prv)
if err != nil {
@@ -96,9 +96,8 @@ type Signer interface {
Hash(tx *Transaction) common.Hash
// PubilcKey returns the public key derived from the signature
PublicKey(tx *Transaction) ([]byte, error)
- // SignECDSA signs the transaction with the given and returns a copy of the tx
- SignECDSA(tx *Transaction, prv *ecdsa.PrivateKey) (*Transaction, error)
- // WithSignature returns a copy of the transaction with the given signature
+ // WithSignature returns a copy of the transaction with the given signature.
+ // The signature must be encoded in [R || S || V] format where V is 0 or 1.
WithSignature(tx *Transaction, sig []byte) (*Transaction, error)
// Checks for equality on the signers
Equal(Signer) bool
@@ -124,10 +123,6 @@ func (s EIP155Signer) Equal(s2 Signer) bool {
return ok && eip155.chainId.Cmp(s.chainId) == 0
}
-func (s EIP155Signer) SignECDSA(tx *Transaction, prv *ecdsa.PrivateKey) (*Transaction, error) {
- return SignECDSA(s, tx, prv)
-}
-
func (s EIP155Signer) PublicKey(tx *Transaction) ([]byte, error) {
// if the transaction is not protected fall back to homestead signer
if !tx.Protected() {
@@ -193,15 +188,6 @@ func (s EIP155Signer) Hash(tx *Transaction) common.Hash {
})
}
-func (s EIP155Signer) SigECDSA(tx *Transaction, prv *ecdsa.PrivateKey) (*Transaction, error) {
- h := s.Hash(tx)
- sig, err := crypto.Sign(h[:], prv)
- if err != nil {
- return nil, err
- }
- return s.WithSignature(tx, sig)
-}
-
// HomesteadTransaction implements TransactionInterface using the
// homestead rules.
type HomesteadSigner struct{ FrontierSigner }
@@ -224,15 +210,6 @@ func (hs HomesteadSigner) WithSignature(tx *Transaction, sig []byte) (*Transacti
return cpy, nil
}
-func (hs HomesteadSigner) SignECDSA(tx *Transaction, prv *ecdsa.PrivateKey) (*Transaction, error) {
- h := hs.Hash(tx)
- sig, err := crypto.Sign(h[:], prv)
- if err != nil {
- return nil, err
- }
- return hs.WithSignature(tx, sig)
-}
-
func (hs HomesteadSigner) PublicKey(tx *Transaction) ([]byte, error) {
if tx.data.V.BitLen() > 8 {
return nil, ErrInvalidSig
@@ -280,15 +257,6 @@ func (fs FrontierSigner) WithSignature(tx *Transaction, sig []byte) (*Transactio
return cpy, nil
}
-func (fs FrontierSigner) SignECDSA(tx *Transaction, prv *ecdsa.PrivateKey) (*Transaction, error) {
- h := fs.Hash(tx)
- sig, err := crypto.Sign(h[:], prv)
- if err != nil {
- return nil, err
- }
- return fs.WithSignature(tx, sig)
-}
-
// Hash returns the hash to be sned by the sender.
// It does not uniquely identify the transaction.
func (fs FrontierSigner) Hash(tx *Transaction) common.Hash {
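
The per-signer SignECDSA methods collapse into the single SignTx helper above, which hashes with the signer, signs with crypto.Sign, and applies the resulting [R || S || V] signature through WithSignature. A hedged sketch of that composition done by hand, mirroring what the helper does internally (key and chain id are illustrative):

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey() // illustrative throwaway key
	to := crypto.PubkeyToAddress(key.PublicKey)
	tx := types.NewTransaction(0, to, new(big.Int), big.NewInt(21000), new(big.Int), nil)

	signer := types.NewEIP155Signer(big.NewInt(18))

	// What types.SignTx(tx, signer, key) does internally:
	h := signer.Hash(tx)
	sig, err := crypto.Sign(h[:], key) // 65-byte [R || S || V] signature, V in {0, 1}
	if err != nil {
		panic(err)
	}
	if _, err := signer.WithSignature(tx, sig); err != nil {
		panic(err)
	}
}
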
diff --git a/core/types/transaction_signing_test.go b/core/types/transaction_signing_test.go
index dc618e570..3216fcfad 100644
--- a/core/types/transaction_signing_test.go
+++ b/core/types/transaction_signing_test.go
@@ -30,7 +30,7 @@ func TestEIP155Signing(t *testing.T) {
addr := crypto.PubkeyToAddress(key.PublicKey)
signer := NewEIP155Signer(big.NewInt(18))
- tx, err := NewTransaction(0, addr, new(big.Int), new(big.Int), new(big.Int), nil).SignECDSA(signer, key)
+ tx, err := SignTx(NewTransaction(0, addr, new(big.Int), new(big.Int), new(big.Int), nil), signer, key)
if err != nil {
t.Fatal(err)
}
@@ -49,7 +49,7 @@ func TestEIP155ChainId(t *testing.T) {
addr := crypto.PubkeyToAddress(key.PublicKey)
signer := NewEIP155Signer(big.NewInt(18))
- tx, err := NewTransaction(0, addr, new(big.Int), new(big.Int), new(big.Int), nil).SignECDSA(signer, key)
+ tx, err := SignTx(NewTransaction(0, addr, new(big.Int), new(big.Int), new(big.Int), nil), signer, key)
if err != nil {
t.Fatal(err)
}
@@ -62,7 +62,7 @@ func TestEIP155ChainId(t *testing.T) {
}
tx = NewTransaction(0, addr, new(big.Int), new(big.Int), new(big.Int), nil)
- tx, err = tx.SignECDSA(HomesteadSigner{}, key)
+ tx, err = SignTx(tx, HomesteadSigner{}, key)
if err != nil {
t.Fatal(err)
}
@@ -121,7 +121,7 @@ func TestChainId(t *testing.T) {
tx := NewTransaction(0, common.Address{}, new(big.Int), new(big.Int), new(big.Int), nil)
var err error
- tx, err = tx.SignECDSA(NewEIP155Signer(big.NewInt(1)), key)
+ tx, err = SignTx(tx, NewEIP155Signer(big.NewInt(1)), key)
if err != nil {
t.Fatal(err)
}
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index 4a38462e3..f52f80d34 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -138,7 +138,7 @@ func TestTransactionPriceNonceSort(t *testing.T) {
for start, key := range keys {
addr := crypto.PubkeyToAddress(key.PublicKey)
for i := 0; i < 25; i++ {
- tx, _ := NewTransaction(uint64(start+i), common.Address{}, big.NewInt(100), big.NewInt(100), big.NewInt(int64(start+i)), nil).SignECDSA(signer, key)
+ tx, _ := SignTx(NewTransaction(uint64(start+i), common.Address{}, big.NewInt(100), big.NewInt(100), big.NewInt(int64(start+i)), nil), signer, key)
groups[addr] = append(groups[addr], tx)
}
}
diff --git a/core/vm/environment.go b/core/vm/environment.go
index b74b3a795..c19ef464b 100644
--- a/core/vm/environment.go
+++ b/core/vm/environment.go
@@ -34,7 +34,7 @@ type (
GetHashFunc func(uint64) common.Hash
)
-// Context provides the EVM with auxilary information. Once provided it shouldn't be modified.
+// Context provides the EVM with auxiliary information. Once provided it shouldn't be modified.
type Context struct {
// CanTransfer returns whether the account contains
// sufficient ether to transfer the value
@@ -99,7 +99,7 @@ func (evm *EVM) Cancel() {
atomic.StoreInt32(&evm.abort, 1)
}
-// Call executes the contract associated with the addr with the given input as paramaters. It also handles any
+// Call executes the contract associated with the addr with the given input as parameters. It also handles any
// necessary value transfer required and takes the necessary steps to create accounts and reverses the state in
// case of an execution error or failed value transfer.
func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas, value *big.Int) (ret []byte, err error) {
@@ -157,7 +157,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas,
return ret, err
}
-// CallCode executes the contract associated with the addr with the given input as paramaters. It also handles any
+// CallCode executes the contract associated with the addr with the given input as parameters. It also handles any
// necessary value transfer required and takes the necessary steps to create accounts and reverses the state in
// case of an execution error or failed value transfer.
//
@@ -203,7 +203,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
return ret, err
}
-// DelegateCall executes the contract associated with the addr with the given input as paramaters.
+// DelegateCall executes the contract associated with the addr with the given input as parameters.
// It reverses the state in case of an execution error.
//
// DelegateCall differs from CallCode in the sense that it executes the given address' code with the caller as context
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 2839b7109..5bfa73a30 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
)
@@ -605,8 +606,14 @@ func makeLog(size int) executionFunc {
}
d := memory.Get(mStart.Int64(), mSize.Int64())
- log := NewLog(contract.Address(), topics, d, env.BlockNumber.Uint64())
- env.StateDB.AddLog(log)
+ env.StateDB.AddLog(&types.Log{
+ Address: contract.Address(),
+ Topics: topics,
+ Data: d,
+ // This is a non-consensus field, but assigned here because
+ // core/state doesn't know the current block number.
+ BlockNumber: env.BlockNumber.Uint64(),
+ })
return nil, nil
}
}
diff --git a/core/vm/interface.go b/core/vm/interface.go
index b81f59125..8617b2d0f 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -20,6 +20,7 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
)
// StateDB is an EVM database for full state querying.
@@ -58,7 +59,7 @@ type StateDB interface {
RevertToSnapshot(int)
Snapshot() int
- AddLog(*Log)
+ AddLog(*types.Log)
}
// Account represents a contract or basic ethereum account.
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index eb85ae6af..f4ce81883 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -52,283 +52,283 @@ var defaultJumpTable = NewJumpTable()
func NewJumpTable() [256]operation {
return [256]operation{
- ADD: operation{
+ ADD: {
execute: opAdd,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- SUB: operation{
+ SUB: {
execute: opSub,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- MUL: operation{
+ MUL: {
execute: opMul,
gasCost: constGasFunc(GasFastStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- DIV: operation{
+ DIV: {
execute: opDiv,
gasCost: constGasFunc(GasFastStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- SDIV: operation{
+ SDIV: {
execute: opSdiv,
gasCost: constGasFunc(GasFastStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- MOD: operation{
+ MOD: {
execute: opMod,
gasCost: constGasFunc(GasFastStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- SMOD: operation{
+ SMOD: {
execute: opSmod,
gasCost: constGasFunc(GasFastStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- EXP: operation{
+ EXP: {
execute: opExp,
gasCost: gasExp,
validateStack: makeStackFunc(2, 1),
valid: true,
},
- SIGNEXTEND: operation{
+ SIGNEXTEND: {
execute: opSignExtend,
gasCost: constGasFunc(GasFastStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- NOT: operation{
+ NOT: {
execute: opNot,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(1, 1),
valid: true,
},
- LT: operation{
+ LT: {
execute: opLt,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- GT: operation{
+ GT: {
execute: opGt,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- SLT: operation{
+ SLT: {
execute: opSlt,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- SGT: operation{
+ SGT: {
execute: opSgt,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- EQ: operation{
+ EQ: {
execute: opEq,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- ISZERO: operation{
+ ISZERO: {
execute: opIszero,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(1, 1),
valid: true,
},
- AND: operation{
+ AND: {
execute: opAnd,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- OR: operation{
+ OR: {
execute: opOr,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- XOR: operation{
+ XOR: {
execute: opXor,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- BYTE: operation{
+ BYTE: {
execute: opByte,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
},
- ADDMOD: operation{
+ ADDMOD: {
execute: opAddmod,
gasCost: constGasFunc(GasMidStep),
validateStack: makeStackFunc(3, 1),
valid: true,
},
- MULMOD: operation{
+ MULMOD: {
execute: opMulmod,
gasCost: constGasFunc(GasMidStep),
validateStack: makeStackFunc(3, 1),
valid: true,
},
- SHA3: operation{
+ SHA3: {
execute: opSha3,
gasCost: gasSha3,
validateStack: makeStackFunc(2, 1),
memorySize: memorySha3,
valid: true,
},
- ADDRESS: operation{
+ ADDRESS: {
execute: opAddress,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- BALANCE: operation{
+ BALANCE: {
execute: opBalance,
gasCost: gasBalance,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- ORIGIN: operation{
+ ORIGIN: {
execute: opOrigin,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- CALLER: operation{
+ CALLER: {
execute: opCaller,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- CALLVALUE: operation{
+ CALLVALUE: {
execute: opCallValue,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- CALLDATALOAD: operation{
+ CALLDATALOAD: {
execute: opCalldataLoad,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(1, 1),
valid: true,
},
- CALLDATASIZE: operation{
+ CALLDATASIZE: {
execute: opCalldataSize,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- CALLDATACOPY: operation{
+ CALLDATACOPY: {
execute: opCalldataCopy,
gasCost: gasCalldataCopy,
validateStack: makeStackFunc(3, 1),
memorySize: memoryCalldataCopy,
valid: true,
},
- CODESIZE: operation{
+ CODESIZE: {
execute: opCodeSize,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- EXTCODESIZE: operation{
+ EXTCODESIZE: {
execute: opExtCodeSize,
gasCost: gasExtCodeSize,
validateStack: makeStackFunc(1, 1),
valid: true,
},
- CODECOPY: operation{
+ CODECOPY: {
execute: opCodeCopy,
gasCost: gasCodeCopy,
validateStack: makeStackFunc(3, 0),
memorySize: memoryCodeCopy,
valid: true,
},
- EXTCODECOPY: operation{
+ EXTCODECOPY: {
execute: opExtCodeCopy,
gasCost: gasExtCodeCopy,
validateStack: makeStackFunc(4, 0),
memorySize: memoryExtCodeCopy,
valid: true,
},
- GASPRICE: operation{
+ GASPRICE: {
execute: opGasprice,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- BLOCKHASH: operation{
+ BLOCKHASH: {
execute: opBlockhash,
gasCost: constGasFunc(GasExtStep),
validateStack: makeStackFunc(1, 1),
valid: true,
},
- COINBASE: operation{
+ COINBASE: {
execute: opCoinbase,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- TIMESTAMP: operation{
+ TIMESTAMP: {
execute: opTimestamp,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- NUMBER: operation{
+ NUMBER: {
execute: opNumber,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- DIFFICULTY: operation{
+ DIFFICULTY: {
execute: opDifficulty,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- GASLIMIT: operation{
+ GASLIMIT: {
execute: opGasLimit,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- POP: operation{
+ POP: {
execute: opPop,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(1, 0),
valid: true,
},
- MLOAD: operation{
+ MLOAD: {
execute: opMload,
gasCost: gasMLoad,
validateStack: makeStackFunc(1, 1),
memorySize: memoryMLoad,
valid: true,
},
- MSTORE: operation{
+ MSTORE: {
execute: opMstore,
gasCost: gasMStore,
validateStack: makeStackFunc(2, 0),
memorySize: memoryMStore,
valid: true,
},
- MSTORE8: operation{
+ MSTORE8: {
execute: opMstore8,
gasCost: gasMStore8,
memorySize: memoryMStore8,
@@ -336,71 +336,71 @@ func NewJumpTable() [256]operation {
valid: true,
},
- SLOAD: operation{
+ SLOAD: {
execute: opSload,
gasCost: gasSLoad,
validateStack: makeStackFunc(1, 1),
valid: true,
},
- SSTORE: operation{
+ SSTORE: {
execute: opSstore,
gasCost: gasSStore,
validateStack: makeStackFunc(2, 0),
valid: true,
},
- JUMPDEST: operation{
+ JUMPDEST: {
execute: opJumpdest,
gasCost: constGasFunc(params.JumpdestGas),
validateStack: makeStackFunc(0, 0),
valid: true,
},
- PC: operation{
+ PC: {
execute: opPc,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- MSIZE: operation{
+ MSIZE: {
execute: opMsize,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- GAS: operation{
+ GAS: {
execute: opGas,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
- CREATE: operation{
+ CREATE: {
execute: opCreate,
gasCost: gasCreate,
validateStack: makeStackFunc(3, 1),
memorySize: memoryCreate,
valid: true,
},
- CALL: operation{
+ CALL: {
execute: opCall,
gasCost: gasCall,
validateStack: makeStackFunc(7, 1),
memorySize: memoryCall,
valid: true,
},
- CALLCODE: operation{
+ CALLCODE: {
execute: opCallCode,
gasCost: gasCallCode,
validateStack: makeStackFunc(7, 1),
memorySize: memoryCall,
valid: true,
},
- DELEGATECALL: operation{
+ DELEGATECALL: {
execute: opDelegateCall,
gasCost: gasDelegateCall,
validateStack: makeStackFunc(6, 1),
memorySize: memoryDelegateCall,
valid: true,
},
- RETURN: operation{
+ RETURN: {
execute: opReturn,
gasCost: gasReturn,
validateStack: makeStackFunc(2, 0),
@@ -408,448 +408,448 @@ func NewJumpTable() [256]operation {
halts: true,
valid: true,
},
- SUICIDE: operation{
+ SUICIDE: {
execute: opSuicide,
gasCost: gasSuicide,
validateStack: makeStackFunc(1, 0),
halts: true,
valid: true,
},
- JUMP: operation{
+ JUMP: {
execute: opJump,
gasCost: constGasFunc(GasMidStep),
validateStack: makeStackFunc(1, 0),
jumps: true,
valid: true,
},
- JUMPI: operation{
+ JUMPI: {
execute: opJumpi,
gasCost: constGasFunc(GasSlowStep),
validateStack: makeStackFunc(2, 0),
jumps: true,
valid: true,
},
- STOP: operation{
+ STOP: {
execute: opStop,
gasCost: constGasFunc(Zero),
validateStack: makeStackFunc(0, 0),
halts: true,
valid: true,
},
- LOG0: operation{
+ LOG0: {
execute: makeLog(0),
gasCost: makeGasLog(0),
validateStack: makeStackFunc(2, 0),
memorySize: memoryLog,
valid: true,
},
- LOG1: operation{
+ LOG1: {
execute: makeLog(1),
gasCost: makeGasLog(1),
validateStack: makeStackFunc(3, 0),
memorySize: memoryLog,
valid: true,
},
- LOG2: operation{
+ LOG2: {
execute: makeLog(2),
gasCost: makeGasLog(2),
validateStack: makeStackFunc(4, 0),
memorySize: memoryLog,
valid: true,
},
- LOG3: operation{
+ LOG3: {
execute: makeLog(3),
gasCost: makeGasLog(3),
validateStack: makeStackFunc(5, 0),
memorySize: memoryLog,
valid: true,
},
- LOG4: operation{
+ LOG4: {
execute: makeLog(4),
gasCost: makeGasLog(4),
validateStack: makeStackFunc(6, 0),
memorySize: memoryLog,
valid: true,
},
- SWAP1: operation{
+ SWAP1: {
execute: makeSwap(1),
gasCost: gasSwap,
validateStack: makeStackFunc(2, 0),
valid: true,
},
- SWAP2: operation{
+ SWAP2: {
execute: makeSwap(2),
gasCost: gasSwap,
validateStack: makeStackFunc(3, 0),
valid: true,
},
- SWAP3: operation{
+ SWAP3: {
execute: makeSwap(3),
gasCost: gasSwap,
validateStack: makeStackFunc(4, 0),
valid: true,
},
- SWAP4: operation{
+ SWAP4: {
execute: makeSwap(4),
gasCost: gasSwap,
validateStack: makeStackFunc(5, 0),
valid: true,
},
- SWAP5: operation{
+ SWAP5: {
execute: makeSwap(5),
gasCost: gasSwap,
validateStack: makeStackFunc(6, 0),
valid: true,
},
- SWAP6: operation{
+ SWAP6: {
execute: makeSwap(6),
gasCost: gasSwap,
validateStack: makeStackFunc(7, 0),
valid: true,
},
- SWAP7: operation{
+ SWAP7: {
execute: makeSwap(7),
gasCost: gasSwap,
validateStack: makeStackFunc(8, 0),
valid: true,
},
- SWAP8: operation{
+ SWAP8: {
execute: makeSwap(8),
gasCost: gasSwap,
validateStack: makeStackFunc(9, 0),
valid: true,
},
- SWAP9: operation{
+ SWAP9: {
execute: makeSwap(9),
gasCost: gasSwap,
validateStack: makeStackFunc(10, 0),
valid: true,
},
- SWAP10: operation{
+ SWAP10: {
execute: makeSwap(10),
gasCost: gasSwap,
validateStack: makeStackFunc(11, 0),
valid: true,
},
- SWAP11: operation{
+ SWAP11: {
execute: makeSwap(11),
gasCost: gasSwap,
validateStack: makeStackFunc(12, 0),
valid: true,
},
- SWAP12: operation{
+ SWAP12: {
execute: makeSwap(12),
gasCost: gasSwap,
validateStack: makeStackFunc(13, 0),
valid: true,
},
- SWAP13: operation{
+ SWAP13: {
execute: makeSwap(13),
gasCost: gasSwap,
validateStack: makeStackFunc(14, 0),
valid: true,
},
- SWAP14: operation{
+ SWAP14: {
execute: makeSwap(14),
gasCost: gasSwap,
validateStack: makeStackFunc(15, 0),
valid: true,
},
- SWAP15: operation{
+ SWAP15: {
execute: makeSwap(15),
gasCost: gasSwap,
validateStack: makeStackFunc(16, 0),
valid: true,
},
- SWAP16: operation{
+ SWAP16: {
execute: makeSwap(16),
gasCost: gasSwap,
validateStack: makeStackFunc(17, 0),
valid: true,
},
- PUSH1: operation{
+ PUSH1: {
execute: makePush(1, big.NewInt(1)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH2: operation{
+ PUSH2: {
execute: makePush(2, big.NewInt(2)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH3: operation{
+ PUSH3: {
execute: makePush(3, big.NewInt(3)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH4: operation{
+ PUSH4: {
execute: makePush(4, big.NewInt(4)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH5: operation{
+ PUSH5: {
execute: makePush(5, big.NewInt(5)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH6: operation{
+ PUSH6: {
execute: makePush(6, big.NewInt(6)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH7: operation{
+ PUSH7: {
execute: makePush(7, big.NewInt(7)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH8: operation{
+ PUSH8: {
execute: makePush(8, big.NewInt(8)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH9: operation{
+ PUSH9: {
execute: makePush(9, big.NewInt(9)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH10: operation{
+ PUSH10: {
execute: makePush(10, big.NewInt(10)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH11: operation{
+ PUSH11: {
execute: makePush(11, big.NewInt(11)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH12: operation{
+ PUSH12: {
execute: makePush(12, big.NewInt(12)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH13: operation{
+ PUSH13: {
execute: makePush(13, big.NewInt(13)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH14: operation{
+ PUSH14: {
execute: makePush(14, big.NewInt(14)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH15: operation{
+ PUSH15: {
execute: makePush(15, big.NewInt(15)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH16: operation{
+ PUSH16: {
execute: makePush(16, big.NewInt(16)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH17: operation{
+ PUSH17: {
execute: makePush(17, big.NewInt(17)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH18: operation{
+ PUSH18: {
execute: makePush(18, big.NewInt(18)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH19: operation{
+ PUSH19: {
execute: makePush(19, big.NewInt(19)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH20: operation{
+ PUSH20: {
execute: makePush(20, big.NewInt(20)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH21: operation{
+ PUSH21: {
execute: makePush(21, big.NewInt(21)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH22: operation{
+ PUSH22: {
execute: makePush(22, big.NewInt(22)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH23: operation{
+ PUSH23: {
execute: makePush(23, big.NewInt(23)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH24: operation{
+ PUSH24: {
execute: makePush(24, big.NewInt(24)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH25: operation{
+ PUSH25: {
execute: makePush(25, big.NewInt(25)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH26: operation{
+ PUSH26: {
execute: makePush(26, big.NewInt(26)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH27: operation{
+ PUSH27: {
execute: makePush(27, big.NewInt(27)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH28: operation{
+ PUSH28: {
execute: makePush(28, big.NewInt(28)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH29: operation{
+ PUSH29: {
execute: makePush(29, big.NewInt(29)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH30: operation{
+ PUSH30: {
execute: makePush(30, big.NewInt(30)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH31: operation{
+ PUSH31: {
execute: makePush(31, big.NewInt(31)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- PUSH32: operation{
+ PUSH32: {
execute: makePush(32, big.NewInt(32)),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
- DUP1: operation{
+ DUP1: {
execute: makeDup(1),
gasCost: gasDup,
validateStack: makeStackFunc(1, 1),
valid: true,
},
- DUP2: operation{
+ DUP2: {
execute: makeDup(2),
gasCost: gasDup,
validateStack: makeStackFunc(2, 1),
valid: true,
},
- DUP3: operation{
+ DUP3: {
execute: makeDup(3),
gasCost: gasDup,
validateStack: makeStackFunc(3, 1),
valid: true,
},
- DUP4: operation{
+ DUP4: {
execute: makeDup(4),
gasCost: gasDup,
validateStack: makeStackFunc(4, 1),
valid: true,
},
- DUP5: operation{
+ DUP5: {
execute: makeDup(5),
gasCost: gasDup,
validateStack: makeStackFunc(5, 1),
valid: true,
},
- DUP6: operation{
+ DUP6: {
execute: makeDup(6),
gasCost: gasDup,
validateStack: makeStackFunc(6, 1),
valid: true,
},
- DUP7: operation{
+ DUP7: {
execute: makeDup(7),
gasCost: gasDup,
validateStack: makeStackFunc(7, 1),
valid: true,
},
- DUP8: operation{
+ DUP8: {
execute: makeDup(8),
gasCost: gasDup,
validateStack: makeStackFunc(8, 1),
valid: true,
},
- DUP9: operation{
+ DUP9: {
execute: makeDup(9),
gasCost: gasDup,
validateStack: makeStackFunc(9, 1),
valid: true,
},
- DUP10: operation{
+ DUP10: {
execute: makeDup(10),
gasCost: gasDup,
validateStack: makeStackFunc(10, 1),
valid: true,
},
- DUP11: operation{
+ DUP11: {
execute: makeDup(11),
gasCost: gasDup,
validateStack: makeStackFunc(11, 1),
valid: true,
},
- DUP12: operation{
+ DUP12: {
execute: makeDup(12),
gasCost: gasDup,
validateStack: makeStackFunc(12, 1),
valid: true,
},
- DUP13: operation{
+ DUP13: {
execute: makeDup(13),
gasCost: gasDup,
validateStack: makeStackFunc(13, 1),
valid: true,
},
- DUP14: operation{
+ DUP14: {
execute: makeDup(14),
gasCost: gasDup,
validateStack: makeStackFunc(14, 1),
valid: true,
},
- DUP15: operation{
+ DUP15: {
execute: makeDup(15),
gasCost: gasDup,
validateStack: makeStackFunc(15, 1),
valid: true,
},
- DUP16: operation{
+ DUP16: {
execute: makeDup(16),
gasCost: gasDup,
validateStack: makeStackFunc(16, 1),
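The long run of changes in this file is a single mechanical simplification: inside an array, slice, or map composite literal, Go lets the element type be elided from each keyed entry, so ADD: operation{...} becomes ADD: {...}. This is the rewrite gofmt -s proposes. A minimal, self-contained sketch of the idiom, assuming a stand-in op struct rather than the real vm operation type:

package main

import "fmt"

// op stands in for the vm operation struct; only the shape matters here.
type op struct {
    name     string
    minStack int
}

const (
    ADD = iota
    SUB
)

func main() {
    // Verbose form: the element type is repeated for every keyed entry.
    verbose := [2]op{
        ADD: op{name: "ADD", minStack: 2},
        SUB: op{name: "SUB", minStack: 2},
    }
    // Simplified form, as in the diff above: the element type is elided.
    simple := [2]op{
        ADD: {name: "ADD", minStack: 2},
        SUB: {name: "SUB", minStack: 2},
    }
    fmt.Println(verbose == simple) // true: both literals build the same array
}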
diff --git a/core/vm/noop.go b/core/vm/noop.go
index ca7d1055a..ef6837273 100644
--- a/core/vm/noop.go
+++ b/core/vm/noop.go
@@ -20,6 +20,7 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
)
func NoopCanTransfer(db StateDB, from common.Address, balance *big.Int) bool {
@@ -65,4 +66,4 @@ func (NoopStateDB) Exist(common.Address) bool { return f
func (NoopStateDB) Empty(common.Address) bool { return false }
func (NoopStateDB) RevertToSnapshot(int) {}
func (NoopStateDB) Snapshot() int { return 0 }
-func (NoopStateDB) AddLog(*Log) {}
+func (NoopStateDB) AddLog(*types.Log) {}
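With logs now living in core/types, every vm.StateDB implementation has to accept *types.Log. A minimal sketch of a log-recording implementation of just this one method; the logCollector type is illustrative and not part of go-ethereum, only the AddLog signature comes from the diff above.

package example

import "github.com/ethereum/go-ethereum/core/types"

// logCollector records every log emitted during EVM execution.
type logCollector struct {
    logs []*types.Log
}

// AddLog matches the new AddLog(*types.Log) method of the vm.StateDB
// interface; the full interface has many more methods.
func (c *logCollector) AddLog(l *types.Log) {
    c.logs = append(c.logs, l)
}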
diff --git a/crypto/crypto.go b/crypto/crypto.go
index f1a4b774c..ce45ebd38 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -194,9 +194,9 @@ func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) {
// Sign calculates an ECDSA signature.
//
-// This function is susceptible to choosen plaintext attacks that can leak
+// This function is susceptible to chosen plaintext attacks that can leak
// information about the private key that is used for signing. Callers must
-// be aware that the given hash cannot be choosen by an adversery. Common
+// be aware that the given hash cannot be chosen by an adversary. A common
// solution is to hash any input before calculating the signature.
//
// The produced signature is in the [R || S || V] format where V is 0 or 1.
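The corrected comment states the usage rule this API relies on: hash the input first so the signed digest is never chosen by an adversary. A short sketch of that pattern with the crypto package; the message string is a placeholder:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    key, err := crypto.GenerateKey()
    if err != nil {
        panic(err)
    }
    // Hash the input first so the signed digest is never attacker-chosen.
    hash := crypto.Keccak256([]byte("payload to authenticate"))

    sig, err := crypto.Sign(hash, key)
    if err != nil {
        panic(err)
    }
    // The signature is 65 bytes in [R || S || V] form, V being 0 or 1.
    fmt.Printf("signature: %x (len=%d)\n", sig, len(sig))
}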
diff --git a/crypto/crypto_test.go b/crypto/crypto_test.go
index 86a582306..f42605d32 100644
--- a/crypto/crypto_test.go
+++ b/crypto/crypto_test.go
@@ -217,7 +217,7 @@ func TestValidateSignatureValues(t *testing.T) {
func checkhash(t *testing.T, name string, f func([]byte) []byte, msg, exp []byte) {
sum := f(msg)
- if bytes.Compare(exp, sum) != 0 {
+ if !bytes.Equal(exp, sum) {
t.Fatalf("hash %s mismatch: want: %x have: %x", name, exp, sum)
}
}
diff --git a/crypto/ecies/asn1.go b/crypto/ecies/asn1.go
index 40dabd329..508a645cd 100644
--- a/crypto/ecies/asn1.go
+++ b/crypto/ecies/asn1.go
@@ -109,7 +109,7 @@ func (curve secgNamedCurve) Equal(curve2 secgNamedCurve) bool {
if len(curve) != len(curve2) {
return false
}
- for i, _ := range curve {
+ for i := range curve {
if curve[i] != curve2[i] {
return false
}
@@ -157,7 +157,7 @@ func (a asnAlgorithmIdentifier) Cmp(b asnAlgorithmIdentifier) bool {
if len(a.Algorithm) != len(b.Algorithm) {
return false
}
- for i, _ := range a.Algorithm {
+ for i := range a.Algorithm {
if a.Algorithm[i] != b.Algorithm[i] {
return false
}
@@ -306,7 +306,7 @@ func (a asnECDHAlgorithm) Cmp(b asnECDHAlgorithm) bool {
if len(a.Algorithm) != len(b.Algorithm) {
return false
}
- for i, _ := range a.Algorithm {
+ for i := range a.Algorithm {
if a.Algorithm[i] != b.Algorithm[i] {
return false
}
@@ -325,7 +325,7 @@ func (a asnKeyDerivationFunction) Cmp(b asnKeyDerivationFunction) bool {
if len(a.Algorithm) != len(b.Algorithm) {
return false
}
- for i, _ := range a.Algorithm {
+ for i := range a.Algorithm {
if a.Algorithm[i] != b.Algorithm[i] {
return false
}
@@ -360,7 +360,7 @@ func (a asnSymmetricEncryption) Cmp(b asnSymmetricEncryption) bool {
if len(a.Algorithm) != len(b.Algorithm) {
return false
}
- for i, _ := range a.Algorithm {
+ for i := range a.Algorithm {
if a.Algorithm[i] != b.Algorithm[i] {
return false
}
@@ -380,7 +380,7 @@ func (a asnMessageAuthenticationCode) Cmp(b asnMessageAuthenticationCode) bool {
if len(a.Algorithm) != len(b.Algorithm) {
return false
}
- for i, _ := range a.Algorithm {
+ for i := range a.Algorithm {
if a.Algorithm[i] != b.Algorithm[i] {
return false
}
diff --git a/crypto/ecies/ecies.go b/crypto/ecies/ecies.go
index 86a70261d..2a16f20a2 100644
--- a/crypto/ecies/ecies.go
+++ b/crypto/ecies/ecies.go
@@ -93,7 +93,7 @@ func ImportECDSA(prv *ecdsa.PrivateKey) *PrivateKey {
}
// Generate an elliptic curve public / private keypair. If params is nil,
-// the recommended default paramters for the key will be chosen.
+// the recommended default parameters for the key will be chosen.
func GenerateKey(rand io.Reader, curve elliptic.Curve, params *ECIESParams) (prv *PrivateKey, err error) {
pb, x, y, err := elliptic.GenerateKey(curve, rand)
if err != nil {
@@ -291,9 +291,8 @@ func Encrypt(rand io.Reader, pub *PublicKey, m, s1, s2 []byte) (ct []byte, err e
// Decrypt decrypts an ECIES ciphertext.
func (prv *PrivateKey) Decrypt(rand io.Reader, c, s1, s2 []byte) (m []byte, err error) {
- if c == nil || len(c) == 0 {
- err = ErrInvalidMessage
- return
+ if len(c) == 0 {
+ return nil, ErrInvalidMessage
}
params := prv.PublicKey.Params
if params == nil {
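For orientation, the two signatures touched in this file are typically used together. A hedged round-trip sketch with default parameters (nil params and nil shared info s1/s2), matching the GenerateKey, Encrypt and Decrypt signatures visible above:

package main

import (
    "crypto/elliptic"
    "crypto/rand"
    "fmt"

    "github.com/ethereum/go-ethereum/crypto/ecies"
)

func main() {
    // nil params selects the recommended defaults for the curve.
    prv, err := ecies.GenerateKey(rand.Reader, elliptic.P256(), nil)
    if err != nil {
        panic(err)
    }
    ct, err := ecies.Encrypt(rand.Reader, &prv.PublicKey, []byte("hello"), nil, nil)
    if err != nil {
        panic(err)
    }
    // An empty ciphertext now fails fast with ErrInvalidMessage (the len(c) == 0 check above).
    pt, err := prv.Decrypt(rand.Reader, ct, nil, nil)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", pt)
}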
diff --git a/crypto/ecies/ecies_test.go b/crypto/ecies/ecies_test.go
index cb09061ce..3b3517baf 100644
--- a/crypto/ecies/ecies_test.go
+++ b/crypto/ecies/ecies_test.go
@@ -492,17 +492,17 @@ type testCase struct {
}
var testCases = []testCase{
- testCase{
+ {
Curve: elliptic.P256(),
Name: "P256",
Expected: true,
},
- testCase{
+ {
Curve: elliptic.P384(),
Name: "P384",
Expected: true,
},
- testCase{
+ {
Curve: elliptic.P521(),
Name: "P521",
Expected: true,
diff --git a/crypto/secp256k1/libsecp256k1/include/secp256k1_schnorr.h b/crypto/secp256k1/libsecp256k1/include/secp256k1_schnorr.h
index 49354933d..9b4f5b607 100644
--- a/crypto/secp256k1/libsecp256k1/include/secp256k1_schnorr.h
+++ b/crypto/secp256k1/libsecp256k1/include/secp256k1_schnorr.h
@@ -99,7 +99,7 @@ SECP256K1_API int secp256k1_schnorr_generate_nonce_pair(
/** Produce a partial Schnorr signature, which can be combined using
* secp256k1_schnorr_partial_combine, to end up with a full signature that is
* verifiable using secp256k1_schnorr_verify.
- * Returns: 1: signature created succesfully.
+ * Returns: 1: signature created successfully.
* 0: no valid signature exists with this combination of keys, nonces
* and message (chance around 1 in 2^128)
* -1: invalid private key, nonce, or public nonces.
@@ -148,7 +148,7 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorr_partial_sign(
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6);
/** Combine multiple Schnorr partial signatures.
- * Returns: 1: the passed signatures were succesfully combined.
+ * Returns: 1: the passed signatures were successfully combined.
* 0: the resulting signature is not valid (chance of 1 in 2^256)
* -1: some inputs were invalid, or the signatures were not created
* using the same set of nonces
diff --git a/crypto/secp256k1/notes.go b/crypto/secp256k1/notes.go
index 93e6d1902..49fcf8e2d 100644
--- a/crypto/secp256k1/notes.go
+++ b/crypto/secp256k1/notes.go
@@ -163,7 +163,7 @@ int secp256k1_ecdsa_sign_compact(const unsigned char *msg, int msglen,
int *recid);
* Recover an ECDSA public key from a compact signature.
- * Returns: 1: public key succesfully recovered (which guarantees a correct signature).
+ * Returns: 1: public key successfully recovered (which guarantees a correct signature).
* 0: otherwise.
* In: msg: the message assumed to be signed
* msglen: the length of the message
diff --git a/crypto/secp256k1/secp256.go b/crypto/secp256k1/secp256.go
index 4999c5c95..2c5f61450 100644
--- a/crypto/secp256k1/secp256.go
+++ b/crypto/secp256k1/secp256.go
@@ -49,7 +49,7 @@ import (
/*
TODO:
- > store private keys in buffer and shuffle (deters persistance on swap disc)
+ > store private keys in buffer and shuffle (deters persistence on swap disc)
> byte permutation (changing)
> xor with changing random block (to deter scanning memory for 0x63) (stream cipher?)
*/
diff --git a/crypto/sha3/sha3_test.go b/crypto/sha3/sha3_test.go
index caf72f279..c433761a8 100644
--- a/crypto/sha3/sha3_test.go
+++ b/crypto/sha3/sha3_test.go
@@ -201,7 +201,7 @@ func TestSqueezing(t *testing.T) {
d1 := newShakeHash()
d1.Write([]byte(testString))
var multiple []byte
- for _ = range ref {
+ for range ref {
one := make([]byte, 1)
d1.Read(one)
multiple = append(multiple, one...)
diff --git a/errs/errors.go b/errs/errors.go
index 675649efa..daa814db7 100644
--- a/errs/errors.go
+++ b/errs/errors.go
@@ -19,7 +19,6 @@ package errs
import (
"fmt"
- "github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
)
@@ -32,15 +31,10 @@ Fields:
Package:
name of the package/component
-
- Level:
- a function mapping error code to logger.LogLevel (severity)
- if not given, errors default to logger.InfoLevel
*/
type Errors struct {
Errors map[int]string
Package string
- Level func(code int) logger.LogLevel
}
/*
@@ -58,7 +52,6 @@ type Error struct {
Code int
Name string
Package string
- level logger.LogLevel
message string
format string
params []interface{}
@@ -69,15 +62,10 @@ func (self *Errors) New(code int, format string, params ...interface{}) *Error {
if !ok {
panic("invalid error code")
}
- level := logger.InfoLevel
- if self.Level != nil {
- level = self.Level(code)
- }
return &Error{
Code: code,
Name: name,
Package: self.Package,
- level: level,
format: format,
params: params,
}
@@ -98,13 +86,3 @@ func (self Error) Log(v glog.Verbose) {
v.Infoln(self)
}
}
-
-/*
-err.Fatal() is true if err's severity level is 0 or 1 (logger.ErrorLevel or logger.Silence)
-*/
-func (self *Error) Fatal() (fatal bool) {
- if self.level < logger.WarnLevel {
- fatal = true
- }
- return
-}
diff --git a/errs/errors_test.go b/errs/errors_test.go
index d6d14b45e..5a2ffbec3 100644
--- a/errs/errors_test.go
+++ b/errs/errors_test.go
@@ -19,8 +19,6 @@ package errs
import (
"fmt"
"testing"
-
- "github.com/ethereum/go-ethereum/logger"
)
func testErrors() *Errors {
@@ -30,14 +28,6 @@ func testErrors() *Errors {
0: "zero",
1: "one",
},
- Level: func(i int) (l logger.LogLevel) {
- if i == 0 {
- l = logger.ErrorLevel
- } else {
- l = logger.WarnLevel
- }
- return
- },
}
}
@@ -49,14 +39,3 @@ func TestErrorMessage(t *testing.T) {
t.Errorf("error message incorrect. expected %v, got %v", exp, message)
}
}
-
-func TestErrorSeverity(t *testing.T) {
- err0 := testErrors().New(0, "zero detail")
- if !err0.Fatal() {
- t.Errorf("error should be fatal")
- }
- err1 := testErrors().New(1, "one detail")
- if err1.Fatal() {
- t.Errorf("error should not be fatal")
- }
-}
diff --git a/eth/api.go b/eth/api.go
index f077e348c..d798c196e 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -435,7 +435,7 @@ func (api *PrivateDebugAPI) traceBlock(block *types.Block, logConfig *vm.LogConf
return true, structLogger.StructLogs(), nil
}
-// callmsg is the message type used for call transations.
+// callmsg is the message type used for call transactions.
type callmsg struct {
addr common.Address
to *common.Address
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 801b6a4f6..1174588ea 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -31,7 +31,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/params"
- rpc "github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
)
diff --git a/eth/backend_test.go b/eth/backend_test.go
index 212c4c4d3..574731fbe 100644
--- a/eth/backend_test.go
+++ b/eth/backend_test.go
@@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)
@@ -38,12 +37,12 @@ func TestMipmapUpgrade(t *testing.T) {
switch i {
case 1:
receipt := types.NewReceipt(nil, new(big.Int))
- receipt.Logs = vm.Logs{&vm.Log{Address: addr}}
+ receipt.Logs = []*types.Log{{Address: addr}}
gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
case 2:
receipt := types.NewReceipt(nil, new(big.Int))
- receipt.Logs = vm.Logs{&vm.Log{Address: addr}}
+ receipt.Logs = []*types.Log{{Address: addr}}
gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
}
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index b1f4b8169..9be4bd87d 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -1005,7 +1005,7 @@ func (d *Downloader) fetchNodeData() error {
// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
// - fetch: network callback to actually send a particular download request to a physical remote peer
// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
-// - capacity: network callback to retreive the estimated type-specific bandwidth capacity of a peer (traffic shaping)
+// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
// - kind: textual label of the type being downloaded to display in log messages
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 86638ae2d..b43edf53e 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -119,7 +119,7 @@ func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, paren
// If the block number is multiple of 3, send a bonus transaction to the miner
if parent == dl.genesis && i%3 == 0 {
signer := types.MakeSigner(params.TestChainConfig, block.Number())
- tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, testKey)
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
if err != nil {
panic(err)
}
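The test now signs through the free function types.SignTx instead of the removed Transaction.SignECDSA method. A hedged sketch of the new call pattern; the nonce, recipient, amount and block number below are placeholder values, only the function names and signatures come from the diff:

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/params"
)

func main() {
    key, _ := crypto.GenerateKey()

    // Construct an unsigned transfer, then sign it with the chain-specific signer.
    tx := types.NewTransaction(0, common.Address{0x01}, big.NewInt(1000), params.TxGas, nil, nil)
    signer := types.MakeSigner(params.TestChainConfig, big.NewInt(1))

    signed, err := types.SignTx(tx, signer, key)
    if err != nil {
        panic(err)
    }
    fmt.Println(signed.Hash().Hex())
}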
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index b0bfc66c8..ea4b6a6f2 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -205,7 +205,7 @@ func (p *peer) FetchNodeData(request *fetchRequest) error {
// Convert the hash set to a retrievable slice
hashes := make([]common.Hash, 0, len(request.Hashes))
- for hash, _ := range request.Hashes {
+ for hash := range request.Hashes {
hashes = append(hashes, hash)
}
go p.getNodeData(hashes)
@@ -314,7 +314,7 @@ func (p *peer) MarkLacking(hash common.Hash) {
defer p.lock.Unlock()
for len(p.lacking) >= maxLackingHashes {
- for drop, _ := range p.lacking {
+ for drop := range p.lacking {
delete(p.lacking, drop)
break
}
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index b7ad92099..dd9590b28 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -844,7 +844,7 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
}
}
// Remove the expired requests from the pending pool
- for id, _ := range expiries {
+ for id := range expiries {
delete(pendPool, id)
}
return expiries
@@ -1063,7 +1063,7 @@ func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(int, boo
// If no data was retrieved, mark their hashes as unavailable for the origin peer
if len(data) == 0 {
- for hash, _ := range request.Hashes {
+ for hash := range request.Hashes {
request.Peer.MarkLacking(hash)
}
}
@@ -1123,15 +1123,20 @@ func (q *queue) deliverNodeData(results []trie.SyncResult, callback func(int, bo
callback(i, progressed, errNoFetchesPending)
return
}
- if prog, _, err := q.stateScheduler.Process([]trie.SyncResult{result}); err != nil {
- // Processing a state result failed, bail out
+
+ batch := q.stateDatabase.NewBatch()
+ prog, _, err := q.stateScheduler.Process([]trie.SyncResult{result}, batch)
+ if err != nil {
+ q.stateSchedLock.Unlock()
+ callback(i, progressed, err)
+ }
+ if err = batch.Write(); err != nil {
q.stateSchedLock.Unlock()
callback(i, progressed, err)
- return
- } else if prog {
- progressed = true
}
+
// Item processing succeeded, release the lock (temporarily)
+ progressed = progressed || prog
q.stateSchedLock.Unlock()
}
callback(len(results), progressed, nil)
diff --git a/eth/fetcher/fetcher_test.go b/eth/fetcher/fetcher_test.go
index 426bfd542..2e28541ab 100644
--- a/eth/fetcher/fetcher_test.go
+++ b/eth/fetcher/fetcher_test.go
@@ -51,7 +51,7 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
// If the block number is multiple of 3, send a bonus transaction to the miner
if parent == genesis && i%3 == 0 {
signer := types.MakeSigner(params.TestChainConfig, block.Number())
- tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, testKey)
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)
if err != nil {
panic(err)
}
diff --git a/eth/filters/api.go b/eth/filters/api.go
index bbb34d3de..02a544ce1 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -29,7 +29,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
@@ -46,7 +45,7 @@ type filter struct {
deadline *time.Timer // filter is inactive when deadline triggers
hashes []common.Hash
crit FilterCriteria
- logs []*vm.Log
+ logs []*types.Log
s *Subscription // associated subscription in event system
}
@@ -242,7 +241,7 @@ func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc
var (
rpcSub = notifier.CreateSubscription()
- matchedLogs = make(chan []*vm.Log)
+ matchedLogs = make(chan []*types.Log)
)
logsSub, err := api.events.SubscribeLogs(crit, matchedLogs)
@@ -293,14 +292,14 @@ type FilterCriteria struct {
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter
func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
- logs := make(chan []*vm.Log)
+ logs := make(chan []*types.Log)
logsSub, err := api.events.SubscribeLogs(crit, logs)
if err != nil {
return rpc.ID(""), err
}
api.filtersMu.Lock()
- api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(deadline), logs: make([]*vm.Log, 0), s: logsSub}
+ api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(deadline), logs: make([]*types.Log, 0), s: logsSub}
api.filtersMu.Unlock()
go func() {
@@ -327,7 +326,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
// GetLogs returns logs matching the given argument that are stored within the state.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
-func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*vm.Log, error) {
+func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) {
if crit.FromBlock == nil {
crit.FromBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
}
@@ -366,7 +365,7 @@ func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool {
// If the filter could not be found an empty array of logs is returned.
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterlogs
-func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*vm.Log, error) {
+func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) {
api.filtersMu.Lock()
f, found := api.filters[id]
api.filtersMu.Unlock()
@@ -441,9 +440,9 @@ func returnHashes(hashes []common.Hash) []common.Hash {
// returnLogs is a helper that will return an empty log array in case the given logs array is nil,
// otherwise the given logs array is returned.
-func returnLogs(logs []*vm.Log) []*vm.Log {
+func returnLogs(logs []*types.Log) []*types.Log {
if logs == nil {
- return []*vm.Log{}
+ return []*types.Log{}
}
return logs
}
@@ -506,7 +505,7 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
switch topic := t.(type) {
case nil:
// ignore topic when matching logs
- args.Topics[i] = []common.Hash{common.Hash{}}
+ args.Topics[i] = []common.Hash{{}}
case string:
// match specific topic
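GetLogs and GetFilterLogs now return []*types.Log. A hedged sketch of fetching stored logs for a single contract through the public filter API; api is an already constructed *filters.PublicFilterAPI and the block range is illustrative:

package example

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/eth/filters"
    "golang.org/x/net/context"
)

// historicLogs fetches stored logs emitted by one contract address.
func historicLogs(api *filters.PublicFilterAPI, contract common.Address) ([]*types.Log, error) {
    crit := filters.FilterCriteria{
        FromBlock: big.NewInt(0),
        ToBlock:   big.NewInt(100), // illustrative range
        Addresses: []common.Address{contract},
    }
    return api.GetLogs(context.Background(), crit)
}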
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index 76ca86524..9a8e2fd70 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
@@ -91,7 +90,7 @@ func (f *Filter) SetTopics(topics [][]common.Hash) {
// all matching entries from the first block that contains matches,
// updating the start point of the filter accordingly. If no results are
// found, a nil slice is returned.
-func (f *Filter) FindOnce(ctx context.Context) ([]*vm.Log, error) {
+func (f *Filter) FindOnce(ctx context.Context) ([]*types.Log, error) {
head, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
if head == nil {
return nil, nil
@@ -122,7 +121,7 @@ func (f *Filter) FindOnce(ctx context.Context) ([]*vm.Log, error) {
}
// Run filters logs with the current parameters set
-func (f *Filter) Find(ctx context.Context) (logs []*vm.Log, err error) {
+func (f *Filter) Find(ctx context.Context) (logs []*types.Log, err error) {
for {
newLogs, err := f.FindOnce(ctx)
if len(newLogs) == 0 || err != nil {
@@ -132,7 +131,7 @@ func (f *Filter) Find(ctx context.Context) (logs []*vm.Log, err error) {
}
}
-func (f *Filter) mipFind(start, end uint64, depth int) (logs []*vm.Log, blockNumber uint64) {
+func (f *Filter) mipFind(start, end uint64, depth int) (logs []*types.Log, blockNumber uint64) {
level := core.MIPMapLevels[depth]
// normalise numerator so we can work in level specific batches and
// work with the proper range checks
@@ -168,7 +167,7 @@ func (f *Filter) mipFind(start, end uint64, depth int) (logs []*vm.Log, blockNum
return nil, end
}
-func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*vm.Log, blockNumber uint64, err error) {
+func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*types.Log, blockNumber uint64, err error) {
for i := start; i <= end; i++ {
blockNumber := rpc.BlockNumber(i)
header, err := f.backend.HeaderByNumber(ctx, blockNumber)
@@ -184,9 +183,9 @@ func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*vm.Log
if err != nil {
return nil, end, err
}
- var unfiltered []*vm.Log
+ var unfiltered []*types.Log
for _, receipt := range receipts {
- unfiltered = append(unfiltered, ([]*vm.Log)(receipt.Logs)...)
+ unfiltered = append(unfiltered, ([]*types.Log)(receipt.Logs)...)
}
logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
if len(logs) > 0 {
@@ -209,8 +208,8 @@ func includes(addresses []common.Address, a common.Address) bool {
}
// filterLogs creates a slice of logs matching the given criteria.
-func filterLogs(logs []*vm.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*vm.Log {
- var ret []*vm.Log
+func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log {
+ var ret []*types.Log
Logs:
for _, log := range logs {
if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber {
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index 1b360cfdb..e0ee2ff51 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
@@ -38,7 +37,7 @@ import (
type Type byte
const (
- // UnknownSubscription indicates an unkown subscription type
+ // UnknownSubscription indicates an unknown subscription type
UnknownSubscription Type = iota
// LogsSubscription queries for new or removed (chain reorg) logs
LogsSubscription
@@ -64,7 +63,7 @@ type subscription struct {
typ Type
created time.Time
logsCrit FilterCriteria
- logs chan []*vm.Log
+ logs chan []*types.Log
hashes chan common.Hash
headers chan *types.Header
installed chan struct{} // closed when the filter is installed
@@ -151,7 +150,7 @@ func (es *EventSystem) subscribe(sub *subscription) *Subscription {
// SubscribeLogs creates a subscription that will write all logs matching the
// given criteria to the given logs channel. Default value for the from and to
// block is "latest". If the fromBlock > toBlock an error is returned.
-func (es *EventSystem) SubscribeLogs(crit FilterCriteria, logs chan []*vm.Log) (*Subscription, error) {
+func (es *EventSystem) SubscribeLogs(crit FilterCriteria, logs chan []*types.Log) (*Subscription, error) {
var from, to rpc.BlockNumber
if crit.FromBlock == nil {
from = rpc.LatestBlockNumber
@@ -189,7 +188,7 @@ func (es *EventSystem) SubscribeLogs(crit FilterCriteria, logs chan []*vm.Log) (
// subscribeMinedPendingLogs creates a subscription that returns mined and
// pending logs that match the given criteria.
-func (es *EventSystem) subscribeMinedPendingLogs(crit FilterCriteria, logs chan []*vm.Log) *Subscription {
+func (es *EventSystem) subscribeMinedPendingLogs(crit FilterCriteria, logs chan []*types.Log) *Subscription {
sub := &subscription{
id: rpc.NewID(),
typ: MinedAndPendingLogsSubscription,
@@ -207,7 +206,7 @@ func (es *EventSystem) subscribeMinedPendingLogs(crit FilterCriteria, logs chan
// subscribeLogs creates a subscription that will write all logs matching the
// given criteria to the given logs channel.
-func (es *EventSystem) subscribeLogs(crit FilterCriteria, logs chan []*vm.Log) *Subscription {
+func (es *EventSystem) subscribeLogs(crit FilterCriteria, logs chan []*types.Log) *Subscription {
sub := &subscription{
id: rpc.NewID(),
typ: LogsSubscription,
@@ -225,7 +224,7 @@ func (es *EventSystem) subscribeLogs(crit FilterCriteria, logs chan []*vm.Log) *
// subscribePendingLogs creates a subscription that writes pending logs for
// transactions that enter the transaction pool.
-func (es *EventSystem) subscribePendingLogs(crit FilterCriteria, logs chan []*vm.Log) *Subscription {
+func (es *EventSystem) subscribePendingLogs(crit FilterCriteria, logs chan []*types.Log) *Subscription {
sub := &subscription{
id: rpc.NewID(),
typ: PendingLogsSubscription,
@@ -248,7 +247,7 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
id: rpc.NewID(),
typ: BlocksSubscription,
created: time.Now(),
- logs: make(chan []*vm.Log),
+ logs: make(chan []*types.Log),
hashes: make(chan common.Hash),
headers: headers,
installed: make(chan struct{}),
@@ -265,7 +264,7 @@ func (es *EventSystem) SubscribePendingTxEvents(hashes chan common.Hash) *Subscr
id: rpc.NewID(),
typ: PendingTransactionsSubscription,
created: time.Now(),
- logs: make(chan []*vm.Log),
+ logs: make(chan []*types.Log),
hashes: hashes,
headers: make(chan *types.Header),
installed: make(chan struct{}),
@@ -284,7 +283,7 @@ func (es *EventSystem) broadcast(filters filterIndex, ev *event.Event) {
}
switch e := ev.Data.(type) {
- case vm.Logs:
+ case []*types.Log:
if len(e) > 0 {
for _, f := range filters[LogsSubscription] {
if ev.Time.After(f.created) {
@@ -370,7 +369,7 @@ func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func
}
// filter logs of a single header in light client mode
-func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*vm.Log {
+func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {
if bloomFilter(header.Bloom, addresses, topics) {
// Get the logs of the block
ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
@@ -378,7 +377,7 @@ func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.
if err != nil {
return nil
}
- var unfiltered []*vm.Log
+ var unfiltered []*types.Log
for _, receipt := range receipts {
for _, log := range receipt.Logs {
logcopy := *log
@@ -396,7 +395,7 @@ func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.
func (es *EventSystem) eventLoop() {
var (
index = make(filterIndex)
- sub = es.mux.Subscribe(core.PendingLogsEvent{}, core.RemovedLogsEvent{}, vm.Logs{}, core.TxPreEvent{}, core.ChainEvent{})
+ sub = es.mux.Subscribe(core.PendingLogsEvent{}, core.RemovedLogsEvent{}, []*types.Log{}, core.TxPreEvent{}, core.ChainEvent{})
)
for i := UnknownSubscription; i < LastIndexSubscription; i++ {
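The event system keeps its shape; only the log slice type changes from vm.Logs to []*types.Log. A hedged sketch of consuming a log subscription after this change, assuming an already constructed *filters.EventSystem:

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/eth/filters"
)

// watchLogs drains one log subscription; how the EventSystem is built is
// outside the scope of this diff.
func watchLogs(es *filters.EventSystem, addr common.Address) error {
    logs := make(chan []*types.Log)
    crit := filters.FilterCriteria{Addresses: []common.Address{addr}}

    sub, err := es.SubscribeLogs(crit, logs)
    if err != nil {
        return err
    }
    defer sub.Unsubscribe() // Unsubscribe on *Subscription is assumed, as used elsewhere in this package

    for batch := range logs {
        for _, l := range batch {
            fmt.Printf("log from %x, %d topics, block %d\n", l.Address, len(l.Topics), l.BlockNumber)
        }
    }
    return nil
}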
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 3ce0cf663..1cfced7e4 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
@@ -263,34 +262,34 @@ func TestLogFilter(t *testing.T) {
notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
// posted twice, once as vm.Logs and once as core.PendingLogsEvent
- allLogs = vm.Logs{
- vm.NewLog(firstAddr, []common.Hash{}, []byte(""), 0),
- vm.NewLog(firstAddr, []common.Hash{firstTopic}, []byte(""), 1),
- vm.NewLog(secondAddr, []common.Hash{firstTopic}, []byte(""), 1),
- vm.NewLog(thirdAddress, []common.Hash{secondTopic}, []byte(""), 2),
- vm.NewLog(thirdAddress, []common.Hash{secondTopic}, []byte(""), 3),
+ allLogs = []*types.Log{
+ {Address: firstAddr},
+ {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
+ {Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
+ {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},
+ {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},
}
- expectedCase7 = vm.Logs{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
- expectedCase11 = vm.Logs{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
+ expectedCase7 = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
+ expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}
testCases = []struct {
crit FilterCriteria
- expected vm.Logs
+ expected []*types.Log
id rpc.ID
}{
// match all
0: {FilterCriteria{}, allLogs, ""},
// match none due to no matching addresses
- 1: {FilterCriteria{Addresses: []common.Address{common.Address{}, notUsedAddress}, Topics: [][]common.Hash{allLogs[0].Topics}}, vm.Logs{}, ""},
+ 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{allLogs[0].Topics}}, []*types.Log{}, ""},
// match logs based on addresses, ignore topics
2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
// match none due to no matching topics (match with address)
- 3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{[]common.Hash{notUsedTopic}}}, vm.Logs{}, ""},
+ 3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""},
// match logs based on addresses and topics
- 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, allLogs[3:5], ""},
+ 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
// match logs based on multiple addresses and "or" topics
- 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, allLogs[2:5], ""},
+ 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""},
// logs in the pending block
6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""},
// mined logs with block num >= 2 or pending logs
@@ -300,9 +299,9 @@ func TestLogFilter(t *testing.T) {
// all "mined" logs
9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""},
// all "mined" logs with 1>= block num <=2 and topic secondTopic
- 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{[]common.Hash{secondTopic}}}, allLogs[3:4], ""},
+ 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
// all "mined" and pending logs with topic firstTopic
- 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{[]common.Hash{firstTopic}}}, expectedCase11, ""},
+ 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""},
}
)
@@ -321,14 +320,14 @@ func TestLogFilter(t *testing.T) {
}
for i, tt := range testCases {
- var fetched []*vm.Log
+ var fetched []*types.Log
for { // fetch all expected logs
results, err := api.GetFilterChanges(tt.id)
if err != nil {
t.Fatalf("Unable to fetch logs: %v", err)
}
- fetched = append(fetched, results.([]*vm.Log)...)
+ fetched = append(fetched, results.([]*types.Log)...)
if len(fetched) >= len(tt.expected) {
break
}
@@ -373,21 +372,21 @@ func TestPendingLogsSubscription(t *testing.T) {
notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
allLogs = []core.PendingLogsEvent{
- core.PendingLogsEvent{Logs: vm.Logs{vm.NewLog(firstAddr, []common.Hash{}, []byte(""), 0)}},
- core.PendingLogsEvent{Logs: vm.Logs{vm.NewLog(firstAddr, []common.Hash{firstTopic}, []byte(""), 1)}},
- core.PendingLogsEvent{Logs: vm.Logs{vm.NewLog(secondAddr, []common.Hash{firstTopic}, []byte(""), 2)}},
- core.PendingLogsEvent{Logs: vm.Logs{vm.NewLog(thirdAddress, []common.Hash{secondTopic}, []byte(""), 3)}},
- core.PendingLogsEvent{Logs: vm.Logs{vm.NewLog(thirdAddress, []common.Hash{secondTopic}, []byte(""), 4)}},
- core.PendingLogsEvent{Logs: vm.Logs{
- vm.NewLog(thirdAddress, []common.Hash{firstTopic}, []byte(""), 5),
- vm.NewLog(thirdAddress, []common.Hash{thirdTopic}, []byte(""), 5),
- vm.NewLog(thirdAddress, []common.Hash{forthTopic}, []byte(""), 5),
- vm.NewLog(firstAddr, []common.Hash{firstTopic}, []byte(""), 5),
+ {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
+ {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
+ {Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
+ {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
+ {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
+ {Logs: []*types.Log{
+ {Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
+ {Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
+ {Address: thirdAddress, Topics: []common.Hash{forthTopic}, BlockNumber: 5},
+ {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
}},
}
- convertLogs = func(pl []core.PendingLogsEvent) vm.Logs {
- var logs vm.Logs
+ convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
+ var logs []*types.Log
for _, l := range pl {
logs = append(logs, l.Logs...)
}
@@ -396,26 +395,26 @@ func TestPendingLogsSubscription(t *testing.T) {
testCases = []struct {
crit FilterCriteria
- expected vm.Logs
- c chan []*vm.Log
+ expected []*types.Log
+ c chan []*types.Log
sub *Subscription
}{
// match all
{FilterCriteria{}, convertLogs(allLogs), nil, nil},
// match none due to no matching addresses
- {FilterCriteria{Addresses: []common.Address{common.Address{}, notUsedAddress}, Topics: [][]common.Hash{[]common.Hash{}}}, vm.Logs{}, nil, nil},
+ {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{{}}}, []*types.Log{}, nil, nil},
// match logs based on addresses, ignore topics
{FilterCriteria{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
// match none due to no matching topics (match with address)
- {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{[]common.Hash{notUsedTopic}}}, vm.Logs{}, nil, nil},
+ {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
// match logs based on addresses and topics
- {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
+ {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
// match logs based on multiple addresses and "or" topics
- {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
- // block numbers are ignored for filters created with New***Filter, these return all logs that match the given criterias when the state changes
+ {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
+ // block numbers are ignored for filters created with New***Filter, these return all logs that match the given criteria when the state changes
{FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
// multiple pending logs, should match only 2 topics from the logs in block 5
- {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, forthTopic}}}, vm.Logs{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
+ {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, forthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
}
)
@@ -423,7 +422,7 @@ func TestPendingLogsSubscription(t *testing.T) {
// on slow machines this could otherwise lead to missing events when the subscription is created after
// (some) events are posted.
for i := range testCases {
- testCases[i].c = make(chan []*vm.Log)
+ testCases[i].c = make(chan []*types.Log)
testCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
}
@@ -431,7 +430,7 @@ func TestPendingLogsSubscription(t *testing.T) {
i := n
tt := test
go func() {
- var fetched []*vm.Log
+ var fetched []*types.Log
fetchLoop:
for {
logs := <-tt.c
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index ab6a87851..83ff3e9ce 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -27,17 +27,16 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/params"
)
func makeReceipt(addr common.Address) *types.Receipt {
receipt := types.NewReceipt(nil, new(big.Int))
- receipt.Logs = vm.Logs{
- &vm.Log{Address: addr},
+ receipt.Logs = []*types.Log{
+ {Address: addr},
}
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
return receipt
@@ -146,8 +145,8 @@ func TestFilters(t *testing.T) {
switch i {
case 1:
receipt := types.NewReceipt(nil, new(big.Int))
- receipt.Logs = vm.Logs{
- &vm.Log{
+ receipt.Logs = []*types.Log{
+ {
Address: addr,
Topics: []common.Hash{hash1},
},
@@ -156,8 +155,8 @@ func TestFilters(t *testing.T) {
receipts = types.Receipts{receipt}
case 2:
receipt := types.NewReceipt(nil, new(big.Int))
- receipt.Logs = vm.Logs{
- &vm.Log{
+ receipt.Logs = []*types.Log{
+ {
Address: addr,
Topics: []common.Hash{hash2},
},
@@ -166,8 +165,8 @@ func TestFilters(t *testing.T) {
receipts = types.Receipts{receipt}
case 998:
receipt := types.NewReceipt(nil, new(big.Int))
- receipt.Logs = vm.Logs{
- &vm.Log{
+ receipt.Logs = []*types.Log{
+ {
Address: addr,
Topics: []common.Hash{hash3},
},
@@ -176,8 +175,8 @@ func TestFilters(t *testing.T) {
receipts = types.Receipts{receipt}
case 999:
receipt := types.NewReceipt(nil, new(big.Int))
- receipt.Logs = vm.Logs{
- &vm.Log{
+ receipt.Logs = []*types.Log{
+ {
Address: addr,
Topics: []common.Hash{hash4},
},
@@ -211,7 +210,7 @@ func TestFilters(t *testing.T) {
filter := New(backend, true)
filter.SetAddresses([]common.Address{addr})
- filter.SetTopics([][]common.Hash{[]common.Hash{hash1, hash2, hash3, hash4}})
+ filter.SetTopics([][]common.Hash{{hash1, hash2, hash3, hash4}})
filter.SetBeginBlock(0)
filter.SetEndBlock(-1)
@@ -222,7 +221,7 @@ func TestFilters(t *testing.T) {
filter = New(backend, true)
filter.SetAddresses([]common.Address{addr})
- filter.SetTopics([][]common.Hash{[]common.Hash{hash3}})
+ filter.SetTopics([][]common.Hash{{hash3}})
filter.SetBeginBlock(900)
filter.SetEndBlock(999)
logs, _ = filter.Find(context.Background())
@@ -235,7 +234,7 @@ func TestFilters(t *testing.T) {
filter = New(backend, true)
filter.SetAddresses([]common.Address{addr})
- filter.SetTopics([][]common.Hash{[]common.Hash{hash3}})
+ filter.SetTopics([][]common.Hash{{hash3}})
filter.SetBeginBlock(990)
filter.SetEndBlock(-1)
logs, _ = filter.Find(context.Background())
@@ -247,7 +246,7 @@ func TestFilters(t *testing.T) {
}
filter = New(backend, true)
- filter.SetTopics([][]common.Hash{[]common.Hash{hash1, hash2}})
+ filter.SetTopics([][]common.Hash{{hash1, hash2}})
filter.SetBeginBlock(1)
filter.SetEndBlock(10)
@@ -258,7 +257,7 @@ func TestFilters(t *testing.T) {
failHash := common.BytesToHash([]byte("fail"))
filter = New(backend, true)
- filter.SetTopics([][]common.Hash{[]common.Hash{failHash}})
+ filter.SetTopics([][]common.Hash{{failHash}})
filter.SetBeginBlock(0)
filter.SetEndBlock(-1)
@@ -279,7 +278,7 @@ func TestFilters(t *testing.T) {
}
filter = New(backend, true)
- filter.SetTopics([][]common.Hash{[]common.Hash{failHash}, []common.Hash{hash1}})
+ filter.SetTopics([][]common.Hash{{failHash}, {hash1}})
filter.SetBeginBlock(0)
filter.SetEndBlock(-1)
diff --git a/eth/handler.go b/eth/handler.go
index 771e69b8d..1de3f67e6 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -607,38 +607,16 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
case msg.Code == NewBlockHashesMsg:
- // Retrieve and deserialize the remote new block hashes notification
- type announce struct {
- Hash common.Hash
- Number uint64
- }
- var announces = []announce{}
-
- if p.version < eth62 {
- // We're running the old protocol, make block number unknown (0)
- var hashes []common.Hash
- if err := msg.Decode(&hashes); err != nil {
- return errResp(ErrDecode, "%v: %v", msg, err)
- }
- for _, hash := range hashes {
- announces = append(announces, announce{hash, 0})
- }
- } else {
- // Otherwise extract both block hash and number
- var request newBlockHashesData
- if err := msg.Decode(&request); err != nil {
- return errResp(ErrDecode, "%v: %v", msg, err)
- }
- for _, block := range request {
- announces = append(announces, announce{block.Hash, block.Number})
- }
+ var announces newBlockHashesData
+ if err := msg.Decode(&announces); err != nil {
+ return errResp(ErrDecode, "%v: %v", msg, err)
}
// Mark the hashes as present at the remote node
for _, block := range announces {
p.MarkBlock(block.Hash)
}
// Schedule all the unknown hashes for retrieval
- unknown := make([]announce, 0, len(announces))
+ unknown := make(newBlockHashesData, 0, len(announces))
for _, block := range announces {
if !pm.blockchain.HasBlock(block.Hash) {
unknown = append(unknown, block)
diff --git a/eth/handler_test.go b/eth/handler_test.go
index f599e9e86..22a4ddf50 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -75,7 +75,7 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
// Create a "random" unknown hash for testing
var unknown common.Hash
- for i, _ := range unknown {
+ for i := range unknown {
unknown[i] = byte(i)
}
// Create a batch of tests for various scenarios
@@ -246,17 +246,17 @@ func testGetBlockBodies(t *testing.T, protocol int) {
{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
{0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable
{0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable
- {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned
+ {0, []common.Hash{{}}, []bool{false}, 0}, // A non existent block should not be returned
// Existing and non-existing blocks interleaved should not cause problems
{0, []common.Hash{
- common.Hash{},
+ {},
pm.blockchain.GetBlockByNumber(1).Hash(),
- common.Hash{},
+ {},
pm.blockchain.GetBlockByNumber(10).Hash(),
- common.Hash{},
+ {},
pm.blockchain.GetBlockByNumber(100).Hash(),
- common.Hash{},
+ {},
}, []bool{false, true, false, true, false, true, false}, 3},
}
// Run each of the tests and verify the results against the chain
@@ -311,13 +311,13 @@ func testGetNodeData(t *testing.T, protocol int) {
switch i {
case 0:
// In block 1, the test bank sends account #1 some ether.
- tx, _ := types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(signer, testBankKey)
+ tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testBankKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
- tx1, _ := types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, testBankKey)
- tx2, _ := types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, acc1Key)
+ tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
+ tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
block.AddTx(tx1)
block.AddTx(tx2)
case 2:
@@ -403,13 +403,13 @@ func testGetReceipt(t *testing.T, protocol int) {
switch i {
case 0:
// In block 1, the test bank sends account #1 some ether.
- tx, _ := types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(signer, testBankKey)
+ tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testBankKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
- tx1, _ := types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, testBankKey)
- tx2, _ := types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, acc1Key)
+ tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank.Address), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
+ tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
block.AddTx(tx1)
block.AddTx(tx2)
case 2:
@@ -491,7 +491,7 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool
if err := p2p.ExpectMsg(peer.app, GetBlockHeadersMsg, challenge); err != nil {
t.Fatalf("challenge mismatch: %v", err)
}
- // Create a block to reply to the challenge if no timeout is simualted
+ // Create a block to reply to the challenge if no timeout is simulated
if !timeout {
blocks, _ := core.GenerateChain(&params.ChainConfig{}, genesis, db, 1, func(i int, block *core.BlockGen) {
if remoteForked {
diff --git a/eth/helper_test.go b/eth/helper_test.go
index bd6b2d0da..a718a6d21 100644
--- a/eth/helper_test.go
+++ b/eth/helper_test.go
@@ -124,7 +124,7 @@ func (p *testTxPool) Pending() (map[common.Address]types.Transactions, error) {
// newTestTransaction create a new dummy transaction.
func newTestTransaction(from *ecdsa.PrivateKey, nonce uint64, datasize int) *types.Transaction {
tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), make([]byte, datasize))
- tx, _ = tx.SignECDSA(types.HomesteadSigner{}, from)
+ tx, _ = types.SignTx(tx, types.HomesteadSigner{}, from)
return tx
}
diff --git a/eth/protocol_test.go b/eth/protocol_test.go
index 0aac19f43..3b8056433 100644
--- a/eth/protocol_test.go
+++ b/eth/protocol_test.go
@@ -82,7 +82,7 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError)
}
case <-time.After(2 * time.Second):
- t.Errorf("protocol did not shut down withing 2 seconds")
+ t.Errorf("protocol did not shut down within 2 seconds")
}
p.close()
}
@@ -178,7 +178,7 @@ func testSendTransactions(t *testing.T, protocol int) {
func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
// Create a "random" hash for testing
var hash common.Hash
- for i, _ := range hash {
+ for i := range hash {
hash[i] = byte(i)
}
// Assemble some table driven tests
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index 3acaaa0d1..1d04d9e03 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -26,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
@@ -294,14 +293,14 @@ func (ec *Client) NonceAt(ctx context.Context, account common.Address, blockNumb
// Filters
// FilterLogs executes a filter query.
-func (ec *Client) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]vm.Log, error) {
- var result []vm.Log
+func (ec *Client) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
+ var result []types.Log
err := ec.c.CallContext(ctx, &result, "eth_getLogs", toFilterArg(q))
return result, err
}
// SubscribeFilterLogs subscribes to the results of a streaming filter query.
-func (ec *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- vm.Log) (ethereum.Subscription, error) {
+func (ec *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
return ec.c.EthSubscribe(ctx, ch, "logs", toFilterArg(q))
}
diff --git a/ethdb/memory_database.go b/ethdb/memory_database.go
index a729f5233..65c487934 100644
--- a/ethdb/memory_database.go
+++ b/ethdb/memory_database.go
@@ -67,7 +67,7 @@ func (db *MemDatabase) Keys() [][]byte {
defer db.lock.RUnlock()
keys := [][]byte{}
- for key, _ := range db.db {
+ for key := range db.db {
keys = append(keys, []byte(key))
}
return keys
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index 716beef69..8692a43bd 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -326,7 +326,7 @@ func (s *Service) login(in *json.Decoder, out *json.Encoder) error {
Secret: s.pass,
}
login := map[string][]interface{}{
- "emit": []interface{}{"hello", auth},
+ "emit": {"hello", auth},
}
if err := out.Encode(login); err != nil {
return err
@@ -365,7 +365,7 @@ func (s *Service) reportLatency(out *json.Encoder) error {
start := time.Now()
ping := map[string][]interface{}{
- "emit": []interface{}{"node-ping", map[string]string{
+ "emit": {"node-ping", map[string]string{
"id": s.node,
"clientTime": start.String(),
}},
@@ -383,15 +383,12 @@ func (s *Service) reportLatency(out *json.Encoder) error {
}
// Send back the measured latency
latency := map[string][]interface{}{
- "emit": []interface{}{"latency", map[string]string{
+ "emit": {"latency", map[string]string{
"id": s.node,
"latency": strconv.Itoa(int((time.Since(start) / time.Duration(2)).Nanoseconds() / 1000000)),
}},
}
- if err := out.Encode(latency); err != nil {
- return err
- }
- return nil
+ return out.Encode(latency)
}
// blockStats is the information to report about individual blocks.
@@ -438,12 +435,9 @@ func (s *Service) reportBlock(out *json.Encoder, block *types.Block) error {
"block": s.assembleBlockStats(block),
}
report := map[string][]interface{}{
- "emit": []interface{}{"block", stats},
- }
- if err := out.Encode(report); err != nil {
- return err
+ "emit": {"block", stats},
}
- return nil
+ return out.Encode(report)
}
// assembleBlockStats retrieves any required metadata to report a single block
@@ -497,9 +491,7 @@ func (s *Service) reportHistory(out *json.Encoder, list []uint64) error {
indexes := make([]uint64, 0, historyUpdateRange)
if len(list) > 0 {
// Specific indexes requested, send them back in particular
- for _, idx := range list {
- indexes = append(indexes, idx)
- }
+ indexes = append(indexes, list...)
} else {
// No indexes requested, send back the top ones
var head *types.Header
@@ -531,12 +523,9 @@ func (s *Service) reportHistory(out *json.Encoder, list []uint64) error {
"history": history,
}
report := map[string][]interface{}{
- "emit": []interface{}{"history", stats},
- }
- if err := out.Encode(report); err != nil {
- return err
+ "emit": {"history", stats},
}
- return nil
+ return out.Encode(report)
}
// pendStats is the information to report about pending transactions.
@@ -562,12 +551,9 @@ func (s *Service) reportPending(out *json.Encoder) error {
},
}
report := map[string][]interface{}{
- "emit": []interface{}{"pending", stats},
- }
- if err := out.Encode(report); err != nil {
- return err
+ "emit": {"pending", stats},
}
- return nil
+ return out.Encode(report)
}
// blockStats is the information to report about the local node.
@@ -616,10 +602,7 @@ func (s *Service) reportStats(out *json.Encoder) error {
},
}
report := map[string][]interface{}{
- "emit": []interface{}{"stats", stats},
- }
- if err := out.Encode(report); err != nil {
- return err
+ "emit": {"stats", stats},
}
- return nil
+ return out.Encode(report)
}
diff --git a/event/event_test.go b/event/event_test.go
index 394029301..2c56ecf29 100644
--- a/event/event_test.go
+++ b/event/event_test.go
@@ -144,7 +144,7 @@ func TestMuxConcurrent(t *testing.T) {
func emptySubscriber(mux *TypeMux, types ...interface{}) {
s := mux.Subscribe(testEvent(0))
go func() {
- for _ = range s.Chan() {
+ for range s.Chan() {
}
}()
}
@@ -187,7 +187,7 @@ func BenchmarkChanSend(b *testing.B) {
c := make(chan interface{})
closed := make(chan struct{})
go func() {
- for _ = range c {
+ for range c {
}
}()
diff --git a/event/filter/generic_filter.go b/event/filter/generic_filter.go
index 27f35920d..d679b8bfa 100644
--- a/event/filter/generic_filter.go
+++ b/event/filter/generic_filter.go
@@ -34,7 +34,7 @@ func (self Generic) Compare(f Filter) bool {
strMatch = false
}
- for k, _ := range self.Data {
+ for k := range self.Data {
if _, ok := filter.Data[k]; !ok {
return false
}
diff --git a/interfaces.go b/interfaces.go
index bbb204ff2..f7e71a317 100644
--- a/interfaces.go
+++ b/interfaces.go
@@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"golang.org/x/net/context"
)
@@ -156,8 +155,8 @@ type FilterQuery struct {
// Logs received through a streaming query subscription may have Removed set to true,
// indicating that the log was reverted due to a chain reorganisation.
type LogFilterer interface {
- FilterLogs(ctx context.Context, q FilterQuery) ([]vm.Log, error)
- SubscribeFilterLogs(ctx context.Context, q FilterQuery, ch chan<- vm.Log) (Subscription, error)
+ FilterLogs(ctx context.Context, q FilterQuery) ([]types.Log, error)
+ SubscribeFilterLogs(ctx context.Context, q FilterQuery, ch chan<- types.Log) (Subscription, error)
}
// TransactionSender wraps transaction sending. The SendTransaction method injects a
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index c4fceb5bc..7ea216029 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -470,7 +470,7 @@ func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.A
return res.Hex(), nil
}
-// callmsg is the message type used for call transations.
+// callmsg is the message type used for call transitions.
type callmsg struct {
addr common.Address
to *common.Address
@@ -541,14 +541,14 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
if err := vmError(); err != nil {
return "0x", common.Big0, err
}
- if len(res) == 0 { // backwards compatability
+ if len(res) == 0 { // backwards compatibility
return "0x", gas, err
}
return common.ToHex(res), gas, err
}
// Call executes the given transaction on the state for the given block number.
-// It doesn't make and changes in the state/blockchain and is usefull to execute and retrieve values.
+// It doesn't make any changes in the state/blockchain and is useful to execute and retrieve values.
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (string, error) {
result, _, err := s.doCall(ctx, args, blockNr)
return result, err
@@ -963,7 +963,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (ma
"logsBloom": receipt.Bloom,
}
if receipt.Logs == nil {
- fields["logs"] = []vm.Logs{}
+ fields["logs"] = [][]*types.Log{}
}
// If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation
if receipt.ContractAddress != (common.Address{}) {
diff --git a/internal/jsre/ethereum_js.go b/internal/jsre/ethereum_js.go
index b4eef194c..e1cee8650 100644
--- a/internal/jsre/ethereum_js.go
+++ b/internal/jsre/ethereum_js.go
@@ -2278,7 +2278,7 @@ var toTwosComplement = function (number) {
* Checks if the given string is strictly an address
*
* @method isStrictAddress
- * @param {String} address the given HEX adress
+ * @param {String} address the given HEX address
* @return {Boolean}
*/
var isStrictAddress = function (address) {
@@ -2289,7 +2289,7 @@ var isStrictAddress = function (address) {
* Checks if the given string is an address
*
* @method isAddress
- * @param {String} address the given HEX adress
+ * @param {String} address the given HEX address
* @return {Boolean}
*/
var isAddress = function (address) {
@@ -2311,7 +2311,7 @@ var isAddress = function (address) {
* Checks if the given string is a checksummed address
*
* @method isChecksumAddress
- * @param {String} address the given HEX adress
+ * @param {String} address the given HEX address
* @return {Boolean}
*/
var isChecksumAddress = function (address) {
@@ -2334,7 +2334,7 @@ var isChecksumAddress = function (address) {
* Makes a checksum address
*
* @method toChecksumAddress
- * @param {String} address the given HEX adress
+ * @param {String} address the given HEX address
* @return {String}
*/
var toChecksumAddress = function (address) {
@@ -2356,7 +2356,7 @@ var toChecksumAddress = function (address) {
};
/**
- * Transforms given string to valid 20 bytes-length addres with 0x prefix
+ * Transforms given string to valid 20 bytes-length address with 0x prefix
*
* @method toAddress
* @param {String} address
@@ -3000,7 +3000,7 @@ var ContractFactory = function (eth, abi) {
if (callback) {
- // wait for the contract address adn check if the code was deployed
+ // wait for the contract address and check if the code was deployed
this.eth.sendTransaction(options, function (err, hash) {
if (err) {
callback(err);
@@ -3480,7 +3480,7 @@ Adds the callback and sets up the methods, to iterate over the results.
@method getLogsAtStart
@param {Object} self
-@param {funciton}
+@param {function}
*/
var getLogsAtStart = function(self, callback){
// call getFilterLogs for the first watch callback start
@@ -6429,7 +6429,7 @@ var transferToAddress = function (eth, from, to, value, callback) {
* @method deposit
* @param {String} from
* @param {String} to
- * @param {Value} value to be transfered
+ * @param {Value} value to be transferred
* @param {String} client unique identifier
* @param {Function} callback, callback
*/
@@ -13381,10 +13381,10 @@ module.exports = transfer;
* equivalent to (a % n) in JavaScript.
* FLOOR 3 The remainder has the same sign as the divisor (Python %).
* HALF_EVEN 6 This modulo mode implements the IEEE 754 remainder function.
- * EUCLID 9 Euclidian division. q = sign(n) * floor(a / abs(n)).
+ * EUCLID 9 Euclidean division. q = sign(n) * floor(a / abs(n)).
* The remainder is always positive.
*
- * The truncated division, floored division, Euclidian division and IEEE 754 remainder
+ * The truncated division, floored division, Euclidean division and IEEE 754 remainder
* modes are commonly used for the modulus operation.
* Although the other rounding modes can also be used, they may not give useful results.
*/
@@ -15002,7 +15002,7 @@ module.exports = transfer;
if ( MODULO_MODE == 9 ) {
- // Euclidian division: q = sign(y) * floor(x / abs(y))
+ // Euclidean division: q = sign(y) * floor(x / abs(y))
// r = x - qy where 0 <= r < abs(y)
s = y.s;
y.s = 1;
diff --git a/les/api_backend.go b/les/api_backend.go
index e4f7417d6..3a71ac4e0 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -31,7 +31,7 @@ import (
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/params"
- rpc "github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/rpc"
"golang.org/x/net/context"
)
diff --git a/les/fetcher.go b/les/fetcher.go
index d0958870f..de706de5e 100644
--- a/les/fetcher.go
+++ b/les/fetcher.go
@@ -682,7 +682,7 @@ func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
f.lastUpdateStats.next = newEntry
}
f.lastUpdateStats = newEntry
- for p, _ := range f.peers {
+ for p := range f.peers {
f.checkUpdateStats(p, newEntry)
}
}
diff --git a/les/flowcontrol/manager.go b/les/flowcontrol/manager.go
index d3cc57aa6..28cc6f0fe 100644
--- a/les/flowcontrol/manager.go
+++ b/les/flowcontrol/manager.go
@@ -127,7 +127,7 @@ func (self *ClientManager) removeNode(node *cmNode) {
// recalc sumWeight
func (self *ClientManager) updateNodes(time mclock.AbsTime) (rce bool) {
var sumWeight, rcSum uint64
- for node, _ := range self.nodes {
+ for node := range self.nodes {
rc := node.recharging
node.update(time)
if rc && !node.recharging {
@@ -146,13 +146,13 @@ func (self *ClientManager) updateNodes(time mclock.AbsTime) (rce bool) {
func (self *ClientManager) update(time mclock.AbsTime) {
for {
firstTime := time
- for node, _ := range self.nodes {
+ for node := range self.nodes {
if node.recharging && node.finishRecharge < firstTime {
firstTime = node.finishRecharge
}
}
if self.updateNodes(firstTime) {
- for node, _ := range self.nodes {
+ for node := range self.nodes {
if node.recharging {
node.set(node.serving, self.simReqCnt, self.sumWeight)
}
diff --git a/les/handler_test.go b/les/handler_test.go
index 37c5dd226..0b94d0d30 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -49,7 +49,7 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
// Create a "random" unknown hash for testing
var unknown common.Hash
- for i, _ := range unknown {
+ for i := range unknown {
unknown[i] = byte(i)
}
// Create a batch of tests for various scenarios
@@ -189,17 +189,17 @@ func testGetBlockBodies(t *testing.T, protocol int) {
//{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
{0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable
{0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable
- {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned
+ {0, []common.Hash{{}}, []bool{false}, 0}, // A non existent block should not be returned
// Existing and non-existing blocks interleaved should not cause problems
{0, []common.Hash{
- common.Hash{},
+ {},
bc.GetBlockByNumber(1).Hash(),
- common.Hash{},
+ {},
bc.GetBlockByNumber(10).Hash(),
- common.Hash{},
+ {},
bc.GetBlockByNumber(100).Hash(),
- common.Hash{},
+ {},
}, []bool{false, true, false, true, false, true, false}, 3},
}
// Run each of the tests and verify the results against the chain
@@ -312,7 +312,7 @@ func testGetProofs(t *testing.T, protocol int) {
var proofreqs []ProofReq
var proofs [][]rlp.RawValue
- accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, common.Address{}}
+ accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, {}}
for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
header := bc.GetHeaderByNumber(i)
root := header.Root
diff --git a/les/helper_test.go b/les/helper_test.go
index 0d1aba9a5..3d6bf3c29 100644
--- a/les/helper_test.go
+++ b/les/helper_test.go
@@ -79,17 +79,17 @@ func testChainGen(i int, block *core.BlockGen) {
switch i {
case 0:
// In block 1, the test bank sends account #1 some ether.
- tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(signer, testBankKey)
+ tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testBankKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
// acc1Addr creates a test contract.
- tx1, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, testBankKey)
+ tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
nonce := block.TxNonce(acc1Addr)
- tx2, _ := types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, acc1Key)
+ tx2, _ := types.SignTx(types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
nonce++
- tx3, _ := types.NewContractCreation(nonce, big.NewInt(0), big.NewInt(200000), big.NewInt(0), testContractCode).SignECDSA(signer, acc1Key)
+ tx3, _ := types.SignTx(types.NewContractCreation(nonce, big.NewInt(0), big.NewInt(200000), big.NewInt(0), testContractCode), signer, acc1Key)
testContractAddr = crypto.CreateAddress(acc1Addr, nonce)
block.AddTx(tx1)
block.AddTx(tx2)
@@ -99,7 +99,7 @@ func testChainGen(i int, block *core.BlockGen) {
block.SetCoinbase(acc2Addr)
block.SetExtra([]byte("yeehaw"))
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001")
- tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(signer, testBankKey)
+ tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data), signer, testBankKey)
block.AddTx(tx)
case 3:
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
@@ -110,7 +110,7 @@ func testChainGen(i int, block *core.BlockGen) {
b3.Extra = []byte("foo")
block.AddUncle(b3)
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002")
- tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(signer, testBankKey)
+ tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data), signer, testBankKey)
block.AddTx(tx)
}
}
@@ -217,7 +217,7 @@ func (p *testTxPool) GetTransactions() types.Transactions {
// newTestTransaction create a new dummy transaction.
func newTestTransaction(from *ecdsa.PrivateKey, nonce uint64, datasize int) *types.Transaction {
tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), make([]byte, datasize))
- tx, _ = tx.SignECDSA(types.HomesteadSigner{}, from)
+ tx, _ = types.SignTx(tx, types.HomesteadSigner{}, from)
return tx
}
diff --git a/les/metrics.go b/les/metrics.go
index aa0796790..0162a1d1a 100644
--- a/les/metrics.go
+++ b/les/metrics.go
@@ -72,7 +72,7 @@ type meteredMsgReadWriter struct {
}
// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the
-// metrics system is disabled, this fucntion returns the original object.
+// metrics system is disabled, this function returns the original object.
func newMeteredMsgWriter(rw p2p.MsgReadWriter) p2p.MsgReadWriter {
if !metrics.Enabled {
return rw
diff --git a/les/peer.go b/les/peer.go
index 770c9bf45..d5008ded1 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -469,7 +469,7 @@ func (ps *peerSet) AllPeerIDs() []string {
res := make([]string, len(ps.peers))
idx := 0
- for id, _ := range ps.peers {
+ for id := range ps.peers {
res[idx] = id
idx++
}
diff --git a/les/randselect_test.go b/les/randselect_test.go
index f3c34305e..9ae7726dd 100644
--- a/les/randselect_test.go
+++ b/les/randselect_test.go
@@ -39,7 +39,7 @@ func TestWeightedRandomSelect(t *testing.T) {
s := newWeightedRandomSelect()
w := -1
list := make([]testWrsItem, cnt)
- for i, _ := range list {
+ for i := range list {
list[i] = testWrsItem{idx: i, widx: &w}
s.update(&list[i])
}
diff --git a/les/serverpool.go b/les/serverpool.go
index 02b5e527e..e3b7cf620 100644
--- a/les/serverpool.go
+++ b/les/serverpool.go
@@ -458,7 +458,7 @@ func (pool *serverPool) loadNodes() {
// ordered from least to most recently connected.
func (pool *serverPool) saveNodes() {
list := make([]*poolEntry, len(pool.knownQueue.queue))
- for i, _ := range list {
+ for i := range list {
list[i] = pool.knownQueue.fetchOldest()
}
enc, err := rlp.EncodeToBytes(list)
diff --git a/les/sync.go b/les/sync.go
index 72c979c61..c143cb145 100644
--- a/les/sync.go
+++ b/les/sync.go
@@ -43,12 +43,12 @@ func (pm *ProtocolManager) syncer() {
for {
select {
case <-pm.newPeerCh:
-/* // Make sure we have peers to select from, then sync
- if pm.peers.Len() < minDesiredPeerCount {
- break
- }
- go pm.synchronise(pm.peers.BestPeer())
-*/
+ /* // Make sure we have peers to select from, then sync
+ if pm.peers.Len() < minDesiredPeerCount {
+ break
+ }
+ go pm.synchronise(pm.peers.BestPeer())
+ */
/*case <-forceSync:
// Force a sync even if not enough peers are present
go pm.synchronise(pm.peers.BestPeer())
diff --git a/les/txrelay.go b/les/txrelay.go
index 036158f5d..84d049b45 100644
--- a/les/txrelay.go
+++ b/les/txrelay.go
@@ -138,7 +138,7 @@ func (self *LesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback
if len(self.txPending) > 0 {
txs := make(types.Transactions, len(self.txPending))
i := 0
- for hash, _ := range self.txPending {
+ for hash := range self.txPending {
txs[i] = self.txSent[hash].tx
i++
}
diff --git a/light/lightchain.go b/light/lightchain.go
index d397f5006..0d28ad2f4 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -135,7 +135,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
return nil, err
}
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
- for hash, _ := range core.BadHashes {
+ for hash := range core.BadHashes {
if header := bc.GetHeaderByHash(hash); header != nil {
glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
bc.SetHead(header.Number.Uint64() - 1)
diff --git a/light/odr_test.go b/light/odr_test.go
index a6c956e9a..2dcfa40d9 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -205,17 +205,17 @@ func testChainGen(i int, block *core.BlockGen) {
switch i {
case 0:
// In block 1, the test bank sends account #1 some ether.
- tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(signer, testBankKey)
+ tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testBankKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
// acc1Addr creates a test contract.
- tx1, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, testBankKey)
+ tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
nonce := block.TxNonce(acc1Addr)
- tx2, _ := types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(signer, acc1Key)
+ tx2, _ := types.SignTx(types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
nonce++
- tx3, _ := types.NewContractCreation(nonce, big.NewInt(0), big.NewInt(1000000), big.NewInt(0), testContractCode).SignECDSA(signer, acc1Key)
+ tx3, _ := types.SignTx(types.NewContractCreation(nonce, big.NewInt(0), big.NewInt(1000000), big.NewInt(0), testContractCode), signer, acc1Key)
testContractAddr = crypto.CreateAddress(acc1Addr, nonce)
block.AddTx(tx1)
block.AddTx(tx2)
@@ -225,7 +225,7 @@ func testChainGen(i int, block *core.BlockGen) {
block.SetCoinbase(acc2Addr)
block.SetExtra([]byte("yeehaw"))
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001")
- tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(signer, testBankKey)
+ tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data), signer, testBankKey)
block.AddTx(tx)
case 3:
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
@@ -236,7 +236,7 @@ func testChainGen(i int, block *core.BlockGen) {
b3.Extra = []byte("foo")
block.AddUncle(b3)
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002")
- tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(signer, testBankKey)
+ tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data), signer, testBankKey)
block.AddTx(tx)
}
}
diff --git a/light/txpool_test.go b/light/txpool_test.go
index f3eb7980d..e5a4670aa 100644
--- a/light/txpool_test.go
+++ b/light/txpool_test.go
@@ -75,8 +75,8 @@ func txPoolTestChainGen(i int, block *core.BlockGen) {
}
func TestTxPool(t *testing.T) {
- for i, _ := range testTx {
- testTx[i], _ = types.NewTransaction(uint64(i), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(types.HomesteadSigner{}, testBankKey)
+ for i := range testTx {
+ testTx[i], _ = types.SignTx(types.NewTransaction(uint64(i), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey)
}
var (
diff --git a/light/vm_env.go b/light/vm_env.go
index cc0c568c9..1b225c8de 100644
--- a/light/vm_env.go
+++ b/light/vm_env.go
@@ -20,6 +20,7 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"golang.org/x/net/context"
@@ -42,7 +43,7 @@ func (s *VMState) Error() error {
return s.err
}
-func (s *VMState) AddLog(log *vm.Log) {}
+func (s *VMState) AddLog(log *types.Log) {}
// errHandler handles and stores any state error that happens during execution.
func (s *VMState) errHandler(err error) {
diff --git a/logger/example_test.go b/logger/example_test.go
deleted file mode 100644
index ce5f9da67..000000000
--- a/logger/example_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package logger
-
-import "os"
-
-func ExampleLogger() {
- logger := NewLogger("TAG")
- logger.Infoln("so awesome") // prints [TAG] so awesome
- logger.Infof("this %q is raw", "coin") // prints [TAG] this "coin" is raw
-}
-
-func ExampleLogSystem() {
- filename := "test.log"
- file, _ := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, os.ModePerm)
- fileLog := NewStdLogSystem(file, 0, WarnLevel)
- AddLogSystem(fileLog)
-
- stdoutLog := NewStdLogSystem(os.Stdout, 0, WarnLevel)
- AddLogSystem(stdoutLog)
-
- NewLogger("TAG").Warnln("reactor meltdown") // writes to both logs
-}
diff --git a/logger/glog/glog.go b/logger/glog/glog.go
index edaa21f07..0b33527c3 100644
--- a/logger/glog/glog.go
+++ b/logger/glog/glog.go
@@ -928,7 +928,7 @@ const flushInterval = 30 * time.Second
// flushDaemon periodically flushes the log file buffers.
func (l *loggingT) flushDaemon() {
- for _ = range time.NewTicker(flushInterval).C {
+ for range time.NewTicker(flushInterval).C {
l.lockAndFlushAll()
}
}
diff --git a/logger/log.go b/logger/log.go
deleted file mode 100644
index 38a6ce139..000000000
--- a/logger/log.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package logger
-
-import (
- "fmt"
- "io"
- "log"
- "os"
-
- "github.com/ethereum/go-ethereum/common"
-)
-
-func openLogFile(datadir string, filename string) *os.File {
- path := common.AbsolutePath(datadir, filename)
- file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
- if err != nil {
- panic(fmt.Sprintf("error opening log file '%s': %v", filename, err))
- }
- return file
-}
-
-func New(datadir string, logFile string, logLevel int) LogSystem {
- var writer io.Writer
- if logFile == "" {
- writer = os.Stdout
- } else {
- writer = openLogFile(datadir, logFile)
- }
-
- var sys LogSystem
- sys = NewStdLogSystem(writer, log.LstdFlags, LogLevel(logLevel))
- AddLogSystem(sys)
-
- return sys
-}
-
-func NewJSONsystem(datadir string, logFile string) LogSystem {
- var writer io.Writer
- if logFile == "-" {
- writer = os.Stdout
- } else {
- writer = openLogFile(datadir, logFile)
- }
-
- var sys LogSystem
- sys = NewJsonLogSystem(writer)
- AddLogSystem(sys)
-
- return sys
-}
diff --git a/logger/loggers.go b/logger/loggers.go
deleted file mode 100644
index e63355d0b..000000000
--- a/logger/loggers.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-/*
-Package logger implements a multi-output leveled logger.
-
-Other packages use tagged logger to send log messages to shared
-(process-wide) logging engine. The shared logging engine dispatches to
-multiple log systems. The log level can be set separately per log
-system.
-
-Logging is asynchronous and does not block the caller. Message
-formatting is performed by the caller goroutine to avoid incorrect
-logging of mutable state.
-*/
-package logger
-
-import (
- "encoding/json"
- "fmt"
- "os"
-)
-
-type LogLevel uint32
-
-const (
- // Standard log levels
- Silence LogLevel = iota
- ErrorLevel
- WarnLevel
- InfoLevel
- DebugLevel
- DebugDetailLevel
-)
-
-// A Logger prints messages prefixed by a given tag. It provides named
-// Printf and Println style methods for all loglevels. Each ethereum
-// component should have its own logger with a unique prefix.
-type Logger struct {
- tag string
-}
-
-func NewLogger(tag string) *Logger {
- return &Logger{"[" + tag + "] "}
-}
-
-func (logger *Logger) Sendln(level LogLevel, v ...interface{}) {
- logMessageC <- stdMsg{level, logger.tag + fmt.Sprintln(v...)}
-}
-
-func (logger *Logger) Sendf(level LogLevel, format string, v ...interface{}) {
- logMessageC <- stdMsg{level, logger.tag + fmt.Sprintf(format, v...)}
-}
-
-// Errorln writes a message with ErrorLevel.
-func (logger *Logger) Errorln(v ...interface{}) {
- logger.Sendln(ErrorLevel, v...)
-}
-
-// Warnln writes a message with WarnLevel.
-func (logger *Logger) Warnln(v ...interface{}) {
- logger.Sendln(WarnLevel, v...)
-}
-
-// Infoln writes a message with InfoLevel.
-func (logger *Logger) Infoln(v ...interface{}) {
- logger.Sendln(InfoLevel, v...)
-}
-
-// Debugln writes a message with DebugLevel.
-func (logger *Logger) Debugln(v ...interface{}) {
- logger.Sendln(DebugLevel, v...)
-}
-
-// DebugDetailln writes a message with DebugDetailLevel.
-func (logger *Logger) DebugDetailln(v ...interface{}) {
- logger.Sendln(DebugDetailLevel, v...)
-}
-
-// Errorf writes a message with ErrorLevel.
-func (logger *Logger) Errorf(format string, v ...interface{}) {
- logger.Sendf(ErrorLevel, format, v...)
-}
-
-// Warnf writes a message with WarnLevel.
-func (logger *Logger) Warnf(format string, v ...interface{}) {
- logger.Sendf(WarnLevel, format, v...)
-}
-
-// Infof writes a message with InfoLevel.
-func (logger *Logger) Infof(format string, v ...interface{}) {
- logger.Sendf(InfoLevel, format, v...)
-}
-
-// Debugf writes a message with DebugLevel.
-func (logger *Logger) Debugf(format string, v ...interface{}) {
- logger.Sendf(DebugLevel, format, v...)
-}
-
-// DebugDetailf writes a message with DebugDetailLevel.
-func (logger *Logger) DebugDetailf(format string, v ...interface{}) {
- logger.Sendf(DebugDetailLevel, format, v...)
-}
-
-// Fatalln writes a message with ErrorLevel and exits the program.
-func (logger *Logger) Fatalln(v ...interface{}) {
- logger.Sendln(ErrorLevel, v...)
- Flush()
- os.Exit(0)
-}
-
-// Fatalf writes a message with ErrorLevel and exits the program.
-func (logger *Logger) Fatalf(format string, v ...interface{}) {
- logger.Sendf(ErrorLevel, format, v...)
- Flush()
- os.Exit(0)
-}
-
-type JsonLogger struct {
- Coinbase string
-}
-
-func NewJsonLogger() *JsonLogger {
- return &JsonLogger{}
-}
-
-func (logger *JsonLogger) LogJson(v JsonLog) {
- msgname := v.EventName()
- obj := map[string]interface{}{
- msgname: v,
- }
-
- jsontxt, _ := json.Marshal(obj)
- logMessageC <- (jsonMsg(jsontxt))
-
-}
diff --git a/logger/loggers_test.go b/logger/loggers_test.go
deleted file mode 100644
index 85564698b..000000000
--- a/logger/loggers_test.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package logger
-
-import (
- "io/ioutil"
- "math/rand"
- "os"
- "sync"
- "testing"
- "time"
-)
-
-type TestLogSystem struct {
- mutex sync.Mutex
- output string
- level LogLevel
-}
-
-func (ls *TestLogSystem) LogPrint(msg LogMsg) {
- ls.mutex.Lock()
- if ls.level >= msg.Level() {
- ls.output += msg.String()
- }
- ls.mutex.Unlock()
-}
-
-func (ls *TestLogSystem) SetLogLevel(i LogLevel) {
- ls.mutex.Lock()
- ls.level = i
- ls.mutex.Unlock()
-}
-
-func (ls *TestLogSystem) GetLogLevel() LogLevel {
- ls.mutex.Lock()
- defer ls.mutex.Unlock()
- return ls.level
-}
-
-func (ls *TestLogSystem) CheckOutput(t *testing.T, expected string) {
- ls.mutex.Lock()
- output := ls.output
- ls.mutex.Unlock()
- if output != expected {
- t.Errorf("log output mismatch:\n got: %q\n want: %q\n", output, expected)
- }
-}
-
-type blockedLogSystem struct {
- LogSystem
- unblock chan struct{}
-}
-
-func (ls blockedLogSystem) LogPrint(msg LogMsg) {
- <-ls.unblock
- ls.LogSystem.LogPrint(msg)
-}
-
-func TestLoggerFlush(t *testing.T) {
- Reset()
-
- logger := NewLogger("TEST")
- ls := blockedLogSystem{&TestLogSystem{level: WarnLevel}, make(chan struct{})}
- AddLogSystem(ls)
- for i := 0; i < 5; i++ {
- // these writes shouldn't hang even though ls is blocked
- logger.Errorf(".")
- }
-
- beforeFlush := time.Now()
- time.AfterFunc(80*time.Millisecond, func() { close(ls.unblock) })
- Flush() // this should hang for approx. 80ms
- if blockd := time.Now().Sub(beforeFlush); blockd < 80*time.Millisecond {
- t.Errorf("Flush didn't block long enough, blocked for %v, should've been >= 80ms", blockd)
- }
-
- ls.LogSystem.(*TestLogSystem).CheckOutput(t, "[TEST] .[TEST] .[TEST] .[TEST] .[TEST] .")
-}
-
-func TestLoggerPrintln(t *testing.T) {
- Reset()
-
- logger := NewLogger("TEST")
- testLogSystem := &TestLogSystem{level: WarnLevel}
- AddLogSystem(testLogSystem)
- logger.Errorln("error")
- logger.Warnln("warn")
- logger.Infoln("info")
- logger.Debugln("debug")
- Flush()
-
- testLogSystem.CheckOutput(t, "[TEST] error\n[TEST] warn\n")
-}
-
-func TestLoggerPrintf(t *testing.T) {
- Reset()
-
- logger := NewLogger("TEST")
- testLogSystem := &TestLogSystem{level: WarnLevel}
- AddLogSystem(testLogSystem)
- logger.Errorf("error to %v\n", []int{1, 2, 3})
- logger.Warnf("warn %%d %d", 5)
- logger.Infof("info")
- logger.Debugf("debug")
- Flush()
- testLogSystem.CheckOutput(t, "[TEST] error to [1 2 3]\n[TEST] warn %d 5")
-}
-
-func TestMultipleLogSystems(t *testing.T) {
- Reset()
-
- logger := NewLogger("TEST")
- testLogSystem0 := &TestLogSystem{level: ErrorLevel}
- testLogSystem1 := &TestLogSystem{level: WarnLevel}
- AddLogSystem(testLogSystem0)
- AddLogSystem(testLogSystem1)
- logger.Errorln("error")
- logger.Warnln("warn")
- Flush()
-
- testLogSystem0.CheckOutput(t, "[TEST] error\n")
- testLogSystem1.CheckOutput(t, "[TEST] error\n[TEST] warn\n")
-}
-
-func TestFileLogSystem(t *testing.T) {
- Reset()
-
- logger := NewLogger("TEST")
- filename := "test.log"
- file, _ := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, os.ModePerm)
- testLogSystem := NewStdLogSystem(file, 0, WarnLevel)
- AddLogSystem(testLogSystem)
- logger.Errorf("error to %s\n", filename)
- logger.Warnln("warn")
- Flush()
- contents, _ := ioutil.ReadFile(filename)
- output := string(contents)
- if output != "[TEST] error to test.log\n[TEST] warn\n" {
- t.Error("Expected contents of file 'test.log': '[TEST] error to test.log\\n[TEST] warn\\n', got ", output)
- } else {
- os.Remove(filename)
- }
-}
-
-func TestNoLogSystem(t *testing.T) {
- Reset()
-
- logger := NewLogger("TEST")
- logger.Warnln("warn")
- Flush()
-}
-
-func TestConcurrentAddSystem(t *testing.T) {
- rand.Seed(time.Now().Unix())
- Reset()
-
- logger := NewLogger("TEST")
- stop := make(chan struct{})
- writer := func() {
- select {
- case <-stop:
- return
- default:
- logger.Infoln("foo")
- Flush()
- }
- }
-
- go writer()
- go writer()
-
- stopTime := time.Now().Add(100 * time.Millisecond)
- for time.Now().Before(stopTime) {
- time.Sleep(time.Duration(rand.Intn(20)) * time.Millisecond)
- AddLogSystem(NewStdLogSystem(ioutil.Discard, 0, InfoLevel))
- }
- close(stop)
-}
diff --git a/logger/logsystem.go b/logger/logsystem.go
deleted file mode 100644
index 24f4351d4..000000000
--- a/logger/logsystem.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package logger
-
-import (
- "io"
- "log"
- "sync/atomic"
-)
-
-// LogSystem is implemented by log output devices.
-// All methods can be called concurrently from multiple goroutines.
-type LogSystem interface {
- LogPrint(LogMsg)
-}
-
-// NewStdLogSystem creates a LogSystem that prints to the given writer.
-// The flag values are defined package log.
-func NewStdLogSystem(writer io.Writer, flags int, level LogLevel) *StdLogSystem {
- logger := log.New(writer, "", flags)
- return &StdLogSystem{logger, uint32(level)}
-}
-
-type StdLogSystem struct {
- logger *log.Logger
- level uint32
-}
-
-func (t *StdLogSystem) LogPrint(msg LogMsg) {
- stdmsg, ok := msg.(stdMsg)
- if ok {
- if t.GetLogLevel() >= stdmsg.Level() {
- t.logger.Print(stdmsg.String())
- }
- }
-}
-
-func (t *StdLogSystem) SetLogLevel(i LogLevel) {
- atomic.StoreUint32(&t.level, uint32(i))
-}
-
-func (t *StdLogSystem) GetLogLevel() LogLevel {
- return LogLevel(atomic.LoadUint32(&t.level))
-}
-
-// NewJSONLogSystem creates a LogSystem that prints to the given writer without
-// adding extra information irrespective of loglevel only if message is JSON type
-func NewJsonLogSystem(writer io.Writer) LogSystem {
- logger := log.New(writer, "", 0)
- return &jsonLogSystem{logger}
-}
-
-type jsonLogSystem struct {
- logger *log.Logger
-}
-
-func (t *jsonLogSystem) LogPrint(msg LogMsg) {
- jsonmsg, ok := msg.(jsonMsg)
- if ok {
- t.logger.Print(jsonmsg.String())
- }
-}
diff --git a/logger/sys.go b/logger/sys.go
deleted file mode 100644
index 18d4ea641..000000000
--- a/logger/sys.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package logger
-
-import (
- "fmt"
- "sync"
-)
-
-type stdMsg struct {
- level LogLevel
- msg string
-}
-
-type jsonMsg []byte
-
-func (m jsonMsg) Level() LogLevel {
- return 0
-}
-
-func (m jsonMsg) String() string {
- return string(m)
-}
-
-type LogMsg interface {
- Level() LogLevel
- fmt.Stringer
-}
-
-func (m stdMsg) Level() LogLevel {
- return m.level
-}
-
-func (m stdMsg) String() string {
- return m.msg
-}
-
-var (
- logMessageC = make(chan LogMsg)
- addSystemC = make(chan LogSystem)
- flushC = make(chan chan struct{})
- resetC = make(chan chan struct{})
-)
-
-func init() {
- go dispatchLoop()
-}
-
-// each system can buffer this many messages before
-// blocking incoming log messages.
-const sysBufferSize = 500
-
-func dispatchLoop() {
- var (
- systems []LogSystem
- systemIn []chan LogMsg
- systemWG sync.WaitGroup
- )
- bootSystem := func(sys LogSystem) {
- in := make(chan LogMsg, sysBufferSize)
- systemIn = append(systemIn, in)
- systemWG.Add(1)
- go sysLoop(sys, in, &systemWG)
- }
-
- for {
- select {
- case msg := <-logMessageC:
- for _, c := range systemIn {
- c <- msg
- }
-
- case sys := <-addSystemC:
- systems = append(systems, sys)
- bootSystem(sys)
-
- case waiter := <-resetC:
- // reset means terminate all systems
- for _, c := range systemIn {
- close(c)
- }
- systems = nil
- systemIn = nil
- systemWG.Wait()
- close(waiter)
-
- case waiter := <-flushC:
- // flush means reboot all systems
- for _, c := range systemIn {
- close(c)
- }
- systemIn = nil
- systemWG.Wait()
- for _, sys := range systems {
- bootSystem(sys)
- }
- close(waiter)
- }
- }
-}
-
-func sysLoop(sys LogSystem, in <-chan LogMsg, wg *sync.WaitGroup) {
- for msg := range in {
- sys.LogPrint(msg)
- }
- wg.Done()
-}
-
-// Reset removes all active log systems.
-// It blocks until all current messages have been delivered.
-func Reset() {
- waiter := make(chan struct{})
- resetC <- waiter
- <-waiter
-}
-
-// Flush waits until all current log messages have been dispatched to
-// the active log systems.
-func Flush() {
- waiter := make(chan struct{})
- flushC <- waiter
- <-waiter
-}
-
-// AddLogSystem starts printing messages to the given LogSystem.
-func AddLogSystem(sys LogSystem) {
- addSystemC <- sys
-}
diff --git a/logger/types.go b/logger/types.go
deleted file mode 100644
index ee7e845de..000000000
--- a/logger/types.go
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package logger
-
-import (
- "math/big"
- "time"
-)
-
-type utctime8601 struct{}
-
-func (utctime8601) MarshalJSON() ([]byte, error) {
- timestr := time.Now().UTC().Format(time.RFC3339Nano)
- // Bounds check
- if len(timestr) > 26 {
- timestr = timestr[:26]
- }
- return []byte(`"` + timestr + `Z"`), nil
-}
-
-type JsonLog interface {
- EventName() string
-}
-
-type LogEvent struct {
- // Guid string `json:"guid"`
- Ts utctime8601 `json:"ts"`
- // Level string `json:"level"`
-}
-
-type LogStarting struct {
- ClientString string `json:"client_impl"`
- ProtocolVersion int `json:"eth_version"`
- LogEvent
-}
-
-func (l *LogStarting) EventName() string {
- return "starting"
-}
-
-type P2PConnected struct {
- RemoteId string `json:"remote_id"`
- RemoteAddress string `json:"remote_addr"`
- RemoteVersionString string `json:"remote_version_string"`
- NumConnections int `json:"num_connections"`
- LogEvent
-}
-
-func (l *P2PConnected) EventName() string {
- return "p2p.connected"
-}
-
-type P2PDisconnected struct {
- NumConnections int `json:"num_connections"`
- RemoteId string `json:"remote_id"`
- LogEvent
-}
-
-func (l *P2PDisconnected) EventName() string {
- return "p2p.disconnected"
-}
-
-type EthMinerNewBlock struct {
- BlockHash string `json:"block_hash"`
- BlockNumber *big.Int `json:"block_number"`
- ChainHeadHash string `json:"chain_head_hash"`
- BlockPrevHash string `json:"block_prev_hash"`
- LogEvent
-}
-
-func (l *EthMinerNewBlock) EventName() string {
- return "eth.miner.new_block"
-}
-
-type EthChainReceivedNewBlock struct {
- BlockHash string `json:"block_hash"`
- BlockNumber *big.Int `json:"block_number"`
- ChainHeadHash string `json:"chain_head_hash"`
- BlockPrevHash string `json:"block_prev_hash"`
- RemoteId string `json:"remote_id"`
- LogEvent
-}
-
-func (l *EthChainReceivedNewBlock) EventName() string {
- return "eth.chain.received.new_block"
-}
-
-type EthChainNewHead struct {
- BlockHash string `json:"block_hash"`
- BlockNumber *big.Int `json:"block_number"`
- ChainHeadHash string `json:"chain_head_hash"`
- BlockPrevHash string `json:"block_prev_hash"`
- LogEvent
-}
-
-func (l *EthChainNewHead) EventName() string {
- return "eth.chain.new_head"
-}
-
-type EthTxReceived struct {
- TxHash string `json:"tx_hash"`
- RemoteId string `json:"remote_id"`
- LogEvent
-}
-
-func (l *EthTxReceived) EventName() string {
- return "eth.tx.received"
-}
-
-//
-//
-// The types below are legacy and need to be converted to new format or deleted
-//
-//
-
-// type P2PConnecting struct {
-// RemoteId string `json:"remote_id"`
-// RemoteEndpoint string `json:"remote_endpoint"`
-// NumConnections int `json:"num_connections"`
-// LogEvent
-// }
-
-// func (l *P2PConnecting) EventName() string {
-// return "p2p.connecting"
-// }
-
-// type P2PHandshaked struct {
-// RemoteCapabilities []string `json:"remote_capabilities"`
-// RemoteId string `json:"remote_id"`
-// NumConnections int `json:"num_connections"`
-// LogEvent
-// }
-
-// func (l *P2PHandshaked) EventName() string {
-// return "p2p.handshaked"
-// }
-
-// type P2PDisconnecting struct {
-// Reason string `json:"reason"`
-// RemoteId string `json:"remote_id"`
-// NumConnections int `json:"num_connections"`
-// LogEvent
-// }
-
-// func (l *P2PDisconnecting) EventName() string {
-// return "p2p.disconnecting"
-// }
-
-// type P2PDisconnectingBadHandshake struct {
-// Reason string `json:"reason"`
-// RemoteId string `json:"remote_id"`
-// NumConnections int `json:"num_connections"`
-// LogEvent
-// }
-
-// func (l *P2PDisconnectingBadHandshake) EventName() string {
-// return "p2p.disconnecting.bad_handshake"
-// }
-
-// type P2PDisconnectingBadProtocol struct {
-// Reason string `json:"reason"`
-// RemoteId string `json:"remote_id"`
-// NumConnections int `json:"num_connections"`
-// LogEvent
-// }
-
-// func (l *P2PDisconnectingBadProtocol) EventName() string {
-// return "p2p.disconnecting.bad_protocol"
-// }
-
-// type P2PDisconnectingReputation struct {
-// Reason string `json:"reason"`
-// RemoteId string `json:"remote_id"`
-// NumConnections int `json:"num_connections"`
-// LogEvent
-// }
-
-// func (l *P2PDisconnectingReputation) EventName() string {
-// return "p2p.disconnecting.reputation"
-// }
-
-// type P2PDisconnectingDHT struct {
-// Reason string `json:"reason"`
-// RemoteId string `json:"remote_id"`
-// NumConnections int `json:"num_connections"`
-// LogEvent
-// }
-
-// func (l *P2PDisconnectingDHT) EventName() string {
-// return "p2p.disconnecting.dht"
-// }
-
-// type P2PEthDisconnectingBadBlock struct {
-// Reason string `json:"reason"`
-// RemoteId string `json:"remote_id"`
-// NumConnections int `json:"num_connections"`
-// LogEvent
-// }
-
-// func (l *P2PEthDisconnectingBadBlock) EventName() string {
-// return "p2p.eth.disconnecting.bad_block"
-// }
-
-// type P2PEthDisconnectingBadTx struct {
-// Reason string `json:"reason"`
-// RemoteId string `json:"remote_id"`
-// NumConnections int `json:"num_connections"`
-// LogEvent
-// }
-
-// func (l *P2PEthDisconnectingBadTx) EventName() string {
-// return "p2p.eth.disconnecting.bad_tx"
-// }
-
-// type EthNewBlockBroadcasted struct {
-// BlockNumber int `json:"block_number"`
-// HeadHash string `json:"head_hash"`
-// BlockHash string `json:"block_hash"`
-// BlockDifficulty int `json:"block_difficulty"`
-// BlockPrevHash string `json:"block_prev_hash"`
-// LogEvent
-// }
-
-// func (l *EthNewBlockBroadcasted) EventName() string {
-// return "eth.newblock.broadcasted"
-// }
-
-// type EthNewBlockIsKnown struct {
-// BlockNumber int `json:"block_number"`
-// HeadHash string `json:"head_hash"`
-// BlockHash string `json:"block_hash"`
-// BlockDifficulty int `json:"block_difficulty"`
-// BlockPrevHash string `json:"block_prev_hash"`
-// LogEvent
-// }
-
-// func (l *EthNewBlockIsKnown) EventName() string {
-// return "eth.newblock.is_known"
-// }
-
-// type EthNewBlockIsNew struct {
-// BlockNumber int `json:"block_number"`
-// HeadHash string `json:"head_hash"`
-// BlockHash string `json:"block_hash"`
-// BlockDifficulty int `json:"block_difficulty"`
-// BlockPrevHash string `json:"block_prev_hash"`
-// LogEvent
-// }
-
-// func (l *EthNewBlockIsNew) EventName() string {
-// return "eth.newblock.is_new"
-// }
-
-// type EthNewBlockMissingParent struct {
-// BlockNumber int `json:"block_number"`
-// HeadHash string `json:"head_hash"`
-// BlockHash string `json:"block_hash"`
-// BlockDifficulty int `json:"block_difficulty"`
-// BlockPrevHash string `json:"block_prev_hash"`
-// LogEvent
-// }
-
-// func (l *EthNewBlockMissingParent) EventName() string {
-// return "eth.newblock.missing_parent"
-// }
-
-// type EthNewBlockIsInvalid struct {
-// BlockNumber int `json:"block_number"`
-// HeadHash string `json:"head_hash"`
-// BlockHash string `json:"block_hash"`
-// BlockDifficulty int `json:"block_difficulty"`
-// BlockPrevHash string `json:"block_prev_hash"`
-// LogEvent
-// }
-
-// func (l *EthNewBlockIsInvalid) EventName() string {
-// return "eth.newblock.is_invalid"
-// }
-
-// type EthNewBlockChainIsOlder struct {
-// BlockNumber int `json:"block_number"`
-// HeadHash string `json:"head_hash"`
-// BlockHash string `json:"block_hash"`
-// BlockDifficulty int `json:"block_difficulty"`
-// BlockPrevHash string `json:"block_prev_hash"`
-// LogEvent
-// }
-
-// func (l *EthNewBlockChainIsOlder) EventName() string {
-// return "eth.newblock.chain.is_older"
-// }
-
-// type EthNewBlockChainIsCanonical struct {
-// BlockNumber int `json:"block_number"`
-// HeadHash string `json:"head_hash"`
-// BlockHash string `json:"block_hash"`
-// BlockDifficulty int `json:"block_difficulty"`
-// BlockPrevHash string `json:"block_prev_hash"`
-// LogEvent
-// }
-
-// func (l *EthNewBlockChainIsCanonical) EventName() string {
-// return "eth.newblock.chain.is_cannonical"
-// }
-
-// type EthNewBlockChainNotCanonical struct {
-// BlockNumber int `json:"block_number"`
-// HeadHash string `json:"head_hash"`
-// BlockHash string `json:"block_hash"`
-// BlockDifficulty int `json:"block_difficulty"`
-// BlockPrevHash string `json:"block_prev_hash"`
-// LogEvent
-// }
-
-// func (l *EthNewBlockChainNotCanonical) EventName() string {
-// return "eth.newblock.chain.not_cannonical"
-// }
-
-// type EthTxCreated struct {
-// TxHash string `json:"tx_hash"`
-// TxSender string `json:"tx_sender"`
-// TxAddress string `json:"tx_address"`
-// TxHexRLP string `json:"tx_hexrlp"`
-// TxNonce int `json:"tx_nonce"`
-// LogEvent
-// }
-
-// func (l *EthTxCreated) EventName() string {
-// return "eth.tx.created"
-// }
-
-// type EthTxBroadcasted struct {
-// TxHash string `json:"tx_hash"`
-// TxSender string `json:"tx_sender"`
-// TxAddress string `json:"tx_address"`
-// TxNonce int `json:"tx_nonce"`
-// LogEvent
-// }
-
-// func (l *EthTxBroadcasted) EventName() string {
-// return "eth.tx.broadcasted"
-// }
-
-// type EthTxValidated struct {
-// TxHash string `json:"tx_hash"`
-// TxSender string `json:"tx_sender"`
-// TxAddress string `json:"tx_address"`
-// TxNonce int `json:"tx_nonce"`
-// LogEvent
-// }
-
-// func (l *EthTxValidated) EventName() string {
-// return "eth.tx.validated"
-// }
-
-// type EthTxIsInvalid struct {
-// TxHash string `json:"tx_hash"`
-// TxSender string `json:"tx_sender"`
-// TxAddress string `json:"tx_address"`
-// Reason string `json:"reason"`
-// TxNonce int `json:"tx_nonce"`
-// LogEvent
-// }
-
-// func (l *EthTxIsInvalid) EventName() string {
-// return "eth.tx.is_invalid"
-// }
diff --git a/miner/worker.go b/miner/worker.go
index 3ae51d120..77e4e0205 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -39,8 +39,6 @@ import (
"gopkg.in/fatih/set.v0"
)
-var jsonlogger = logger.NewJsonLogger()
-
const (
resultQueueSize = 10
miningLogAtDepth = 5
@@ -256,7 +254,7 @@ func (self *worker) update() {
self.currentMu.Lock()
acc, _ := types.Sender(self.current.signer, ev.Tx)
- txs := map[common.Address]types.Transactions{acc: types.Transactions{ev.Tx}}
+ txs := map[common.Address]types.Transactions{acc: {ev.Tx}}
txset := types.NewTransactionsByPriceAndNonce(txs)
self.current.commitTransactions(self.mux, txset, self.gasPrice, self.chain)
@@ -327,7 +325,7 @@ func (self *worker) wait() {
}
// broadcast before waiting for validation
- go func(block *types.Block, logs vm.Logs, receipts []*types.Receipt) {
+ go func(block *types.Block, logs []*types.Log, receipts []*types.Receipt) {
self.mux.Post(core.NewMinedBlockEvent{Block: block})
self.mux.Post(core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
@@ -449,7 +447,7 @@ func (self *worker) commitNewWork() {
// Depending whether we support or oppose the fork, override differently
if self.config.DAOForkSupport {
header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
- } else if bytes.Compare(header.Extra, params.DAOForkBlockExtra) == 0 {
+ } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
}
}
@@ -494,7 +492,7 @@ func (self *worker) commitNewWork() {
}
badUncles = append(badUncles, hash)
} else {
- glog.V(logger.Debug).Infof("commiting %x as uncle\n", hash[:4])
+ glog.V(logger.Debug).Infof("committing %x as uncle\n", hash[:4])
uncles = append(uncles, uncle.Header())
}
}
@@ -537,7 +535,7 @@ func (self *worker) commitUncle(work *Work, uncle *types.Header) error {
func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByPriceAndNonce, gasPrice *big.Int, bc *core.BlockChain) {
gp := new(core.GasPool).AddGas(env.header.GasLimit)
- var coalescedLogs vm.Logs
+ var coalescedLogs []*types.Log
for {
// Retrieve the next transaction and abort if all done
@@ -597,12 +595,12 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
// logs by filling in the block hash when the block was mined by the local miner. This can
// cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed.
- cpy := make(vm.Logs, len(coalescedLogs))
+ cpy := make([]*types.Log, len(coalescedLogs))
for i, l := range coalescedLogs {
- cpy[i] = new(vm.Log)
+ cpy[i] = new(types.Log)
*cpy[i] = *l
}
- go func(logs vm.Logs, tcount int) {
+ go func(logs []*types.Log, tcount int) {
if len(logs) > 0 {
mux.Post(core.PendingLogsEvent{Logs: logs})
}
@@ -613,7 +611,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
}
}
-func (env *Work) commitTransaction(tx *types.Transaction, bc *core.BlockChain, gp *core.GasPool) (error, vm.Logs) {
+func (env *Work) commitTransaction(tx *types.Transaction, bc *core.BlockChain, gp *core.GasPool) (error, []*types.Log) {
snap := env.state.Snapshot()
receipt, _, err := core.ApplyTransaction(env.config, bc, gp, env.state, env.header, tx, env.header.GasUsed, vm.Config{})
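
The worker hunks above replace the vm.Logs alias with a plain []*types.Log slice while keeping the deep copy of coalescedLogs before the PendingLogsEvent goroutine is launched. A minimal standalone sketch of that copy-before-share pattern follows; the Log type and the "post" goroutine here are stand-ins, not the go-ethereum types.

package main

import (
	"fmt"
	"sync"
)

// Log stands in for types.Log; only the field that gets "upgraded" later matters here.
type Log struct{ BlockHash string }

func main() {
	pending := []*Log{{}, {}}

	// Deep-copy before handing the logs to another goroutine: the originals are
	// mutated later (the block hash is filled in once the block is mined), and
	// sharing the same pointers would race with any reader.
	cpy := make([]*Log, len(pending))
	for i, l := range pending {
		cpy[i] = new(Log)
		*cpy[i] = *l
	}

	var wg sync.WaitGroup
	wg.Add(1)
	go func(logs []*Log) {
		defer wg.Done()
		fmt.Println("posted logs:", len(logs)) // readers only ever see the copies
	}(cpy)

	pending[0].BlockHash = "0x1234" // the later "upgrade" touches only the originals
	wg.Wait()
}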
diff --git a/mobile/accounts.go b/mobile/accounts.go
index 90f664d29..47c3a5c21 100644
--- a/mobile/accounts.go
+++ b/mobile/accounts.go
@@ -115,10 +115,10 @@ func (am *AccountManager) Sign(address *Address, hash []byte) (signature []byte,
return am.manager.Sign(address.address, hash)
}
-// SignWithPassphrase signs hash if the private key matching the given address
-// can be decrypted with the given passphrase. The produced signature is in the
+// SignPassphrase signs hash if the private key matching the given address can
+// be decrypted with the given passphrase. The produced signature is in the
// [R || S || V] format where V is 0 or 1.
-func (am *AccountManager) SignWithPassphrase(account *Account, passphrase string, hash []byte) (signature []byte, _ error) {
+func (am *AccountManager) SignPassphrase(account *Account, passphrase string, hash []byte) (signature []byte, _ error) {
return am.manager.SignWithPassphrase(account.account, passphrase, hash)
}
diff --git a/mobile/android_test.go b/mobile/android_test.go
index 9e38c1986..3776f8291 100644
--- a/mobile/android_test.go
+++ b/mobile/android_test.go
@@ -69,7 +69,7 @@ public class AndroidTest extends InstrumentationTestCase {
Hash txHash = new Hash("0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef");
// Sign a transaction with a single authorization
- byte[] signature = am.signWithPassphrase(signer, "Signer password", txHash.getBytes());
+ byte[] signature = am.signPassphrase(signer, "Signer password", txHash.getBytes());
// Sign a transaction with multiple manually cancelled authorizations
am.unlock(signer, "Signer password");
diff --git a/mobile/bind.go b/mobile/bind.go
index a25c37aca..bc4eb25ba 100644
--- a/mobile/bind.go
+++ b/mobile/bind.go
@@ -114,17 +114,12 @@ type BoundContract struct {
// DeployContract deploys a contract onto the Ethereum blockchain and binds the
// deployment address with a wrapper.
func DeployContract(opts *TransactOpts, abiJSON string, bytecode []byte, client *EthereumClient, args *Interfaces) (contract *BoundContract, _ error) {
- // Convert all the deployment parameters to Go types
- params := make([]interface{}, len(args.objects))
- for i, obj := range args.objects {
- params[i] = obj
- }
// Deploy the contract to the network
parsed, err := abi.JSON(strings.NewReader(abiJSON))
if err != nil {
return nil, err
}
- addr, tx, bound, err := bind.DeployContract(&opts.opts, parsed, bytecode, client.client, params...)
+ addr, tx, bound, err := bind.DeployContract(&opts.opts, parsed, bytecode, client.client, args.objects...)
if err != nil {
return nil, err
}
@@ -159,32 +154,18 @@ func (c *BoundContract) GetDeployer() *Transaction {
// Call invokes the (constant) contract method with params as input values and
// sets the output to result.
func (c *BoundContract) Call(opts *CallOpts, out *Interfaces, method string, args *Interfaces) error {
- // Convert all the input and output parameters to Go types
- params := make([]interface{}, len(args.objects))
- for i, obj := range args.objects {
- params[i] = obj
- }
results := make([]interface{}, len(out.objects))
- for i, obj := range out.objects {
- results[i] = obj
- }
- // Execute the call to the contract and wrap any results
- if err := c.contract.Call(&opts.opts, &results, method, params...); err != nil {
+ copy(results, out.objects)
+ if err := c.contract.Call(&opts.opts, &results, method, args.objects...); err != nil {
return err
}
- for i, res := range results {
- out.objects[i] = res
- }
+ copy(out.objects, results)
return nil
}
// Transact invokes the (paid) contract method with params as input values.
func (c *BoundContract) Transact(opts *TransactOpts, method string, args *Interfaces) (tx *Transaction, _ error) {
- params := make([]interface{}, len(args.objects))
- for i, obj := range args.objects {
- params[i] = obj
- }
- rawTx, err := c.contract.Transact(&opts.opts, method, params)
+ rawTx, err := c.contract.Transact(&opts.opts, method, args.objects)
if err != nil {
return nil, err
}
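
The bind wrapper changes above drop the element-by-element conversion loops: a []interface{} field can be expanded straight into a variadic parameter, and the builtin copy shuttles values between two interface slices. A small self-contained sketch; the call function is a hypothetical stand-in for the contract Call/Transact API, not the real binding.

package main

import "fmt"

// call is a hypothetical stand-in for a variadic API such as BoundContract.Call.
func call(method string, args ...interface{}) []interface{} {
	fmt.Println("invoking", method, "with", args)
	return args
}

func main() {
	objects := []interface{}{"0xdeadbeef", 42}

	// The slice already has type []interface{}, so it expands directly.
	results := call("transfer", objects...)

	// copy moves the values back into a caller-owned slice without a manual loop.
	out := make([]interface{}, len(results))
	copy(out, results)
	fmt.Println("results:", out)
}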
diff --git a/mobile/ethclient.go b/mobile/ethclient.go
index 36a15aa47..4e8328501 100644
--- a/mobile/ethclient.go
+++ b/mobile/ethclient.go
@@ -22,7 +22,6 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethclient"
)
@@ -191,7 +190,7 @@ func (ec *EthereumClient) FilterLogs(ctx *Context, query *FilterQuery) (logs *Lo
return nil, err
}
// Temp hack due to vm.Logs being []*vm.Log
- res := make(vm.Logs, len(rawLogs))
+ res := make([]*types.Log, len(rawLogs))
for i, log := range rawLogs {
res[i] = &log
}
@@ -208,7 +207,7 @@ type FilterLogsHandler interface {
// SubscribeFilterLogs subscribes to the results of a streaming filter query.
func (ec *EthereumClient) SubscribeFilterLogs(ctx *Context, query *FilterQuery, handler FilterLogsHandler, buffer int) (sub *Subscription, _ error) {
// Subscribe to the event internally
- ch := make(chan vm.Log, buffer)
+ ch := make(chan types.Log, buffer)
rawSub, err := ec.client.SubscribeFilterLogs(ctx.context, query.query, ch)
if err != nil {
return nil, err
diff --git a/mobile/p2p.go b/mobile/p2p.go
index e717d4004..8d21639e5 100644
--- a/mobile/p2p.go
+++ b/mobile/p2p.go
@@ -38,7 +38,7 @@ func (ni *NodeInfo) GetListenerPort() int { return ni.info.Ports.Listener
func (ni *NodeInfo) GetListenerAddress() string { return ni.info.ListenAddr }
func (ni *NodeInfo) GetProtocols() *Strings {
protos := []string{}
- for proto, _ := range ni.info.Protocols {
+ for proto := range ni.info.Protocols {
protos = append(protos, proto)
}
return &Strings{protos}
diff --git a/mobile/params.go b/mobile/params.go
index 8d5d3edbe..87747c7b0 100644
--- a/mobile/params.go
+++ b/mobile/params.go
@@ -85,8 +85,8 @@ func NewChainConfig() *ChainConfig {
// by the foundation running the V5 discovery protocol.
func FoundationBootnodes() *Enodes {
nodes := &Enodes{nodes: make([]*discv5.Node, len(params.DiscoveryV5Bootnodes))}
- for i, node := range params.DiscoveryV5Bootnodes {
- nodes.nodes[i] = node
+ for i, url := range params.DiscoveryV5Bootnodes {
+ nodes.nodes[i] = discv5.MustParseNode(url)
}
return nodes
}
diff --git a/mobile/vm.go b/mobile/vm.go
index cb098d390..72093e3d5 100644
--- a/mobile/vm.go
+++ b/mobile/vm.go
@@ -21,13 +21,13 @@ package geth
import (
"errors"
- "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/core/types"
)
// Log represents a contract log event. These events are generated by the LOG
// opcode and stored/indexed by the node.
type Log struct {
- log *vm.Log
+ log *types.Log
}
func (l *Log) GetAddress() *Address { return &Address{l.log.Address} }
@@ -40,7 +40,7 @@ func (l *Log) GetBlockHash() *Hash { return &Hash{l.log.BlockHash} }
func (l *Log) GetIndex() int { return int(l.log.Index) }
// Logs represents a slice of VM logs.
-type Logs struct{ logs vm.Logs }
+type Logs struct{ logs []*types.Log }
// Size returns the number of logs in the slice.
func (l *Logs) Size() int {
diff --git a/node/api.go b/node/api.go
index 988eff379..3c451fc8a 100644
--- a/node/api.go
+++ b/node/api.go
@@ -104,7 +104,7 @@ func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis
}
}
- if err := api.node.startHTTP(fmt.Sprintf("%s:%d", *host, port), api.node.rpcAPIs, modules, *cors); err != nil {
+ if err := api.node.startHTTP(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, *cors); err != nil {
return false, err
}
return true, nil
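
The one-character StartRPC fix above matters because host and port arrive as pointers: fmt's %d verb accepts a pointer and formats its address as an integer, so the unstarred port produced a bogus listen address. A minimal reproduction:

package main

import "fmt"

func main() {
	host, port := "127.0.0.1", 8545

	// %d on a *int formats the pointer address as an integer (per the fmt docs),
	// silently replacing the port number with a huge bogus value.
	fmt.Println(fmt.Sprintf("%s:%d", host, &port)) // e.g. 127.0.0.1:824634204160
	fmt.Println(fmt.Sprintf("%s:%d", host, port))  // 127.0.0.1:8545
}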
diff --git a/node/config_test.go b/node/config_test.go
index b258d2a8b..d18732fdb 100644
--- a/node/config_test.go
+++ b/node/config_test.go
@@ -137,7 +137,7 @@ func TestNodeKeyPersistency(t *testing.T) {
if err != nil {
t.Fatalf("failed to read previously persisted node key: %v", err)
}
- if bytes.Compare(blob1, blob2) != 0 {
+ if !bytes.Equal(blob1, blob2) {
t.Fatalf("persisted node key mismatch: have %x, want %x", blob2, blob1)
}
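
Several files in this diff swap bytes.Compare(...) != 0 for bytes.Equal. The two are equivalent for equality checks, but Equal states the intent directly instead of going through a three-way comparison. For reference:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	a := []byte{0x01, 0x02}
	b := []byte{0x01, 0x02}

	// Identical results; Equal reads as an equality test rather than an ordering.
	fmt.Println(bytes.Compare(a, b) == 0) // true
	fmt.Println(bytes.Equal(a, b))        // true
}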
diff --git a/node/node_example_test.go b/node/node_example_test.go
index 01ff683c0..7c586452f 100644
--- a/node/node_example_test.go
+++ b/node/node_example_test.go
@@ -44,7 +44,7 @@ func (s *SampleService) Stop() error { fmt.Println("Service stoppi
func ExampleUsage() {
// Create a network node to run protocols with the default values. The below list
// is only used to display each of the configuration options. All of these could
- // have been ommited if the default behavior is desired.
+ // have been omitted if the default behavior is desired.
nodeConfig := &node.Config{
DataDir: "", // Empty uses ephemeral storage
PrivateKey: nil, // Nil generates a node key on the fly
diff --git a/node/node_test.go b/node/node_test.go
index d9b26453b..408d4cfcb 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -166,7 +166,7 @@ func TestServiceLifeCycle(t *testing.T) {
if err := stack.Start(); err != nil {
t.Fatalf("failed to start protocol stack: %v", err)
}
- for id, _ := range services {
+ for id := range services {
if !started[id] {
t.Fatalf("service %s: freshly started service not running", id)
}
@@ -178,7 +178,7 @@ func TestServiceLifeCycle(t *testing.T) {
if err := stack.Stop(); err != nil {
t.Fatalf("failed to stop protocol stack: %v", err)
}
- for id, _ := range services {
+ for id := range services {
if !stopped[id] {
t.Fatalf("service %s: freshly terminated service still running", id)
}
@@ -218,7 +218,7 @@ func TestServiceRestarts(t *testing.T) {
}
defer stack.Stop()
- if running != true || started != 1 {
+ if !running || started != 1 {
t.Fatalf("running/started mismatch: have %v/%d, want true/1", running, started)
}
// Restart the stack a few times and check successful service restarts
@@ -227,7 +227,7 @@ func TestServiceRestarts(t *testing.T) {
t.Fatalf("iter %d: failed to restart stack: %v", i, err)
}
}
- if running != true || started != 4 {
+ if !running || started != 4 {
t.Fatalf("running/started mismatch: have %v/%d, want true/4", running, started)
}
}
@@ -270,7 +270,7 @@ func TestServiceConstructionAbortion(t *testing.T) {
if err := stack.Start(); err != failure {
t.Fatalf("iter %d: stack startup failure mismatch: have %v, want %v", i, err, failure)
}
- for id, _ := range services {
+ for id := range services {
if started[id] {
t.Fatalf("service %s: started should not have", id)
}
@@ -322,7 +322,7 @@ func TestServiceStartupAbortion(t *testing.T) {
if err := stack.Start(); err != failure {
t.Fatalf("iter %d: stack startup failure mismatch: have %v, want %v", i, err, failure)
}
- for id, _ := range services {
+ for id := range services {
if started[id] && !stopped[id] {
t.Fatalf("service %s: started but not stopped", id)
}
@@ -376,7 +376,7 @@ func TestServiceTerminationGuarantee(t *testing.T) {
if err := stack.Start(); err != nil {
t.Fatalf("iter %d: failed to start protocol stack: %v", i, err)
}
- for id, _ := range services {
+ for id := range services {
if !started[id] {
t.Fatalf("iter %d, service %s: service not running", i, id)
}
@@ -397,7 +397,7 @@ func TestServiceTerminationGuarantee(t *testing.T) {
t.Fatalf("iter %d: failure count mismatch: have %d, want %d", i, len(err.Services), 1)
}
}
- for id, _ := range services {
+ for id := range services {
if !stopped[id] {
t.Fatalf("iter %d, service %s: service not terminated", i, id)
}
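
The node and p2p test changes above are the gofmt -s range simplifications: the blank value in `for id, _ := range m` and the blank-only `for _ = range xs` can simply be dropped. In isolation:

package main

import "fmt"

func main() {
	services := map[string]bool{"eth": true, "shh": false}

	// Keys only: the blank value assignment is redundant.
	for id := range services {
		fmt.Println("service:", id)
	}

	// Neither key nor value needed: `for range` still iterates the right number of times.
	count := 0
	for range services {
		count++
	}
	fmt.Println("iterations:", count)
}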
diff --git a/p2p/discover/database.go b/p2p/discover/database.go
index d6ea507bb..8d20d1ec7 100644
--- a/p2p/discover/database.go
+++ b/p2p/discover/database.go
@@ -258,7 +258,7 @@ func (db *nodeDB) expireNodes() error {
continue
}
// Skip the node if not expired yet (and not self)
- if bytes.Compare(id[:], db.self[:]) != 0 {
+ if !bytes.Equal(id[:], db.self[:]) {
if seen := db.lastPong(id); seen.After(threshold) {
continue
}
diff --git a/p2p/discover/database_test.go b/p2p/discover/database_test.go
index 5a729f02b..be972fd2c 100644
--- a/p2p/discover/database_test.go
+++ b/p2p/discover/database_test.go
@@ -242,12 +242,12 @@ func TestNodeDBSeedQuery(t *testing.T) {
if len(seeds) != len(want) {
t.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want))
}
- for id, _ := range have {
+ for id := range have {
if _, ok := want[id]; !ok {
t.Errorf("extra seed: %v", id)
}
}
- for id, _ := range want {
+ for id := range want {
if _, ok := have[id]; !ok {
t.Errorf("missing seed: %v", id)
}
diff --git a/p2p/discover/node.go b/p2p/discover/node.go
index eec0bae0c..8b1062d87 100644
--- a/p2p/discover/node.go
+++ b/p2p/discover/node.go
@@ -224,11 +224,8 @@ func (n NodeID) GoString() string {
// HexID converts a hex string to a NodeID.
// The string may be prefixed with 0x.
func HexID(in string) (NodeID, error) {
- if strings.HasPrefix(in, "0x") {
- in = in[2:]
- }
var id NodeID
- b, err := hex.DecodeString(in)
+ b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
if err != nil {
return id, err
} else if len(b) != len(id) {
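
The HexID change above collapses the HasPrefix/reslice dance into strings.TrimPrefix, which is a no-op when the prefix is absent. A standalone version of the same decode:

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	for _, in := range []string{"0xdeadbeef", "deadbeef"} {
		// TrimPrefix leaves the string untouched if it does not start with "0x".
		b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
		fmt.Println(b, err) // [222 173 190 239] <nil> in both cases
	}
}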
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index ad0b5c8ca..839e3ec7e 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -433,7 +433,7 @@ func (tab *Table) bondall(nodes []*Node) (result []*Node) {
rc <- nn
}(nodes[i])
}
- for _ = range nodes {
+ for range nodes {
if n := <-rc; n != nil {
result = append(result, n)
}
diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go
index 102c7c2d1..1037cc609 100644
--- a/p2p/discover/table_test.go
+++ b/p2p/discover/table_test.go
@@ -314,19 +314,19 @@ var lookupTestnet = &preminedTestnet{
target: MustHexID("166aea4f556532c6d34e8b740e5d314af7e9ac0ca79833bd751d6b665f12dfd38ec563c363b32f02aef4a80b44fd3def94612d497b99cb5f17fd24de454927ec"),
targetSha: common.Hash{0x5c, 0x94, 0x4e, 0xe5, 0x1c, 0x5a, 0xe9, 0xf7, 0x2a, 0x95, 0xec, 0xcb, 0x8a, 0xed, 0x3, 0x74, 0xee, 0xcb, 0x51, 0x19, 0xd7, 0x20, 0xcb, 0xea, 0x68, 0x13, 0xe8, 0xe0, 0xd6, 0xad, 0x92, 0x61},
dists: [257][]NodeID{
- 240: []NodeID{
+ 240: {
MustHexID("2001ad5e3e80c71b952161bc0186731cf5ffe942d24a79230a0555802296238e57ea7a32f5b6f18564eadc1c65389448481f8c9338df0a3dbd18f708cbc2cbcb"),
MustHexID("6ba3f4f57d084b6bf94cc4555b8c657e4a8ac7b7baf23c6874efc21dd1e4f56b7eb2721e07f5242d2f1d8381fc8cae535e860197c69236798ba1ad231b105794"),
},
- 244: []NodeID{
+ 244: {
MustHexID("696ba1f0a9d55c59246f776600542a9e6432490f0cd78f8bb55a196918df2081a9b521c3c3ba48e465a75c10768807717f8f689b0b4adce00e1c75737552a178"),
},
- 246: []NodeID{
+ 246: {
MustHexID("d6d32178bdc38416f46ffb8b3ec9e4cb2cfff8d04dd7e4311a70e403cb62b10be1b447311b60b4f9ee221a8131fc2cbd45b96dd80deba68a949d467241facfa8"),
MustHexID("3ea3d04a43a3dfb5ac11cffc2319248cf41b6279659393c2f55b8a0a5fc9d12581a9d97ef5d8ff9b5abf3321a290e8f63a4f785f450dc8a672aba3ba2ff4fdab"),
MustHexID("2fc897f05ae585553e5c014effd3078f84f37f9333afacffb109f00ca8e7a3373de810a3946be971cbccdfd40249f9fe7f322118ea459ac71acca85a1ef8b7f4"),
},
- 247: []NodeID{
+ 247: {
MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
@@ -338,7 +338,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("4ab0a75941b12892369b4490a1928c8ca52a9ad6d3dffbd1d8c0b907bc200fe74c022d011ec39b64808a39c0ca41f1d3254386c3e7733e7044c44259486461b6"),
MustHexID("d45150a72dc74388773e68e03133a3b5f51447fe91837d566706b3c035ee4b56f160c878c6273394daee7f56cc398985269052f22f75a8057df2fe6172765354"),
},
- 248: []NodeID{
+ 248: {
MustHexID("6aadfce366a189bab08ac84721567483202c86590642ea6d6a14f37ca78d82bdb6509eb7b8b2f6f63c78ae3ae1d8837c89509e41497d719b23ad53dd81574afa"),
MustHexID("a605ecfd6069a4cf4cf7f5840e5bc0ce10d23a3ac59e2aaa70c6afd5637359d2519b4524f56fc2ca180cdbebe54262f720ccaae8c1b28fd553c485675831624d"),
MustHexID("29701451cb9448ca33fc33680b44b840d815be90146eb521641efbffed0859c154e8892d3906eae9934bfacee72cd1d2fa9dd050fd18888eea49da155ab0efd2"),
@@ -356,7 +356,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("b76ea1a6fd6506ef6e3506a4f1f60ed6287fff8114af6141b2ff13e61242331b54082b023cfea5b3083354a4fb3f9eb8be01fb4a518f579e731a5d0707291a6b"),
MustHexID("9b53a37950ca8890ee349b325032d7b672cab7eced178d3060137b24ef6b92a43977922d5bdfb4a3409a2d80128e02f795f9dae6d7d99973ad0e23a2afb8442f"),
},
- 249: []NodeID{
+ 249: {
MustHexID("675ae65567c3c72c50c73bc0fd4f61f202ea5f93346ca57b551de3411ccc614fad61cb9035493af47615311b9d44ee7a161972ee4d77c28fe1ec029d01434e6a"),
MustHexID("8eb81408389da88536ae5800392b16ef5109d7ea132c18e9a82928047ecdb502693f6e4a4cdd18b54296caf561db937185731456c456c98bfe7de0baf0eaa495"),
MustHexID("2adba8b1612a541771cb93a726a38a4b88e97b18eced2593eb7daf82f05a5321ca94a72cc780c306ff21e551a932fc2c6d791e4681907b5ceab7f084c3fa2944"),
@@ -374,7 +374,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("d94193f236105010972f5df1b7818b55846592a0445b9cdc4eaed811b8c4c0f7c27dc8cc9837a4774656d6b34682d6d329d42b6ebb55da1d475c2474dc3dfdf4"),
MustHexID("edd9af6aded4094e9785637c28fccbd3980cbe28e2eb9a411048a23c2ace4bd6b0b7088a7817997b49a3dd05fc6929ca6c7abbb69438dbdabe65e971d2a794b2"),
},
- 250: []NodeID{
+ 250: {
MustHexID("53a5bd1215d4ab709ae8fdc2ced50bba320bced78bd9c5dc92947fb402250c914891786db0978c898c058493f86fc68b1c5de8a5cb36336150ac7a88655b6c39"),
MustHexID("b7f79e3ab59f79262623c9ccefc8f01d682323aee56ffbe295437487e9d5acaf556a9c92e1f1c6a9601f2b9eb6b027ae1aeaebac71d61b9b78e88676efd3e1a3"),
MustHexID("d374bf7e8d7ffff69cc00bebff38ef5bc1dcb0a8d51c1a3d70e61ac6b2e2d6617109254b0ac224354dfbf79009fe4239e09020c483cc60c071e00b9238684f30"),
@@ -392,7 +392,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("5b116f0751526868a909b61a30b0c5282c37df6925cc03ddea556ef0d0602a9595fd6c14d371f8ed7d45d89918a032dcd22be4342a8793d88fdbeb3ca3d75bd7"),
MustHexID("50f3222fb6b82481c7c813b2172e1daea43e2710a443b9c2a57a12bd160dd37e20f87aa968c82ad639af6972185609d47036c0d93b4b7269b74ebd7073221c10"),
},
- 251: []NodeID{
+ 251: {
MustHexID("9b8f702a62d1bee67bedfeb102eca7f37fa1713e310f0d6651cc0c33ea7c5477575289ccd463e5a2574a00a676a1fdce05658ba447bb9d2827f0ba47b947e894"),
MustHexID("b97532eb83054ed054b4abdf413bb30c00e4205545c93521554dbe77faa3cfaa5bd31ef466a107b0b34a71ec97214c0c83919720142cddac93aa7a3e928d4708"),
MustHexID("2f7a5e952bfb67f2f90b8441b5fadc9ee13b1dcde3afeeb3dd64bf937f86663cc5c55d1fa83952b5422763c7df1b7f2794b751c6be316ebc0beb4942e65ab8c1"),
@@ -410,7 +410,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("fcc9a2e1ac3667026ff16192876d1813bb75abdbf39b929a92863012fe8b1d890badea7a0de36274d5c1eb1e8f975785532c50d80fd44b1a4b692f437303393f"),
MustHexID("6d8b3efb461151dd4f6de809b62726f5b89e9b38e9ba1391967f61cde844f7528fecf821b74049207cee5a527096b31f3ad623928cd3ce51d926fa345a6b2951"),
},
- 252: []NodeID{
+ 252: {
MustHexID("f1ae93157cc48c2075dd5868fbf523e79e06caf4b8198f352f6e526680b78ff4227263de92612f7d63472bd09367bb92a636fff16fe46ccf41614f7a72495c2a"),
MustHexID("587f482d111b239c27c0cb89b51dd5d574db8efd8de14a2e6a1400c54d4567e77c65f89c1da52841212080b91604104768350276b6682f2f961cdaf4039581c7"),
MustHexID("e3f88274d35cefdaabdf205afe0e80e936cc982b8e3e47a84ce664c413b29016a4fb4f3a3ebae0a2f79671f8323661ed462bf4390af94c424dc8ace0c301b90f"),
@@ -428,7 +428,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("d84f06fe64debc4cd0625e36d19b99014b6218375262cc2209202bdbafd7dffcc4e34ce6398e182e02fd8faeed622c3e175545864902dfd3d1ac57647cddf4c6"),
MustHexID("d0ed87b294f38f1d741eb601020eeec30ac16331d05880fe27868f1e454446de367d7457b41c79e202eaf9525b029e4f1d7e17d85a55f83a557c005c68d7328a"),
},
- 253: []NodeID{
+ 253: {
MustHexID("ad4485e386e3cc7c7310366a7c38fb810b8896c0d52e55944bfd320ca294e7912d6c53c0a0cf85e7ce226e92491d60430e86f8f15cda0161ed71893fb4a9e3a1"),
MustHexID("36d0e7e5b7734f98c6183eeeb8ac5130a85e910a925311a19c4941b1290f945d4fc3996b12ef4966960b6fa0fb29b1604f83a0f81bd5fd6398d2e1a22e46af0c"),
MustHexID("7d307d8acb4a561afa23bdf0bd945d35c90245e26345ec3a1f9f7df354222a7cdcb81339c9ed6744526c27a1a0c8d10857e98df942fa433602facac71ac68a31"),
@@ -446,7 +446,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("7a369b2b8962cc4c65900be046482fbf7c14f98a135bbbae25152c82ad168fb2097b3d1429197cf46d3ce9fdeb64808f908a489cc6019725db040060fdfe5405"),
MustHexID("47bcae48288da5ecc7f5058dfa07cf14d89d06d6e449cb946e237aa6652ea050d9f5a24a65efdc0013ccf232bf88670979eddef249b054f63f38da9d7796dbd8"),
},
- 254: []NodeID{
+ 254: {
MustHexID("099739d7abc8abd38ecc7a816c521a1168a4dbd359fa7212a5123ab583ffa1cf485a5fed219575d6475dbcdd541638b2d3631a6c7fce7474e7fe3cba1d4d5853"),
MustHexID("c2b01603b088a7182d0cf7ef29fb2b04c70acb320fccf78526bf9472e10c74ee70b3fcfa6f4b11d167bd7d3bc4d936b660f2c9bff934793d97cb21750e7c3d31"),
MustHexID("20e4d8f45f2f863e94b45548c1ef22a11f7d36f263e4f8623761e05a64c4572379b000a52211751e2561b0f14f4fc92dd4130410c8ccc71eb4f0e95a700d4ca9"),
@@ -464,7 +464,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("82504b6eb49bb2c0f91a7006ce9cefdbaf6df38706198502c2e06601091fc9dc91e4f15db3410d45c6af355bc270b0f268d3dff560f956985c7332d4b10bd1ed"),
MustHexID("b39b5b677b45944ceebe76e76d1f051de2f2a0ec7b0d650da52135743e66a9a5dba45f638258f9a7545d9a790c7fe6d3fdf82c25425c7887323e45d27d06c057"),
},
- 255: []NodeID{
+ 255: {
MustHexID("5c4d58d46e055dd1f093f81ee60a675e1f02f54da6206720adee4dccef9b67a31efc5c2a2949c31a04ee31beadc79aba10da31440a1f9ff2a24093c63c36d784"),
MustHexID("ea72161ffdd4b1e124c7b93b0684805f4c4b58d617ed498b37a145c670dbc2e04976f8785583d9c805ffbf343c31d492d79f841652bbbd01b61ed85640b23495"),
MustHexID("51caa1d93352d47a8e531692a3612adac1e8ac68d0a200d086c1c57ae1e1a91aa285ab242e8c52ef9d7afe374c9485b122ae815f1707b875569d0433c1c3ce85"),
@@ -482,7 +482,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("f492c6ee2696d5f682f7f537757e52744c2ae560f1090a07024609e903d334e9e174fc01609c5a229ddbcac36c9d21adaf6457dab38a25bfd44f2f0ee4277998"),
MustHexID("459e4db99298cb0467a90acee6888b08bb857450deac11015cced5104853be5adce5b69c740968bc7f931495d671a70cad9f48546d7cd203357fe9af0e8d2164"),
},
- 256: []NodeID{
+ 256: {
MustHexID("a8593af8a4aef7b806b5197612017951bac8845a1917ca9a6a15dd6086d608505144990b245785c4cd2d67a295701c7aac2aa18823fb0033987284b019656268"),
MustHexID("d2eebef914928c3aad77fc1b2a495f52d2294acf5edaa7d8a530b540f094b861a68fe8348a46a7c302f08ab609d85912a4968eacfea0740847b29421b4795d9e"),
MustHexID("b14bfcb31495f32b650b63cf7d08492e3e29071fdc73cf2da0da48d4b191a70ba1a65f42ad8c343206101f00f8a48e8db4b08bf3f622c0853e7323b250835b91"),
diff --git a/p2p/discover/udp_test.go b/p2p/discover/udp_test.go
index 8bca37ffe..21e8b561d 100644
--- a/p2p/discover/udp_test.go
+++ b/p2p/discover/udp_test.go
@@ -234,7 +234,7 @@ func TestUDP_findnode(t *testing.T) {
defer test.table.Close()
// put a few nodes into the table. their exact
- // distribution shouldn't matter much, altough we need to
+ // distribution shouldn't matter much, although we need to
// take care not to overflow any bucket.
targetHash := crypto.Keccak256Hash(testTarget[:])
nodes := &nodesByDistance{target: targetHash}
diff --git a/p2p/discv5/database.go b/p2p/discv5/database.go
index 7c47c27fd..44be8a74e 100644
--- a/p2p/discv5/database.go
+++ b/p2p/discv5/database.go
@@ -269,7 +269,7 @@ func (db *nodeDB) expireNodes() error {
continue
}
// Skip the node if not expired yet (and not self)
- if bytes.Compare(id[:], db.self[:]) != 0 {
+ if !bytes.Equal(id[:], db.self[:]) {
if seen := db.lastPong(id); seen.After(threshold) {
continue
}
diff --git a/p2p/discv5/database_test.go b/p2p/discv5/database_test.go
index 4d3330ed2..a2ccb6467 100644
--- a/p2p/discv5/database_test.go
+++ b/p2p/discv5/database_test.go
@@ -242,12 +242,12 @@ func TestNodeDBSeedQuery(t *testing.T) {
if len(seeds) != len(want) {
t.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want))
}
- for id, _ := range have {
+ for id := range have {
if _, ok := want[id]; !ok {
t.Errorf("extra seed: %v", id)
}
}
- for id, _ := range want {
+ for id := range want {
if _, ok := have[id]; !ok {
t.Errorf("missing seed: %v", id)
}
diff --git a/p2p/discv5/net_test.go b/p2p/discv5/net_test.go
index 327457c7c..bd234f5ba 100644
--- a/p2p/discv5/net_test.go
+++ b/p2p/discv5/net_test.go
@@ -69,19 +69,19 @@ var lookupTestnet = &preminedTestnet{
target: MustHexID("166aea4f556532c6d34e8b740e5d314af7e9ac0ca79833bd751d6b665f12dfd38ec563c363b32f02aef4a80b44fd3def94612d497b99cb5f17fd24de454927ec"),
targetSha: common.Hash{0x5c, 0x94, 0x4e, 0xe5, 0x1c, 0x5a, 0xe9, 0xf7, 0x2a, 0x95, 0xec, 0xcb, 0x8a, 0xed, 0x3, 0x74, 0xee, 0xcb, 0x51, 0x19, 0xd7, 0x20, 0xcb, 0xea, 0x68, 0x13, 0xe8, 0xe0, 0xd6, 0xad, 0x92, 0x61},
dists: [257][]NodeID{
- 240: []NodeID{
+ 240: {
MustHexID("2001ad5e3e80c71b952161bc0186731cf5ffe942d24a79230a0555802296238e57ea7a32f5b6f18564eadc1c65389448481f8c9338df0a3dbd18f708cbc2cbcb"),
MustHexID("6ba3f4f57d084b6bf94cc4555b8c657e4a8ac7b7baf23c6874efc21dd1e4f56b7eb2721e07f5242d2f1d8381fc8cae535e860197c69236798ba1ad231b105794"),
},
- 244: []NodeID{
+ 244: {
MustHexID("696ba1f0a9d55c59246f776600542a9e6432490f0cd78f8bb55a196918df2081a9b521c3c3ba48e465a75c10768807717f8f689b0b4adce00e1c75737552a178"),
},
- 246: []NodeID{
+ 246: {
MustHexID("d6d32178bdc38416f46ffb8b3ec9e4cb2cfff8d04dd7e4311a70e403cb62b10be1b447311b60b4f9ee221a8131fc2cbd45b96dd80deba68a949d467241facfa8"),
MustHexID("3ea3d04a43a3dfb5ac11cffc2319248cf41b6279659393c2f55b8a0a5fc9d12581a9d97ef5d8ff9b5abf3321a290e8f63a4f785f450dc8a672aba3ba2ff4fdab"),
MustHexID("2fc897f05ae585553e5c014effd3078f84f37f9333afacffb109f00ca8e7a3373de810a3946be971cbccdfd40249f9fe7f322118ea459ac71acca85a1ef8b7f4"),
},
- 247: []NodeID{
+ 247: {
MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
@@ -93,7 +93,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("4ab0a75941b12892369b4490a1928c8ca52a9ad6d3dffbd1d8c0b907bc200fe74c022d011ec39b64808a39c0ca41f1d3254386c3e7733e7044c44259486461b6"),
MustHexID("d45150a72dc74388773e68e03133a3b5f51447fe91837d566706b3c035ee4b56f160c878c6273394daee7f56cc398985269052f22f75a8057df2fe6172765354"),
},
- 248: []NodeID{
+ 248: {
MustHexID("6aadfce366a189bab08ac84721567483202c86590642ea6d6a14f37ca78d82bdb6509eb7b8b2f6f63c78ae3ae1d8837c89509e41497d719b23ad53dd81574afa"),
MustHexID("a605ecfd6069a4cf4cf7f5840e5bc0ce10d23a3ac59e2aaa70c6afd5637359d2519b4524f56fc2ca180cdbebe54262f720ccaae8c1b28fd553c485675831624d"),
MustHexID("29701451cb9448ca33fc33680b44b840d815be90146eb521641efbffed0859c154e8892d3906eae9934bfacee72cd1d2fa9dd050fd18888eea49da155ab0efd2"),
@@ -111,7 +111,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("b76ea1a6fd6506ef6e3506a4f1f60ed6287fff8114af6141b2ff13e61242331b54082b023cfea5b3083354a4fb3f9eb8be01fb4a518f579e731a5d0707291a6b"),
MustHexID("9b53a37950ca8890ee349b325032d7b672cab7eced178d3060137b24ef6b92a43977922d5bdfb4a3409a2d80128e02f795f9dae6d7d99973ad0e23a2afb8442f"),
},
- 249: []NodeID{
+ 249: {
MustHexID("675ae65567c3c72c50c73bc0fd4f61f202ea5f93346ca57b551de3411ccc614fad61cb9035493af47615311b9d44ee7a161972ee4d77c28fe1ec029d01434e6a"),
MustHexID("8eb81408389da88536ae5800392b16ef5109d7ea132c18e9a82928047ecdb502693f6e4a4cdd18b54296caf561db937185731456c456c98bfe7de0baf0eaa495"),
MustHexID("2adba8b1612a541771cb93a726a38a4b88e97b18eced2593eb7daf82f05a5321ca94a72cc780c306ff21e551a932fc2c6d791e4681907b5ceab7f084c3fa2944"),
@@ -129,7 +129,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("d94193f236105010972f5df1b7818b55846592a0445b9cdc4eaed811b8c4c0f7c27dc8cc9837a4774656d6b34682d6d329d42b6ebb55da1d475c2474dc3dfdf4"),
MustHexID("edd9af6aded4094e9785637c28fccbd3980cbe28e2eb9a411048a23c2ace4bd6b0b7088a7817997b49a3dd05fc6929ca6c7abbb69438dbdabe65e971d2a794b2"),
},
- 250: []NodeID{
+ 250: {
MustHexID("53a5bd1215d4ab709ae8fdc2ced50bba320bced78bd9c5dc92947fb402250c914891786db0978c898c058493f86fc68b1c5de8a5cb36336150ac7a88655b6c39"),
MustHexID("b7f79e3ab59f79262623c9ccefc8f01d682323aee56ffbe295437487e9d5acaf556a9c92e1f1c6a9601f2b9eb6b027ae1aeaebac71d61b9b78e88676efd3e1a3"),
MustHexID("d374bf7e8d7ffff69cc00bebff38ef5bc1dcb0a8d51c1a3d70e61ac6b2e2d6617109254b0ac224354dfbf79009fe4239e09020c483cc60c071e00b9238684f30"),
@@ -147,7 +147,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("5b116f0751526868a909b61a30b0c5282c37df6925cc03ddea556ef0d0602a9595fd6c14d371f8ed7d45d89918a032dcd22be4342a8793d88fdbeb3ca3d75bd7"),
MustHexID("50f3222fb6b82481c7c813b2172e1daea43e2710a443b9c2a57a12bd160dd37e20f87aa968c82ad639af6972185609d47036c0d93b4b7269b74ebd7073221c10"),
},
- 251: []NodeID{
+ 251: {
MustHexID("9b8f702a62d1bee67bedfeb102eca7f37fa1713e310f0d6651cc0c33ea7c5477575289ccd463e5a2574a00a676a1fdce05658ba447bb9d2827f0ba47b947e894"),
MustHexID("b97532eb83054ed054b4abdf413bb30c00e4205545c93521554dbe77faa3cfaa5bd31ef466a107b0b34a71ec97214c0c83919720142cddac93aa7a3e928d4708"),
MustHexID("2f7a5e952bfb67f2f90b8441b5fadc9ee13b1dcde3afeeb3dd64bf937f86663cc5c55d1fa83952b5422763c7df1b7f2794b751c6be316ebc0beb4942e65ab8c1"),
@@ -165,7 +165,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("fcc9a2e1ac3667026ff16192876d1813bb75abdbf39b929a92863012fe8b1d890badea7a0de36274d5c1eb1e8f975785532c50d80fd44b1a4b692f437303393f"),
MustHexID("6d8b3efb461151dd4f6de809b62726f5b89e9b38e9ba1391967f61cde844f7528fecf821b74049207cee5a527096b31f3ad623928cd3ce51d926fa345a6b2951"),
},
- 252: []NodeID{
+ 252: {
MustHexID("f1ae93157cc48c2075dd5868fbf523e79e06caf4b8198f352f6e526680b78ff4227263de92612f7d63472bd09367bb92a636fff16fe46ccf41614f7a72495c2a"),
MustHexID("587f482d111b239c27c0cb89b51dd5d574db8efd8de14a2e6a1400c54d4567e77c65f89c1da52841212080b91604104768350276b6682f2f961cdaf4039581c7"),
MustHexID("e3f88274d35cefdaabdf205afe0e80e936cc982b8e3e47a84ce664c413b29016a4fb4f3a3ebae0a2f79671f8323661ed462bf4390af94c424dc8ace0c301b90f"),
@@ -183,7 +183,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("d84f06fe64debc4cd0625e36d19b99014b6218375262cc2209202bdbafd7dffcc4e34ce6398e182e02fd8faeed622c3e175545864902dfd3d1ac57647cddf4c6"),
MustHexID("d0ed87b294f38f1d741eb601020eeec30ac16331d05880fe27868f1e454446de367d7457b41c79e202eaf9525b029e4f1d7e17d85a55f83a557c005c68d7328a"),
},
- 253: []NodeID{
+ 253: {
MustHexID("ad4485e386e3cc7c7310366a7c38fb810b8896c0d52e55944bfd320ca294e7912d6c53c0a0cf85e7ce226e92491d60430e86f8f15cda0161ed71893fb4a9e3a1"),
MustHexID("36d0e7e5b7734f98c6183eeeb8ac5130a85e910a925311a19c4941b1290f945d4fc3996b12ef4966960b6fa0fb29b1604f83a0f81bd5fd6398d2e1a22e46af0c"),
MustHexID("7d307d8acb4a561afa23bdf0bd945d35c90245e26345ec3a1f9f7df354222a7cdcb81339c9ed6744526c27a1a0c8d10857e98df942fa433602facac71ac68a31"),
@@ -201,7 +201,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("7a369b2b8962cc4c65900be046482fbf7c14f98a135bbbae25152c82ad168fb2097b3d1429197cf46d3ce9fdeb64808f908a489cc6019725db040060fdfe5405"),
MustHexID("47bcae48288da5ecc7f5058dfa07cf14d89d06d6e449cb946e237aa6652ea050d9f5a24a65efdc0013ccf232bf88670979eddef249b054f63f38da9d7796dbd8"),
},
- 254: []NodeID{
+ 254: {
MustHexID("099739d7abc8abd38ecc7a816c521a1168a4dbd359fa7212a5123ab583ffa1cf485a5fed219575d6475dbcdd541638b2d3631a6c7fce7474e7fe3cba1d4d5853"),
MustHexID("c2b01603b088a7182d0cf7ef29fb2b04c70acb320fccf78526bf9472e10c74ee70b3fcfa6f4b11d167bd7d3bc4d936b660f2c9bff934793d97cb21750e7c3d31"),
MustHexID("20e4d8f45f2f863e94b45548c1ef22a11f7d36f263e4f8623761e05a64c4572379b000a52211751e2561b0f14f4fc92dd4130410c8ccc71eb4f0e95a700d4ca9"),
@@ -219,7 +219,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("82504b6eb49bb2c0f91a7006ce9cefdbaf6df38706198502c2e06601091fc9dc91e4f15db3410d45c6af355bc270b0f268d3dff560f956985c7332d4b10bd1ed"),
MustHexID("b39b5b677b45944ceebe76e76d1f051de2f2a0ec7b0d650da52135743e66a9a5dba45f638258f9a7545d9a790c7fe6d3fdf82c25425c7887323e45d27d06c057"),
},
- 255: []NodeID{
+ 255: {
MustHexID("5c4d58d46e055dd1f093f81ee60a675e1f02f54da6206720adee4dccef9b67a31efc5c2a2949c31a04ee31beadc79aba10da31440a1f9ff2a24093c63c36d784"),
MustHexID("ea72161ffdd4b1e124c7b93b0684805f4c4b58d617ed498b37a145c670dbc2e04976f8785583d9c805ffbf343c31d492d79f841652bbbd01b61ed85640b23495"),
MustHexID("51caa1d93352d47a8e531692a3612adac1e8ac68d0a200d086c1c57ae1e1a91aa285ab242e8c52ef9d7afe374c9485b122ae815f1707b875569d0433c1c3ce85"),
@@ -237,7 +237,7 @@ var lookupTestnet = &preminedTestnet{
MustHexID("f492c6ee2696d5f682f7f537757e52744c2ae560f1090a07024609e903d334e9e174fc01609c5a229ddbcac36c9d21adaf6457dab38a25bfd44f2f0ee4277998"),
MustHexID("459e4db99298cb0467a90acee6888b08bb857450deac11015cced5104853be5adce5b69c740968bc7f931495d671a70cad9f48546d7cd203357fe9af0e8d2164"),
},
- 256: []NodeID{
+ 256: {
MustHexID("a8593af8a4aef7b806b5197612017951bac8845a1917ca9a6a15dd6086d608505144990b245785c4cd2d67a295701c7aac2aa18823fb0033987284b019656268"),
MustHexID("d2eebef914928c3aad77fc1b2a495f52d2294acf5edaa7d8a530b540f094b861a68fe8348a46a7c302f08ab609d85912a4968eacfea0740847b29421b4795d9e"),
MustHexID("b14bfcb31495f32b650b63cf7d08492e3e29071fdc73cf2da0da48d4b191a70ba1a65f42ad8c343206101f00f8a48e8db4b08bf3f622c0853e7323b250835b91"),
diff --git a/p2p/discv5/node.go b/p2p/discv5/node.go
index b2025ebcb..cfc833ff5 100644
--- a/p2p/discv5/node.go
+++ b/p2p/discv5/node.go
@@ -262,11 +262,8 @@ func (n NodeID) GoString() string {
// HexID converts a hex string to a NodeID.
// The string may be prefixed with 0x.
func HexID(in string) (NodeID, error) {
- if strings.HasPrefix(in, "0x") {
- in = in[2:]
- }
var id NodeID
- b, err := hex.DecodeString(in)
+ b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
if err != nil {
return id, err
} else if len(b) != len(id) {
diff --git a/p2p/discv5/sim_test.go b/p2p/discv5/sim_test.go
index cb64d7fa0..3f7fe7463 100644
--- a/p2p/discv5/sim_test.go
+++ b/p2p/discv5/sim_test.go
@@ -74,7 +74,7 @@ func TestSimTopics(t *testing.T) {
go func() {
nets := make([]*Network, 1024)
- for i, _ := range nets {
+ for i := range nets {
net := sim.launchNode(false)
nets[i] = net
if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil {
@@ -147,7 +147,7 @@ func TestSimTopics(t *testing.T) {
func testHierarchicalTopics(i int) []Topic {
digits := strconv.FormatInt(int64(128+i/8), 2)
res := make([]Topic, 8)
- for i, _ := range res {
+ for i := range res {
res[i] = Topic("foo" + digits[1:i+1])
}
return res
@@ -167,7 +167,7 @@ func TestSimTopicHierarchy(t *testing.T) {
go func() {
nets := make([]*Network, 1024)
- for i, _ := range nets {
+ for i := range nets {
net := sim.launchNode(false)
nets[i] = net
if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil {
diff --git a/p2p/discv5/ticket.go b/p2p/discv5/ticket.go
index 752fdc9b4..48dd114f0 100644
--- a/p2p/discv5/ticket.go
+++ b/p2p/discv5/ticket.go
@@ -831,7 +831,7 @@ func (r *topicRadius) recalcRadius() (radius uint64, radiusLookup int) {
maxValue := float64(0)
now := mclock.Now()
v := float64(0)
- for i, _ := range r.buckets {
+ for i := range r.buckets {
r.buckets[i].update(now)
v += r.buckets[i].weights[trOutside] - r.buckets[i].weights[trInside]
r.buckets[i].value = v
diff --git a/p2p/discv5/topic.go b/p2p/discv5/topic.go
index 625921e84..b6bea013c 100644
--- a/p2p/discv5/topic.go
+++ b/p2p/discv5/topic.go
@@ -316,7 +316,7 @@ func (t *topicTable) collectGarbage() {
t.checkDeleteNode(node)
}
- for topic, _ := range t.topics {
+ for topic := range t.topics {
t.checkDeleteTopic(topic)
}
}
diff --git a/p2p/discv5/udp_test.go b/p2p/discv5/udp_test.go
index 98c737669..7d3181594 100644
--- a/p2p/discv5/udp_test.go
+++ b/p2p/discv5/udp_test.go
@@ -126,7 +126,7 @@ var (
// defer test.table.Close()
//
// // put a few nodes into the table. their exact
-// // distribution shouldn't matter much, altough we need to
+// // distribution shouldn't matter much, although we need to
// // take care not to overflow any bucket.
// targetHash := crypto.Keccak256Hash(testTarget[:])
// nodes := &nodesByDistance{target: targetHash}
diff --git a/p2p/nat/natpmp.go b/p2p/nat/natpmp.go
index c2f940891..577a424fb 100644
--- a/p2p/nat/natpmp.go
+++ b/p2p/nat/natpmp.go
@@ -82,7 +82,7 @@ func discoverPMP() Interface {
// any responses after a very short timeout.
timeout := time.NewTimer(1 * time.Second)
defer timeout.Stop()
- for _ = range gws {
+ for range gws {
select {
case c := <-found:
if c != nil {
diff --git a/p2p/peer_test.go b/p2p/peer_test.go
index 6f96a823b..f44300b15 100644
--- a/p2p/peer_test.go
+++ b/p2p/peer_test.go
@@ -299,7 +299,7 @@ func TestMatchProtocols(t *testing.T) {
}
}
// Make sure no protocols missed negotiation
- for name, _ := range tt.Match {
+ for name := range tt.Match {
if _, ok := result[name]; !ok {
t.Errorf("test %d, proto '%s': not negotiated, should have", i, name)
continue
diff --git a/p2p/server.go b/p2p/server.go
index cf9672e2d..298148d3e 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -54,8 +54,6 @@ const (
var errServerStopped = errors.New("server stopped")
-var srvjslog = logger.NewJsonLogger()
-
// Config holds Server options.
type Config struct {
// This field must be set to a valid secp256k1 private key.
@@ -737,12 +735,6 @@ func (srv *Server) checkpoint(c *conn, stage chan<- *conn) error {
// the peer.
func (srv *Server) runPeer(p *Peer) {
glog.V(logger.Debug).Infof("Added %v\n", p)
- srvjslog.LogJson(&logger.P2PConnected{
- RemoteId: p.ID().String(),
- RemoteAddress: p.RemoteAddr().String(),
- RemoteVersionString: p.Name(),
- NumConnections: srv.PeerCount(),
- })
if srv.newPeerHook != nil {
srv.newPeerHook(p)
@@ -753,10 +745,6 @@ func (srv *Server) runPeer(p *Peer) {
srv.delpeer <- p
glog.V(logger.Debug).Infof("Removed %v (%v)\n", p, discreason)
- srvjslog.LogJson(&logger.P2PDisconnected{
- RemoteId: p.ID().String(),
- NumConnections: srv.PeerCount(),
- })
}
// NodeInfo represents a short summary of the information known about the host.
diff --git a/params/bootnodes.go b/params/bootnodes.go
index 830b309d6..13414cb95 100644
--- a/params/bootnodes.go
+++ b/params/bootnodes.go
@@ -16,37 +16,30 @@
package params
-import (
- "github.com/ethereum/go-ethereum/p2p/discover"
- "github.com/ethereum/go-ethereum/p2p/discv5"
-)
-
// MainnetBootnodes are the enode URLs of the P2P bootstrap nodes running on
// the main Ethereum network.
-var MainnetBootnodes = []*discover.Node{
+var MainnetBootnodes = []string{
// ETH/DEV Go Bootnodes
- discover.MustParseNode("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"), // IE
- discover.MustParseNode("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"), // BR
- discover.MustParseNode("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"), // SG
+ "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", // IE
+ "enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303", // BR
+ "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", // SG
// ETH/DEV Cpp Bootnodes
- discover.MustParseNode("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303"),
+ "enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303",
}
// TestnetBootnodes are the enode URLs of the P2P bootstrap nodes running on the
// Morden test network.
-var TestnetBootnodes = []*discover.Node{
+var TestnetBootnodes = []string{
// ETH/DEV Go Bootnodes
- discover.MustParseNode("enode://e4533109cc9bd7604e4ff6c095f7a1d807e15b38e9bfeb05d3b7c423ba86af0a9e89abbf40bd9dde4250fef114cd09270fa4e224cbeef8b7bf05a51e8260d6b8@94.242.229.4:40404"),
- discover.MustParseNode("enode://8c336ee6f03e99613ad21274f269479bf4413fb294d697ef15ab897598afb931f56beb8e97af530aee20ce2bcba5776f4a312bc168545de4d43736992c814592@94.242.229.203:30303"),
-
- // ETH/DEV Cpp Bootnodes
+ "enode://e4533109cc9bd7604e4ff6c095f7a1d807e15b38e9bfeb05d3b7c423ba86af0a9e89abbf40bd9dde4250fef114cd09270fa4e224cbeef8b7bf05a51e8260d6b8@94.242.229.4:40404",
+ "enode://8c336ee6f03e99613ad21274f269479bf4413fb294d697ef15ab897598afb931f56beb8e97af530aee20ce2bcba5776f4a312bc168545de4d43736992c814592@94.242.229.203:30303",
}
// DiscoveryV5Bootnodes are the enode URLs of the P2P bootstrap nodes for the
// experimental RLPx v5 topic-discovery network.
-var DiscoveryV5Bootnodes = []*discv5.Node{
- discv5.MustParseNode("enode://0cc5f5ffb5d9098c8b8c62325f3797f56509bff942704687b6530992ac706e2cb946b90a34f1f19548cd3c7baccbcaea354531e5983c7d1bc0dee16ce4b6440b@40.118.3.223:30305"),
- discv5.MustParseNode("enode://1c7a64d76c0334b0418c004af2f67c50e36a3be60b5e4790bdac0439d21603469a85fad36f2473c9a80eb043ae60936df905fa28f1ff614c3e5dc34f15dcd2dc@40.118.3.223:30308"),
- discv5.MustParseNode("enode://85c85d7143ae8bb96924f2b54f1b3e70d8c4d367af305325d30a61385a432f247d2c75c45c6b4a60335060d072d7f5b35dd1d4c45f76941f62a4f83b6e75daaf@40.118.3.223:30309"),
+var DiscoveryV5Bootnodes = []string{
+ "enode://0cc5f5ffb5d9098c8b8c62325f3797f56509bff942704687b6530992ac706e2cb946b90a34f1f19548cd3c7baccbcaea354531e5983c7d1bc0dee16ce4b6440b@40.118.3.223:30305",
+ "enode://1c7a64d76c0334b0418c004af2f67c50e36a3be60b5e4790bdac0439d21603469a85fad36f2473c9a80eb043ae60936df905fa28f1ff614c3e5dc34f15dcd2dc@40.118.3.223:30308",
+ "enode://85c85d7143ae8bb96924f2b54f1b3e70d8c4d367af305325d30a61385a432f247d2c75c45c6b4a60335060d072d7f5b35dd1d4c45f76941f62a4f83b6e75daaf@40.118.3.223:30309",
}
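
With the bootnode lists reduced to plain enode URL strings, params no longer imports the discovery packages; callers parse the URLs only where a node object is actually needed. A caller-side sketch, assuming this revision's go-ethereum packages and the MustParseNode helpers seen elsewhere in the diff:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	nodes := make([]*discover.Node, 0, len(params.MainnetBootnodes))
	for _, url := range params.MainnetBootnodes {
		// MustParseNode panics on a malformed URL, which is acceptable for
		// compiled-in constants like these.
		nodes = append(nodes, discover.MustParseNode(url))
	}
	fmt.Println(len(nodes), "mainnet bootnodes parsed")
}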
diff --git a/params/protocol_params.go b/params/protocol_params.go
index e98925c2b..f5b6bedeb 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -58,7 +58,7 @@ var (
Ripemd160WordGas = big.NewInt(120) //
MinimumDifficulty = big.NewInt(131072) // The minimum that the difficulty may ever be.
CallCreateDepth = big.NewInt(1024) // Maximum depth of call/create stack.
- ExpGas = big.NewInt(10) // Once per EXP instuction.
+ ExpGas = big.NewInt(10) // Once per EXP instruction.
LogGas = big.NewInt(375) // Per LOG* operation.
CopyGas = big.NewInt(3) //
StackLimit = big.NewInt(1024) // Maximum size of VM stack allowed.
diff --git a/pow/dagger/dagger.go b/pow/dagger/dagger.go
deleted file mode 100644
index f54ba71ca..000000000
--- a/pow/dagger/dagger.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package dagger
-
-import (
- "hash"
- "math/big"
- "math/rand"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto/sha3"
- "github.com/ethereum/go-ethereum/logger"
-)
-
-var powlogger = logger.NewLogger("POW")
-
-type Dagger struct {
- hash *big.Int
- xn *big.Int
-}
-
-var Found bool
-
-func (dag *Dagger) Find(obj *big.Int, resChan chan int64) {
- r := rand.New(rand.NewSource(time.Now().UnixNano()))
-
- for i := 0; i < 1000; i++ {
- rnd := r.Int63()
-
- res := dag.Eval(big.NewInt(rnd))
- powlogger.Infof("rnd %v\nres %v\nobj %v\n", rnd, res, obj)
- if res.Cmp(obj) < 0 {
- // Post back result on the channel
- resChan <- rnd
- // Notify other threads we've found a valid nonce
- Found = true
- }
-
- // Break out if found
- if Found {
- break
- }
- }
-
- resChan <- 0
-}
-
-func (dag *Dagger) Search(hash, diff *big.Int) (uint64, []byte) {
- // TODO fix multi threading. Somehow it results in the wrong nonce
- amountOfRoutines := 1
-
- dag.hash = hash
-
- obj := common.BigPow(2, 256)
- obj = obj.Div(obj, diff)
-
- Found = false
- resChan := make(chan int64, 3)
- var res int64
-
- for k := 0; k < amountOfRoutines; k++ {
- go dag.Find(obj, resChan)
-
- // Wait for each go routine to finish
- }
- for k := 0; k < amountOfRoutines; k++ {
- // Get the result from the channel. 0 = quit
- if r := <-resChan; r != 0 {
- res = r
- }
- }
-
- return uint64(res), nil
-}
-
-func (dag *Dagger) Verify(hash, diff, nonce *big.Int) bool {
- dag.hash = hash
-
- obj := common.BigPow(2, 256)
- obj = obj.Div(obj, diff)
-
- return dag.Eval(nonce).Cmp(obj) < 0
-}
-
-func DaggerVerify(hash, diff, nonce *big.Int) bool {
- dagger := &Dagger{}
- dagger.hash = hash
-
- obj := common.BigPow(2, 256)
- obj = obj.Div(obj, diff)
-
- return dagger.Eval(nonce).Cmp(obj) < 0
-}
-
-func (dag *Dagger) Node(L uint64, i uint64) *big.Int {
- if L == i {
- return dag.hash
- }
-
- var m *big.Int
- if L == 9 {
- m = big.NewInt(16)
- } else {
- m = big.NewInt(3)
- }
-
- sha := sha3.NewKeccak256()
- sha.Reset()
- d := sha3.NewKeccak256()
- b := new(big.Int)
- ret := new(big.Int)
-
- for k := 0; k < int(m.Uint64()); k++ {
- d.Reset()
- d.Write(dag.hash.Bytes())
- d.Write(dag.xn.Bytes())
- d.Write(big.NewInt(int64(L)).Bytes())
- d.Write(big.NewInt(int64(i)).Bytes())
- d.Write(big.NewInt(int64(k)).Bytes())
-
- b.SetBytes(Sum(d))
- pk := b.Uint64() & ((1 << ((L - 1) * 3)) - 1)
- sha.Write(dag.Node(L-1, pk).Bytes())
- }
-
- ret.SetBytes(Sum(sha))
-
- return ret
-}
-
-func Sum(sha hash.Hash) []byte {
- //in := make([]byte, 32)
- return sha.Sum(nil)
-}
-
-func (dag *Dagger) Eval(N *big.Int) *big.Int {
- pow := common.BigPow(2, 26)
- dag.xn = pow.Div(N, pow)
-
- sha := sha3.NewKeccak256()
- sha.Reset()
- ret := new(big.Int)
-
- for k := 0; k < 4; k++ {
- d := sha3.NewKeccak256()
- b := new(big.Int)
-
- d.Reset()
- d.Write(dag.hash.Bytes())
- d.Write(dag.xn.Bytes())
- d.Write(N.Bytes())
- d.Write(big.NewInt(int64(k)).Bytes())
-
- b.SetBytes(Sum(d))
- pk := (b.Uint64() & 0x1ffffff)
-
- sha.Write(dag.Node(9, pk).Bytes())
- }
-
- return ret.SetBytes(Sum(sha))
-}
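
Both Verify and DaggerVerify in the deleted file compare Eval(nonce) against the target 2^256 / difficulty. A standalone sketch of that threshold check using only math/big; the Eval result is a stand-in value here, since the DAG evaluation itself is what this file removed:

    package main

    import (
    	"fmt"
    	"math/big"
    )

    // powTarget returns 2^256 / diff, the threshold a proof-of-work result must stay below.
    func powTarget(diff *big.Int) *big.Int {
    	limit := new(big.Int).Lsh(big.NewInt(1), 256) // 2^256
    	return limit.Div(limit, diff)
    }

    func main() {
    	diff := big.NewInt(1 << 20)
    	res := new(big.Int).Lsh(big.NewInt(1), 230) // stand-in for dag.Eval(nonce)
    	fmt.Println(res.Cmp(powTarget(diff)) < 0)   // true: result is below the target, so the nonce is valid
    }
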
diff --git a/pow/dagger/dagger_test.go b/pow/dagger/dagger_test.go
deleted file mode 100644
index 39b74df30..000000000
--- a/pow/dagger/dagger_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package dagger
-
-import (
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
-)
-
-func BenchmarkDaggerSearch(b *testing.B) {
- hash := big.NewInt(0)
- diff := common.BigPow(2, 36)
- o := big.NewInt(0) // nonce doesn't matter. We're only testing against speed, not validity
-
- // Reset timer so the big generation isn't included in the benchmark
- b.ResetTimer()
- // Validate
- DaggerVerify(hash, diff, o)
-}
diff --git a/pow/ezp/pow.go b/pow/ezp/pow.go
deleted file mode 100644
index 0f7ee3570..000000000
--- a/pow/ezp/pow.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package ezp
-
-import (
- "encoding/binary"
- "math/big"
- "math/rand"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto/sha3"
- "github.com/ethereum/go-ethereum/logger"
- "github.com/ethereum/go-ethereum/pow"
-)
-
-var powlogger = logger.NewLogger("POW")
-
-type EasyPow struct {
- hash *big.Int
- HashRate int64
- turbo bool
-}
-
-func New() *EasyPow {
- return &EasyPow{turbo: false}
-}
-
-func (pow *EasyPow) GetHashrate() int64 {
- return pow.HashRate
-}
-
-func (pow *EasyPow) Turbo(on bool) {
- pow.turbo = on
-}
-
-func (pow *EasyPow) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) {
- r := rand.New(rand.NewSource(time.Now().UnixNano()))
- hash := block.HashNoNonce()
- diff := block.Difficulty()
- //i := int64(0)
- // TODO fix offset
- i := rand.Int63()
- starti := i
- start := time.Now().UnixNano()
-
- defer func() { pow.HashRate = 0 }()
-
- // Make sure stop is empty
-empty:
- for {
- select {
- case <-stop:
- default:
- break empty
- }
- }
-
- for {
- select {
- case <-stop:
- return 0, nil
- default:
- i++
-
- elapsed := time.Now().UnixNano() - start
- hashes := ((float64(1e9) / float64(elapsed)) * float64(i-starti)) / 1000
- pow.HashRate = int64(hashes)
-
- sha := uint64(r.Int63())
- if verify(hash, diff, sha) {
- return sha, nil
- }
- }
-
- if !pow.turbo {
- time.Sleep(20 * time.Microsecond)
- }
- }
-}
-
-func (pow *EasyPow) Verify(block pow.Block) bool {
- return Verify(block)
-}
-
-func verify(hash common.Hash, diff *big.Int, nonce uint64) bool {
- sha := sha3.NewKeccak256()
- n := make([]byte, 8)
- binary.PutUvarint(n, nonce)
- sha.Write(n)
- sha.Write(hash[:])
- verification := new(big.Int).Div(common.BigPow(2, 256), diff)
- res := common.BigD(sha.Sum(nil))
- return res.Cmp(verification) <= 0
-}
-
-func Verify(block pow.Block) bool {
- return verify(block.HashNoNonce(), block.Difficulty(), block.Nonce())
-}
diff --git a/rpc/json.go b/rpc/json.go
index ac5a4acd3..61a4ddf43 100644
--- a/rpc/json.go
+++ b/rpc/json.go
@@ -166,7 +166,7 @@ func parseRequest(incomingMsg json.RawMessage) ([]rpcRequest, bool, Error) {
// subscribe are special, they will always use `subscribeMethod` as first param in the payload
if in.Method == subscribeMethod {
- reqs := []rpcRequest{rpcRequest{id: &in.Id, isPubSub: true}}
+ reqs := []rpcRequest{{id: &in.Id, isPubSub: true}}
if len(in.Payload) > 0 {
// first param must be subscription name
var subscribeMethod [1]string
@@ -184,7 +184,7 @@ func parseRequest(incomingMsg json.RawMessage) ([]rpcRequest, bool, Error) {
}
if in.Method == unsubscribeMethod {
- return []rpcRequest{rpcRequest{id: &in.Id, isPubSub: true,
+ return []rpcRequest{{id: &in.Id, isPubSub: true,
method: unsubscribeMethod, params: in.Payload}}, false, nil
}
@@ -195,10 +195,10 @@ func parseRequest(incomingMsg json.RawMessage) ([]rpcRequest, bool, Error) {
// regular RPC call
if len(in.Payload) == 0 {
- return []rpcRequest{rpcRequest{service: elems[0], method: elems[1], id: &in.Id}}, false, nil
+ return []rpcRequest{{service: elems[0], method: elems[1], id: &in.Id}}, false, nil
}
- return []rpcRequest{rpcRequest{service: elems[0], method: elems[1], id: &in.Id, params: in.Payload}}, false, nil
+ return []rpcRequest{{service: elems[0], method: elems[1], id: &in.Id, params: in.Payload}}, false, nil
}
// parseBatchRequest will parse a batch request into a collection of requests from the given RawMessage, an indication
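
The rpc/json.go hunks above apply the gofmt -s style simplification: when a slice literal's element type is already written out, repeating it inside every element is redundant. A small illustration with a hypothetical struct (rpcRequest itself is unexported):

    package main

    import "fmt"

    type request struct {
    	service, method string
    }

    func main() {
    	// Verbose form: the element type is repeated for every element.
    	verbose := []request{request{service: "eth", method: "getBalance"}}
    	// Simplified form preferred by gofmt -s and go vet's composites check.
    	short := []request{{service: "eth", method: "getBalance"}}
    	fmt.Println(verbose[0] == short[0]) // true: both forms build the same value
    }
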
diff --git a/rpc/subscription.go b/rpc/subscription.go
index 863d34b20..bcdc3cdfc 100644
--- a/rpc/subscription.go
+++ b/rpc/subscription.go
@@ -30,7 +30,7 @@ var (
ErrSubscriptionNotFound = errors.New("subscription not found")
)
-// ID defines a psuedo random number that is used to identify RPC subscriptions.
+// ID defines a pseudo random number that is used to identify RPC subscriptions.
type ID string
// a Subscription is created by a notifier and tied to that notifier. The client can use
diff --git a/rpc/subscription_test.go b/rpc/subscription_test.go
index 8bb341694..97f2c0d65 100644
--- a/rpc/subscription_test.go
+++ b/rpc/subscription_test.go
@@ -62,7 +62,7 @@ func (s *NotificationTestService) SomeSubscription(ctx context.Context, n, val i
subscription := notifier.CreateSubscription()
go func() {
- // test expects n events, if we begin sending event immediatly some events
+ // test expects n events, if we begin sending event immediately some events
// will probably be dropped since the subscription ID might not be sent to
// the client.
time.Sleep(5 * time.Second)
diff --git a/rpc/types.go b/rpc/types.go
index 01b95a170..d8d736efb 100644
--- a/rpc/types.go
+++ b/rpc/types.go
@@ -135,7 +135,7 @@ const (
LatestBlockNumber = BlockNumber(-1)
)
-// UnmarshalJSON parses the given JSON fragement into a BlockNumber. It supports:
+// UnmarshalJSON parses the given JSON fragment into a BlockNumber. It supports:
// - "latest", "earliest" or "pending" as string arguments
// - the block number
// Returned errors:
diff --git a/swarm/network/protocol.go b/swarm/network/protocol.go
index 4fffaac6d..763fb0b8e 100644
--- a/swarm/network/protocol.go
+++ b/swarm/network/protocol.go
@@ -538,13 +538,6 @@ func (self *bzz) protoError(code int, format string, params ...interface{}) (err
return
}
-func (self *bzz) protoErrorDisconnect(err *errs.Error) {
- err.Log(glog.V(logger.Info))
- if err.Fatal() {
- self.peer.Disconnect(p2p.DiscSubprotocolError)
- }
-}
-
func (self *bzz) send(msg uint64, data interface{}) error {
if self.hive.blockWrite {
return fmt.Errorf("network write blocked")
diff --git a/swarm/storage/dbstore.go b/swarm/storage/dbstore.go
index 5ecc5c500..4ddebb021 100644
--- a/swarm/storage/dbstore.go
+++ b/swarm/storage/dbstore.go
@@ -354,7 +354,7 @@ func (s *DbStore) Get(key Key) (chunk *Chunk, err error) {
hasher := s.hashfunc()
hasher.Write(data)
hash := hasher.Sum(nil)
- if bytes.Compare(hash, key) != 0 {
+ if !bytes.Equal(hash, key) {
s.db.Delete(getDataKey(index.Idx))
err = fmt.Errorf("invalid chunk. hash=%x, key=%v", hash, key[:])
return
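
This hunk, and many like it below, replaces bytes.Compare(a, b) != 0 with !bytes.Equal(a, b). Both come from the standard library and agree on equality, but Equal states the intent directly instead of deriving it from an ordering. A quick sketch:

    package main

    import (
    	"bytes"
    	"fmt"
    )

    func main() {
    	hash := []byte{0xde, 0xad}
    	key := []byte{0xde, 0xad}
    	fmt.Println(bytes.Compare(hash, key) != 0) // false: old spelling of "not equal"
    	fmt.Println(!bytes.Equal(hash, key))       // false: same check, clearer intent
    }
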
diff --git a/swarm/storage/dpa_test.go b/swarm/storage/dpa_test.go
index 1cde1c00e..a68232407 100644
--- a/swarm/storage/dpa_test.go
+++ b/swarm/storage/dpa_test.go
@@ -67,7 +67,7 @@ func TestDPArandom(t *testing.T) {
ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666)
localStore.memStore = NewMemStore(dbStore, defaultCacheCapacity)
resultReader = dpa.Retrieve(key)
- for i, _ := range resultSlice {
+ for i := range resultSlice {
resultSlice[i] = 0
}
n, err = resultReader.ReadAt(resultSlice, 0)
@@ -128,7 +128,7 @@ func TestDPA_capacity(t *testing.T) {
dpa.ChunkStore = localStore
// localStore.dbStore.setCapacity(0)
resultReader = dpa.Retrieve(key)
- for i, _ := range resultSlice {
+ for i := range resultSlice {
resultSlice[i] = 0
}
n, err = resultReader.ReadAt(resultSlice, 0)
diff --git a/swarm/storage/netstore.go b/swarm/storage/netstore.go
index 334229aed..f97862bbb 100644
--- a/swarm/storage/netstore.go
+++ b/swarm/storage/netstore.go
@@ -99,7 +99,7 @@ func (self *NetStore) Put(entry *Chunk) {
// handle deliveries
if entry.Req != nil {
glog.V(logger.Detail).Infof("NetStore.Put: localStore.Put %v hit existing request...delivering", entry.Key.Log())
- // closing C singals to other routines (local requests)
+ // closing C signals to other routines (local requests)
// that the chunk has been retrieved
close(entry.Req.C)
// deliver the chunk to requesters upstream
diff --git a/swarm/storage/types.go b/swarm/storage/types.go
index 0dcbc0100..f3ab99c6c 100644
--- a/swarm/storage/types.go
+++ b/swarm/storage/types.go
@@ -41,7 +41,7 @@ func (x Key) Size() uint {
}
func (x Key) isEqual(y Key) bool {
- return bytes.Compare(x, y) == 0
+ return bytes.Equal(x, y)
}
func (h Key) bits(i, j uint) uint {
@@ -177,7 +177,7 @@ It relies on the underlying chunking model.
When calling Split, the caller provides a channel (chan *Chunk) on which it receives chunks to store. The DPA delegates to storage layers (implementing ChunkStore interface).
Split returns an error channel, which the caller can monitor.
-After getting notified that all the data has been split (the error channel is closed), the caller can safely read or save the root key. Optionally it times out if not all chunks get stored or not the entire stream of data has been processed. By inspecting the errc channel the caller can check if any explicit errors (typically IO read/write failures) occured during splitting.
+After getting notified that all the data has been split (the error channel is closed), the caller can safely read or save the root key. Optionally it times out if not all chunks get stored or not the entire stream of data has been processed. By inspecting the errc channel the caller can check if any explicit errors (typically IO read/write failures) occurred during splitting.
When calling Join with a root key, the caller is returned a seekable lazy reader. The caller again provides a channel on which it receives placeholder chunks with missing data. The DPA is supposed to forward these to the chunk stores and notify the chunker once the data has been delivered (i.e. retrieved from memory cache, disk-persisted db or cloud based swarm delivery). As the seekable reader is used, the chunker then assembles the relevant parts on demand.
*/
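
The doc comment above describes Split handing chunks to the caller over a chan *Chunk and signalling completion through an error channel. Below is a minimal consumer-loop sketch under those assumptions only; the chunk type, the store hook and the channel shapes are hypothetical stand-ins, not the swarm/storage API:

    package sketch

    // chunk is a hypothetical stand-in for a storage chunk; only the shape matters here.
    type chunk struct {
    	Key  []byte
    	Data []byte
    }

    // drain stores every chunk received on chunkC until errc reports an error or is
    // closed, mirroring the "caller provides a chunk channel, Split returns an error
    // channel" contract described above. store is a hypothetical persistence hook.
    func drain(chunkC <-chan *chunk, errc <-chan error, store func(*chunk) error) error {
    	for {
    		select {
    		case c := <-chunkC:
    			if err := store(c); err != nil {
    				return err
    			}
    		case err, ok := <-errc:
    			if err != nil {
    				return err
    			}
    			if !ok { // closed error channel: splitting finished
    				for { // flush any chunks still buffered on chunkC
    					select {
    					case c := <-chunkC:
    						if err := store(c); err != nil {
    							return err
    						}
    					default:
    						return nil
    					}
    				}
    			}
    		}
    	}
    }
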
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index f04329546..ea63c9996 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -552,9 +552,7 @@ func LoadBlockTests(file string) (map[string]*BlockTest, error) {
// Nothing to see here, please move along...
func prepInt(base int, s string) string {
if base == 16 {
- if strings.HasPrefix(s, "0x") {
- s = s[2:]
- }
+ s = strings.TrimPrefix(s, "0x")
if len(s) == 0 {
s = "00"
}
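
strings.TrimPrefix already returns its input unchanged when the prefix is absent, so the HasPrefix guard removed above was redundant. For example:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	fmt.Println(strings.TrimPrefix("0xdeadbeef", "0x")) // "deadbeef"
    	fmt.Println(strings.TrimPrefix("deadbeef", "0x"))   // "deadbeef" (no prefix, returned as-is)
    }
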
diff --git a/tests/files/BlockchainTests/TestNetwork/bcTheDaoTest.json b/tests/files/BlockchainTests/TestNetwork/bcTheDaoTest.json
index fcbcbbe9f..1dfdfb70a 100644
--- a/tests/files/BlockchainTests/TestNetwork/bcTheDaoTest.json
+++ b/tests/files/BlockchainTests/TestNetwork/bcTheDaoTest.json
@@ -1,6 +1,6 @@
{
"DaoTransactions" : {
- "acomment" : "This test checks DAO hardfork transition at block 8. According to specification given list of accounts L from the prestate (except a94f5374fce5edbc8e2a8697c15331677e6ebf0b (caller)) should transfer it's balance at the begining of the block 8 to contract C = bf4ed7b27f1d666546e30d74d50d173d20bca754. Then all blocks from 8 to 17(included) must have extradata set to 0x64616f2d686172642d666f726b otherwise blocks considered incorrect. Additionally all uncles with numbers from 8 to 17(included) in this blocks should corespond to extradata requirenmets.",
+ "acomment" : "This test checks DAO hardfork transition at block 8. According to specification given list of accounts L from the prestate (except a94f5374fce5edbc8e2a8697c15331677e6ebf0b (caller)) should transfer it's balance at the beginning of the block 8 to contract C = bf4ed7b27f1d666546e30d74d50d173d20bca754. Then all blocks from 8 to 17(included) must have extradata set to 0x64616f2d686172642d666f726b otherwise blocks considered incorrect. Additionally all uncles with numbers from 8 to 17(included) in this blocks should corespond to extradata requirenmets.",
"blocks" : [
{
"blockHeader" : {
@@ -2430,7 +2430,7 @@
}
},
"DaoTransactions_EmptyTransactionAndForkBlocksAhead" : {
- "acomment" : "This test checks DAO hardfork transition at block 8. According to specification given list of accounts L from the prestate (except a94f5374fce5edbc8e2a8697c15331677e6ebf0b (caller)) should transfer it's balance at the begining of the block 8 to contract C = bf4ed7b27f1d666546e30d74d50d173d20bca754. Then all blocks from 8 to 17(included) must have extradata set to 0x64616f2d686172642d666f726b otherwise blocks considered incorrect. Additionally all uncles with numbers from 8 to 17(included) in this blocks should corespond to extradata requirenmets.",
+ "acomment" : "This test checks DAO hardfork transition at block 8. According to specification given list of accounts L from the prestate (except a94f5374fce5edbc8e2a8697c15331677e6ebf0b (caller)) should transfer it's balance at the beginning of the block 8 to contract C = bf4ed7b27f1d666546e30d74d50d173d20bca754. Then all blocks from 8 to 17(included) must have extradata set to 0x64616f2d686172642d666f726b otherwise blocks considered incorrect. Additionally all uncles with numbers from 8 to 17(included) in this blocks should corespond to extradata requirenmets.",
"blocks" : [
{
"blockHeader" : {
@@ -4516,7 +4516,7 @@
}
},
"DaoTransactions_UncleExtradata" : {
- "acomment" : "This test checks DAO hardfork transition at block 8. According to specification given list of accounts L from the prestate (except a94f5374fce5edbc8e2a8697c15331677e6ebf0b (caller)) should transfer it's balance at the begining of the block 8 to contract C = bf4ed7b27f1d666546e30d74d50d173d20bca754. Then all blocks from 8 to 17(included) must have extradata set to 0x64616f2d686172642d666f726b otherwise blocks considered incorrect. Additionally all uncles with numbers from 8 to 17(included) in this blocks should corespond to extradata requirenmets.",
+ "acomment" : "This test checks DAO hardfork transition at block 8. According to specification given list of accounts L from the prestate (except a94f5374fce5edbc8e2a8697c15331677e6ebf0b (caller)) should transfer it's balance at the beginning of the block 8 to contract C = bf4ed7b27f1d666546e30d74d50d173d20bca754. Then all blocks from 8 to 17(included) must have extradata set to 0x64616f2d686172642d666f726b otherwise blocks considered incorrect. Additionally all uncles with numbers from 8 to 17(included) in this blocks should corespond to extradata requirenmets.",
"blocks" : [
{
"blockHeader" : {
diff --git a/tests/files/ansible/test-files/docker-cpp/Dockerfile b/tests/files/ansible/test-files/docker-cpp/Dockerfile
index a3b0e4ca6..11c8bf5e7 100644
--- a/tests/files/ansible/test-files/docker-cpp/Dockerfile
+++ b/tests/files/ansible/test-files/docker-cpp/Dockerfile
@@ -10,7 +10,7 @@ RUN apt-get install -qy build-essential g++-4.8 git cmake libboost-all-dev libcu
RUN apt-get install -qy automake unzip libgmp-dev libtool libleveldb-dev yasm libminiupnpc-dev libreadline-dev scons
RUN apt-get install -qy libjsoncpp-dev libargtable2-dev
-# NCurses based GUI (not optional though for a succesful compilation, see https://github.com/ethereum/cpp-ethereum/issues/452 )
+# NCurses based GUI (not optional though for a successful compilation, see https://github.com/ethereum/cpp-ethereum/issues/452 )
RUN apt-get install -qy libncurses5-dev
# Qt-based GUI
diff --git a/tests/files/ansible/test-files/docker-cppjit/Dockerfile b/tests/files/ansible/test-files/docker-cppjit/Dockerfile
index 2b10727f0..6b3712555 100644
--- a/tests/files/ansible/test-files/docker-cppjit/Dockerfile
+++ b/tests/files/ansible/test-files/docker-cppjit/Dockerfile
@@ -10,7 +10,7 @@ RUN apt-get install -qy build-essential g++-4.8 git cmake libboost-all-dev libcu
RUN apt-get install -qy automake unzip libgmp-dev libtool libleveldb-dev yasm libminiupnpc-dev libreadline-dev scons
RUN apt-get install -qy libjsoncpp-dev libargtable2-dev
-# NCurses based GUI (not optional though for a succesful compilation, see https://github.com/ethereum/cpp-ethereum/issues/452 )
+# NCurses based GUI (not optional though for a successful compilation, see https://github.com/ethereum/cpp-ethereum/issues/452 )
RUN apt-get install -qy libncurses5-dev
# Qt-based GUI
diff --git a/tests/init.go b/tests/init.go
index 361be5f62..7b0924bc3 100644
--- a/tests/init.go
+++ b/tests/init.go
@@ -87,11 +87,7 @@ func readJsonHttp(uri string, value interface{}) error {
}
defer resp.Body.Close()
- err = readJson(resp.Body, value)
- if err != nil {
- return err
- }
- return nil
+ return readJson(resp.Body, value)
}
func readJsonFile(fn string, value interface{}) error {
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index f47f5f7a1..7841aecfe 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -28,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params"
@@ -146,7 +146,7 @@ func runStateTest(chainConfig *params.ChainConfig, test VmTest) error {
ret []byte
// gas *big.Int
// err error
- logs vm.Logs
+ logs []*types.Log
)
ret, logs, _, _ = RunState(chainConfig, statedb, env, test.Transaction)
@@ -159,7 +159,7 @@ func runStateTest(chainConfig *params.ChainConfig, test VmTest) error {
} else {
rexp = common.FromHex(test.Out)
}
- if bytes.Compare(rexp, ret) != 0 {
+ if !bytes.Equal(rexp, ret) {
return fmt.Errorf("return failed. Expected %x, got %x\n", rexp, ret)
}
@@ -203,7 +203,7 @@ func runStateTest(chainConfig *params.ChainConfig, test VmTest) error {
return nil
}
-func RunState(chainConfig *params.ChainConfig, statedb *state.StateDB, env, tx map[string]string) ([]byte, vm.Logs, *big.Int, error) {
+func RunState(chainConfig *params.ChainConfig, statedb *state.StateDB, env, tx map[string]string) ([]byte, []*types.Log, *big.Int, error) {
environment, msg := NewEVMEnvironment(false, chainConfig, statedb, env, tx)
gaspool := new(core.GasPool).AddGas(common.Big(env["currentGasLimit"]))
diff --git a/tests/util.go b/tests/util.go
index a0a6ab374..134d5b4f8 100644
--- a/tests/util.go
+++ b/tests/util.go
@@ -47,7 +47,7 @@ func init() {
}
}
-func checkLogs(tlog []Log, logs vm.Logs) error {
+func checkLogs(tlog []Log, logs []*types.Log) error {
if len(tlog) != len(logs) {
return fmt.Errorf("log length mismatch. Expected %d, got %d", len(tlog), len(logs))
@@ -70,7 +70,7 @@ func checkLogs(tlog []Log, logs vm.Logs) error {
}
}
}
- genBloom := common.LeftPadBytes(types.LogsBloom(vm.Logs{logs[i]}).Bytes(), 256)
+ genBloom := common.LeftPadBytes(types.LogsBloom([]*types.Log{logs[i]}).Bytes(), 256)
if !bytes.Equal(genBloom, common.Hex2Bytes(log.BloomF)) {
return fmt.Errorf("bloom mismatch")
diff --git a/tests/vm_test_util.go b/tests/vm_test_util.go
index dc9f1d62c..25e55886f 100644
--- a/tests/vm_test_util.go
+++ b/tests/vm_test_util.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog"
@@ -164,20 +165,20 @@ func runVmTest(test VmTest) error {
ret []byte
gas *big.Int
err error
- logs vm.Logs
+ logs []*types.Log
)
ret, logs, gas, err = RunVm(statedb, env, test.Exec)
// Compare expected and actual return
rexp := common.FromHex(test.Out)
- if bytes.Compare(rexp, ret) != 0 {
+ if !bytes.Equal(rexp, ret) {
return fmt.Errorf("return failed. Expected %x, got %x\n", rexp, ret)
}
// Check gas usage
if len(test.Gas) == 0 && err == nil {
- return fmt.Errorf("gas unspecified, indicating an error. VM returned (incorrectly) successfull")
+ return fmt.Errorf("gas unspecified, indicating an error. VM returned (incorrectly) successful")
} else {
gexp := common.Big(test.Gas)
if gexp.Cmp(gas) != 0 {
@@ -211,7 +212,7 @@ func runVmTest(test VmTest) error {
return nil
}
-func RunVm(statedb *state.StateDB, env, exec map[string]string) ([]byte, vm.Logs, *big.Int, error) {
+func RunVm(statedb *state.StateDB, env, exec map[string]string) ([]byte, []*types.Log, *big.Int, error) {
chainConfig := &params.ChainConfig{
HomesteadBlock: params.MainNetHomesteadBlock,
DAOForkBlock: params.MainNetDAOForkBlock,
diff --git a/trie/encoding.go b/trie/encoding.go
index 761bad188..2037118dd 100644
--- a/trie/encoding.go
+++ b/trie/encoding.go
@@ -80,7 +80,7 @@ func compactHexEncode(nibbles []byte) []byte {
}
l := (nl + 1) / 2
var str = make([]byte, l)
- for i, _ := range str {
+ for i := range str {
b := nibbles[i*2] * 16
if nl > i*2 {
b += nibbles[i*2+1]
diff --git a/trie/hasher.go b/trie/hasher.go
index e6261819c..98c309531 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -52,7 +52,7 @@ func returnHasherToPool(h *hasher) {
// hash collapses a node down into a hash node, also returning a copy of the
// original node initialized with the computed hash to replace the original one.
func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error) {
- // If we're not storing the node, just hashing, use avaialble cached data
+ // If we're not storing the node, just hashing, use available cached data
if hash, dirty := n.cache(); hash != nil {
if db == nil {
return hash, n, nil
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 2bcc3700e..c56ac85be 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -105,7 +105,7 @@ func TestNodeIteratorCoverage(t *testing.T) {
}
}
// Cross check the hashes and the database itself
- for hash, _ := range hashes {
+ for hash := range hashes {
if _, err := db.Get(hash.Bytes()); err != nil {
t.Errorf("failed to retrieve reported node %x: %v", hash, err)
}
diff --git a/trie/sync.go b/trie/sync.go
index 2158ab750..168501392 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -21,7 +21,6 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb"
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)
@@ -58,13 +57,13 @@ type TrieSyncLeafCallback func(leaf []byte, parent common.Hash) error
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type TrieSync struct {
- database ethdb.Database // State database for storing all the assembled node data
+ database DatabaseReader
requests map[common.Hash]*request // Pending requests pertaining to a key hash
queue *prque.Prque // Priority queue with the pending requests
}
// NewTrieSync creates a new trie data download scheduler.
-func NewTrieSync(root common.Hash, database ethdb.Database, callback TrieSyncLeafCallback) *TrieSync {
+func NewTrieSync(root common.Hash, database DatabaseReader, callback TrieSyncLeafCallback) *TrieSync {
ts := &TrieSync{
database: database,
requests: make(map[common.Hash]*request),
@@ -145,7 +144,7 @@ func (s *TrieSync) Missing(max int) []common.Hash {
// Process injects a batch of retrieved trie nodes data, returning if something
// was committed to the database and also the index of an entry if processing of
// it failed.
-func (s *TrieSync) Process(results []SyncResult) (bool, int, error) {
+func (s *TrieSync) Process(results []SyncResult, dbw DatabaseWriter) (bool, int, error) {
committed := false
for i, item := range results {
@@ -157,7 +156,7 @@ func (s *TrieSync) Process(results []SyncResult) (bool, int, error) {
// If the item is a raw entry request, commit directly
if request.raw {
request.data = item.Data
- s.commit(request, nil)
+ s.commit(request, dbw)
committed = true
continue
}
@@ -174,7 +173,7 @@ func (s *TrieSync) Process(results []SyncResult) (bool, int, error) {
return committed, i, err
}
if len(requests) == 0 && request.deps == 0 {
- s.commit(request, nil)
+ s.commit(request, dbw)
committed = true
continue
}
@@ -266,16 +265,9 @@ func (s *TrieSync) children(req *request, object node) ([]*request, error) {
// commit finalizes a retrieval request and stores it into the database. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
-func (s *TrieSync) commit(req *request, batch ethdb.Batch) (err error) {
- // Create a new batch if none was specified
- if batch == nil {
- batch = s.database.NewBatch()
- defer func() {
- err = batch.Write()
- }()
- }
+func (s *TrieSync) commit(req *request, dbw DatabaseWriter) (err error) {
// Write the node content to disk
- if err := batch.Put(req.hash[:], req.data); err != nil {
+ if err := dbw.Put(req.hash[:], req.data); err != nil {
return err
}
delete(s.requests, req.hash)
@@ -284,7 +276,7 @@ func (s *TrieSync) commit(req *request, batch ethdb.Batch) (err error) {
for _, parent := range req.parents {
parent.deps--
if parent.deps == 0 {
- if err := s.commit(parent, batch); err != nil {
+ if err := s.commit(parent, dbw); err != nil {
return err
}
}
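
With this change TrieSync no longer allocates its own batch: the caller hands any DatabaseWriter (Put(key, value []byte) error) to Process and decides when to flush. A rough calling pattern as a sketch; the fetch helper is hypothetical, and the ethdb-style NewBatch/Write usage assumes the batch type from the code removed above:

    package sketch

    import (
    	"fmt"

    	"github.com/ethereum/go-ethereum/common"
    	"github.com/ethereum/go-ethereum/ethdb"
    	"github.com/ethereum/go-ethereum/trie"
    )

    // syncTrie drives a TrieSync to completion, committing each round through an
    // explicit batch. fetch is a hypothetical helper that returns node data for
    // the requested hashes; it is not part of this change.
    func syncTrie(root common.Hash, db ethdb.Database, fetch func([]common.Hash) []trie.SyncResult) error {
    	sched := trie.NewTrieSync(root, db, nil) // db only needs to satisfy trie.DatabaseReader
    	for queue := sched.Missing(128); len(queue) > 0; queue = sched.Missing(128) {
    		batch := db.NewBatch() // an ethdb batch satisfies trie.DatabaseWriter
    		if _, index, err := sched.Process(fetch(queue), batch); err != nil {
    			return fmt.Errorf("failed to process result #%d: %v", index, err)
    		}
    		if err := batch.Write(); err != nil { // the caller now decides when writes are flushed
    			return err
    		}
    	}
    	return nil
    }
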
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 5292fe5cb..4168c4d65 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -67,7 +67,7 @@ func checkTrieContents(t *testing.T, db Database, root []byte, content map[strin
t.Fatalf("inconsistent trie at %x: %v", root, err)
}
for key, val := range content {
- if have := trie.Get([]byte(key)); bytes.Compare(have, val) != 0 {
+ if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
}
}
@@ -122,7 +122,7 @@ func testIterativeTrieSync(t *testing.T, batch int) {
}
results[i] = SyncResult{hash, data}
}
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
queue = append(queue[:0], sched.Missing(batch)...)
@@ -152,7 +152,7 @@ func TestIterativeDelayedTrieSync(t *testing.T) {
}
results[i] = SyncResult{hash, data}
}
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
queue = append(queue[len(results):], sched.Missing(10000)...)
@@ -182,7 +182,7 @@ func testIterativeRandomTrieSync(t *testing.T, batch int) {
for len(queue) > 0 {
// Fetch all the queued nodes in a random order
results := make([]SyncResult, 0, len(queue))
- for hash, _ := range queue {
+ for hash := range queue {
data, err := srcDb.Get(hash.Bytes())
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
@@ -190,7 +190,7 @@ func testIterativeRandomTrieSync(t *testing.T, batch int) {
results = append(results, SyncResult{hash, data})
}
// Feed the retrieved results back and queue new tasks
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
queue = make(map[common.Hash]struct{})
@@ -219,7 +219,7 @@ func TestIterativeRandomDelayedTrieSync(t *testing.T) {
for len(queue) > 0 {
// Sync only half of the scheduled nodes, even those in random order
results := make([]SyncResult, 0, len(queue)/2+1)
- for hash, _ := range queue {
+ for hash := range queue {
data, err := srcDb.Get(hash.Bytes())
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
@@ -231,7 +231,7 @@ func TestIterativeRandomDelayedTrieSync(t *testing.T) {
}
}
// Feed the retrieved results back and queue new tasks
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
for _, result := range results {
@@ -272,7 +272,7 @@ func TestDuplicateAvoidanceTrieSync(t *testing.T) {
results[i] = SyncResult{hash, data}
}
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
queue = append(queue[:0], sched.Missing(0)...)
@@ -304,7 +304,7 @@ func TestIncompleteTrieSync(t *testing.T) {
results[i] = SyncResult{hash, data}
}
// Process each of the trie nodes
- if _, index, err := sched.Process(results); err != nil {
+ if _, index, err := sched.Process(results, dstDb); err != nil {
t.Fatalf("failed to process result #%d: %v", index, err)
}
for _, result := range results {
diff --git a/trie/trie.go b/trie/trie.go
index 035a80e74..cd9e20cac 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -60,8 +60,12 @@ func init() {
// Database must be implemented by backing stores for the trie.
type Database interface {
+ DatabaseReader
DatabaseWriter
- // Get returns the value for key from the database.
+}
+
+// DatabaseReader wraps the Get method of a backing store for the trie.
+type DatabaseReader interface {
Get(key []byte) (value []byte, err error)
}
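
Splitting Database into DatabaseReader and DatabaseWriter means any type with matching Get and Put methods can back the trie. A toy in-memory store satisfying both sides, assuming DatabaseWriter keeps the Put(key, value []byte) error shape used by sync.go above:

    package sketch

    import "fmt"

    // memDB is a toy key/value store. Its Get method satisfies DatabaseReader and
    // its Put method satisfies DatabaseWriter, so either interface can be passed
    // where only reads or only writes are needed.
    type memDB struct {
    	kv map[string][]byte
    }

    func newMemDB() *memDB { return &memDB{kv: make(map[string][]byte)} }

    func (m *memDB) Get(key []byte) ([]byte, error) {
    	v, ok := m.kv[string(key)]
    	if !ok {
    		return nil, fmt.Errorf("not found: %x", key)
    	}
    	return v, nil
    }

    func (m *memDB) Put(key, value []byte) error {
    	m.kv[string(key)] = append([]byte(nil), value...) // copy so the caller may reuse the buffer
    	return nil
    }
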
diff --git a/whisper/shhapi/api.go b/whisper/shhapi/api.go
index 24d54b653..379bb90d3 100644
--- a/whisper/shhapi/api.go
+++ b/whisper/shhapi/api.go
@@ -178,14 +178,10 @@ func (api *PublicWhisperAPI) NewFilter(args WhisperFilterArgs) (uint32, error) {
Messages: make(map[common.Hash]*whisperv5.ReceivedMessage),
AcceptP2P: args.AcceptP2P,
}
-
if len(filter.KeySym) > 0 {
filter.SymKeyHash = crypto.Keccak256Hash(filter.KeySym)
}
-
- for _, t := range args.Topics {
- filter.Topics = append(filter.Topics, t)
- }
+ filter.Topics = append(filter.Topics, args.Topics...)
if len(args.Topics) == 0 {
info := "NewFilter: at least one topic must be specified"
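
Appending one slice to another with the ... spread replaces the element-by-element loop removed above. For example:

    package main

    import "fmt"

    func main() {
    	topics := []string{"existing"}
    	args := []string{"a", "b"}
    	topics = append(topics, args...) // spreads args instead of looping over it
    	fmt.Println(topics)              // [existing a b]
    }
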
diff --git a/whisper/shhapi/api_test.go b/whisper/shhapi/api_test.go
index d2890a9a3..60b6fbd04 100644
--- a/whisper/shhapi/api_test.go
+++ b/whisper/shhapi/api_test.go
@@ -253,7 +253,7 @@ func TestUnmarshalPostArgs(t *testing.T) {
if a.FilterID != 64 {
t.Fatalf("wrong FilterID: %d.", a.FilterID)
}
- if bytes.Compare(a.PeerID[:], a.Topic[:]) != 0 {
+ if !bytes.Equal(a.PeerID[:], a.Topic[:]) {
t.Fatalf("wrong PeerID: %x.", a.PeerID)
}
}
diff --git a/whisper/whisperv2/envelope_test.go b/whisper/whisperv2/envelope_test.go
index 75e2fbe8a..c1b128c61 100644
--- a/whisper/whisperv2/envelope_test.go
+++ b/whisper/whisperv2/envelope_test.go
@@ -40,10 +40,10 @@ func TestEnvelopeOpen(t *testing.T) {
if opened.Flags != message.Flags {
t.Fatalf("flags mismatch: have %d, want %d", opened.Flags, message.Flags)
}
- if bytes.Compare(opened.Signature, message.Signature) != 0 {
+ if !bytes.Equal(opened.Signature, message.Signature) {
t.Fatalf("signature mismatch: have 0x%x, want 0x%x", opened.Signature, message.Signature)
}
- if bytes.Compare(opened.Payload, message.Payload) != 0 {
+ if !bytes.Equal(opened.Payload, message.Payload) {
t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, message.Payload)
}
if opened.Sent.Unix() != message.Sent.Unix() {
@@ -71,7 +71,7 @@ func TestEnvelopeAnonymousOpenUntargeted(t *testing.T) {
if opened.To != nil {
t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To)
}
- if bytes.Compare(opened.Payload, payload) != 0 {
+ if !bytes.Equal(opened.Payload, payload) {
t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload)
}
}
@@ -96,7 +96,7 @@ func TestEnvelopeAnonymousOpenTargeted(t *testing.T) {
if opened.To != nil {
t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To)
}
- if bytes.Compare(opened.Payload, payload) == 0 {
+ if bytes.Equal(opened.Payload, payload) {
t.Fatalf("payload match, should have been encrypted: 0x%x", opened.Payload)
}
}
@@ -127,7 +127,7 @@ func TestEnvelopeIdentifiedOpenUntargeted(t *testing.T) {
if opened.To != nil {
t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To)
}
- if bytes.Compare(opened.Payload, payload) != 0 {
+ if !bytes.Equal(opened.Payload, payload) {
t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload)
}
}
@@ -152,7 +152,7 @@ func TestEnvelopeIdentifiedOpenTargeted(t *testing.T) {
if opened.To != nil {
t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To)
}
- if bytes.Compare(opened.Payload, payload) != 0 {
+ if !bytes.Equal(opened.Payload, payload) {
t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload)
}
}
diff --git a/whisper/whisperv2/filter.go b/whisper/whisperv2/filter.go
index 8ce4a54fb..7404859b7 100644
--- a/whisper/whisperv2/filter.go
+++ b/whisper/whisperv2/filter.go
@@ -116,14 +116,11 @@ func (self filterer) Compare(f filter.Filter) bool {
topics := make([]Topic, len(filter.matcher.conditions))
for i, group := range filter.matcher.conditions {
// Message should contain a single topic entry, extract
- for topics[i], _ = range group {
+ for topics[i] = range group {
break
}
}
- if !self.matcher.Matches(topics) {
- return false
- }
- return true
+ return self.matcher.Matches(topics)
}
// Trigger is called when a filter successfully matches an inbound message.
diff --git a/whisper/whisperv2/filter_test.go b/whisper/whisperv2/filter_test.go
index 5a14a84bb..ffdfd7b34 100644
--- a/whisper/whisperv2/filter_test.go
+++ b/whisper/whisperv2/filter_test.go
@@ -91,7 +91,7 @@ func TestFilterTopicsCreation(t *testing.T) {
continue
}
for k := 0; k < len(condition); k++ {
- if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 {
+ if !bytes.Equal(condition[k][:], tt.filter[j][k][:]) {
t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k])
}
}
@@ -115,7 +115,7 @@ func TestFilterTopicsCreation(t *testing.T) {
continue
}
for k := 0; k < len(condition); k++ {
- if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 {
+ if !bytes.Equal(condition[k][:], tt.filter[j][k][:]) {
t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k])
}
}
@@ -135,7 +135,7 @@ func TestFilterTopicsCreation(t *testing.T) {
continue
}
for k := 0; k < len(condition); k++ {
- if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 {
+ if !bytes.Equal(condition[k][:], tt.filter[j][k][:]) {
t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k])
}
}
@@ -156,7 +156,7 @@ func TestFilterTopicsCreation(t *testing.T) {
continue
}
for k := 0; k < len(condition); k++ {
- if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 {
+ if !bytes.Equal(condition[k][:], tt.filter[j][k][:]) {
t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k])
}
}
diff --git a/whisper/whisperv2/message_test.go b/whisper/whisperv2/message_test.go
index efa64e431..d3b307d2a 100644
--- a/whisper/whisperv2/message_test.go
+++ b/whisper/whisperv2/message_test.go
@@ -40,7 +40,7 @@ func TestMessageSimpleWrap(t *testing.T) {
if len(msg.Signature) != 0 {
t.Fatalf("signature found for simple wrapping: 0x%x", msg.Signature)
}
- if bytes.Compare(msg.Payload, payload) != 0 {
+ if !bytes.Equal(msg.Payload, payload) {
t.Fatalf("payload mismatch after wrapping: have 0x%x, want 0x%x", msg.Payload, payload)
}
if msg.TTL/time.Second != DefaultTTL/time.Second {
@@ -65,7 +65,7 @@ func TestMessageCleartextSignRecover(t *testing.T) {
if msg.Flags&signatureFlag != signatureFlag {
t.Fatalf("signature flag mismatch: have %d, want %d", msg.Flags&signatureFlag, signatureFlag)
}
- if bytes.Compare(msg.Payload, payload) != 0 {
+ if !bytes.Equal(msg.Payload, payload) {
t.Fatalf("payload mismatch after signing: have 0x%x, want 0x%x", msg.Payload, payload)
}
diff --git a/whisper/whisperv2/peer.go b/whisper/whisperv2/peer.go
index 404ebd513..f09ce3523 100644
--- a/whisper/whisperv2/peer.go
+++ b/whisper/whisperv2/peer.go
@@ -149,7 +149,7 @@ func (self *peer) expire() {
return true
})
// Dump all known but unavailable
- for hash, _ := range unmark {
+ for hash := range unmark {
self.known.Remove(hash)
}
}
diff --git a/whisper/whisperv2/peer_test.go b/whisper/whisperv2/peer_test.go
index 9755e134c..87ca5063d 100644
--- a/whisper/whisperv2/peer_test.go
+++ b/whisper/whisperv2/peer_test.go
@@ -221,7 +221,7 @@ func TestPeerMessageExpiration(t *testing.T) {
t.Fatalf("peer pool size mismatch: have %v, want %v", peers, 1)
}
var peer *peer
- for peer, _ = range tester.client.peers {
+ for peer = range tester.client.peers {
break
}
tester.client.peerMu.RUnlock()
diff --git a/whisper/whisperv2/topic_test.go b/whisper/whisperv2/topic_test.go
index efd4a2c61..bb6568996 100644
--- a/whisper/whisperv2/topic_test.go
+++ b/whisper/whisperv2/topic_test.go
@@ -33,13 +33,13 @@ func TestTopicCreation(t *testing.T) {
// Create the topics individually
for i, tt := range topicCreationTests {
topic := NewTopic(tt.data)
- if bytes.Compare(topic[:], tt.hash[:]) != 0 {
+ if !bytes.Equal(topic[:], tt.hash[:]) {
t.Errorf("binary test %d: hash mismatch: have %v, want %v.", i, topic, tt.hash)
}
}
for i, tt := range topicCreationTests {
topic := NewTopicFromString(string(tt.data))
- if bytes.Compare(topic[:], tt.hash[:]) != 0 {
+ if !bytes.Equal(topic[:], tt.hash[:]) {
t.Errorf("textual test %d: hash mismatch: have %v, want %v.", i, topic, tt.hash)
}
}
@@ -55,13 +55,13 @@ func TestTopicCreation(t *testing.T) {
topics := NewTopics(binaryData...)
for i, tt := range topicCreationTests {
- if bytes.Compare(topics[i][:], tt.hash[:]) != 0 {
+ if !bytes.Equal(topics[i][:], tt.hash[:]) {
t.Errorf("binary batch test %d: hash mismatch: have %v, want %v.", i, topics[i], tt.hash)
}
}
topics = NewTopicsFromStrings(textualData...)
for i, tt := range topicCreationTests {
- if bytes.Compare(topics[i][:], tt.hash[:]) != 0 {
+ if !bytes.Equal(topics[i][:], tt.hash[:]) {
t.Errorf("textual batch test %d: hash mismatch: have %v, want %v.", i, topics[i], tt.hash)
}
}
@@ -73,30 +73,30 @@ var topicMatcherCreationTest = struct {
matcher []map[[4]byte]struct{}
}{
binary: [][][]byte{
- [][]byte{},
- [][]byte{
+ {},
+ {
[]byte("Topic A"),
},
- [][]byte{
+ {
[]byte("Topic B1"),
[]byte("Topic B2"),
[]byte("Topic B3"),
},
},
textual: [][]string{
- []string{},
- []string{"Topic A"},
- []string{"Topic B1", "Topic B2", "Topic B3"},
+ {},
+ {"Topic A"},
+ {"Topic B1", "Topic B2", "Topic B3"},
},
matcher: []map[[4]byte]struct{}{
- map[[4]byte]struct{}{},
- map[[4]byte]struct{}{
- [4]byte{0x25, 0xfc, 0x95, 0x66}: struct{}{},
+ {},
+ {
+ {0x25, 0xfc, 0x95, 0x66}: {},
},
- map[[4]byte]struct{}{
- [4]byte{0x93, 0x6d, 0xec, 0x09}: struct{}{},
- [4]byte{0x25, 0x23, 0x34, 0xd3}: struct{}{},
- [4]byte{0x6b, 0xc2, 0x73, 0xd1}: struct{}{},
+ {
+ {0x93, 0x6d, 0xec, 0x09}: {},
+ {0x25, 0x23, 0x34, 0xd3}: {},
+ {0x6b, 0xc2, 0x73, 0xd1}: {},
},
},
}
@@ -106,14 +106,14 @@ func TestTopicMatcherCreation(t *testing.T) {
matcher := newTopicMatcherFromBinary(test.binary...)
for i, cond := range matcher.conditions {
- for topic, _ := range cond {
+ for topic := range cond {
if _, ok := test.matcher[i][topic]; !ok {
t.Errorf("condition %d; extra topic found: 0x%x", i, topic[:])
}
}
}
for i, cond := range test.matcher {
- for topic, _ := range cond {
+ for topic := range cond {
if _, ok := matcher.conditions[i][topic]; !ok {
t.Errorf("condition %d; topic not found: 0x%x", i, topic[:])
}
@@ -122,14 +122,14 @@ func TestTopicMatcherCreation(t *testing.T) {
matcher = newTopicMatcherFromStrings(test.textual...)
for i, cond := range matcher.conditions {
- for topic, _ := range cond {
+ for topic := range cond {
if _, ok := test.matcher[i][topic]; !ok {
t.Errorf("condition %d; extra topic found: 0x%x", i, topic[:])
}
}
}
for i, cond := range test.matcher {
- for topic, _ := range cond {
+ for topic := range cond {
if _, ok := matcher.conditions[i][topic]; !ok {
t.Errorf("condition %d; topic not found: 0x%x", i, topic[:])
}
@@ -155,49 +155,49 @@ var topicMatcherTests = []struct {
},
// Fixed topic matcher should match strictly, but only prefix
{
- filter: [][]string{[]string{"a"}, []string{"b"}},
+ filter: [][]string{{"a"}, {"b"}},
topics: []string{"a"},
match: false,
},
{
- filter: [][]string{[]string{"a"}, []string{"b"}},
+ filter: [][]string{{"a"}, {"b"}},
topics: []string{"a", "b"},
match: true,
},
{
- filter: [][]string{[]string{"a"}, []string{"b"}},
+ filter: [][]string{{"a"}, {"b"}},
topics: []string{"a", "b", "c"},
match: true,
},
// Multi-matcher should match any from a sub-group
{
- filter: [][]string{[]string{"a1", "a2"}},
+ filter: [][]string{{"a1", "a2"}},
topics: []string{"a"},
match: false,
},
{
- filter: [][]string{[]string{"a1", "a2"}},
+ filter: [][]string{{"a1", "a2"}},
topics: []string{"a1"},
match: true,
},
{
- filter: [][]string{[]string{"a1", "a2"}},
+ filter: [][]string{{"a1", "a2"}},
topics: []string{"a2"},
match: true,
},
// Wild-card condition should match anything
{
- filter: [][]string{[]string{}, []string{"b"}},
+ filter: [][]string{{}, {"b"}},
topics: []string{"a"},
match: false,
},
{
- filter: [][]string{[]string{}, []string{"b"}},
+ filter: [][]string{{}, {"b"}},
topics: []string{"a", "b"},
match: true,
},
{
- filter: [][]string{[]string{}, []string{"b"}},
+ filter: [][]string{{}, {"b"}},
topics: []string{"b", "b"},
match: true,
},
diff --git a/whisper/whisperv5/doc.go b/whisper/whisperv5/doc.go
index e2e255e9e..8ec81b180 100644
--- a/whisper/whisperv5/doc.go
+++ b/whisper/whisperv5/doc.go
@@ -42,7 +42,7 @@ const (
statusCode = 0 // used by whisper protocol
messagesCode = 1 // normal whisper message
- p2pCode = 2 // peer-to-peer message (to be consumed by the peer, but not forwarded any futher)
+ p2pCode = 2 // peer-to-peer message (to be consumed by the peer, but not forwarded any further)
p2pRequestCode = 3 // peer-to-peer message, used by Dapp protocol
NumberOfMessageCodes = 64
diff --git a/whisper/whisperv5/message_test.go b/whisper/whisperv5/message_test.go
index 5cbc9182f..3eb71653d 100644
--- a/whisper/whisperv5/message_test.go
+++ b/whisper/whisperv5/message_test.go
@@ -104,10 +104,10 @@ func singleMessageTest(t *testing.T, symmetric bool) {
}
padsz := len(decrypted.Padding)
- if bytes.Compare(steg[:padsz], decrypted.Padding) != 0 {
+ if !bytes.Equal(steg[:padsz], decrypted.Padding) {
t.Fatalf("failed with seed %d: compare padding.", seed)
}
- if bytes.Compare(text, decrypted.Payload) != 0 {
+ if !bytes.Equal(text, decrypted.Payload) {
t.Fatalf("failed with seed %d: compare payload.", seed)
}
if !isMessageSigned(decrypted.Raw[0]) {
@@ -256,10 +256,10 @@ func singleEnvelopeOpenTest(t *testing.T, symmetric bool) {
}
padsz := len(decrypted.Padding)
- if bytes.Compare(steg[:padsz], decrypted.Padding) != 0 {
+ if !bytes.Equal(steg[:padsz], decrypted.Padding) {
t.Fatalf("failed with seed %d: compare padding.", seed)
}
- if bytes.Compare(text, decrypted.Payload) != 0 {
+ if !bytes.Equal(text, decrypted.Payload) {
t.Fatalf("failed with seed %d: compare payload.", seed)
}
if !isMessageSigned(decrypted.Raw[0]) {
diff --git a/whisper/whisperv5/peer.go b/whisper/whisperv5/peer.go
index 4273cfce1..634045504 100644
--- a/whisper/whisperv5/peer.go
+++ b/whisper/whisperv5/peer.go
@@ -148,7 +148,7 @@ func (peer *Peer) expire() {
return true
})
// Dump all known but unavailable
- for hash, _ := range unmark {
+ for hash := range unmark {
peer.known.Remove(hash)
}
}
diff --git a/whisper/whisperv5/peer_test.go b/whisper/whisperv5/peer_test.go
index 88da59bff..34e2ec255 100644
--- a/whisper/whisperv5/peer_test.go
+++ b/whisper/whisperv5/peer_test.go
@@ -207,7 +207,7 @@ func checkPropagation(t *testing.T) {
func validateMail(t *testing.T, index int, mail []*ReceivedMessage) bool {
var cnt int
for _, m := range mail {
- if bytes.Compare(m.Payload, expectedMessage) == 0 {
+ if bytes.Equal(m.Payload, expectedMessage) {
cnt++
}
}
diff --git a/whisper/whisperv5/whisper.go b/whisper/whisperv5/whisper.go
index 789adbdb3..b514c022e 100644
--- a/whisper/whisperv5/whisper.go
+++ b/whisper/whisperv5/whisper.go
@@ -105,7 +105,7 @@ func (w *Whisper) Version() uint {
func (w *Whisper) getPeer(peerID []byte) (*Peer, error) {
w.peerMu.Lock()
defer w.peerMu.Unlock()
- for p, _ := range w.peers {
+ for p := range w.peers {
id := p.peer.ID()
if bytes.Equal(peerID, id[:]) {
return p, nil
diff --git a/whisper/whisperv5/whisper_test.go b/whisper/whisperv5/whisper_test.go
index 9af95f445..dbe0627fa 100644
--- a/whisper/whisperv5/whisper_test.go
+++ b/whisper/whisperv5/whisper_test.go
@@ -239,7 +239,7 @@ func TestWhisperSymKeyManagement(t *testing.T) {
if k1 == nil {
t.Fatalf("first key does not exist.")
}
- if bytes.Compare(k1, randomKey) == 0 {
+ if bytes.Equal(k1, randomKey) {
t.Fatalf("k1 == randomKey.")
}
if k2 != nil {
@@ -264,10 +264,10 @@ func TestWhisperSymKeyManagement(t *testing.T) {
if k2 == nil {
t.Fatalf("k2 does not exist.")
}
- if bytes.Compare(k1, k2) == 0 {
+ if bytes.Equal(k1, k2) {
t.Fatalf("k1 == k2.")
}
- if bytes.Compare(k1, randomKey) == 0 {
+ if bytes.Equal(k1, randomKey) {
t.Fatalf("k1 == randomKey.")
}
if len(k1) != aesKeyLength {