aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.travis.yml8
-rw-r--r--README.md4
-rw-r--r--accounts/abi/argument.go19
-rw-r--r--accounts/abi/pack_test.go62
-rw-r--r--accounts/abi/type.go58
-rw-r--r--accounts/keystore/account_cache.go5
-rw-r--r--accounts/keystore/key.go8
-rw-r--r--accounts/keystore/passphrase.go (renamed from accounts/keystore/keystore_passphrase.go)1
-rw-r--r--accounts/keystore/passphrase_test.go (renamed from accounts/keystore/keystore_passphrase_test.go)0
-rw-r--r--accounts/keystore/plain.go (renamed from accounts/keystore/keystore_plain.go)0
-rw-r--r--accounts/keystore/plain_test.go (renamed from accounts/keystore/keystore_plain_test.go)0
-rw-r--r--accounts/keystore/presale.go8
-rw-r--r--accounts/keystore/wallet.go (renamed from accounts/keystore/keystore_wallet.go)0
-rw-r--r--cmd/evm/runner.go3
-rw-r--r--cmd/evm/staterunner.go2
-rw-r--r--cmd/puppeth/genesis.go386
-rw-r--r--cmd/puppeth/genesis_test.go109
-rw-r--r--cmd/puppeth/module_dashboard.go2
-rw-r--r--cmd/puppeth/puppeth.go19
-rw-r--r--cmd/puppeth/testdata/stureby_aleth.json112
-rw-r--r--cmd/puppeth/testdata/stureby_geth.json47
-rw-r--r--cmd/puppeth/testdata/stureby_parity.json181
-rw-r--r--cmd/puppeth/wizard.go42
-rw-r--r--cmd/puppeth/wizard_dashboard.go4
-rw-r--r--cmd/puppeth/wizard_ethstats.go6
-rw-r--r--cmd/puppeth/wizard_explorer.go2
-rw-r--r--cmd/puppeth/wizard_faucet.go8
-rw-r--r--cmd/puppeth/wizard_genesis.go138
-rw-r--r--cmd/puppeth/wizard_intro.go22
-rw-r--r--cmd/puppeth/wizard_nginx.go4
-rw-r--r--cmd/puppeth/wizard_node.go4
-rw-r--r--cmd/puppeth/wizard_wallet.go2
-rw-r--r--cmd/swarm/config_test.go24
-rw-r--r--cmd/swarm/feeds.go6
-rw-r--r--cmd/swarm/feeds_test.go41
-rw-r--r--cmd/swarm/fs.go36
-rw-r--r--cmd/swarm/fs_test.go30
-rw-r--r--cmd/swarm/swarm-smoke/feed_upload_and_sync.go8
-rw-r--r--cmd/swarm/swarm-smoke/main.go8
-rw-r--r--cmd/swarm/swarm-smoke/upload_and_sync.go11
-rw-r--r--cmd/utils/flags.go35
-rw-r--r--core/blockchain.go404
-rw-r--r--core/blockchain_insert.go143
-rw-r--r--core/blockchain_test.go8
-rw-r--r--core/tx_pool.go2
-rw-r--r--core/types/block.go4
-rw-r--r--core/types/gen_header_json.go20
-rw-r--r--core/vm/evm.go6
-rw-r--r--core/vm/logger_json.go (renamed from cmd/evm/json_logger.go)13
-rw-r--r--eth/api_backend.go4
-rw-r--r--eth/api_tracer.go173
-rw-r--r--eth/downloader/downloader.go22
-rw-r--r--eth/handler_test.go4
-rw-r--r--eth/tracers/internal/tracers/assets.go26
-rw-r--r--eth/tracers/internal/tracers/prestate_tracer.js5
-rw-r--r--internal/ethapi/api.go8
-rw-r--r--internal/ethapi/backend.go2
-rw-r--r--internal/web3ext/web3ext.go12
-rw-r--r--les/api_backend.go4
-rw-r--r--les/fetcher.go56
-rw-r--r--les/flowcontrol/control.go1
-rw-r--r--light/trie.go2
-rw-r--r--miner/worker.go3
-rw-r--r--mobile/big.go7
-rw-r--r--node/config.go44
-rw-r--r--node/node.go2
-rw-r--r--p2p/discover/table.go2
-rw-r--r--p2p/discv5/net.go9
-rw-r--r--p2p/protocols/accounting.go43
-rw-r--r--p2p/protocols/accounting_simulation_test.go10
-rw-r--r--p2p/protocols/protocol.go2
-rw-r--r--p2p/protocols/reporter.go147
-rw-r--r--p2p/protocols/reporter_test.go77
-rw-r--r--p2p/server.go9
-rw-r--r--p2p/simulations/network.go75
-rw-r--r--params/config.go34
-rw-r--r--params/version.go2
-rw-r--r--signer/core/api.go2
-rw-r--r--swarm/OWNERS1
-rw-r--r--swarm/api/api.go27
-rw-r--r--swarm/api/client/client_test.go112
-rw-r--r--swarm/api/http/middleware.go5
-rw-r--r--swarm/api/http/server_test.go104
-rw-r--r--swarm/grafana_dashboards/ldbstore.json2278
-rw-r--r--swarm/grafana_dashboards/swarm.json3198
-rw-r--r--swarm/multihash/multihash.go92
-rw-r--r--swarm/multihash/multihash_test.go53
-rw-r--r--swarm/network/hive.go2
-rw-r--r--swarm/network/kademlia.go153
-rw-r--r--swarm/network/kademlia_test.go89
-rw-r--r--swarm/network/protocol.go4
-rw-r--r--swarm/network/protocol_test.go2
-rw-r--r--swarm/network/simulation/example_test.go4
-rw-r--r--swarm/network/simulation/kademlia.go1
-rw-r--r--swarm/network/simulation/kademlia_test.go2
-rw-r--r--swarm/network/simulation/node_test.go35
-rw-r--r--swarm/network/simulation/simulation.go7
-rw-r--r--swarm/network/simulation/simulation_test.go13
-rw-r--r--swarm/network/simulations/overlay.go4
-rw-r--r--swarm/network/stream/common_test.go16
-rw-r--r--swarm/network/stream/delivery.go6
-rw-r--r--swarm/network/stream/delivery_test.go2
-rw-r--r--swarm/network/stream/intervals_test.go2
-rw-r--r--swarm/network/stream/snapshot_retrieval_test.go1
-rw-r--r--swarm/network/stream/snapshot_sync_test.go31
-rw-r--r--swarm/network/stream/syncer_test.go190
-rw-r--r--swarm/network/stream/visualized_snapshot_sync_sim_test.go3
-rw-r--r--swarm/network_test.go2
-rw-r--r--swarm/pss/api.go16
-rw-r--r--swarm/pss/client/client.go2
-rw-r--r--swarm/pss/handshake.go2
-rw-r--r--swarm/pss/notify/notify.go4
-rw-r--r--swarm/pss/notify/notify_test.go4
-rw-r--r--swarm/pss/protocol_test.go5
-rw-r--r--swarm/pss/pss.go171
-rw-r--r--swarm/pss/pss_test.go497
-rw-r--r--swarm/pss/types.go34
-rw-r--r--swarm/shed/db.go130
-rw-r--r--swarm/shed/db_test.go110
-rw-r--r--swarm/shed/example_store_test.go332
-rw-r--r--swarm/shed/field_string.go66
-rw-r--r--swarm/shed/field_string_test.go110
-rw-r--r--swarm/shed/field_struct.go71
-rw-r--r--swarm/shed/field_struct_test.go127
-rw-r--r--swarm/shed/field_uint64.go108
-rw-r--r--swarm/shed/field_uint64_test.go194
-rw-r--r--swarm/shed/index.go264
-rw-r--r--swarm/shed/index_test.go426
-rw-r--r--swarm/shed/schema.go134
-rw-r--r--swarm/shed/schema_test.go126
-rw-r--r--swarm/state/dbstore.go21
-rw-r--r--swarm/state/inmemorystore.go94
-rw-r--r--swarm/state/store.go26
-rw-r--r--swarm/storage/mock/db/db.go7
-rw-r--r--swarm/storage/mock/mem/mem.go16
-rw-r--r--swarm/storage/mock/mock.go7
-rw-r--r--swarm/storage/mock/rpc/rpc.go6
-rw-r--r--swarm/storage/mock/test/test.go53
-rw-r--r--swarm/swap/swap.go5
-rw-r--r--swarm/swarm.go53
-rw-r--r--swarm/version/version.go2
-rw-r--r--tests/init.go9
-rw-r--r--tests/state_test.go13
m---------tests/testdata0
-rw-r--r--trie/database.go2
-rw-r--r--vendor/github.com/karalabe/hid/appveyor.yml4
-rw-r--r--vendor/github.com/karalabe/hid/hid_disabled.go2
-rw-r--r--vendor/github.com/karalabe/hid/hid_enabled.go12
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go1
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go4
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go2
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db.go2
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go24
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/db_util.go2
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go4
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go13
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/session_util.go4
-rw-r--r--vendor/github.com/syndtr/goleveldb/leveldb/table.go30
-rw-r--r--vendor/vendor.json64
-rw-r--r--whisper/mailserver/mailserver.go12
-rw-r--r--whisper/whisperv6/api_test.go17
161 files changed, 5988 insertions, 6747 deletions
diff --git a/.travis.yml b/.travis.yml
index c1cc7c4aa..33a4f8949 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -29,6 +29,14 @@ matrix:
- os: osx
go: 1.11.x
script:
+ - echo "Increase the maximum number of open file descriptors on macOS"
+ - NOFILE=20480
+ - sudo sysctl -w kern.maxfiles=$NOFILE
+ - sudo sysctl -w kern.maxfilesperproc=$NOFILE
+ - sudo launchctl limit maxfiles $NOFILE $NOFILE
+ - sudo launchctl limit maxfiles
+ - ulimit -S -n $NOFILE
+ - ulimit -n
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
diff --git a/README.md b/README.md
index f308fb101..7593dd090 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@ For prerequisites and detailed build instructions please read the
[Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum)
on the wiki.
-Building geth requires both a Go (version 1.7 or later) and a C compiler.
+Building geth requires both a Go (version 1.9 or later) and a C compiler.
You can install them using your favourite package manager.
Once the dependencies are installed, run
@@ -168,7 +168,7 @@ HTTP based JSON-RPC API options:
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to connect
-via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](http://www.jsonrpc.org/specification)
+via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](https://www.jsonrpc.org/specification)
on all transports. You can reuse the same connection for multiple requests!
**Note: Please understand the security implications of opening up an HTTP/WS based transport before
diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go
index 93b513c34..f544c80db 100644
--- a/accounts/abi/argument.go
+++ b/accounts/abi/argument.go
@@ -243,11 +243,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
- if abiArg.Type.T == ArrayTy {
- inputOffset += 32 * abiArg.Type.Size
- } else {
- inputOffset += 32
- }
+ inputOffset += getDynamicTypeOffset(abiArg.Type)
}
var ret []byte
for i, a := range args {
@@ -257,14 +253,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
if err != nil {
return nil, err
}
- // check for a slice type (string, bytes, slice)
- if input.Type.requiresLengthPrefix() {
- // calculate the offset
- offset := inputOffset + len(variableInput)
+ // check for dynamic types
+ if isDynamicType(input.Type) {
// set the offset
- ret = append(ret, packNum(reflect.ValueOf(offset))...)
- // Append the packed output to the variable input. The variable input
- // will be appended at the end of the input.
+ ret = append(ret, packNum(reflect.ValueOf(inputOffset))...)
+ // calculate next offset
+ inputOffset += len(packed)
+ // append to variable input
variableInput = append(variableInput, packed...)
} else {
// append the packed value to the input
diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go
index 58a5b7a58..ddd2b7362 100644
--- a/accounts/abi/pack_test.go
+++ b/accounts/abi/pack_test.go
@@ -324,6 +324,66 @@ func TestPack(t *testing.T) {
"foobar",
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
},
+ {
+ "string[]",
+ []string{"hello", "foobar"},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
+ "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
+ "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
+ "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
+ },
+ {
+ "string[2]",
+ []string{"hello", "foobar"},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
+ "0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
+ "68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
+ "0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
+ "666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
+ },
+ {
+ "bytes32[][]",
+ [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
+ "0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
+ "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+ },
+
+ {
+ "bytes32[][2]",
+ [][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
+ common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
+ "00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
+ "0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
+ "0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+ },
+
+ {
+ "bytes32[3][2]",
+ [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
+ common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
+ "0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
+ "0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
+ "0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
+ "0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
+ },
} {
typ, err := NewType(test.typ)
if err != nil {
@@ -336,7 +396,7 @@ func TestPack(t *testing.T) {
}
if !bytes.Equal(output, test.output) {
- t.Errorf("%d failed. Expected bytes: '%x' Got: '%x'", i, test.output, output)
+ t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output)
}
}
}
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index dce89d2b4..6bfaabf5a 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -183,23 +183,39 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
return nil, err
}
- if t.T == SliceTy || t.T == ArrayTy {
- var packed []byte
+ switch t.T {
+ case SliceTy, ArrayTy:
+ var ret []byte
+ if t.requiresLengthPrefix() {
+ // append length
+ ret = append(ret, packNum(reflect.ValueOf(v.Len()))...)
+ }
+
+ // calculate offset if any
+ offset := 0
+ offsetReq := isDynamicType(*t.Elem)
+ if offsetReq {
+ offset = getDynamicTypeOffset(*t.Elem) * v.Len()
+ }
+ var tail []byte
for i := 0; i < v.Len(); i++ {
val, err := t.Elem.pack(v.Index(i))
if err != nil {
return nil, err
}
- packed = append(packed, val...)
- }
- if t.T == SliceTy {
- return packBytesSlice(packed, v.Len()), nil
- } else if t.T == ArrayTy {
- return packed, nil
+ if !offsetReq {
+ ret = append(ret, val...)
+ continue
+ }
+ ret = append(ret, packNum(reflect.ValueOf(offset))...)
+ offset += len(val)
+ tail = append(tail, val...)
}
+ return append(ret, tail...), nil
+ default:
+ return packElement(t, v), nil
}
- return packElement(t, v), nil
}
// requireLengthPrefix returns whether the type requires any sort of length
@@ -207,3 +223,27 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
func (t Type) requiresLengthPrefix() bool {
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
}
+
+// isDynamicType returns true if the type is dynamic.
+// StringTy, BytesTy, and SliceTy(irrespective of slice element type) are dynamic types
+// ArrayTy is considered dynamic if and only if the Array element is a dynamic type.
+// This function recursively checks the type for slice and array elements.
+func isDynamicType(t Type) bool {
+ // dynamic types
+ // array is also a dynamic type if the array type is dynamic
+ return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
+}
+
+// getDynamicTypeOffset returns the offset for the type.
+// See `isDynamicType` to know which types are considered dynamic.
+// If the type t is an array and element type is not a dynamic type, then we consider it a static type and
+// return 32 * size of array since length prefix is not required.
+// If t is a dynamic type or element type(for slices and arrays) is dynamic, then we simply return 32 as offset.
+func getDynamicTypeOffset(t Type) int {
+ // if it is an array and there are no dynamic types
+ // then the array is static type
+ if t.T == ArrayTy && !isDynamicType(*t.Elem) {
+ return 32 * t.Size
+ }
+ return 32
+}
diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go
index da3a46eb8..8f660e282 100644
--- a/accounts/keystore/account_cache.go
+++ b/accounts/keystore/account_cache.go
@@ -265,7 +265,10 @@ func (ac *accountCache) scanAccounts() error {
case (addr == common.Address{}):
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
default:
- return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
+ return &accounts.Account{
+ Address: addr,
+ URL: accounts.URL{Scheme: KeyStoreScheme, Path: path},
+ }
}
return nil
}
diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go
index 0564751c4..84d8df0c5 100644
--- a/accounts/keystore/key.go
+++ b/accounts/keystore/key.go
@@ -171,7 +171,10 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
if err != nil {
return nil, accounts.Account{}, err
}
- a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}}
+ a := accounts.Account{
+ Address: key.Address,
+ URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))},
+ }
if err := ks.StoreKey(a.URL.Path, key, auth); err != nil {
zeroKey(key.PrivateKey)
return nil, a, err
@@ -224,5 +227,6 @@ func toISO8601(t time.Time) string {
} else {
tz = fmt.Sprintf("%03d00", offset/3600)
}
- return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
+ return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s",
+ t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
}
diff --git a/accounts/keystore/keystore_passphrase.go b/accounts/keystore/passphrase.go
index 9794f32fe..a0b6cf538 100644
--- a/accounts/keystore/keystore_passphrase.go
+++ b/accounts/keystore/passphrase.go
@@ -233,6 +233,7 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
PrivateKey: key,
}, nil
}
+
func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
if cryptoJson.Cipher != "aes-128-ctr" {
return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)
diff --git a/accounts/keystore/keystore_passphrase_test.go b/accounts/keystore/passphrase_test.go
index 630682ceb..630682ceb 100644
--- a/accounts/keystore/keystore_passphrase_test.go
+++ b/accounts/keystore/passphrase_test.go
diff --git a/accounts/keystore/keystore_plain.go b/accounts/keystore/plain.go
index f62a133ce..f62a133ce 100644
--- a/accounts/keystore/keystore_plain.go
+++ b/accounts/keystore/plain.go
diff --git a/accounts/keystore/keystore_plain_test.go b/accounts/keystore/plain_test.go
index 32852a0ad..32852a0ad 100644
--- a/accounts/keystore/keystore_plain_test.go
+++ b/accounts/keystore/plain_test.go
diff --git a/accounts/keystore/presale.go b/accounts/keystore/presale.go
index 1554294e1..03055245f 100644
--- a/accounts/keystore/presale.go
+++ b/accounts/keystore/presale.go
@@ -38,7 +38,13 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou
return accounts.Account{}, nil, err
}
key.Id = uuid.NewRandom()
- a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: keyStore.JoinPath(keyFileName(key.Address))}}
+ a := accounts.Account{
+ Address: key.Address,
+ URL: accounts.URL{
+ Scheme: KeyStoreScheme,
+ Path: keyStore.JoinPath(keyFileName(key.Address)),
+ },
+ }
err = keyStore.StoreKey(a.URL.Path, key, password)
return a, key, err
}
diff --git a/accounts/keystore/keystore_wallet.go b/accounts/keystore/wallet.go
index 758fdfe36..758fdfe36 100644
--- a/accounts/keystore/keystore_wallet.go
+++ b/accounts/keystore/wallet.go
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 962fc021d..54b67ce10 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -89,7 +89,7 @@ func runCmd(ctx *cli.Context) error {
genesisConfig *core.Genesis
)
if ctx.GlobalBool(MachineFlag.Name) {
- tracer = NewJSONLogger(logconfig, os.Stdout)
+ tracer = vm.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.GlobalBool(DebugFlag.Name) {
debugLogger = vm.NewStructLogger(logconfig)
tracer = debugLogger
@@ -206,6 +206,7 @@ func runCmd(ctx *cli.Context) error {
execTime := time.Since(tstart)
if ctx.GlobalBool(DumpFlag.Name) {
+ statedb.Commit(true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump()))
}
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index 06c9be380..b3c69d9b9 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -68,7 +68,7 @@ func stateTestCmd(ctx *cli.Context) error {
)
switch {
case ctx.GlobalBool(MachineFlag.Name):
- tracer = NewJSONLogger(config, os.Stderr)
+ tracer = vm.NewJSONLogger(config, os.Stderr)
case ctx.GlobalBool(DebugFlag.Name):
debugger = vm.NewStructLogger(config)
diff --git a/cmd/puppeth/genesis.go b/cmd/puppeth/genesis.go
index 5f39a889d..1025dfe82 100644
--- a/cmd/puppeth/genesis.go
+++ b/cmd/puppeth/genesis.go
@@ -20,35 +20,41 @@ import (
"encoding/binary"
"errors"
"math"
+ "math/big"
+ "strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ math2 "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
)
-// cppEthereumGenesisSpec represents the genesis specification format used by the
+// alethGenesisSpec represents the genesis specification format used by the
// C++ Ethereum implementation.
-type cppEthereumGenesisSpec struct {
+type alethGenesisSpec struct {
SealEngine string `json:"sealEngine"`
Params struct {
- AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
- HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
- EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
- EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
- ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
- ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
- NetworkID hexutil.Uint64 `json:"networkID"`
- ChainID hexutil.Uint64 `json:"chainID"`
- MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
- MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
- MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
- GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
- MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
- DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
- DurationLimit *hexutil.Big `json:"durationLimit"`
- BlockReward *hexutil.Big `json:"blockReward"`
+ AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
+ MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
+ HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
+ DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
+ EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
+ EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
+ ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
+ ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
+ MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
+ MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
+ TieBreakingGas bool `json:"tieBreakingGas"`
+ GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
+ MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
+ DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
+ DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
+ BlockReward *hexutil.Big `json:"blockReward"`
+ NetworkID hexutil.Uint64 `json:"networkID"`
+ ChainID hexutil.Uint64 `json:"chainID"`
+ AllowFutureBlocks bool `json:"allowFutureBlocks"`
} `json:"params"`
Genesis struct {
@@ -62,57 +68,68 @@ type cppEthereumGenesisSpec struct {
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
- Accounts map[common.Address]*cppEthereumGenesisSpecAccount `json:"accounts"`
+ Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"`
}
-// cppEthereumGenesisSpecAccount is the prefunded genesis account and/or precompiled
+// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
-type cppEthereumGenesisSpecAccount struct {
- Balance *hexutil.Big `json:"balance"`
- Nonce uint64 `json:"nonce,omitempty"`
- Precompiled *cppEthereumGenesisSpecBuiltin `json:"precompiled,omitempty"`
+type alethGenesisSpecAccount struct {
+ Balance *math2.HexOrDecimal256 `json:"balance"`
+ Nonce uint64 `json:"nonce,omitempty"`
+ Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
}
-// cppEthereumGenesisSpecBuiltin is the precompiled contract definition.
-type cppEthereumGenesisSpecBuiltin struct {
- Name string `json:"name,omitempty"`
- StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
- Linear *cppEthereumGenesisSpecLinearPricing `json:"linear,omitempty"`
+// alethGenesisSpecBuiltin is the precompiled contract definition.
+type alethGenesisSpecBuiltin struct {
+ Name string `json:"name,omitempty"`
+ StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
+ Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
}
-type cppEthereumGenesisSpecLinearPricing struct {
+type alethGenesisSpecLinearPricing struct {
Base uint64 `json:"base"`
Word uint64 `json:"word"`
}
-// newCppEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
+// newAlethGenesisSpec converts a go-ethereum genesis block into a Aleth-specific
// chain specification format.
-func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEthereumGenesisSpec, error) {
- // Only ethash is currently supported between go-ethereum and cpp-ethereum
+func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSpec, error) {
+ // Only ethash is currently supported between go-ethereum and aleth
if genesis.Config.Ethash == nil {
return nil, errors.New("unsupported consensus engine")
}
- // Reconstruct the chain spec in Parity's format
- spec := &cppEthereumGenesisSpec{
+ // Reconstruct the chain spec in Aleth format
+ spec := &alethGenesisSpec{
SealEngine: "Ethash",
}
+ // Some defaults
spec.Params.AccountStartNonce = 0
+ spec.Params.TieBreakingGas = false
+ spec.Params.AllowFutureBlocks = false
+ spec.Params.DaoHardforkBlock = 0
+
spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())
- spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
- spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
+
+ // Byzantium
+ if num := genesis.Config.ByzantiumBlock; num != nil {
+ spec.setByzantium(num)
+ }
+ // Constantinople
+ if num := genesis.Config.ConstantinopleBlock; num != nil {
+ spec.setConstantinople(num)
+ }
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
-
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
- spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64)
+ spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxInt64)
spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
- spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
- spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
- spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
+ spec.Params.DifficultyBoundDivisor = (*math2.HexOrDecimal256)(params.DifficultyBoundDivisor)
+ spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
+ spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit)
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
@@ -126,77 +143,104 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
- spec.Accounts = make(map[common.Address]*cppEthereumGenesisSpecAccount)
for address, account := range genesis.Alloc {
- spec.Accounts[address] = &cppEthereumGenesisSpecAccount{
- Balance: (*hexutil.Big)(account.Balance),
- Nonce: account.Nonce,
- }
+ spec.setAccount(address, account)
}
- spec.Accounts[common.BytesToAddress([]byte{1})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "ecrecover", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 3000},
- }
- spec.Accounts[common.BytesToAddress([]byte{2})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "sha256", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 60, Word: 12},
+
+ spec.setPrecompile(1, &alethGenesisSpecBuiltin{Name: "ecrecover",
+ Linear: &alethGenesisSpecLinearPricing{Base: 3000}})
+ spec.setPrecompile(2, &alethGenesisSpecBuiltin{Name: "sha256",
+ Linear: &alethGenesisSpecLinearPricing{Base: 60, Word: 12}})
+ spec.setPrecompile(3, &alethGenesisSpecBuiltin{Name: "ripemd160",
+ Linear: &alethGenesisSpecLinearPricing{Base: 600, Word: 120}})
+ spec.setPrecompile(4, &alethGenesisSpecBuiltin{Name: "identity",
+ Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
+ if genesis.Config.ByzantiumBlock != nil {
+ spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
+ spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
+ Linear: &alethGenesisSpecLinearPricing{Base: 500}})
+ spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
+ Linear: &alethGenesisSpecLinearPricing{Base: 40000}})
+ spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
+ StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
}
- spec.Accounts[common.BytesToAddress([]byte{3})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "ripemd160", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 600, Word: 120},
+ return spec, nil
+}
+
+func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpecBuiltin) {
+ if spec.Accounts == nil {
+ spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
}
- spec.Accounts[common.BytesToAddress([]byte{4})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "identity", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 15, Word: 3},
+ spec.Accounts[common.UnprefixedAddress(common.BytesToAddress([]byte{address}))].Precompiled = data
+}
+
+func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) {
+ if spec.Accounts == nil {
+ spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
}
- if genesis.Config.ByzantiumBlock != nil {
- spec.Accounts[common.BytesToAddress([]byte{5})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "modexp", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
- }
- spec.Accounts[common.BytesToAddress([]byte{6})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "alt_bn128_G1_add", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 500},
- }
- spec.Accounts[common.BytesToAddress([]byte{7})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "alt_bn128_G1_mul", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 40000},
- }
- spec.Accounts[common.BytesToAddress([]byte{8})].Precompiled = &cppEthereumGenesisSpecBuiltin{
- Name: "alt_bn128_pairing_product", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
- }
+
+ a, exist := spec.Accounts[common.UnprefixedAddress(address)]
+ if !exist {
+ a = &alethGenesisSpecAccount{}
+ spec.Accounts[common.UnprefixedAddress(address)] = a
}
- return spec, nil
+ a.Balance = (*math2.HexOrDecimal256)(account.Balance)
+ a.Nonce = account.Nonce
+
+}
+
+func (spec *alethGenesisSpec) setByzantium(num *big.Int) {
+ spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64())
+}
+
+func (spec *alethGenesisSpec) setConstantinople(num *big.Int) {
+ spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64())
}
// parityChainSpec is the chain specification format used by Parity.
type parityChainSpec struct {
- Name string `json:"name"`
- Engine struct {
+ Name string `json:"name"`
+ Datadir string `json:"dataDir"`
+ Engine struct {
Ethash struct {
Params struct {
- MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
- DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
- DurationLimit *hexutil.Big `json:"durationLimit"`
- BlockReward *hexutil.Big `json:"blockReward"`
- HomesteadTransition uint64 `json:"homesteadTransition"`
- EIP150Transition uint64 `json:"eip150Transition"`
- EIP160Transition uint64 `json:"eip160Transition"`
- EIP161abcTransition uint64 `json:"eip161abcTransition"`
- EIP161dTransition uint64 `json:"eip161dTransition"`
- EIP649Reward *hexutil.Big `json:"eip649Reward"`
- EIP100bTransition uint64 `json:"eip100bTransition"`
- EIP649Transition uint64 `json:"eip649Transition"`
+ MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
+ DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
+ DurationLimit *hexutil.Big `json:"durationLimit"`
+ BlockReward map[string]string `json:"blockReward"`
+ DifficultyBombDelays map[string]string `json:"difficultyBombDelays"`
+ HomesteadTransition hexutil.Uint64 `json:"homesteadTransition"`
+ EIP100bTransition hexutil.Uint64 `json:"eip100bTransition"`
} `json:"params"`
} `json:"Ethash"`
} `json:"engine"`
Params struct {
- MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
- MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
- GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
- NetworkID hexutil.Uint64 `json:"networkID"`
- MaxCodeSize uint64 `json:"maxCodeSize"`
- EIP155Transition uint64 `json:"eip155Transition"`
- EIP98Transition uint64 `json:"eip98Transition"`
- EIP86Transition uint64 `json:"eip86Transition"`
- EIP140Transition uint64 `json:"eip140Transition"`
- EIP211Transition uint64 `json:"eip211Transition"`
- EIP214Transition uint64 `json:"eip214Transition"`
- EIP658Transition uint64 `json:"eip658Transition"`
+ AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
+ MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
+ MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
+ GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
+ NetworkID hexutil.Uint64 `json:"networkID"`
+ ChainID hexutil.Uint64 `json:"chainID"`
+ MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
+ MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
+ EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
+ EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
+ EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
+ EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
+ EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
+ EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
+ EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
+ EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
+ EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
+ EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
+ EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
+ EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
+ EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
+ EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
} `json:"params"`
Genesis struct {
@@ -215,22 +259,22 @@ type parityChainSpec struct {
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
- Nodes []string `json:"nodes"`
- Accounts map[common.Address]*parityChainSpecAccount `json:"accounts"`
+ Nodes []string `json:"nodes"`
+ Accounts map[common.UnprefixedAddress]*parityChainSpecAccount `json:"accounts"`
}
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type parityChainSpecAccount struct {
- Balance *hexutil.Big `json:"balance"`
- Nonce uint64 `json:"nonce,omitempty"`
+ Balance math2.HexOrDecimal256 `json:"balance"`
+ Nonce math2.HexOrDecimal64 `json:"nonce,omitempty"`
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
}
// parityChainSpecBuiltin is the precompiled contract definition.
type parityChainSpecBuiltin struct {
Name string `json:"name,omitempty"`
- ActivateAt uint64 `json:"activate_at,omitempty"`
+ ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"`
Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
}
@@ -265,34 +309,51 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
}
// Reconstruct the chain spec in Parity's format
spec := &parityChainSpec{
- Name: network,
- Nodes: bootnodes,
+ Name: network,
+ Nodes: bootnodes,
+ Datadir: strings.ToLower(network),
}
+ spec.Engine.Ethash.Params.BlockReward = make(map[string]string)
+ spec.Engine.Ethash.Params.DifficultyBombDelays = make(map[string]string)
+ // Frontier
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
- spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
- spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
- spec.Engine.Ethash.Params.EIP150Transition = genesis.Config.EIP150Block.Uint64()
- spec.Engine.Ethash.Params.EIP160Transition = genesis.Config.EIP155Block.Uint64()
- spec.Engine.Ethash.Params.EIP161abcTransition = genesis.Config.EIP158Block.Uint64()
- spec.Engine.Ethash.Params.EIP161dTransition = genesis.Config.EIP158Block.Uint64()
- spec.Engine.Ethash.Params.EIP649Reward = (*hexutil.Big)(ethash.ByzantiumBlockReward)
- spec.Engine.Ethash.Params.EIP100bTransition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64()
+ spec.Engine.Ethash.Params.BlockReward["0x0"] = hexutil.EncodeBig(ethash.FrontierBlockReward)
+
+ // Homestead
+ spec.Engine.Ethash.Params.HomesteadTransition = hexutil.Uint64(genesis.Config.HomesteadBlock.Uint64())
+
+ // Tangerine Whistle : 150
+ // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-608.md
+ spec.Params.EIP150Transition = hexutil.Uint64(genesis.Config.EIP150Block.Uint64())
+
+ // Spurious Dragon: 155, 160, 161, 170
+ // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-607.md
+ spec.Params.EIP155Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
+ spec.Params.EIP160Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
+ spec.Params.EIP161abcTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
+ spec.Params.EIP161dTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
+ // Byzantium
+ if num := genesis.Config.ByzantiumBlock; num != nil {
+ spec.setByzantium(num)
+ }
+ // Constantinople
+ if num := genesis.Config.ConstantinopleBlock; num != nil {
+ spec.setConstantinople(num)
+ }
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
- spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
+ spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
+ spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaxCodeSize = params.MaxCodeSize
- spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
- spec.Params.EIP98Transition = math.MaxUint64
- spec.Params.EIP86Transition = math.MaxUint64
- spec.Params.EIP140Transition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Params.EIP211Transition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Params.EIP214Transition = genesis.Config.ByzantiumBlock.Uint64()
- spec.Params.EIP658Transition = genesis.Config.ByzantiumBlock.Uint64()
+ // geth has it set from zero
+ spec.Params.MaxCodeSizeTransition = 0
+
+ // Disable this one
+ spec.Params.EIP98Transition = math.MaxInt64
spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)
@@ -305,42 +366,77 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
- spec.Accounts = make(map[common.Address]*parityChainSpecAccount)
+ spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
for address, account := range genesis.Alloc {
- spec.Accounts[address] = &parityChainSpecAccount{
- Balance: (*hexutil.Big)(account.Balance),
- Nonce: account.Nonce,
+ bal := math2.HexOrDecimal256(*account.Balance)
+
+ spec.Accounts[common.UnprefixedAddress(address)] = &parityChainSpecAccount{
+ Balance: bal,
+ Nonce: math2.HexOrDecimal64(account.Nonce),
}
}
- spec.Accounts[common.BytesToAddress([]byte{1})].Builtin = &parityChainSpecBuiltin{
- Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}},
- }
- spec.Accounts[common.BytesToAddress([]byte{2})].Builtin = &parityChainSpecBuiltin{
+ spec.setPrecompile(1, &parityChainSpecBuiltin{Name: "ecrecover",
+ Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}}})
+
+ spec.setPrecompile(2, &parityChainSpecBuiltin{
Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}},
- }
- spec.Accounts[common.BytesToAddress([]byte{3})].Builtin = &parityChainSpecBuiltin{
+ })
+ spec.setPrecompile(3, &parityChainSpecBuiltin{
Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}},
- }
- spec.Accounts[common.BytesToAddress([]byte{4})].Builtin = &parityChainSpecBuiltin{
+ })
+ spec.setPrecompile(4, &parityChainSpecBuiltin{
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
- }
+ })
if genesis.Config.ByzantiumBlock != nil {
- spec.Accounts[common.BytesToAddress([]byte{5})].Builtin = &parityChainSpecBuiltin{
- Name: "modexp", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
- }
- spec.Accounts[common.BytesToAddress([]byte{6})].Builtin = &parityChainSpecBuiltin{
- Name: "alt_bn128_add", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
- }
- spec.Accounts[common.BytesToAddress([]byte{7})].Builtin = &parityChainSpecBuiltin{
- Name: "alt_bn128_mul", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
- }
- spec.Accounts[common.BytesToAddress([]byte{8})].Builtin = &parityChainSpecBuiltin{
- Name: "alt_bn128_pairing", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
- }
+ blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64())
+ spec.setPrecompile(5, &parityChainSpecBuiltin{
+ Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
+ })
+ spec.setPrecompile(6, &parityChainSpecBuiltin{
+ Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
+ })
+ spec.setPrecompile(7, &parityChainSpecBuiltin{
+ Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
+ })
+ spec.setPrecompile(8, &parityChainSpecBuiltin{
+ Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
+ })
}
return spec, nil
}
+func (spec *parityChainSpec) setPrecompile(address byte, data *parityChainSpecBuiltin) {
+ if spec.Accounts == nil {
+ spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
+ }
+ a := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
+ if _, exist := spec.Accounts[a]; !exist {
+ spec.Accounts[a] = &parityChainSpecAccount{}
+ }
+ spec.Accounts[a].Builtin = data
+}
+
+func (spec *parityChainSpec) setByzantium(num *big.Int) {
+ spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ByzantiumBlockReward)
+ spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(3000000)
+ n := hexutil.Uint64(num.Uint64())
+ spec.Engine.Ethash.Params.EIP100bTransition = n
+ spec.Params.EIP140Transition = n
+ spec.Params.EIP211Transition = n
+ spec.Params.EIP214Transition = n
+ spec.Params.EIP658Transition = n
+}
+
+func (spec *parityChainSpec) setConstantinople(num *big.Int) {
+ spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ConstantinopleBlockReward)
+ spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(2000000)
+ n := hexutil.Uint64(num.Uint64())
+ spec.Params.EIP145Transition = n
+ spec.Params.EIP1014Transition = n
+ spec.Params.EIP1052Transition = n
+ spec.Params.EIP1283Transition = n
+}
+
// pyEthereumGenesisSpec represents the genesis specification format used by the
// Python Ethereum implementation.
type pyEthereumGenesisSpec struct {
diff --git a/cmd/puppeth/genesis_test.go b/cmd/puppeth/genesis_test.go
new file mode 100644
index 000000000..83e738360
--- /dev/null
+++ b/cmd/puppeth/genesis_test.go
@@ -0,0 +1,109 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/ethereum/go-ethereum/core"
+)
+
+// Tests the go-ethereum to Aleth chainspec conversion for the Stureby testnet.
+func TestAlethSturebyConverter(t *testing.T) {
+ blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ var genesis core.Genesis
+ if err := json.Unmarshal(blob, &genesis); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ spec, err := newAlethGenesisSpec("stureby", &genesis)
+ if err != nil {
+ t.Fatalf("failed creating chainspec: %v", err)
+ }
+
+ expBlob, err := ioutil.ReadFile("testdata/stureby_aleth.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ expspec := &alethGenesisSpec{}
+ if err := json.Unmarshal(expBlob, expspec); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ if !reflect.DeepEqual(expspec, spec) {
+ t.Errorf("chainspec mismatch")
+ c := spew.ConfigState{
+ DisablePointerAddresses: true,
+ SortKeys: true,
+ }
+ exp := strings.Split(c.Sdump(expspec), "\n")
+ got := strings.Split(c.Sdump(spec), "\n")
+ for i := 0; i < len(exp) && i < len(got); i++ {
+ if exp[i] != got[i] {
+ fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
+ }
+ }
+ }
+}
+
+// Tests the go-ethereum to Parity chainspec conversion for the Stureby testnet.
+func TestParitySturebyConverter(t *testing.T) {
+ blob, err := ioutil.ReadFile("testdata/stureby_geth.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ var genesis core.Genesis
+ if err := json.Unmarshal(blob, &genesis); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ spec, err := newParityChainSpec("Stureby", &genesis, []string{})
+ if err != nil {
+ t.Fatalf("failed creating chainspec: %v", err)
+ }
+
+ expBlob, err := ioutil.ReadFile("testdata/stureby_parity.json")
+ if err != nil {
+ t.Fatalf("could not read file: %v", err)
+ }
+ expspec := &parityChainSpec{}
+ if err := json.Unmarshal(expBlob, expspec); err != nil {
+ t.Fatalf("failed parsing genesis: %v", err)
+ }
+ expspec.Nodes = []string{}
+
+ if !reflect.DeepEqual(expspec, spec) {
+ t.Errorf("chainspec mismatch")
+ c := spew.ConfigState{
+ DisablePointerAddresses: true,
+ SortKeys: true,
+ }
+ exp := strings.Split(c.Sdump(expspec), "\n")
+ got := strings.Split(c.Sdump(spec), "\n")
+ for i := 0; i < len(exp) && i < len(got); i++ {
+ if exp[i] != got[i] {
+ fmt.Printf("got: %v\nexp: %v\n", exp[i], got[i])
+ }
+ }
+ }
+}
diff --git a/cmd/puppeth/module_dashboard.go b/cmd/puppeth/module_dashboard.go
index d22bd8110..cb3ed6e71 100644
--- a/cmd/puppeth/module_dashboard.go
+++ b/cmd/puppeth/module_dashboard.go
@@ -640,7 +640,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
files[filepath.Join(workdir, network+".json")] = genesis
if conf.Genesis.Config.Ethash != nil {
- cppSpec, err := newCppEthereumGenesisSpec(network, conf.Genesis)
+ cppSpec, err := newAlethGenesisSpec(network, conf.Genesis)
if err != nil {
return nil, err
}
diff --git a/cmd/puppeth/puppeth.go b/cmd/puppeth/puppeth.go
index f9b8fe481..c3de5f936 100644
--- a/cmd/puppeth/puppeth.go
+++ b/cmd/puppeth/puppeth.go
@@ -43,18 +43,23 @@ func main() {
Usage: "log level to emit to the screen",
},
}
- app.Action = func(c *cli.Context) error {
+ app.Before = func(c *cli.Context) error {
// Set up the logger to print everything and the random generator
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int("loglevel")), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
rand.Seed(time.Now().UnixNano())
- network := c.String("network")
- if strings.Contains(network, " ") || strings.Contains(network, "-") {
- log.Crit("No spaces or hyphens allowed in network name")
- }
- // Start the wizard and relinquish control
- makeWizard(c.String("network")).run()
return nil
}
+ app.Action = runWizard
app.Run(os.Args)
}
+
+// runWizard start the wizard and relinquish control to it.
+func runWizard(c *cli.Context) error {
+ network := c.String("network")
+ if strings.Contains(network, " ") || strings.Contains(network, "-") || strings.ToLower(network) != network {
+ log.Crit("No spaces, hyphens or capital letters allowed in network name")
+ }
+ makeWizard(c.String("network")).run()
+ return nil
+}
diff --git a/cmd/puppeth/testdata/stureby_aleth.json b/cmd/puppeth/testdata/stureby_aleth.json
new file mode 100644
index 000000000..1ef1d8ae1
--- /dev/null
+++ b/cmd/puppeth/testdata/stureby_aleth.json
@@ -0,0 +1,112 @@
+{
+ "sealEngine":"Ethash",
+ "params":{
+ "accountStartNonce":"0x00",
+ "maximumExtraDataSize":"0x20",
+ "homesteadForkBlock":"0x2710",
+ "daoHardforkBlock":"0x00",
+ "EIP150ForkBlock":"0x3a98",
+ "EIP158ForkBlock":"0x59d8",
+ "byzantiumForkBlock":"0x7530",
+ "constantinopleForkBlock":"0x9c40",
+ "minGasLimit":"0x1388",
+ "maxGasLimit":"0x7fffffffffffffff",
+ "tieBreakingGas":false,
+ "gasLimitBoundDivisor":"0x0400",
+ "minimumDifficulty":"0x20000",
+ "difficultyBoundDivisor":"0x0800",
+ "durationLimit":"0x0d",
+ "blockReward":"0x4563918244F40000",
+ "networkID":"0x4cb2e",
+ "chainID":"0x4cb2e",
+ "allowFutureBlocks":false
+ },
+ "genesis":{
+ "nonce":"0x0000000000000000",
+ "difficulty":"0x20000",
+ "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "author":"0x0000000000000000000000000000000000000000",
+ "timestamp":"0x59a4e76d",
+ "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+ "gasLimit":"0x47b760"
+ },
+ "accounts":{
+ "0000000000000000000000000000000000000001":{
+ "balance":"1",
+ "precompiled":{
+ "name":"ecrecover",
+ "linear":{
+ "base":3000,
+ "word":0
+ }
+ }
+ },
+ "0000000000000000000000000000000000000002":{
+ "balance":"1",
+ "precompiled":{
+ "name":"sha256",
+ "linear":{
+ "base":60,
+ "word":12
+ }
+ }
+ },
+ "0000000000000000000000000000000000000003":{
+ "balance":"1",
+ "precompiled":{
+ "name":"ripemd160",
+ "linear":{
+ "base":600,
+ "word":120
+ }
+ }
+ },
+ "0000000000000000000000000000000000000004":{
+ "balance":"1",
+ "precompiled":{
+ "name":"identity",
+ "linear":{
+ "base":15,
+ "word":3
+ }
+ }
+ },
+ "0000000000000000000000000000000000000005":{
+ "balance":"1",
+ "precompiled":{
+ "name":"modexp",
+ "startingBlock":"0x7530"
+ }
+ },
+ "0000000000000000000000000000000000000006":{
+ "balance":"1",
+ "precompiled":{
+ "name":"alt_bn128_G1_add",
+ "startingBlock":"0x7530",
+ "linear":{
+ "base":500,
+ "word":0
+ }
+ }
+ },
+ "0000000000000000000000000000000000000007":{
+ "balance":"1",
+ "precompiled":{
+ "name":"alt_bn128_G1_mul",
+ "startingBlock":"0x7530",
+ "linear":{
+ "base":40000,
+ "word":0
+ }
+ }
+ },
+ "0000000000000000000000000000000000000008":{
+ "balance":"1",
+ "precompiled":{
+ "name":"alt_bn128_pairing_product",
+ "startingBlock":"0x7530"
+ }
+ }
+ }
+}
diff --git a/cmd/puppeth/testdata/stureby_geth.json b/cmd/puppeth/testdata/stureby_geth.json
new file mode 100644
index 000000000..c8c3b3c95
--- /dev/null
+++ b/cmd/puppeth/testdata/stureby_geth.json
@@ -0,0 +1,47 @@
+{
+ "config": {
+ "ethash":{},
+ "chainId": 314158,
+ "homesteadBlock": 10000,
+ "eip150Block": 15000,
+ "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "eip155Block": 23000,
+ "eip158Block": 23000,
+ "byzantiumBlock": 30000,
+ "constantinopleBlock": 40000
+ },
+ "nonce": "0x0",
+ "timestamp": "0x59a4e76d",
+ "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+ "gasLimit": "0x47b760",
+ "difficulty": "0x20000",
+ "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase": "0x0000000000000000000000000000000000000000",
+ "alloc": {
+ "0000000000000000000000000000000000000001": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000002": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000003": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000004": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000005": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000006": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000007": {
+ "balance": "0x01"
+ },
+ "0000000000000000000000000000000000000008": {
+ "balance": "0x01"
+ }
+ }
+}
diff --git a/cmd/puppeth/testdata/stureby_parity.json b/cmd/puppeth/testdata/stureby_parity.json
new file mode 100644
index 000000000..f3fa8386a
--- /dev/null
+++ b/cmd/puppeth/testdata/stureby_parity.json
@@ -0,0 +1,181 @@
+{
+ "name":"Stureby",
+ "dataDir":"stureby",
+ "engine":{
+ "Ethash":{
+ "params":{
+ "minimumDifficulty":"0x20000",
+ "difficultyBoundDivisor":"0x800",
+ "durationLimit":"0xd",
+ "blockReward":{
+ "0x0":"0x4563918244f40000",
+ "0x7530":"0x29a2241af62c0000",
+ "0x9c40":"0x1bc16d674ec80000"
+ },
+ "homesteadTransition":"0x2710",
+ "eip100bTransition":"0x7530",
+ "difficultyBombDelays":{
+ "0x7530":"0x2dc6c0",
+ "0x9c40":"0x1e8480"
+ }
+ }
+ }
+ },
+ "params":{
+ "accountStartNonce":"0x0",
+ "maximumExtraDataSize":"0x20",
+ "gasLimitBoundDivisor":"0x400",
+ "minGasLimit":"0x1388",
+ "networkID":"0x4cb2e",
+ "chainID":"0x4cb2e",
+ "maxCodeSize":"0x6000",
+ "maxCodeSizeTransition":"0x0",
+ "eip98Transition": "0x7fffffffffffffff",
+ "eip150Transition":"0x3a98",
+ "eip160Transition":"0x59d8",
+ "eip161abcTransition":"0x59d8",
+ "eip161dTransition":"0x59d8",
+ "eip155Transition":"0x59d8",
+ "eip140Transition":"0x7530",
+ "eip211Transition":"0x7530",
+ "eip214Transition":"0x7530",
+ "eip658Transition":"0x7530",
+ "eip145Transition":"0x9c40",
+ "eip1014Transition":"0x9c40",
+ "eip1052Transition":"0x9c40",
+ "eip1283Transition":"0x9c40"
+ },
+ "genesis":{
+ "seal":{
+ "ethereum":{
+ "nonce":"0x0000000000000000",
+ "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000"
+ }
+ },
+ "difficulty":"0x20000",
+ "author":"0x0000000000000000000000000000000000000000",
+ "timestamp":"0x59a4e76d",
+ "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+ "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
+ "gasLimit":"0x47b760"
+ },
+ "nodes":[
+ "enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303",
+ "enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303",
+ "enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303",
+ "enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303",
+ "enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303",
+ "enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303",
+ "enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404",
+ "enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414"
+ ],
+ "accounts":{
+ "0000000000000000000000000000000000000001":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"ecrecover",
+ "pricing":{
+ "linear":{
+ "base":3000,
+ "word":0
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000002":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"sha256",
+ "pricing":{
+ "linear":{
+ "base":60,
+ "word":12
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000003":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"ripemd160",
+ "pricing":{
+ "linear":{
+ "base":600,
+ "word":120
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000004":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"identity",
+ "pricing":{
+ "linear":{
+ "base":15,
+ "word":3
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000005":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"modexp",
+ "activate_at":"0x7530",
+ "pricing":{
+ "modexp":{
+ "divisor":20
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000006":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"alt_bn128_add",
+ "activate_at":"0x7530",
+ "pricing":{
+ "linear":{
+ "base":500,
+ "word":0
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000007":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"alt_bn128_mul",
+ "activate_at":"0x7530",
+ "pricing":{
+ "linear":{
+ "base":40000,
+ "word":0
+ }
+ }
+ }
+ },
+ "0000000000000000000000000000000000000008":{
+ "balance":"1",
+ "nonce":"0",
+ "builtin":{
+ "name":"alt_bn128_pairing",
+ "activate_at":"0x7530",
+ "pricing":{
+ "alt_bn128_pairing":{
+ "base":100000,
+ "pair":80000
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/cmd/puppeth/wizard.go b/cmd/puppeth/wizard.go
index b88a61de7..83536506c 100644
--- a/cmd/puppeth/wizard.go
+++ b/cmd/puppeth/wizard.go
@@ -23,6 +23,7 @@ import (
"io/ioutil"
"math/big"
"net"
+ "net/url"
"os"
"path/filepath"
"sort"
@@ -118,6 +119,47 @@ func (w *wizard) readDefaultString(def string) string {
return def
}
+// readDefaultYesNo reads a single line from stdin, trimming if from spaces and
+// interpreting it as a 'yes' or a 'no'. If an empty line is entered, the default
+// value is returned.
+func (w *wizard) readDefaultYesNo(def bool) bool {
+ for {
+ fmt.Printf("> ")
+ text, err := w.in.ReadString('\n')
+ if err != nil {
+ log.Crit("Failed to read user input", "err", err)
+ }
+ if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
+ return def
+ }
+ if text == "y" || text == "yes" {
+ return true
+ }
+ if text == "n" || text == "no" {
+ return false
+ }
+ log.Error("Invalid input, expected 'y', 'yes', 'n', 'no' or empty")
+ }
+}
+
+// readURL reads a single line from stdin, trimming if from spaces and trying to
+// interpret it as a URL (http, https or file).
+func (w *wizard) readURL() *url.URL {
+ for {
+ fmt.Printf("> ")
+ text, err := w.in.ReadString('\n')
+ if err != nil {
+ log.Crit("Failed to read user input", "err", err)
+ }
+ uri, err := url.Parse(strings.TrimSpace(text))
+ if err != nil {
+ log.Error("Invalid input, expected URL", "err", err)
+ continue
+ }
+ return uri
+ }
+}
+
// readInt reads a single line from stdin, trimming if from spaces, enforcing it
// to parse into an integer.
func (w *wizard) readInt() int {
diff --git a/cmd/puppeth/wizard_dashboard.go b/cmd/puppeth/wizard_dashboard.go
index 1a01631ff..8a8370845 100644
--- a/cmd/puppeth/wizard_dashboard.go
+++ b/cmd/puppeth/wizard_dashboard.go
@@ -137,14 +137,14 @@ func (w *wizard) deployDashboard() {
if w.conf.ethstats != "" {
fmt.Println()
fmt.Println("Include ethstats secret on dashboard (y/n)? (default = yes)")
- infos.trusted = w.readDefaultString("y") == "y"
+ infos.trusted = w.readDefaultYesNo(true)
}
// Try to deploy the dashboard container on the host
nocache := false
if existed {
fmt.Println()
fmt.Printf("Should the dashboard be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployDashboard(client, w.network, &w.conf, infos, nocache); err != nil {
log.Error("Failed to deploy dashboard container", "err", err)
diff --git a/cmd/puppeth/wizard_ethstats.go b/cmd/puppeth/wizard_ethstats.go
index fb2529c26..58ff3efbe 100644
--- a/cmd/puppeth/wizard_ethstats.go
+++ b/cmd/puppeth/wizard_ethstats.go
@@ -67,11 +67,11 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
// The user might want to clear the entire list, although generally probably not
fmt.Println()
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
- if w.readDefaultString("n") != "n" {
+ if w.readDefaultYesNo(false) {
infos.banned = nil
}
// Offer the user to explicitly add/remove certain IP addresses
@@ -106,7 +106,7 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Should the ethstats be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
trusted := make([]string, 0, len(w.servers))
for _, client := range w.servers {
diff --git a/cmd/puppeth/wizard_explorer.go b/cmd/puppeth/wizard_explorer.go
index 413511c1c..a128fb9fb 100644
--- a/cmd/puppeth/wizard_explorer.go
+++ b/cmd/puppeth/wizard_explorer.go
@@ -100,7 +100,7 @@ func (w *wizard) deployExplorer() {
if existed {
fmt.Println()
fmt.Printf("Should the explorer be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployExplorer(client, w.network, chain, infos, nocache); err != nil {
log.Error("Failed to deploy explorer container", "err", err)
diff --git a/cmd/puppeth/wizard_faucet.go b/cmd/puppeth/wizard_faucet.go
index 6f0840894..9068c1d30 100644
--- a/cmd/puppeth/wizard_faucet.go
+++ b/cmd/puppeth/wizard_faucet.go
@@ -81,7 +81,7 @@ func (w *wizard) deployFaucet() {
if infos.captchaToken != "" {
fmt.Println()
fmt.Println("Reuse previous reCaptcha API authorization (y/n)? (default = yes)")
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
infos.captchaToken, infos.captchaSecret = "", ""
}
}
@@ -89,7 +89,7 @@ func (w *wizard) deployFaucet() {
// No previous authorization (or old one discarded)
fmt.Println()
fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)")
- if w.readDefaultString("n") == "n" {
+ if !w.readDefaultYesNo(false) {
log.Warn("Users will be able to requests funds via automated scripts")
} else {
// Captcha protection explicitly requested, read the site and secret keys
@@ -132,7 +132,7 @@ func (w *wizard) deployFaucet() {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) funding account (y/n)? (default = yes)\n", key.Address.Hex())
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
infos.node.keyJSON, infos.node.keyPass = "", ""
}
}
@@ -166,7 +166,7 @@ func (w *wizard) deployFaucet() {
if existed {
fmt.Println()
fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployFaucet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy faucet container", "err", err)
diff --git a/cmd/puppeth/wizard_genesis.go b/cmd/puppeth/wizard_genesis.go
index 6c4cd571f..95da5bd4f 100644
--- a/cmd/puppeth/wizard_genesis.go
+++ b/cmd/puppeth/wizard_genesis.go
@@ -20,9 +20,13 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "io"
"io/ioutil"
"math/big"
"math/rand"
+ "net/http"
+ "os"
+ "path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -40,11 +44,12 @@ func (w *wizard) makeGenesis() {
Difficulty: big.NewInt(524288),
Alloc: make(core.GenesisAlloc),
Config: &params.ChainConfig{
- HomesteadBlock: big.NewInt(1),
- EIP150Block: big.NewInt(2),
- EIP155Block: big.NewInt(3),
- EIP158Block: big.NewInt(3),
- ByzantiumBlock: big.NewInt(4),
+ HomesteadBlock: big.NewInt(1),
+ EIP150Block: big.NewInt(2),
+ EIP155Block: big.NewInt(3),
+ EIP158Block: big.NewInt(3),
+ ByzantiumBlock: big.NewInt(4),
+ ConstantinopleBlock: big.NewInt(5),
},
}
// Figure out which consensus engine to choose
@@ -114,9 +119,13 @@ func (w *wizard) makeGenesis() {
}
break
}
- // Add a batch of precompile balances to avoid them getting deleted
- for i := int64(0); i < 256; i++ {
- genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
+ fmt.Println()
+ fmt.Println("Should the precompile-addresses (0x1 .. 0xff) be pre-funded with 1 wei? (advisable yes)")
+ if w.readDefaultYesNo(true) {
+ // Add a batch of precompile balances to avoid them getting deleted
+ for i := int64(0); i < 256; i++ {
+ genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
+ }
}
// Query the user for some custom extras
fmt.Println()
@@ -130,53 +139,130 @@ func (w *wizard) makeGenesis() {
w.conf.flush()
}
+// importGenesis imports a Geth genesis spec into puppeth.
+func (w *wizard) importGenesis() {
+ // Request the genesis JSON spec URL from the user
+ fmt.Println()
+ fmt.Println("Where's the genesis file? (local file or http/https url)")
+ url := w.readURL()
+
+ // Convert the various allowed URLs to a reader stream
+ var reader io.Reader
+
+ switch url.Scheme {
+ case "http", "https":
+ // Remote web URL, retrieve it via an HTTP client
+ res, err := http.Get(url.String())
+ if err != nil {
+ log.Error("Failed to retrieve remote genesis", "err", err)
+ return
+ }
+ defer res.Body.Close()
+ reader = res.Body
+
+ case "":
+ // Schemaless URL, interpret as a local file
+ file, err := os.Open(url.String())
+ if err != nil {
+ log.Error("Failed to open local genesis", "err", err)
+ return
+ }
+ defer file.Close()
+ reader = file
+
+ default:
+ log.Error("Unsupported genesis URL scheme", "scheme", url.Scheme)
+ return
+ }
+ // Parse the genesis file and inject it successful
+ var genesis core.Genesis
+ if err := json.NewDecoder(reader).Decode(&genesis); err != nil {
+ log.Error("Invalid genesis spec: %v", err)
+ return
+ }
+ log.Info("Imported genesis block")
+
+ w.conf.Genesis = &genesis
+ w.conf.flush()
+}
+
// manageGenesis permits the modification of chain configuration parameters in
// a genesis config and the export of the entire genesis spec.
func (w *wizard) manageGenesis() {
// Figure out whether to modify or export the genesis
fmt.Println()
fmt.Println(" 1. Modify existing fork rules")
- fmt.Println(" 2. Export genesis configuration")
+ fmt.Println(" 2. Export genesis configurations")
fmt.Println(" 3. Remove genesis configuration")
choice := w.read()
- switch {
- case choice == "1":
+ switch choice {
+ case "1":
// Fork rule updating requested, iterate over each fork
fmt.Println()
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.Genesis.Config.HomesteadBlock)
w.conf.Genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.Genesis.Config.HomesteadBlock)
fmt.Println()
- fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
+ fmt.Printf("Which block should EIP150 (Tangerine Whistle) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
w.conf.Genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP150Block)
fmt.Println()
- fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
+ fmt.Printf("Which block should EIP155 (Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
w.conf.Genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP155Block)
fmt.Println()
- fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
+ fmt.Printf("Which block should EIP158/161 (also Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
w.conf.Genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP158Block)
fmt.Println()
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.Genesis.Config.ByzantiumBlock)
w.conf.Genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ByzantiumBlock)
+ fmt.Println()
+ fmt.Printf("Which block should Constantinople come into effect? (default = %v)\n", w.conf.Genesis.Config.ConstantinopleBlock)
+ w.conf.Genesis.Config.ConstantinopleBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ConstantinopleBlock)
+
out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ")
fmt.Printf("Chain configuration updated:\n\n%s\n", out)
- case choice == "2":
+ case "2":
// Save whatever genesis configuration we currently have
fmt.Println()
- fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
+ fmt.Printf("Which folder to save the genesis specs into? (default = current)\n")
+ fmt.Printf(" Will create %s.json, %s-aleth.json, %s-harmony.json, %s-parity.json\n", w.network, w.network, w.network, w.network)
+
+ folder := w.readDefaultString(".")
+ if err := os.MkdirAll(folder, 0755); err != nil {
+ log.Error("Failed to create spec folder", "folder", folder, "err", err)
+ return
+ }
out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
- if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
+
+ // Export the native genesis spec used by puppeth and Geth
+ gethJson := filepath.Join(folder, fmt.Sprintf("%s.json", w.network))
+ if err := ioutil.WriteFile((gethJson), out, 0644); err != nil {
log.Error("Failed to save genesis file", "err", err)
+ return
}
- log.Info("Exported existing genesis block")
+ log.Info("Saved native genesis chain spec", "path", gethJson)
- case choice == "3":
+ // Export the genesis spec used by Aleth (formerly C++ Ethereum)
+ if spec, err := newAlethGenesisSpec(w.network, w.conf.Genesis); err != nil {
+ log.Error("Failed to create Aleth chain spec", "err", err)
+ } else {
+ saveGenesis(folder, w.network, "aleth", spec)
+ }
+ // Export the genesis spec used by Parity
+ if spec, err := newParityChainSpec(w.network, w.conf.Genesis, []string{}); err != nil {
+ log.Error("Failed to create Parity chain spec", "err", err)
+ } else {
+ saveGenesis(folder, w.network, "parity", spec)
+ }
+ // Export the genesis spec used by Harmony (formerly EthereumJ
+ saveGenesis(folder, w.network, "harmony", w.conf.Genesis)
+
+ case "3":
// Make sure we don't have any services running
if len(w.conf.servers()) > 0 {
log.Error("Genesis reset requires all services and servers torn down")
@@ -186,8 +272,20 @@ func (w *wizard) manageGenesis() {
w.conf.Genesis = nil
w.conf.flush()
-
default:
log.Error("That's not something I can do")
+ return
+ }
+}
+
+// saveGenesis JSON encodes an arbitrary genesis spec into a pre-defined file.
+func saveGenesis(folder, network, client string, spec interface{}) {
+ path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))
+
+ out, _ := json.Marshal(spec)
+ if err := ioutil.WriteFile(path, out, 0644); err != nil {
+ log.Error("Failed to save genesis file", "client", client, "err", err)
+ return
}
+ log.Info("Saved genesis chain spec", "client", client, "path", path)
}
diff --git a/cmd/puppeth/wizard_intro.go b/cmd/puppeth/wizard_intro.go
index 60aa0f7ff..75fb04b76 100644
--- a/cmd/puppeth/wizard_intro.go
+++ b/cmd/puppeth/wizard_intro.go
@@ -61,14 +61,14 @@ func (w *wizard) run() {
// Make sure we have a good network name to work with fmt.Println()
// Docker accepts hyphens in image names, but doesn't like it for container names
if w.network == "" {
- fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
+ fmt.Println("Please specify a network name to administer (no spaces, hyphens or capital letters please)")
for {
w.network = w.readString()
- if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
+ if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") && strings.ToLower(w.network) == w.network {
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
break
}
- log.Error("I also like to live dangerously, still no spaces or hyphens")
+ log.Error("I also like to live dangerously, still no spaces, hyphens or capital letters")
}
}
log.Info("Administering Ethereum network", "name", w.network)
@@ -131,7 +131,20 @@ func (w *wizard) run() {
case choice == "2":
if w.conf.Genesis == nil {
- w.makeGenesis()
+ fmt.Println()
+ fmt.Println("What would you like to do? (default = create)")
+ fmt.Println(" 1. Create new genesis from scratch")
+ fmt.Println(" 2. Import already existing genesis")
+
+ choice := w.read()
+ switch {
+ case choice == "" || choice == "1":
+ w.makeGenesis()
+ case choice == "2":
+ w.importGenesis()
+ default:
+ log.Error("That's not something I can do")
+ }
} else {
w.manageGenesis()
}
@@ -149,7 +162,6 @@ func (w *wizard) run() {
} else {
w.manageComponents()
}
-
default:
log.Error("That's not something I can do")
}
diff --git a/cmd/puppeth/wizard_nginx.go b/cmd/puppeth/wizard_nginx.go
index 4eeae93a0..8397b7fd5 100644
--- a/cmd/puppeth/wizard_nginx.go
+++ b/cmd/puppeth/wizard_nginx.go
@@ -41,12 +41,12 @@ func (w *wizard) ensureVirtualHost(client *sshClient, port int, def string) (str
// Reverse proxy is not running, offer to deploy a new one
fmt.Println()
fmt.Println("Allow sharing the port with other services (y/n)? (default = yes)")
- if w.readDefaultString("y") == "y" {
+ if w.readDefaultYesNo(true) {
nocache := false
if proxy != nil {
fmt.Println()
fmt.Printf("Should the reverse-proxy be rebuilt from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployNginx(client, w.network, port, nocache); err != nil {
log.Error("Failed to deploy reverse-proxy", "err", err)
diff --git a/cmd/puppeth/wizard_node.go b/cmd/puppeth/wizard_node.go
index 49b10a023..e37297f6d 100644
--- a/cmd/puppeth/wizard_node.go
+++ b/cmd/puppeth/wizard_node.go
@@ -126,7 +126,7 @@ func (w *wizard) deployNode(boot bool) {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) signing account (y/n)? (default = yes)\n", key.Address.Hex())
- if w.readDefaultString("y") != "y" {
+ if !w.readDefaultYesNo(true) {
infos.keyJSON, infos.keyPass = "", ""
}
}
@@ -165,7 +165,7 @@ func (w *wizard) deployNode(boot bool) {
if existed {
fmt.Println()
fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployNode(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy Ethereum node container", "err", err)
diff --git a/cmd/puppeth/wizard_wallet.go b/cmd/puppeth/wizard_wallet.go
index 7624d11e2..ca1ea5bd2 100644
--- a/cmd/puppeth/wizard_wallet.go
+++ b/cmd/puppeth/wizard_wallet.go
@@ -96,7 +96,7 @@ func (w *wizard) deployWallet() {
if existed {
fmt.Println()
fmt.Printf("Should the wallet be built from scratch (y/n)? (default = no)\n")
- nocache = w.readDefaultString("n") != "n"
+ nocache = w.readDefaultYesNo(false)
}
if out, err := deployWallet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy wallet container", "err", err)
diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go
index 02198f878..18be316e5 100644
--- a/cmd/swarm/config_test.go
+++ b/cmd/swarm/config_test.go
@@ -26,14 +26,14 @@ import (
"testing"
"time"
+ "github.com/docker/docker/pkg/reexec"
+ "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm"
"github.com/ethereum/go-ethereum/swarm/api"
-
- "github.com/docker/docker/pkg/reexec"
)
-func TestDumpConfig(t *testing.T) {
+func TestConfigDump(t *testing.T) {
swarm := runSwarm(t, "dumpconfig")
defaultConf := api.NewConfig()
out, err := tomlSettings.Marshal(&defaultConf)
@@ -91,8 +91,8 @@ func TestConfigCmdLineOverrides(t *testing.T) {
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
- "--datadir", dir,
- "--ipcpath", conf.IPCPath,
+ fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -189,9 +189,9 @@ func TestConfigFileOverrides(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
- "--ens-api", "",
- "--ipcpath", conf.IPCPath,
- "--datadir", dir,
+ fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
+ fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -407,9 +407,9 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
- "--ens-api", "",
- "--datadir", dir,
- "--ipcpath", conf.IPCPath,
+ fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
+ fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -466,7 +466,7 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
node.Shutdown()
}
-func TestValidateConfig(t *testing.T) {
+func TestConfigValidate(t *testing.T) {
for _, c := range []struct {
cfg *api.Config
err string
diff --git a/cmd/swarm/feeds.go b/cmd/swarm/feeds.go
index f26a8cc7d..6cd971a92 100644
--- a/cmd/swarm/feeds.go
+++ b/cmd/swarm/feeds.go
@@ -169,7 +169,6 @@ func feedUpdate(ctx *cli.Context) {
query = new(feed.Query)
query.User = signer.Address()
query.Topic = getTopic(ctx)
-
}
// Retrieve a feed update request
@@ -178,6 +177,11 @@ func feedUpdate(ctx *cli.Context) {
utils.Fatalf("Error retrieving feed status: %s", err.Error())
}
+ // Check that the provided signer matches the request to sign
+ if updateRequest.User != signer.Address() {
+ utils.Fatalf("Signer address does not match the update request")
+ }
+
// set the new data
updateRequest.SetData(data)
diff --git a/cmd/swarm/feeds_test.go b/cmd/swarm/feeds_test.go
index a0cedf0d3..4c40f62a8 100644
--- a/cmd/swarm/feeds_test.go
+++ b/cmd/swarm/feeds_test.go
@@ -19,7 +19,6 @@ package main
import (
"bytes"
"encoding/json"
- "fmt"
"io/ioutil"
"os"
"testing"
@@ -69,7 +68,7 @@ func TestCLIFeedUpdate(t *testing.T) {
hexData}
// create an update and expect an exit without errors
- log.Info(fmt.Sprintf("updating a feed with 'swarm feed update'"))
+ log.Info("updating a feed with 'swarm feed update'")
cmd := runSwarm(t, flags...)
cmd.ExpectExit()
@@ -116,7 +115,7 @@ func TestCLIFeedUpdate(t *testing.T) {
"--user", address.Hex(),
}
- log.Info(fmt.Sprintf("getting feed info with 'swarm feed info'"))
+ log.Info("getting feed info with 'swarm feed info'")
cmd = runSwarm(t, flags...)
_, matches := cmd.ExpectRegexp(`.*`) // regex hack to extract stdout
cmd.ExpectExit()
@@ -141,9 +140,9 @@ func TestCLIFeedUpdate(t *testing.T) {
"--topic", topic.Hex(),
}
- log.Info(fmt.Sprintf("Publishing manifest with 'swarm feed create'"))
+ log.Info("Publishing manifest with 'swarm feed create'")
cmd = runSwarm(t, flags...)
- _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`) // regex hack to extract stdout
+ _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
cmd.ExpectExit()
manifestAddress := matches[0] // read the received feed manifest
@@ -162,4 +161,36 @@ func TestCLIFeedUpdate(t *testing.T) {
if !bytes.Equal(data, retrieved) {
t.Fatalf("Received %s, expected %s", retrieved, data)
}
+
+ // test publishing a manifest for a different user
+ flags = []string{
+ "--bzzapi", srv.URL,
+ "feed", "create",
+ "--topic", topic.Hex(),
+ "--user", "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // different user
+ }
+
+ log.Info("Publishing manifest with 'swarm feed create' for a different user")
+ cmd = runSwarm(t, flags...)
+ _, matches = cmd.ExpectRegexp(`[a-f\d]{64}`)
+ cmd.ExpectExit()
+
+ manifestAddress = matches[0] // read the received feed manifest
+
+ // now let's try to update that user's manifest which we don't have the private key for
+ flags = []string{
+ "--bzzapi", srv.URL,
+ "--bzzaccount", pkFileName,
+ "feed", "update",
+ "--manifest", manifestAddress,
+ hexData}
+
+ // create an update and expect an error given there is a user mismatch
+ log.Info("updating a feed with 'swarm feed update'")
+ cmd = runSwarm(t, flags...)
+ cmd.ExpectRegexp("Fatal:.*") // best way so far to detect a failure.
+ cmd.ExpectExit()
+ if cmd.ExitStatus() == 0 {
+ t.Fatal("Expected nonzero exit code when updating a manifest with the wrong user. Got 0.")
+ }
}
diff --git a/cmd/swarm/fs.go b/cmd/swarm/fs.go
index b970b2e8c..edeeddff8 100644
--- a/cmd/swarm/fs.go
+++ b/cmd/swarm/fs.go
@@ -24,7 +24,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
- "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/fuse"
"gopkg.in/urfave/cli.v1"
@@ -41,27 +41,24 @@ var fsCommand = cli.Command{
Action: mount,
CustomHelpTemplate: helpTemplate,
Name: "mount",
- Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "mount a swarm hash to a mount point",
- ArgsUsage: "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
+ ArgsUsage: "swarm fs mount <manifest hash> <mount point>",
Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: unmount,
CustomHelpTemplate: helpTemplate,
Name: "unmount",
- Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "unmount a swarmfs mount",
- ArgsUsage: "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
+ ArgsUsage: "swarm fs unmount <mount point>",
Description: "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: listMounts,
CustomHelpTemplate: helpTemplate,
Name: "list",
- Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "list swarmfs mounts",
- ArgsUsage: "swarm fs list --ipcpath <path to bzzd.ipc>",
+ ArgsUsage: "swarm fs list",
Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
},
@@ -70,7 +67,7 @@ var fsCommand = cli.Command{
func mount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 2 {
- utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
+ utils.Fatalf("Usage: swarm fs mount <manifestHash> <file name>")
}
client, err := dialRPC(cliContext)
@@ -97,7 +94,7 @@ func unmount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 1 {
- utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
+ utils.Fatalf("Usage: swarm fs unmount <mount path>")
}
client, err := dialRPC(cliContext)
if err != nil {
@@ -145,20 +142,21 @@ func listMounts(cliContext *cli.Context) {
}
func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
- var endpoint string
+ endpoint := getIPCEndpoint(ctx)
+ log.Info("IPC endpoint", "path", endpoint)
+ return rpc.Dial(endpoint)
+}
- if ctx.IsSet(utils.IPCPathFlag.Name) {
- endpoint = ctx.String(utils.IPCPathFlag.Name)
- } else {
- utils.Fatalf("swarm ipc endpoint not specified")
- }
+func getIPCEndpoint(ctx *cli.Context) string {
+ cfg := defaultNodeConfig
+ utils.SetNodeConfig(ctx, &cfg)
+
+ endpoint := cfg.IPCEndpoint()
- if endpoint == "" {
- endpoint = node.DefaultIPCEndpoint(clientIdentifier)
- } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
+ if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
// Backwards compatibility with geth < 1.5 which required
// these prefixes.
endpoint = endpoint[4:]
}
- return rpc.Dial(endpoint)
+ return endpoint
}
diff --git a/cmd/swarm/fs_test.go b/cmd/swarm/fs_test.go
index ac4223b66..5f58d6c0d 100644
--- a/cmd/swarm/fs_test.go
+++ b/cmd/swarm/fs_test.go
@@ -20,6 +20,7 @@ package main
import (
"bytes"
+ "fmt"
"io"
"io/ioutil"
"os"
@@ -28,6 +29,7 @@ import (
"testing"
"time"
+ "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
)
@@ -36,6 +38,26 @@ type testFile struct {
content string
}
+// TestCLISwarmFsDefaultIPCPath tests if the most basic fs command, i.e., list
+// can find and correctly connect to a running Swarm node on the default
+// IPCPath.
+func TestCLISwarmFsDefaultIPCPath(t *testing.T) {
+ cluster := newTestCluster(t, 1)
+ defer cluster.Shutdown()
+
+ handlingNode := cluster.Nodes[0]
+ list := runSwarm(t, []string{
+ "--datadir", handlingNode.Dir,
+ "fs",
+ "list",
+ }...)
+
+ list.WaitExit()
+ if list.Err != nil {
+ t.Fatal(list.Err)
+ }
+}
+
// TestCLISwarmFs is a high-level test of swarmfs
//
// This test fails on travis for macOS as this executable exits with code 1
@@ -59,9 +81,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
mount := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"mount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
mhash,
mountPoint,
}...)
@@ -101,9 +123,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
unmount := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"unmount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
mountPoint,
}...)
_, matches := unmount.ExpectRegexp(hashRegexp)
@@ -136,9 +158,9 @@ func TestCLISwarmFs(t *testing.T) {
//remount, check files
newMount := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"mount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
hash, // the latest hash
secondMountPoint,
}...)
@@ -172,9 +194,9 @@ func TestCLISwarmFs(t *testing.T) {
log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
unmountSec := runSwarm(t, []string{
+ fmt.Sprintf("--%s", utils.IPCPathFlag.Name), filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
"fs",
"unmount",
- "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
secondMountPoint,
}...)
diff --git a/cmd/swarm/swarm-smoke/feed_upload_and_sync.go b/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
index 1371d6654..7ec152826 100644
--- a/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
+++ b/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
@@ -16,7 +16,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
colorable "github.com/mattn/go-colorable"
"github.com/pborman/uuid"
@@ -34,9 +33,9 @@ func cliFeedUploadAndSync(c *cli.Context) error {
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
- generateEndpoints(scheme, cluster, from, to)
+ generateEndpoints(scheme, cluster, appName, from, to)
- log.Info("generating and uploading MRUs to " + endpoints[0] + " and syncing")
+ log.Info("generating and uploading feeds to " + endpoints[0] + " and syncing")
// create a random private key to sign updates with and derive the address
pkFile, err := ioutil.TempFile("", "swarm-feed-smoke-test")
@@ -218,8 +217,7 @@ func cliFeedUploadAndSync(c *cli.Context) error {
if err != nil {
return err
}
- multihashHex := hexutil.Encode(multihash.ToMultihash(hashBytes))
-
+ multihashHex := hexutil.Encode(hashBytes)
fileHash, err := digest(f)
if err != nil {
return err
diff --git a/cmd/swarm/swarm-smoke/main.go b/cmd/swarm/swarm-smoke/main.go
index 4ff17fd5b..845998dc1 100644
--- a/cmd/swarm/swarm-smoke/main.go
+++ b/cmd/swarm/swarm-smoke/main.go
@@ -29,6 +29,7 @@ var (
endpoints []string
includeLocalhost bool
cluster string
+ appName string
scheme string
filesize int
from int
@@ -49,6 +50,12 @@ func main() {
Usage: "cluster to point to (prod or a given namespace)",
Destination: &cluster,
},
+ cli.StringFlag{
+ Name: "app",
+ Value: "swarm",
+ Usage: "application to point to (swarm or swarm-private)",
+ Destination: &appName,
+ },
cli.IntFlag{
Name: "cluster-from",
Value: 8501,
@@ -107,5 +114,6 @@ func main() {
err := app.Run(os.Args)
if err != nil {
log.Error(err.Error())
+ os.Exit(1)
}
}
diff --git a/cmd/swarm/swarm-smoke/upload_and_sync.go b/cmd/swarm/swarm-smoke/upload_and_sync.go
index 7872421d3..3843457dc 100644
--- a/cmd/swarm/swarm-smoke/upload_and_sync.go
+++ b/cmd/swarm/swarm-smoke/upload_and_sync.go
@@ -33,20 +33,19 @@ import (
"time"
"github.com/ethereum/go-ethereum/log"
- colorable "github.com/mattn/go-colorable"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
-func generateEndpoints(scheme string, cluster string, from int, to int) {
+func generateEndpoints(scheme string, cluster string, app string, from int, to int) {
if cluster == "prod" {
for port := from; port <= to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://%v.swarm-gateways.net", scheme, port))
}
} else {
for port := from; port <= to; port++ {
- endpoints = append(endpoints, fmt.Sprintf("%s://swarm-%v-%s.stg.swarm-gateways.net", scheme, port, cluster))
+ endpoints = append(endpoints, fmt.Sprintf("%s://%s-%v-%s.stg.swarm-gateways.net", scheme, app, port, cluster))
}
}
@@ -57,11 +56,11 @@ func generateEndpoints(scheme string, cluster string, from int, to int) {
func cliUploadAndSync(c *cli.Context) error {
log.PrintOrigins(true)
- log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+ log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
- defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
+ defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "kb", filesize) }(time.Now())
- generateEndpoints(scheme, cluster, from, to)
+ generateEndpoints(scheme, cluster, appName, from, to)
log.Info("uploading to " + endpoints[0] + " and syncing")
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index d7b698c7e..6a285fcb3 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -824,17 +824,12 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
// makeDatabaseHandles raises out the number of allowed file handles per process
// for Geth and returns half of the allowance to assign to the database.
func makeDatabaseHandles() int {
- limit, err := fdlimit.Current()
+ limit, err := fdlimit.Maximum()
if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
}
- if limit < 2048 {
- if err := fdlimit.Raise(2048); err != nil {
- Fatalf("Failed to raise file descriptor allowance: %v", err)
- }
- }
- if limit > 2048 { // cap database file descriptors even if more is available
- limit = 2048
+ if err := fdlimit.Raise(uint64(limit)); err != nil {
+ Fatalf("Failed to raise file descriptor allowance: %v", err)
}
return limit / 2 // Leave half for networking and other stuff
}
@@ -978,16 +973,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
setWS(ctx, cfg)
setNodeUserIdent(ctx, cfg)
- switch {
- case ctx.GlobalIsSet(DataDirFlag.Name):
- cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
- case ctx.GlobalBool(DeveloperFlag.Name):
- cfg.DataDir = "" // unless explicitly requested, use memory databases
- case ctx.GlobalBool(TestnetFlag.Name):
- cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
- case ctx.GlobalBool(RinkebyFlag.Name):
- cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
- }
+ setDataDir(ctx, cfg)
if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name)
@@ -1000,6 +986,19 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
}
}
+func setDataDir(ctx *cli.Context, cfg *node.Config) {
+ switch {
+ case ctx.GlobalIsSet(DataDirFlag.Name):
+ cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
+ case ctx.GlobalBool(DeveloperFlag.Name):
+ cfg.DataDir = "" // unless explicitly requested, use memory databases
+ case ctx.GlobalBool(TestnetFlag.Name):
+ cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
+ case ctx.GlobalBool(RinkebyFlag.Name):
+ cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
+ }
+}
+
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
if ctx.GlobalIsSet(GpoBlocksFlag.Name) {
cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name)
diff --git a/core/blockchain.go b/core/blockchain.go
index d173b2de2..c29063a73 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -47,7 +47,10 @@ import (
)
var (
- blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
+ blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
+ blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
ErrNoGenesis = errors.New("Genesis not found in chain")
)
@@ -207,6 +210,11 @@ func (bc *BlockChain) getProcInterrupt() bool {
return atomic.LoadInt32(&bc.procInterrupt) == 1
}
+// GetVMConfig returns the block chain VM config.
+func (bc *BlockChain) GetVMConfig() *vm.Config {
+ return &bc.vmConfig
+}
+
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
@@ -445,7 +453,11 @@ func (bc *BlockChain) repair(head **types.Block) error {
return nil
}
// Otherwise rewind one block and recheck state availability there
- (*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+ block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
+ if block == nil {
+ return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
+ }
+ (*head) = block
}
}
@@ -1036,6 +1048,18 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
return status, nil
}
+// addFutureBlock checks if the block is within the max allowed window to get
+// accepted for future processing, and returns an error if the block is too far
+// ahead and was not added.
+func (bc *BlockChain) addFutureBlock(block *types.Block) error {
+ max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
+ if block.Time().Cmp(max) > 0 {
+ return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
+ }
+ bc.futureBlocks.Add(block.Hash(), block)
+ return nil
+}
+
// InsertChain attempts to insert the given batch of blocks in to the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
@@ -1043,18 +1067,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
- n, events, logs, err := bc.insertChain(chain)
- bc.PostChainEvents(events, logs)
- return n, err
-}
-
-// insertChain will execute the actual chain insertion and event aggregation. The
-// only reason this method exists as a separate one is to make locking cleaner
-// with deferred statements.
-func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
// Sanity check that we have something meaningful to import
if len(chain) == 0 {
- return 0, nil, nil, nil
+ return 0, nil
}
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(chain); i++ {
@@ -1063,16 +1078,36 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
- return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
+ return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
}
}
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
- defer bc.wg.Done()
-
bc.chainmu.Lock()
- defer bc.chainmu.Unlock()
+ n, events, logs, err := bc.insertChain(chain, true)
+ bc.chainmu.Unlock()
+ bc.wg.Done()
+
+ bc.PostChainEvents(events, logs)
+ return n, err
+}
+
+// insertChain is the internal implementation of insertChain, which assumes that
+// 1) chains are contiguous, and 2) The chain mutex is held.
+//
+// This method is split out so that import batches that require re-injecting
+// historical blocks can do so without releasing the lock, which could lead to
+// racey behaviour. If a sidechain import is in progress, and the historic state
+// is imported, but then new canon-head is added before the actual sidechain
+// completes, then the historic state could be pruned again
+func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
+ // If the chain is terminating, don't even bother starting u
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ return 0, nil, nil, nil
+ }
+ // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
+ senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
@@ -1089,16 +1124,56 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
for i, block := range chain {
headers[i] = block.Header()
- seals[i] = true
+ seals[i] = verifySeals
}
abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
defer close(abort)
- // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
- senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
+ // Peek the error for the first block to decide the directing import logic
+ it := newInsertIterator(chain, results, bc.Validator())
- // Iterate over the blocks and insert when the verifier permits
- for i, block := range chain {
+ block, err := it.next()
+ switch {
+ // First block is pruned, insert as sidechain and reorg only if TD grows enough
+ case err == consensus.ErrPrunedAncestor:
+ return bc.insertSidechain(it)
+
+ // First block is future, shove it (and all children) to the future queue (unknown ancestor)
+ case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
+ for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ block, err = it.next()
+ }
+ stats.queued += it.processed()
+ stats.ignored += it.remaining()
+
+ // If there are any still remaining, mark as ignored
+ return it.index, events, coalescedLogs, err
+
+ // First block (and state) is known
+ // 1. We did a roll-back, and should now do a re-import
+ // 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
+ // from the canonical chain, which has not been verified.
+ case err == ErrKnownBlock:
+ // Skip all known blocks that behind us
+ current := bc.CurrentBlock().NumberU64()
+
+ for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
+ stats.ignored++
+ block, err = it.next()
+ }
+ // Falls through to the block import
+
+ // Some other error occurred, abort
+ case err != nil:
+ stats.ignored += len(it.chain)
+ bc.reportBlock(block, nil, err)
+ return it.index, events, coalescedLogs, err
+ }
+ // No validation errors for the first block (or chain prefix skipped)
+ for ; block != nil && err == nil; block, err = it.next() {
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
@@ -1107,115 +1182,53 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
- return i, events, coalescedLogs, ErrBlacklistedHash
+ return it.index, events, coalescedLogs, ErrBlacklistedHash
}
- // Wait for the block's verification to complete
- bstart := time.Now()
+ // Retrieve the parent block and it's state to execute on top
+ start := time.Now()
- err := <-results
- if err == nil {
- err = bc.Validator().ValidateBody(block)
- }
- switch {
- case err == ErrKnownBlock:
- // Block and state both already known. However if the current block is below
- // this number we did a rollback and we should reimport it nonetheless.
- if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
- stats.ignored++
- continue
- }
-
- case err == consensus.ErrFutureBlock:
- // Allow up to MaxFuture second in the future blocks. If this limit is exceeded
- // the chain is discarded and processed at a later time if given.
- max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
- if block.Time().Cmp(max) > 0 {
- return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
- }
- bc.futureBlocks.Add(block.Hash(), block)
- stats.queued++
- continue
-
- case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
- bc.futureBlocks.Add(block.Hash(), block)
- stats.queued++
- continue
-
- case err == consensus.ErrPrunedAncestor:
- // Block competing with the canonical chain, store in the db, but don't process
- // until the competitor TD goes above the canonical TD
- currentBlock := bc.CurrentBlock()
- localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
- externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
- if localTd.Cmp(externTd) > 0 {
- if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
- return i, events, coalescedLogs, err
- }
- continue
- }
- // Competitor chain beat canonical, gather all blocks from the common ancestor
- var winner []*types.Block
-
- parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
- for !bc.HasState(parent.Root()) {
- winner = append(winner, parent)
- parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
- }
- for j := 0; j < len(winner)/2; j++ {
- winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
- }
- // Import all the pruned blocks to make the state available
- bc.chainmu.Unlock()
- _, evs, logs, err := bc.insertChain(winner)
- bc.chainmu.Lock()
- events, coalescedLogs = evs, logs
-
- if err != nil {
- return i, events, coalescedLogs, err
- }
-
- case err != nil:
- bc.reportBlock(block, nil, err)
- return i, events, coalescedLogs, err
- }
- // Create a new statedb using the parent block and report an
- // error if it fails.
- var parent *types.Block
- if i == 0 {
+ parent := it.previous()
+ if parent == nil {
parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
- } else {
- parent = chain[i-1]
}
state, err := state.New(parent.Root(), bc.stateCache)
if err != nil {
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
// Process block using the parent state as reference point.
+ t0 := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
+ t1 := time.Now()
if err != nil {
bc.reportBlock(block, receipts, err)
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
// Validate the state using the default validator
- err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
- if err != nil {
+ if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
- proctime := time.Since(bstart)
+ t2 := time.Now()
+ proctime := time.Since(start)
// Write the block to the chain and get the status.
status, err := bc.WriteBlockWithState(block, receipts, state)
+ t3 := time.Now()
if err != nil {
- return i, events, coalescedLogs, err
+ return it.index, events, coalescedLogs, err
}
+ blockInsertTimer.UpdateSince(start)
+ blockExecutionTimer.Update(t1.Sub(t0))
+ blockValidationTimer.Update(t2.Sub(t1))
+ blockWriteTimer.Update(t3.Sub(t2))
switch status {
case CanonStatTy:
- log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
- "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
+ log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
+ "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
+ "elapsed", common.PrettyDuration(time.Since(start)),
+ "root", block.Root())
coalescedLogs = append(coalescedLogs, logs...)
- blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block
@@ -1223,78 +1236,153 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
bc.gcproc += proctime
case SideStatTy:
- log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
- common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
-
- blockInsertTimer.UpdateSince(bstart)
+ log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
+ "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
+ "root", block.Root())
events = append(events, ChainSideEvent{block})
}
+ blockInsertTimer.UpdateSince(start)
stats.processed++
stats.usedGas += usedGas
cache, _ := bc.stateCache.TrieDB().Size()
- stats.report(chain, i, cache)
+ stats.report(chain, it.index, cache)
}
+ // Any blocks remaining here? The only ones we care about are the future ones
+ if block != nil && err == consensus.ErrFutureBlock {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ block, err = it.next()
+
+ for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
+ if err := bc.addFutureBlock(block); err != nil {
+ return it.index, events, coalescedLogs, err
+ }
+ stats.queued++
+ }
+ }
+ stats.ignored += it.remaining()
+
// Append a single chain head event if we've progressed the chain
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
events = append(events, ChainHeadEvent{lastCanon})
}
- return 0, events, coalescedLogs, nil
+ return it.index, events, coalescedLogs, err
}
-// insertStats tracks and reports on block insertion.
-type insertStats struct {
- queued, processed, ignored int
- usedGas uint64
- lastIndex int
- startTime mclock.AbsTime
-}
-
-// statsReportLimit is the time limit during import and export after which we
-// always print out progress. This avoids the user wondering what's going on.
-const statsReportLimit = 8 * time.Second
-
-// report prints statistics if some number of blocks have been processed
-// or more than a few seconds have passed since the last message.
-func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
- // Fetch the timings for the batch
+// insertSidechain is called when an import batch hits upon a pruned ancestor
+// error, which happens when a sidechain with a sufficiently old fork-block is
+// found.
+//
+// The method writes all (header-and-body-valid) blocks to disk, then tries to
+// switch over to the new chain if the TD exceeded the current chain.
+func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) {
var (
- now = mclock.Now()
- elapsed = time.Duration(now) - time.Duration(st.startTime)
+ externTd *big.Int
+ current = bc.CurrentBlock().NumberU64()
)
- // If we're at the last block of the batch or report period reached, log
- if index == len(chain)-1 || elapsed >= statsReportLimit {
- var (
- end = chain[index]
- txs = countTransactions(chain[st.lastIndex : index+1])
- )
- context := []interface{}{
- "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
- "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
- "number", end.Number(), "hash", end.Hash(),
+ // The first sidechain block error is already verified to be ErrPrunedAncestor.
+ // Since we don't import them here, we expect ErrUnknownAncestor for the remaining
+ // ones. Any other errors means that the block is invalid, and should not be written
+ // to disk.
+ block, err := it.current(), consensus.ErrPrunedAncestor
+ for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
+ // Check the canonical state root for that number
+ if number := block.NumberU64(); current >= number {
+ if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() {
+ // This is most likely a shadow-state attack. When a fork is imported into the
+ // database, and it eventually reaches a block height which is not pruned, we
+ // just found that the state already exist! This means that the sidechain block
+ // refers to a state which already exists in our canon chain.
+ //
+ // If left unchecked, we would now proceed importing the blocks, without actually
+ // having verified the state of the previous blocks.
+ log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
+
+ // If someone legitimately side-mines blocks, they would still be imported as usual. However,
+ // we cannot risk writing unverified blocks to disk when they obviously target the pruning
+ // mechanism.
+ return it.index, nil, nil, errors.New("sidechain ghost-state attack")
+ }
}
- if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
- context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
+ if externTd == nil {
+ externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
}
- context = append(context, []interface{}{"cache", cache}...)
+ externTd = new(big.Int).Add(externTd, block.Difficulty())
- if st.queued > 0 {
- context = append(context, []interface{}{"queued", st.queued}...)
- }
- if st.ignored > 0 {
- context = append(context, []interface{}{"ignored", st.ignored}...)
+ if !bc.HasBlock(block.Hash(), block.NumberU64()) {
+ start := time.Now()
+ if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
+ return it.index, nil, nil, err
+ }
+ log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(),
+ "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
+ "root", block.Root())
}
- log.Info("Imported new chain segment", context...)
+ }
+ // At this point, we've written all sidechain blocks to database. Loop ended
+ // either on some other error or all were processed. If there was some other
+ // error, we can ignore the rest of those blocks.
+ //
+ // If the externTd was larger than our local TD, we now need to reimport the previous
+ // blocks to regenerate the required state
+ localTd := bc.GetTd(bc.CurrentBlock().Hash(), current)
+ if localTd.Cmp(externTd) > 0 {
+ log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd)
+ return it.index, nil, nil, err
+ }
+ // Gather all the sidechain hashes (full blocks may be memory heavy)
+ var (
+ hashes []common.Hash
+ numbers []uint64
+ )
+ parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64())
+ for parent != nil && !bc.HasState(parent.Root) {
+ hashes = append(hashes, parent.Hash())
+ numbers = append(numbers, parent.Number.Uint64())
- *st = insertStats{startTime: now, lastIndex: index + 1}
+ parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
-}
+ if parent == nil {
+ return it.index, nil, nil, errors.New("missing parent")
+ }
+ // Import all the pruned blocks to make the state available
+ var (
+ blocks []*types.Block
+ memory common.StorageSize
+ )
+ for i := len(hashes) - 1; i >= 0; i-- {
+ // Append the next block to our batch
+ block := bc.GetBlock(hashes[i], numbers[i])
-func countTransactions(chain []*types.Block) (c int) {
- for _, b := range chain {
- c += len(b.Transactions())
+ blocks = append(blocks, block)
+ memory += block.Size()
+
+ // If memory use grew too large, import and continue. Sadly we need to discard
+ // all raised events and logs from notifications since we're too heavy on the
+ // memory here.
+ if len(blocks) >= 2048 || memory > 64*1024*1024 {
+ log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
+ if _, _, _, err := bc.insertChain(blocks, false); err != nil {
+ return 0, nil, nil, err
+ }
+ blocks, memory = blocks[:0], 0
+
+ // If the chain is terminating, stop processing blocks
+ if atomic.LoadInt32(&bc.procInterrupt) == 1 {
+ log.Debug("Premature abort during blocks processing")
+ return 0, nil, nil, nil
+ }
+ }
+ }
+ if len(blocks) > 0 {
+ log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
+ return bc.insertChain(blocks, false)
}
- return c
+ return 0, nil, nil, nil
}
// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
@@ -1469,8 +1557,10 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e
bc.addBadBlock(block)
var receiptString string
- for _, receipt := range receipts {
- receiptString += fmt.Sprintf("\t%v\n", receipt)
+ for i, receipt := range receipts {
+ receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
+ i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
+ receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
}
log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go
new file mode 100644
index 000000000..70bea3544
--- /dev/null
+++ b/core/blockchain_insert.go
@@ -0,0 +1,143 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// insertStats tracks and reports on block insertion.
+type insertStats struct {
+ queued, processed, ignored int
+ usedGas uint64
+ lastIndex int
+ startTime mclock.AbsTime
+}
+
+// statsReportLimit is the time limit during import and export after which we
+// always print out progress. This avoids the user wondering what's going on.
+const statsReportLimit = 8 * time.Second
+
+// report prints statistics if some number of blocks have been processed
+// or more than a few seconds have passed since the last message.
+func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
+ // Fetch the timings for the batch
+ var (
+ now = mclock.Now()
+ elapsed = time.Duration(now) - time.Duration(st.startTime)
+ )
+ // If we're at the last block of the batch or report period reached, log
+ if index == len(chain)-1 || elapsed >= statsReportLimit {
+ // Count the number of transactions in this segment
+ var txs int
+ for _, block := range chain[st.lastIndex : index+1] {
+ txs += len(block.Transactions())
+ }
+ end := chain[index]
+
+ // Assemble the log context and send it to the logger
+ context := []interface{}{
+ "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
+ "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
+ "number", end.Number(), "hash", end.Hash(),
+ }
+ if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
+ context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
+ }
+ context = append(context, []interface{}{"cache", cache}...)
+
+ if st.queued > 0 {
+ context = append(context, []interface{}{"queued", st.queued}...)
+ }
+ if st.ignored > 0 {
+ context = append(context, []interface{}{"ignored", st.ignored}...)
+ }
+ log.Info("Imported new chain segment", context...)
+
+ // Bump the stats reported to the next section
+ *st = insertStats{startTime: now, lastIndex: index + 1}
+ }
+}
+
+// insertIterator is a helper to assist during chain import.
+type insertIterator struct {
+ chain types.Blocks
+ results <-chan error
+ index int
+ validator Validator
+}
+
+// newInsertIterator creates a new iterator based on the given blocks, which are
+// assumed to be a contiguous chain.
+func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator {
+ return &insertIterator{
+ chain: chain,
+ results: results,
+ index: -1,
+ validator: validator,
+ }
+}
+
+// next returns the next block in the iterator, along with any potential validation
+// error for that block. When the end is reached, it will return (nil, nil).
+func (it *insertIterator) next() (*types.Block, error) {
+ if it.index+1 >= len(it.chain) {
+ it.index = len(it.chain)
+ return nil, nil
+ }
+ it.index++
+ if err := <-it.results; err != nil {
+ return it.chain[it.index], err
+ }
+ return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index])
+}
+
+// current returns the current block that's being processed.
+func (it *insertIterator) current() *types.Block {
+ if it.index < 0 || it.index+1 >= len(it.chain) {
+ return nil
+ }
+ return it.chain[it.index]
+}
+
+// previous returns the previous block was being processed, or nil
+func (it *insertIterator) previous() *types.Block {
+ if it.index < 1 {
+ return nil
+ }
+ return it.chain[it.index-1]
+}
+
+// first returns the first block in the it.
+func (it *insertIterator) first() *types.Block {
+ return it.chain[0]
+}
+
+// remaining returns the number of remaining blocks.
+func (it *insertIterator) remaining() int {
+ return len(it.chain) - it.index
+}
+
+// processed returns the number of processed blocks.
+func (it *insertIterator) processed() int {
+ return it.index + 1
+}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index aef810050..5ab29e205 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -579,11 +579,11 @@ func testInsertNonceError(t *testing.T, full bool) {
blockchain.hc.engine = blockchain.engine
failRes, err = blockchain.InsertHeaderChain(headers, 1)
}
- // Check that the returned error indicates the failure.
+ // Check that the returned error indicates the failure
if failRes != failAt {
- t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
+ t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt)
}
- // Check that all no blocks after the failing block have been inserted.
+ // Check that all blocks after the failing block have been inserted
for j := 0; j < i-failAt; j++ {
if full {
if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
@@ -1345,7 +1345,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
t.Fatalf("failed to insert shared chain: %v", err)
}
if _, err := chain.InsertChain(original); err != nil {
- t.Fatalf("failed to insert shared chain: %v", err)
+ t.Fatalf("failed to insert original chain: %v", err)
}
// Ensure that the state associated with the forking point is pruned away
if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil {
diff --git a/core/tx_pool.go b/core/tx_pool.go
index f6da5da2a..fc35d1f24 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -825,7 +825,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) []error {
// addTxsLocked attempts to queue a batch of transactions if they are valid,
// whilst assuming the transaction pool lock is already held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error {
- // Add the batch of transaction, tracking the accepted ones
+ // Add the batch of transactions, tracking the accepted ones
dirty := make(map[common.Address]struct{})
errs := make([]error, len(txs))
diff --git a/core/types/block.go b/core/types/block.go
index 8a21bba1e..9d11f60d8 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -81,8 +81,8 @@ type Header struct {
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Time *big.Int `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData" gencodec:"required"`
- MixDigest common.Hash `json:"mixHash" gencodec:"required"`
- Nonce BlockNonce `json:"nonce" gencodec:"required"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce BlockNonce `json:"nonce"`
}
// field type overrides for gencodec
diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go
index 1b92cd9cf..59a1c9c43 100644
--- a/core/types/gen_header_json.go
+++ b/core/types/gen_header_json.go
@@ -13,6 +13,7 @@ import (
var _ = (*headerMarshaling)(nil)
+// MarshalJSON marshals as JSON.
func (h Header) MarshalJSON() ([]byte, error) {
type Header struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
@@ -28,8 +29,8 @@ func (h Header) MarshalJSON() ([]byte, error) {
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
- MixDigest common.Hash `json:"mixHash" gencodec:"required"`
- Nonce BlockNonce `json:"nonce" gencodec:"required"`
+ MixDigest common.Hash `json:"mixHash"`
+ Nonce BlockNonce `json:"nonce"`
Hash common.Hash `json:"hash"`
}
var enc Header
@@ -52,6 +53,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
return json.Marshal(&enc)
}
+// UnmarshalJSON unmarshals from JSON.
func (h *Header) UnmarshalJSON(input []byte) error {
type Header struct {
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
@@ -67,8 +69,8 @@ func (h *Header) UnmarshalJSON(input []byte) error {
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
- MixDigest *common.Hash `json:"mixHash" gencodec:"required"`
- Nonce *BlockNonce `json:"nonce" gencodec:"required"`
+ MixDigest *common.Hash `json:"mixHash"`
+ Nonce *BlockNonce `json:"nonce"`
}
var dec Header
if err := json.Unmarshal(input, &dec); err != nil {
@@ -126,13 +128,11 @@ func (h *Header) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'extraData' for Header")
}
h.Extra = *dec.Extra
- if dec.MixDigest == nil {
- return errors.New("missing required field 'mixHash' for Header")
+ if dec.MixDigest != nil {
+ h.MixDigest = *dec.MixDigest
}
- h.MixDigest = *dec.MixDigest
- if dec.Nonce == nil {
- return errors.New("missing required field 'nonce' for Header")
+ if dec.Nonce != nil {
+ h.Nonce = *dec.Nonce
}
- h.Nonce = *dec.Nonce
return nil
}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 968d2219e..ba4d1e9eb 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -339,6 +339,12 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
contract := NewContract(caller, to, new(big.Int), gas)
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
+ // We do an AddBalance of zero here, just in order to trigger a touch.
+ // This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium,
+ // but is the correct thing to do and matters on other networks, in tests, and potential
+ // future scenarios
+ evm.StateDB.AddBalance(addr, bigZero)
+
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in Homestead this also counts for code storage gas errors.
diff --git a/cmd/evm/json_logger.go b/core/vm/logger_json.go
index 50cb4f0e4..ac3c40759 100644
--- a/cmd/evm/json_logger.go
+++ b/core/vm/logger_json.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
-package main
+package vm
import (
"encoding/json"
@@ -24,17 +24,16 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/core/vm"
)
type JSONLogger struct {
encoder *json.Encoder
- cfg *vm.LogConfig
+ cfg *LogConfig
}
// NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects
// into the provided stream.
-func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger {
+func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger {
return &JSONLogger{json.NewEncoder(writer), cfg}
}
@@ -43,8 +42,8 @@ func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create
}
// CaptureState outputs state information on the logger.
-func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
- log := vm.StructLog{
+func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
+ log := StructLog{
Pc: pc,
Op: op,
Gas: gas,
@@ -65,7 +64,7 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos
}
// CaptureFault outputs state information on the logger.
-func (l *JSONLogger) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
+func (l *JSONLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
return nil
}
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 8748d444f..a48815e0d 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -125,12 +125,12 @@ func (b *EthAPIBackend) GetTd(blockHash common.Hash) *big.Int {
return b.eth.blockchain.GetTdByHash(blockHash)
}
-func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) {
+func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) {
state.SetBalance(msg.From(), math.MaxBig256)
vmError := func() error { return nil }
context := core.NewEVMContext(msg, header, b.eth.BlockChain(), nil)
- return vm.NewEVM(context, state, b.eth.chainConfig, vmCfg), vmError, nil
+ return vm.NewEVM(context, state, b.eth.chainConfig, *b.eth.blockchain.GetVMConfig()), vmError, nil
}
func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
diff --git a/eth/api_tracer.go b/eth/api_tracer.go
index 2ebbcc5fd..0b8f8aa00 100644
--- a/eth/api_tracer.go
+++ b/eth/api_tracer.go
@@ -17,11 +17,13 @@
package eth
import (
+ "bufio"
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
+ "os"
"runtime"
"sync"
"time"
@@ -60,6 +62,13 @@ type TraceConfig struct {
Reexec *uint64
}
+// StdTraceConfig holds extra parameters to standard-json trace functions.
+type StdTraceConfig struct {
+ *vm.LogConfig
+ Reexec *uint64
+ TxHash common.Hash
+}
+
// txTraceResult is the result of a single transaction trace.
type txTraceResult struct {
Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer
@@ -366,7 +375,7 @@ func (api *PrivateDebugAPI) TraceBlockByNumber(ctx context.Context, number rpc.B
func (api *PrivateDebugAPI) TraceBlockByHash(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
block := api.eth.blockchain.GetBlockByHash(hash)
if block == nil {
- return nil, fmt.Errorf("block #%x not found", hash)
+ return nil, fmt.Errorf("block %#x not found", hash)
}
return api.traceBlock(ctx, block, config)
}
@@ -391,13 +400,41 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string,
return api.TraceBlock(ctx, blob, config)
}
-// TraceBadBlock returns the structured logs created during the execution of a block
-// within the blockchain 'badblocks' cache
-func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, index int, config *TraceConfig) ([]*txTraceResult, error) {
- if blocks := api.eth.blockchain.BadBlocks(); index < len(blocks) {
- return api.traceBlock(ctx, blocks[index], config)
+// TraceBadBlockByHash returns the structured logs created during the execution of
+// EVM against a block pulled from the pool of bad ones and returns them as a JSON
+// object.
+func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
+ blocks := api.eth.blockchain.BadBlocks()
+ for _, block := range blocks {
+ if block.Hash() == hash {
+ return api.traceBlock(ctx, block, config)
+ }
+ }
+ return nil, fmt.Errorf("bad block %#x not found", hash)
+}
+
+// StandardTraceBlockToFile dumps the structured logs created during the
+// execution of EVM to the local file system and returns a list of files
+// to the caller.
+func (api *PrivateDebugAPI) StandardTraceBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
+ block := api.eth.blockchain.GetBlockByHash(hash)
+ if block == nil {
+ return nil, fmt.Errorf("block %#x not found", hash)
}
- return nil, fmt.Errorf("index out of range")
+ return api.standardTraceBlockToFile(ctx, block, config)
+}
+
+// StandardTraceBadBlockToFile dumps the structured logs created during the
+// execution of EVM against a block pulled from the pool of bad ones to the
+// local file system and returns a list of files to the caller.
+func (api *PrivateDebugAPI) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
+ blocks := api.eth.blockchain.BadBlocks()
+ for _, block := range blocks {
+ if block.Hash() == hash {
+ return api.standardTraceBlockToFile(ctx, block, config)
+ }
+ }
+ return nil, fmt.Errorf("bad block %#x not found", hash)
}
// traceBlock configures a new tracer according to the provided configuration, and
@@ -410,7 +447,7 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
}
parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
- return nil, fmt.Errorf("parent %x not found", block.ParentHash())
+ return nil, fmt.Errorf("parent %#x not found", block.ParentHash())
}
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
@@ -481,6 +518,106 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
return results, nil
}
+// standardTraceBlockToFile configures a new tracer which uses standard JSON output,
+// and traces either a full block or an individual transaction. The return value will
+// be one filename per transaction traced.
+func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) {
+ // If we're tracing a single transaction, make sure it's present
+ if config != nil && config.TxHash != (common.Hash{}) {
+ var exists bool
+ for _, tx := range block.Transactions() {
+ if exists = (tx.Hash() == config.TxHash); exists {
+ break
+ }
+ }
+ if !exists {
+ return nil, fmt.Errorf("transaction %#x not found in block", config.TxHash)
+ }
+ }
+ // Create the parent state database
+ if err := api.eth.engine.VerifyHeader(api.eth.blockchain, block.Header(), true); err != nil {
+ return nil, err
+ }
+ parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
+ if parent == nil {
+ return nil, fmt.Errorf("parent %#x not found", block.ParentHash())
+ }
+ reexec := defaultTraceReexec
+ if config != nil && config.Reexec != nil {
+ reexec = *config.Reexec
+ }
+ statedb, err := api.computeStateDB(parent, reexec)
+ if err != nil {
+ return nil, err
+ }
+ // Retrieve the tracing configurations, or use default values
+ var (
+ logConfig vm.LogConfig
+ txHash common.Hash
+ )
+ if config != nil {
+ if config.LogConfig != nil {
+ logConfig = *config.LogConfig
+ }
+ txHash = config.TxHash
+ }
+ logConfig.Debug = true
+
+ // Execute transaction, either tracing all or just the requested one
+ var (
+ signer = types.MakeSigner(api.config, block.Number())
+ dumps []string
+ )
+ for i, tx := range block.Transactions() {
+ // Prepare the trasaction for un-traced execution
+ var (
+ msg, _ = tx.AsMessage(signer)
+ vmctx = core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
+
+ vmConf vm.Config
+ dump *os.File
+ err error
+ )
+ // If the transaction needs tracing, swap out the configs
+ if tx.Hash() == txHash || txHash == (common.Hash{}) {
+ // Generate a unique temporary file to dump it into
+ prefix := fmt.Sprintf("block_%#x-%d-%#x-", block.Hash().Bytes()[:4], i, tx.Hash().Bytes()[:4])
+
+ dump, err = ioutil.TempFile(os.TempDir(), prefix)
+ if err != nil {
+ return nil, err
+ }
+ dumps = append(dumps, dump.Name())
+
+ // Swap out the noop logger to the standard tracer
+ vmConf = vm.Config{
+ Debug: true,
+ Tracer: vm.NewJSONLogger(&logConfig, bufio.NewWriter(dump)),
+ EnablePreimageRecording: true,
+ }
+ }
+ // Execute the transaction and flush any traces to disk
+ vmenv := vm.NewEVM(vmctx, statedb, api.config, vmConf)
+ _, _, _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))
+
+ if dump != nil {
+ dump.Close()
+ log.Info("Wrote standard trace", "file", dump.Name())
+ }
+ if err != nil {
+ return dumps, err
+ }
+ // Finalize the state so any modifications are written to the trie
+ statedb.Finalise(true)
+
+ // If we've traced the transaction we were looking for, abort
+ if tx.Hash() == txHash {
+ break
+ }
+ }
+ return dumps, nil
+}
+
// computeStateDB retrieves the state database associated with a certain block.
// If no state is locally available for the given block, a number of blocks are
// attempted to be reexecuted to generate the desired state.
@@ -506,7 +643,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
if err != nil {
switch err.(type) {
case *trie.MissingNodeError:
- return nil, errors.New("required historical state unavailable")
+ return nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec)
default:
return nil, err
}
@@ -520,7 +657,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
for block.NumberU64() < origin {
// Print progress logs if long enough time elapsed
if time.Since(logged) > 8*time.Second {
- log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "elapsed", time.Since(start))
+ log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "remaining", origin-block.NumberU64()-1, "elapsed", time.Since(start))
logged = time.Now()
}
// Retrieve the next block to regenerate and process it
@@ -529,15 +666,15 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
}
_, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err)
}
// Finalize the state so any modifications are written to the trie
- root, err := statedb.Commit(true)
+ root, err := statedb.Commit(api.eth.blockchain.Config().IsEIP158(block.Number()))
if err != nil {
return nil, err
}
if err := statedb.Reset(root); err != nil {
- return nil, err
+ return nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), err)
}
database.TrieDB().Reference(root, common.Hash{})
if proot != (common.Hash{}) {
@@ -556,7 +693,7 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Ha
// Retrieve the transaction and assemble its EVM context
tx, blockHash, _, index := rawdb.ReadTransaction(api.eth.ChainDb(), hash)
if tx == nil {
- return nil, fmt.Errorf("transaction %x not found", hash)
+ return nil, fmt.Errorf("transaction %#x not found", hash)
}
reexec := defaultTraceReexec
if config != nil && config.Reexec != nil {
@@ -636,11 +773,11 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree
// Create the parent state database
block := api.eth.blockchain.GetBlockByHash(blockHash)
if block == nil {
- return nil, vm.Context{}, nil, fmt.Errorf("block %x not found", blockHash)
+ return nil, vm.Context{}, nil, fmt.Errorf("block %#x not found", blockHash)
}
parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
- return nil, vm.Context{}, nil, fmt.Errorf("parent %x not found", block.ParentHash())
+ return nil, vm.Context{}, nil, fmt.Errorf("parent %#x not found", block.ParentHash())
}
statedb, err := api.computeStateDB(parent, reexec)
if err != nil {
@@ -659,10 +796,10 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree
// Not yet the searched for transaction, execute on top of the current state
vmenv := vm.NewEVM(context, statedb, api.config, vm.Config{})
if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
- return nil, vm.Context{}, nil, fmt.Errorf("tx %x failed: %v", tx.Hash(), err)
+ return nil, vm.Context{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
statedb.Finalise(true)
}
- return nil, vm.Context{}, nil, fmt.Errorf("tx index %d out of range for block %x", txIndex, blockHash)
+ return nil, vm.Context{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, blockHash)
}
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index f81a5cbac..3a177ab9d 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -99,6 +99,7 @@ type Downloader struct {
mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle)
mux *event.TypeMux // Event multiplexer to announce sync operation events
+ genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT)
queue *queue // Scheduler for selecting the hashes to download
peers *peerSet // Set of active peers from which download can proceed
stateDB ethdb.Database
@@ -664,7 +665,28 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header)
}
p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
if localHeight >= MaxForkAncestry {
+ // We're above the max reorg threshold, find the earliest fork point
floor = int64(localHeight - MaxForkAncestry)
+
+ // If we're doing a light sync, ensure the floor doesn't go below the CHT, as
+ // all headers before that point will be missing.
+ if d.mode == LightSync {
+ // If we dont know the current CHT position, find it
+ if d.genesis == 0 {
+ header := d.lightchain.CurrentHeader()
+ for header != nil {
+ d.genesis = header.Number.Uint64()
+ if floor >= int64(d.genesis)-1 {
+ break
+ }
+ header = d.lightchain.GetHeaderByHash(header.ParentHash)
+ }
+ }
+ // We already know the "genesis" block number, cap floor to that
+ if floor < int64(d.genesis)-1 {
+ floor = int64(d.genesis) - 1
+ }
+ }
}
from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
diff --git a/eth/handler_test.go b/eth/handler_test.go
index 7811cd480..44824fd0b 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -585,7 +585,7 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
}
}(peer)
}
- timeoutCh := time.NewTimer(time.Millisecond * 100).C
+ timeout := time.After(300 * time.Millisecond)
var receivedCount int
outer:
for {
@@ -597,7 +597,7 @@ outer:
if receivedCount == totalPeers {
break outer
}
- case <-timeoutCh:
+ case <-timeout:
break outer
}
}
diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go
index 04dd6fe89..addd32882 100644
--- a/eth/tracers/internal/tracers/assets.go
+++ b/eth/tracers/internal/tracers/assets.go
@@ -1,14 +1,14 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
-// 4byte_tracer.js
-// bigram_tracer.js
-// call_tracer.js
-// evmdis_tracer.js
-// noop_tracer.js
-// opcount_tracer.js
-// prestate_tracer.js
-// trigram_tracer.js
-// unigram_tracer.js
+// 4byte_tracer.js (2.933kB)
+// bigram_tracer.js (1.712kB)
+// call_tracer.js (8.596kB)
+// evmdis_tracer.js (4.194kB)
+// noop_tracer.js (1.271kB)
+// opcount_tracer.js (1.372kB)
+// prestate_tracer.js (3.892kB)
+// trigram_tracer.js (1.788kB)
+// unigram_tracer.js (1.51kB)
package tracers
@@ -28,7 +28,7 @@ import (
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
- return nil, fmt.Errorf("Read %q: %v", name, err)
+ return nil, fmt.Errorf("read %q: %v", name, err)
}
var buf bytes.Buffer
@@ -36,7 +36,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
clErr := gz.Close()
if err != nil {
- return nil, fmt.Errorf("Read %q: %v", name, err)
+ return nil, fmt.Errorf("read %q: %v", name, err)
}
if clErr != nil {
return nil, err
@@ -197,7 +197,7 @@ func opcount_tracerJs() (*asset, error) {
return a, nil
}
-var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x57\xdd\x6f\x1b\xb9\x11\x7f\xde\xfd\x2b\xa6\x7e\x91\x84\x53\x56\xce\x15\xb8\x02\x72\x5d\x60\xa3\x28\x89\x00\x9d\x6d\x48\x4a\x5d\xf7\x70\x0f\x5c\x72\x76\xc5\x13\x45\x2e\x48\xae\x3e\x10\xf8\x7f\x2f\x86\xfb\x21\xcb\x67\x27\x6e\xeb\x27\x2f\x39\xfc\xcd\xf7\x6f\x46\xa3\x11\x4c\x4c\x79\xb4\xb2\x58\x7b\xf8\xf9\xf2\xfd\xdf\x60\xb5\x46\x28\xcc\x3b\xf4\x6b\xb4\x58\x6d\x21\xad\xfc\xda\x58\x17\x8f\x46\xb0\x5a\x4b\x07\xb9\x54\x08\xd2\x41\xc9\xac\x07\x93\x83\x7f\x26\xaf\x64\x66\x99\x3d\x26\xf1\x68\x54\xbf\x79\xf1\x9a\x10\x72\x8b\x08\xce\xe4\x7e\xcf\x2c\x8e\xe1\x68\x2a\xe0\x4c\x83\x45\x21\x9d\xb7\x32\xab\x3c\x82\xf4\xc0\xb4\x18\x19\x0b\x5b\x23\x64\x7e\x24\x48\xe9\xa1\xd2\x02\x6d\x50\xed\xd1\x6e\x5d\x6b\xc7\xe7\x9b\xaf\x30\x47\xe7\xd0\xc2\x67\xd4\x68\x99\x82\xbb\x2a\x53\x92\xc3\x5c\x72\xd4\x0e\x81\x39\x28\xe9\xc4\xad\x51\x40\x16\xe0\xe8\xe1\x27\x32\x65\xd9\x98\x02\x9f\x4c\xa5\x05\xf3\xd2\xe8\x21\xa0\x24\xcb\x61\x87\xd6\x49\xa3\xe1\xaf\xad\xaa\x06\x70\x08\xc6\x12\x48\x9f\x79\x72\xc0\x82\x29\xe9\xdd\x00\x98\x3e\x82\x62\xfe\xf4\xf4\x0d\x01\x39\xf9\x2d\x40\xea\xa0\x66\x6d\x4a\x04\xbf\x66\x9e\xbc\xde\x4b\xa5\x20\x43\xa8\x1c\xe6\x95\x1a\x12\x5a\x56\x79\xb8\x9f\xad\xbe\xdc\x7e\x5d\x41\x7a\xf3\x00\xf7\xe9\x62\x91\xde\xac\x1e\xae\x60\x2f\xfd\xda\x54\x1e\x70\x87\x35\x94\xdc\x96\x4a\xa2\x80\x3d\xb3\x96\x69\x7f\x04\x93\x13\xc2\xaf\xd3\xc5\xe4\x4b\x7a\xb3\x4a\x3f\xcc\xe6\xb3\xd5\x03\x18\x0b\x9f\x66\xab\x9b\xe9\x72\x09\x9f\x6e\x17\x90\xc2\x5d\xba\x58\xcd\x26\x5f\xe7\xe9\x02\xee\xbe\x2e\xee\x6e\x97\xd3\x04\x96\x48\x56\x21\xbd\xff\x71\xcc\xf3\x90\x3d\x8b\x20\xd0\x33\xa9\x5c\x1b\x89\x07\x53\x81\x5b\x9b\x4a\x09\x58\xb3\x1d\x82\x45\x8e\x72\x87\x02\x18\x70\x53\x1e\xdf\x9c\x54\xc2\x62\xca\xe8\x22\xf8\xfc\x6a\x41\xc2\x2c\x07\x6d\xfc\x10\x1c\x22\xfc\x7d\xed\x7d\x39\x1e\x8d\xf6\xfb\x7d\x52\xe8\x2a\x31\xb6\x18\xa9\x1a\xce\x8d\xfe\x91\xc4\x84\x59\x5a\x74\x9e\x79\x5c\x59\xc6\xd1\x82\xa9\x7c\x59\x79\x07\xae\xca\x73\xc9\x25\x6a\x0f\x52\xe7\xc6\x6e\x43\xa5\x80\x37\xc0\x2d\x32\x8f\xc0\x40\x19\xce\x14\xe0\x01\x79\x15\xee\xea\x48\x87\x72\xb5\x4c\x3b\xc6\xc3\x69\x6e\xcd\x96\x7c\xad\x9c\xa7\x7f\x9c\xc3\x6d\xa6\x50\x40\x81\x1a\x9d\x74\x90\x29\xc3\x37\x49\xfc\x2d\x8e\x9e\x18\x43\x75\x12\x3c\x6c\x84\x42\x6d\xec\xb1\x67\x11\xb2\x4a\x2a\x21\x75\x91\xc4\x51\x2b\x3d\x06\x5d\x29\x35\x8c\x03\x84\x32\x66\x53\x95\x29\xe7\xa6\x0a\xb6\xff\x81\xdc\xd7\x60\xae\x44\x2e\x73\x2a\x0e\xd6\xdd\x7a\x13\xae\x3a\xbd\x26\x23\xf9\x24\x8e\xce\x60\xc6\x90\x57\x3a\xb8\xd3\x67\x42\xd8\x21\x88\x6c\xf0\x2d\x8e\xa2\x1d\xb3\x84\x05\xd7\xe0\xcd\x17\x3c\x84\xcb\xc1\x55\x1c\x45\x32\x87\xbe\x5f\x4b\x97\xb4\xc0\xbf\x31\xce\x7f\x87\xeb\xeb\xeb\xd0\xd4\xb9\xd4\x28\x06\x40\x10\xd1\x4b\x62\xf5\x4d\x94\x31\xc5\x34\xc7\x31\xf4\x2e\x0f\x3d\xf8\x09\x44\x96\x14\xe8\x3f\xd4\xa7\xb5\xb2\xc4\x9b\xa5\xb7\x52\x17\xfd\xf7\xbf\x0c\x86\xe1\x95\x36\xe1\x0d\x34\xe2\x37\xa6\x13\xae\xef\xb9\x11\xe1\xba\xb1\xb9\x96\x9a\x18\xd1\x08\x35\x52\xce\x1b\xcb\x0a\x1c\xc3\xb7\x47\xfa\x7e\x24\xaf\x1e\xe3\xe8\xf1\x2c\xca\xcb\x5a\xe8\x95\x28\x37\x10\x80\xda\xdb\xae\xce\x0b\x49\x9d\xfa\x34\x01\x01\xef\x7b\x49\x58\xb6\xa6\x3c\x4b\xc2\x06\x8f\x3f\xce\x04\x5d\x48\x71\xe8\x2e\x36\x78\x1c\x5c\xc5\xaf\xa6\x28\x69\x8c\xfe\x4d\x8a\xc3\xcb\xf9\x22\xc0\x1d\x53\x1d\x60\x1d\xbf\x25\x21\x9c\xec\x1a\x04\xdd\x41\x07\xc9\xfe\xe5\x1a\x2e\x2e\x0f\x97\xff\xe7\xdf\x45\x63\xc1\x0b\x25\xf3\xcc\xec\x37\x98\xf6\x78\x9e\x4f\x8b\xae\x52\x9e\xda\x4e\xea\x9d\xd9\x10\x81\xae\x29\x4f\x4a\x85\xd4\x98\x92\xaa\xc6\xd5\x0c\x96\x21\x6a\x90\x1e\x2d\x23\x0a\x37\x3b\xb4\x34\xbd\xc0\xa2\xaf\xac\x76\x5d\x3a\x73\xa9\x99\x6a\x81\x9b\xec\x7b\xcb\x78\xdd\xbb\xf5\xf9\x93\x9c\x72\x7f\x08\xd9\x0c\x3e\x8e\x46\x90\x7a\x20\x3f\xa1\x34\x52\xfb\x21\xec\x11\x34\xa2\x20\x02\x12\x28\x2a\xee\x03\x5e\x6f\xc7\x54\x85\xbd\x9a\x64\x88\xaa\xc3\x53\x53\xd1\x44\x7a\x42\x42\xc3\x60\xe0\xd6\xec\xc2\xa8\xcd\x18\xdf\x40\xd3\xf8\xc6\xca\x42\xea\xb8\x89\xe9\x59\xd3\x93\x45\x09\x01\x07\xb3\x42\xcd\x50\xee\xe9\xe4\x43\xc8\x7f\x26\x8b\x99\xf6\xcf\x8a\xa8\x8e\x7c\xfb\x74\xf0\x7b\xd2\x34\x71\xe2\x88\x78\xfb\x3f\x0f\x86\xf0\xfe\x97\xae\x32\xbd\x21\x28\xf8\x31\x98\x37\xaf\x43\xc5\xcf\x2b\xe2\xe5\x67\x41\x0d\x31\xc9\x4f\x41\x6b\xe2\xaa\x8c\xd2\x51\xfb\x19\xe2\x78\xce\x26\x57\xdf\xc1\x3d\xf7\xad\xc5\x6d\x42\x93\x30\x21\x5e\x07\xad\x53\xf4\x11\xb9\xc5\x2d\x4d\x17\xca\x02\x67\x4a\xa1\xed\x39\x08\xdc\x35\x6c\xca\x29\xe4\x0b\xb7\xa5\x3f\xb6\x33\xc7\x33\x5b\xa0\x77\x3f\x36\x2c\xe0\xbc\x7b\xd7\x52\x71\x08\xc5\xb1\x44\xb8\xbe\x86\xde\x64\x31\x4d\x57\xd3\x5e\xd3\x4c\xa3\x11\xdc\x63\xd8\xc8\x32\x25\x33\xa1\x8e\x20\x50\xa1\xc7\xda\x2e\xa3\x43\x88\x3a\x6a\x1a\xd2\x6a\x45\x4b\x0f\x1e\xa4\xf3\x52\x17\x50\x33\xd6\x9e\xe6\x7b\x03\x17\x7a\x84\xb3\xca\x51\xb5\x3e\x1b\x86\xde\xd0\x66\x63\x91\xf8\x8d\xe6\x50\x68\x37\xa6\x64\xb7\x09\xe5\xd2\x3a\x0f\xa5\x62\x1c\x13\xc2\xeb\x8c\x79\x3d\xbf\x0d\x33\x93\xea\x45\x68\xc1\x00\x74\x1a\xb4\x4c\xd1\xa0\x26\xf5\x0e\xfa\x2d\xc6\x20\x8e\x22\xdb\x4a\x3f\xc1\xbe\x3a\x51\x82\xf3\x58\x3e\x25\x04\x5a\x70\x70\x87\x44\xe5\x81\x0d\xea\xa1\x4c\xba\xfe\xf9\x6b\xb3\x05\xa0\x4b\xe2\x88\xde\x3d\xe9\x6b\x65\x8a\xf3\xbe\x16\x75\x58\x78\x65\x2d\xe5\xbf\x1b\x05\x39\xf5\xf8\x1f\x95\xf3\x14\x53\x4b\xe1\x69\xd8\xe2\x25\xb2\x0e\xd4\x4c\x53\x7f\xf0\xe7\x21\x4a\xf3\x33\xcc\x2b\x52\xd7\x4c\xcb\x7a\xab\x2c\x8d\x47\xed\x25\x53\xea\x48\x79\xd8\x5b\x5a\xa7\x68\x81\x1a\x82\x93\x24\x15\x18\x27\x88\x4a\xcd\x55\x25\xea\x32\x08\x75\xdc\xe0\xb9\x60\xf3\xf9\x1e\xb6\x45\xe7\x58\x81\x09\x55\x52\x2e\x0f\xcd\x26\xab\xa1\x57\x93\x5c\x7f\xd0\x4b\x3a\x23\xcf\x29\x46\x99\x22\x69\x8b\x8c\xb8\x3a\x15\xc2\xa2\x73\xfd\x41\xc3\x39\x5d\x66\xef\xd7\xa8\x29\xf8\xa0\x71\x0f\xdd\x8a\xc4\x38\xa7\x95\x51\x0c\x81\x09\x41\xd4\xf6\x6c\x9d\x89\xa3\xc8\xed\xa5\xe7\x6b\x08\x9a\x4c\x79\xea\xc5\x41\x53\xff\x9c\x39\x84\x8b\xe9\xbf\x56\x93\xdb\x8f\xd3\xc9\xed\xdd\xc3\xc5\x18\xce\xce\x96\xb3\x7f\x4f\xbb\xb3\x0f\xe9\x3c\xbd\x99\x4c\x2f\xc6\xa7\x39\x74\xee\x90\x37\xad\x0b\xa4\xd0\x79\xc6\x37\x49\x89\xb8\xe9\x5f\x9e\xf3\xc0\xc9\xc1\x28\xca\x2c\xb2\xcd\xd5\xc9\x98\xba\x41\x1b\x1d\x2d\xe5\xc2\x35\xbc\x1a\xac\xab\xd7\xad\x99\x34\xf2\xfd\x96\xc8\x4f\x2b\x51\xa0\x8a\xef\xda\x91\xce\xe7\x9d\xe7\xf4\x41\xe1\xe8\x0e\x3e\x4e\xe7\xd3\xcf\xe9\x6a\x7a\x26\xb5\x5c\xa5\xab\xd9\xa4\x3e\xfa\xaf\x43\xf4\xfe\xcd\x21\xea\x2d\x97\xab\xdb\xc5\xb4\x37\x6e\xbe\xe6\xb7\xe9\xc7\xde\x9f\x14\x36\x7b\xd3\xf7\x8a\xcc\x9b\x7b\x63\xc5\xff\x92\xab\x27\xbb\x43\xce\x5e\x5a\x1d\x02\x09\x71\x5f\x3d\xfb\x89\x00\x4c\xb7\xfc\x91\xd7\x3f\x93\xa2\xf0\xfe\x45\xc6\x78\x8c\x1f\xe3\xff\x04\x00\x00\xff\xff\xb5\x44\x89\xaf\xbc\x0f\x00\x00")
+var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdf\x6f\x1a\x49\x12\x7e\x9e\xf9\x2b\x4a\xfb\x02\x68\xc9\x90\xec\x49\x7b\x12\x3e\x9f\x34\x21\x24\x41\x62\x6d\x0b\xc8\xf9\x7c\xab\x7d\xe8\xe9\xae\x19\x7a\x69\xba\x47\xfd\x03\x8c\x22\xff\xef\xa7\xea\x99\x01\xc3\xda\x49\xee\xde\xcc\x74\xf5\x57\x55\x5f\x55\x7d\x5d\x1e\x8d\x60\x62\xea\x83\x95\xd5\xda\xc3\x2f\x6f\xdf\xfd\x1d\x56\x6b\x84\xca\xbc\x41\xbf\x46\x8b\x61\x0b\x79\xf0\x6b\x63\x5d\x3a\x1a\xc1\x6a\x2d\x1d\x94\x52\x21\x48\x07\x35\xb3\x1e\x4c\x09\xfe\xc2\x5e\xc9\xc2\x32\x7b\xc8\xd2\xd1\xa8\xb9\xf3\xe2\x31\x21\x94\x16\x11\x9c\x29\xfd\x9e\x59\x1c\xc3\xc1\x04\xe0\x4c\x83\x45\x21\x9d\xb7\xb2\x08\x1e\x41\x7a\x60\x5a\x8c\x8c\x85\xad\x11\xb2\x3c\x10\xa4\xf4\x10\xb4\x40\x1b\x5d\x7b\xb4\x5b\xd7\xc5\xf1\xe9\xe6\x0b\xcc\xd1\x39\xb4\xf0\x09\x35\x5a\xa6\xe0\x2e\x14\x4a\x72\x98\x4b\x8e\xda\x21\x30\x07\x35\x7d\x71\x6b\x14\x50\x44\x38\xba\xf8\x91\x42\x59\xb6\xa1\xc0\x47\x13\xb4\x60\x5e\x1a\x3d\x04\x94\x14\x39\xec\xd0\x3a\x69\x34\xfc\xad\x73\xd5\x02\x0e\xc1\x58\x02\xe9\x33\x4f\x09\x58\x30\x35\xdd\x1b\x00\xd3\x07\x50\xcc\x9f\xae\xfe\x00\x21\xa7\xbc\x05\x48\x1d\xdd\xac\x4d\x8d\xe0\xd7\xcc\x53\xd6\x7b\xa9\x14\x14\x08\xc1\x61\x19\xd4\x90\xd0\x8a\xe0\xe1\x7e\xb6\xfa\x7c\xfb\x65\x05\xf9\xcd\x03\xdc\xe7\x8b\x45\x7e\xb3\x7a\xb8\x82\xbd\xf4\x6b\x13\x3c\xe0\x0e\x1b\x28\xb9\xad\x95\x44\x01\x7b\x66\x2d\xd3\xfe\x00\xa6\x24\x84\xdf\xa6\x8b\xc9\xe7\xfc\x66\x95\xbf\x9f\xcd\x67\xab\x07\x30\x16\x3e\xce\x56\x37\xd3\xe5\x12\x3e\xde\x2e\x20\x87\xbb\x7c\xb1\x9a\x4d\xbe\xcc\xf3\x05\xdc\x7d\x59\xdc\xdd\x2e\xa7\x19\x2c\x91\xa2\x42\xba\xff\x7d\xce\xcb\x58\x3d\x8b\x20\xd0\x33\xa9\x5c\xc7\xc4\x83\x09\xe0\xd6\x26\x28\x01\x6b\xb6\x43\xb0\xc8\x51\xee\x50\x00\x03\x6e\xea\xc3\x0f\x17\x95\xb0\x98\x32\xba\x8a\x39\xbf\xda\x90\x30\x2b\x41\x1b\x3f\x04\x87\x08\xff\x58\x7b\x5f\x8f\x47\xa3\xfd\x7e\x9f\x55\x3a\x64\xc6\x56\x23\xd5\xc0\xb9\xd1\x3f\xb3\x94\x30\x6b\x8b\xce\x33\x8f\x2b\xcb\x38\x5a\x30\xc1\xd7\xc1\x3b\x70\xa1\x2c\x25\x97\xa8\x3d\x48\x5d\x1a\xbb\x8d\x9d\x02\xde\x00\xb7\xc8\x3c\x02\x03\x65\x38\x53\x80\x8f\xc8\x43\x3c\x6b\x98\x8e\xed\x6a\x99\x76\x8c\xc7\xaf\xa5\x35\x5b\xca\x35\x38\x4f\x7f\x38\x87\xdb\x42\xa1\x80\x0a\x35\x3a\xe9\xa0\x50\x86\x6f\xb2\xf4\x6b\x9a\x3c\x0b\x86\xfa\x24\x66\xd8\x1a\xc5\xde\xd8\x63\xcf\x22\x14\x41\x2a\x21\x75\x95\xa5\x49\x67\x3d\x06\x1d\x94\x1a\xa6\x11\x42\x19\xb3\x09\x75\xce\xb9\x09\x31\xf6\x3f\x91\xfb\x06\xcc\xd5\xc8\x65\x49\xcd\xc1\x8e\xa7\xde\xc4\xa3\xa3\x5f\x53\x90\x7d\x96\x26\x67\x30\x63\x28\x83\x8e\xe9\xf4\x99\x10\x76\x08\xa2\x18\x7c\x4d\x93\x64\xc7\x2c\x61\xc1\x35\x78\xf3\x19\x1f\xe3\xe1\xe0\x2a\x4d\x12\x59\x42\xdf\xaf\xa5\xcb\x3a\xe0\xdf\x19\xe7\x7f\xc0\xf5\xf5\x75\x1c\xea\x52\x6a\x14\x03\x20\x88\xe4\x25\xb3\xe6\x24\x29\x98\x62\x9a\xe3\x18\x7a\x6f\x1f\x7b\xf0\x33\x88\x22\xab\xd0\xbf\x6f\xbe\x36\xce\x32\x6f\x96\xde\x4a\x5d\xf5\xdf\xfd\x3a\x18\xc6\x5b\xda\xc4\x3b\xd0\x9a\xdf\x98\xa3\x71\x73\xce\x8d\x88\xc7\x6d\xcc\x8d\xd5\xc4\x88\xd6\xa8\xb5\x72\xde\x58\x56\xe1\x18\xbe\x3e\xd1\xef\x27\xca\xea\x29\x4d\x9e\xce\x58\x5e\x36\x46\xaf\xb0\xdc\x42\x00\x6a\x6f\x8f\x7d\x5e\x49\x9a\xd4\xe7\x05\x88\x78\xdf\x2a\xc2\xb2\x0b\xe5\xa2\x08\x1b\x3c\x7c\xbf\x12\x74\x20\xc5\xe3\xf1\x60\x83\x87\xc1\x55\xfa\x6a\x89\xb2\x36\xe8\xdf\xa5\x78\xfc\xd1\x7a\x5d\xdc\x39\xe3\x75\x49\x56\xa7\x78\x07\x83\x0b\x1e\x2d\xba\xa0\x3c\xb5\xbb\xd4\x3b\xb3\x21\xe1\x5a\x13\x3f\x4a\x45\x4a\x4c\x4d\xd5\x72\x8d\x72\x14\x88\x1a\xa4\x47\xcb\x48\x3a\xcd\x0e\x2d\xbd\x1a\x60\xd1\x07\xab\xdd\x91\xc6\x52\x6a\xa6\x3a\xe0\x96\x75\x6f\x19\x6f\x66\xa6\xf9\xfe\x8c\x4b\xee\x1f\x23\x8b\x31\xbb\xd1\x08\x72\x0f\x94\x22\xd4\x46\x6a\x3f\x84\x3d\x82\x46\x14\x34\xf8\x02\x45\xe0\x3e\xe2\xf5\x76\x4c\x05\xec\x35\xc3\x4d\x12\x19\xaf\x9a\x40\x2f\xc1\xb3\xe1\x1f\xc6\x00\xb7\x66\x17\x9f\xb8\x82\xf1\x0d\xb4\x03\x67\xac\xac\xa4\x4e\x5b\x3a\xcf\x86\x8d\x22\xca\x08\x38\x86\x15\x6b\x45\x45\xa4\x2f\xef\x99\x82\x6b\x28\x64\x35\xd3\xfe\xa2\x78\x0d\xe9\xdd\xd5\xc1\x1f\x59\x3b\x3c\x99\x23\xc1\xeb\xff\x32\x18\xc2\xbb\x5f\x8f\x1d\xe1\x0d\x41\xc1\xf7\xc1\xbc\x79\x1d\x2a\xbd\x6c\x86\x97\xaf\x45\x37\x34\xc1\x3f\x47\xaf\x99\x0b\x05\x95\xa3\xc9\x33\xf2\x78\x3e\xc5\x57\xdf\xc0\x3d\xcf\xad\xc3\x6d\xa9\xc9\x98\x10\xaf\x83\x36\x25\xfa\x80\xdc\xe2\x96\x54\x9d\xaa\xc0\x99\x52\x68\x7b\x0e\xa2\x66\x0c\xdb\x76\x8a\xf5\xc2\x6d\xed\x0f\x9d\xd6\x7b\x66\x2b\xf4\xee\xfb\x81\x45\x9c\x37\x6f\x3a\x09\x8c\x54\x1c\x6a\x84\xeb\x6b\xe8\x4d\x16\xd3\x7c\x35\xed\xb5\x63\x34\x1a\xc1\x3d\xc6\x4d\xa8\x50\xb2\x10\xea\x00\x02\x15\x7a\x6c\xe2\x32\x3a\x52\x74\x94\x84\x21\xad\x34\xb4\x6c\xe0\xa3\x74\x5e\xea\x0a\x1a\xa5\xd8\xd3\xbb\xda\xc2\xc5\x19\xe1\x2c\x38\xea\xd6\x8b\x47\xc8\x1b\xda\x28\x2c\x92\xae\x90\xfe\xc7\x71\x63\x4a\x1e\x37\x90\x52\x5a\xe7\xa1\x56\x8c\x63\x46\x78\xc7\x60\x5e\xaf\x6f\x3b\xc9\xe4\x7a\x11\x47\x30\x02\x9d\x1e\x38\xa6\xe8\x81\x24\xf7\x0e\xfa\x1d\xc6\x20\x4d\x12\xdb\x59\x3f\xc3\xbe\x3a\x49\x82\xf3\x58\x3f\x17\x04\x5a\x2c\x70\x87\x24\xa1\x51\x0d\x9a\xc7\x90\x7c\xfd\xeb\xb7\xf6\xf5\x45\x97\xa5\x09\xdd\x7b\x36\xd7\xca\x54\xe7\x73\x2d\x1a\x5a\x78\xb0\x96\xea\x7f\x94\xe0\x92\x66\xfc\xcf\xe0\x3c\x71\x6a\x89\x9e\x56\x2d\x5e\x12\xc9\x28\x89\xf4\xda\x0e\xfe\x2a\x86\xf4\x6e\xc5\x77\x82\xdc\xb5\xaf\x54\xb3\xcd\xd5\xc6\xa3\xf6\x92\x29\x75\xa0\x3a\xec\x2d\xad\x31\xb4\xb8\x0c\xc1\x49\xb2\x8a\x8a\x13\x4d\xa5\xe6\x2a\x88\xa6\x0d\x62\x1f\xb7\x78\x2e\xc6\x7c\xbe\xff\x6c\xd1\x39\x56\x61\x46\x9d\x54\xca\xc7\x76\x83\xd4\xd0\x6b\x44\xae\x3f\xe8\x65\xc7\x20\xcf\x25\x46\x99\x2a\xeb\x9a\x8c\x64\x3a\x17\xc2\xa2\x73\xfd\x41\xab\x39\xc7\xca\xde\xaf\x51\x13\xf9\xa0\x71\x0f\xc7\xd5\x84\x71\x4e\xab\x9a\x18\x02\x13\x82\xa4\xed\x62\x8d\x48\x93\xc4\xed\xa5\xe7\x6b\x88\x9e\x4c\x7d\x9a\xc5\x41\xdb\xff\x9c\x39\x84\x9f\xa6\xff\x5e\x4d\x6e\x3f\x4c\x27\xb7\x77\x0f\x3f\x8d\xe1\xec\xdb\x72\xf6\x9f\xe9\xf1\xdb\xfb\x7c\x9e\xdf\x4c\xa6\x3f\x8d\xe3\xdb\xfc\x42\x42\xde\x74\x29\x90\x43\xe7\x19\xdf\x64\x35\xe2\xa6\xff\xf6\x5c\x07\x4e\x09\x26\x49\x61\x91\x6d\xae\x4e\xc1\x34\x03\xda\xfa\xe8\x24\x17\xae\xe1\x55\xb2\xae\x5e\x8f\x66\xd2\xda\xf7\x3b\x21\x3f\xad\x22\x51\x2a\xbe\x19\x47\x3e\x9f\x1f\x33\xa7\x1f\x44\xc7\xf1\xc3\x87\xe9\x7c\xfa\x29\x5f\x4d\xcf\xac\x96\xab\x7c\x35\x9b\x34\x9f\xfe\x67\x8a\xde\xfd\x30\x45\xbd\xe5\x72\x75\xbb\x98\xf6\xc6\xed\xaf\xf9\x6d\xfe\xa1\xf7\x17\x87\xed\xbe\xf2\xad\x26\xf3\xe6\xde\x58\xf1\xff\xd4\xea\xd9\xee\x50\xb2\x97\x56\x87\x28\x42\xdc\x87\x8b\xd5\x1c\x98\xee\xf4\xa3\x6c\xfe\x3d\x49\xe2\xfd\x17\x15\xe3\x29\x7d\x4a\xff\x1b\x00\x00\xff\xff\x7c\xdb\x3f\x79\x34\x0f\x00\x00")
func prestate_tracerJsBytes() ([]byte, error) {
return bindataRead(
@@ -213,7 +213,7 @@ func prestate_tracerJs() (*asset, error) {
}
info := bindataFileInfo{name: "prestate_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd0, 0xd5, 0x5, 0x92, 0xed, 0xf4, 0x69, 0x2e, 0x14, 0x48, 0x35, 0x67, 0xcc, 0xf2, 0x3e, 0xc7, 0xf, 0x18, 0x22, 0x7a, 0x4d, 0x6f, 0x31, 0xad, 0x3c, 0x92, 0x77, 0xb4, 0x1, 0x2a, 0xd3, 0x7c}}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd, 0xb0, 0x72, 0x28, 0xc7, 0x27, 0x97, 0x4d, 0xe, 0xbf, 0x29, 0xe1, 0xa8, 0xd7, 0x52, 0x13, 0xa1, 0x19, 0xc3, 0xfb, 0x8d, 0x5b, 0xcb, 0xdd, 0xa5, 0xd7, 0x98, 0x34, 0x6a, 0xbf, 0x33, 0x6c}}
return a, nil
}
diff --git a/eth/tracers/internal/tracers/prestate_tracer.js b/eth/tracers/internal/tracers/prestate_tracer.js
index 99f71d2c3..56aa2b210 100644
--- a/eth/tracers/internal/tracers/prestate_tracer.js
+++ b/eth/tracers/internal/tracers/prestate_tracer.js
@@ -40,10 +40,7 @@
var idx = toHex(key);
if (this.prestate[acc].storage[idx] === undefined) {
- var val = toHex(db.getState(addr, key));
- if (val != "0x0000000000000000000000000000000000000000000000000000000000000000") {
- this.prestate[acc].storage[idx] = toHex(db.getState(addr, key));
- }
+ this.prestate[acc].storage[idx] = toHex(db.getState(addr, key));
}
},
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 43a33e992..656555b3b 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -683,7 +683,7 @@ type CallArgs struct {
Data hexutil.Bytes `json:"data"`
}
-func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config, timeout time.Duration) ([]byte, uint64, bool, error) {
+func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration) ([]byte, uint64, bool, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
@@ -724,7 +724,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
defer cancel()
// Get a new instance of the EVM.
- evm, vmError, err := s.b.GetEVM(ctx, msg, state, header, vmCfg)
+ evm, vmError, err := s.b.GetEVM(ctx, msg, state, header)
if err != nil {
return nil, 0, false, err
}
@@ -748,7 +748,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
// Call executes the given transaction on the state for the given block number.
// It doesn't make and changes in the state/blockchain and is useful to execute and retrieve values.
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) {
- result, _, _, err := s.doCall(ctx, args, blockNr, vm.Config{}, 5*time.Second)
+ result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second)
return (hexutil.Bytes)(result), err
}
@@ -777,7 +777,7 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h
executable := func(gas uint64) bool {
args.Gas = hexutil.Uint64(gas)
- _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, vm.Config{}, 0)
+ _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0)
if err != nil || failed {
return false
}
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index c9ffe230c..e23ee03b1 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -53,7 +53,7 @@ type Backend interface {
GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
GetTd(blockHash common.Hash) *big.Int
- GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error)
+ GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error)
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index a5f319653..06bfcef69 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -385,6 +385,18 @@ web3._extend({
inputFormatter: [null]
}),
new web3._extend.Method({
+ name: 'standardTraceBadBlockToFile',
+ call: 'debug_standardTraceBadBlockToFile',
+ params: 2,
+ inputFormatter: [null, null]
+ }),
+ new web3._extend.Method({
+ name: 'standardTraceBlockToFile',
+ call: 'debug_standardTraceBlockToFile',
+ params: 2,
+ inputFormatter: [null, null]
+ }),
+ new web3._extend.Method({
name: 'traceBlockByNumber',
call: 'debug_traceBlockByNumber',
params: 2,
diff --git a/les/api_backend.go b/les/api_backend.go
index aa748a4ea..753139623 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -105,10 +105,10 @@ func (b *LesApiBackend) GetTd(hash common.Hash) *big.Int {
return b.eth.blockchain.GetTdByHash(hash)
}
-func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) {
+func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) {
state.SetBalance(msg.From(), math.MaxBig256)
context := core.NewEVMContext(msg, header, b.eth.blockchain, nil)
- return vm.NewEVM(context, state, b.eth.chainConfig, vmCfg), state.Error, nil
+ return vm.NewEVM(context, state, b.eth.chainConfig, vm.Config{}), state.Error, nil
}
func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
diff --git a/les/fetcher.go b/les/fetcher.go
index f0d3b188d..2615f69df 100644
--- a/les/fetcher.go
+++ b/les/fetcher.go
@@ -141,36 +141,39 @@ func (f *lightFetcher) syncLoop() {
s := requesting
requesting = false
var (
- rq *distReq
- reqID uint64
+ rq *distReq
+ reqID uint64
+ syncing bool
)
if !f.syncing && !(newAnnounce && s) {
- rq, reqID = f.nextRequest()
+ rq, reqID, syncing = f.nextRequest()
}
- syncing := f.syncing
f.lock.Unlock()
if rq != nil {
requesting = true
- _, ok := <-f.pm.reqDist.queue(rq)
- if !ok {
+ if _, ok := <-f.pm.reqDist.queue(rq); ok {
+ if syncing {
+ f.lock.Lock()
+ f.syncing = true
+ f.lock.Unlock()
+ } else {
+ go func() {
+ time.Sleep(softRequestTimeout)
+ f.reqMu.Lock()
+ req, ok := f.requested[reqID]
+ if ok {
+ req.timeout = true
+ f.requested[reqID] = req
+ }
+ f.reqMu.Unlock()
+ // keep starting new requests while possible
+ f.requestChn <- false
+ }()
+ }
+ } else {
f.requestChn <- false
}
-
- if !syncing {
- go func() {
- time.Sleep(softRequestTimeout)
- f.reqMu.Lock()
- req, ok := f.requested[reqID]
- if ok {
- req.timeout = true
- f.requested[reqID] = req
- }
- f.reqMu.Unlock()
- // keep starting new requests while possible
- f.requestChn <- false
- }()
- }
}
case reqID := <-f.timeoutChn:
f.reqMu.Lock()
@@ -209,6 +212,7 @@ func (f *lightFetcher) syncLoop() {
f.checkSyncedHeaders(p)
f.syncing = false
f.lock.Unlock()
+ f.requestChn <- false
}
}
}
@@ -405,7 +409,7 @@ func (f *lightFetcher) requestedID(reqID uint64) bool {
// nextRequest selects the peer and announced head to be requested next, amount
// to be downloaded starting from the head backwards is also returned
-func (f *lightFetcher) nextRequest() (*distReq, uint64) {
+func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
var (
bestHash common.Hash
bestAmount uint64
@@ -427,14 +431,12 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) {
}
}
if bestTd == f.maxConfirmedTd {
- return nil, 0
+ return nil, 0, false
}
- f.syncing = bestSyncing
-
var rq *distReq
reqID := genReqID()
- if f.syncing {
+ if bestSyncing {
rq = &distReq{
getCost: func(dp distPeer) uint64 {
return 0
@@ -500,7 +502,7 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) {
},
}
}
- return rq, reqID
+ return rq, reqID, bestSyncing
}
// deliverHeaders delivers header download request responses for processing
diff --git a/les/flowcontrol/control.go b/les/flowcontrol/control.go
index d50eb809c..8ef4ba511 100644
--- a/les/flowcontrol/control.go
+++ b/les/flowcontrol/control.go
@@ -82,7 +82,6 @@ func (peer *ClientNode) RequestProcessed(cost uint64) (bv, realCost uint64) {
time := mclock.Now()
peer.recalcBV(time)
peer.bufValue -= cost
- peer.recalcBV(time)
rcValue, rcost := peer.cm.processed(peer.cmNode, time)
if rcValue < peer.params.BufLimit {
bv := peer.params.BufLimit - rcValue
diff --git a/light/trie.go b/light/trie.go
index c07e99461..ab4e18b43 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -108,7 +108,7 @@ func (t *odrTrie) TryGet(key []byte) ([]byte, error) {
func (t *odrTrie) TryUpdate(key, value []byte) error {
key = crypto.Keccak256(key)
return t.do(key, func() error {
- return t.trie.TryDelete(key)
+ return t.trie.TryUpdate(key, value)
})
}
diff --git a/miner/worker.go b/miner/worker.go
index 8579c5c84..48473796b 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@@ -692,7 +691,7 @@ func (w *worker) updateSnapshot() {
func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
snap := w.current.state.Snapshot()
- receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, vm.Config{})
+ receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
if err != nil {
w.current.state.RevertToSnapshot(snap)
return nil, err
diff --git a/mobile/big.go b/mobile/big.go
index dd7b15878..86ea93245 100644
--- a/mobile/big.go
+++ b/mobile/big.go
@@ -84,6 +84,13 @@ func (bi *BigInt) SetString(x string, base int) {
// BigInts represents a slice of big ints.
type BigInts struct{ bigints []*big.Int }
+// NewBigInts creates a slice of uninitialized big numbers.
+func NewBigInts(size int) *BigInts {
+ return &BigInts{
+ bigints: make([]*big.Int, size),
+ }
+}
+
// Size returns the number of big ints in the slice.
func (bi *BigInts) Size() int {
return len(bi.bigints)
diff --git a/node/config.go b/node/config.go
index 8f10f4f61..7b32a5908 100644
--- a/node/config.go
+++ b/node/config.go
@@ -24,6 +24,7 @@ import (
"path/filepath"
"runtime"
"strings"
+ "sync"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -152,6 +153,10 @@ type Config struct {
// Logger is a custom logger to use with the p2p.Server.
Logger log.Logger `toml:",omitempty"`
+
+ staticNodesWarning bool
+ trustedNodesWarning bool
+ oldGethResourceWarning bool
}
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
@@ -263,8 +268,8 @@ var isOldGethResource = map[string]bool{
"chaindata": true,
"nodes": true,
"nodekey": true,
- "static-nodes.json": true,
- "trusted-nodes.json": true,
+ "static-nodes.json": false, // no warning for these because they have their
+ "trusted-nodes.json": false, // own separate warning.
}
// ResolvePath resolves path in the instance directory.
@@ -277,13 +282,15 @@ func (c *Config) ResolvePath(path string) string {
}
// Backwards-compatibility: ensure that data directory files created
// by geth 1.4 are used if they exist.
- if c.name() == "geth" && isOldGethResource[path] {
+ if warn, isOld := isOldGethResource[path]; isOld {
oldpath := ""
- if c.Name == "geth" {
+ if c.name() == "geth" {
oldpath = filepath.Join(c.DataDir, path)
}
if oldpath != "" && common.FileExist(oldpath) {
- // TODO: print warning
+ if warn {
+ c.warnOnce(&c.oldGethResourceWarning, "Using deprecated resource file %s, please move this file to the 'geth' subdirectory of datadir.", oldpath)
+ }
return oldpath
}
}
@@ -337,17 +344,17 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
// StaticNodes returns a list of node enode URLs configured as static nodes.
func (c *Config) StaticNodes() []*enode.Node {
- return c.parsePersistentNodes(c.ResolvePath(datadirStaticNodes))
+ return c.parsePersistentNodes(&c.staticNodesWarning, c.ResolvePath(datadirStaticNodes))
}
// TrustedNodes returns a list of node enode URLs configured as trusted nodes.
func (c *Config) TrustedNodes() []*enode.Node {
- return c.parsePersistentNodes(c.ResolvePath(datadirTrustedNodes))
+ return c.parsePersistentNodes(&c.trustedNodesWarning, c.ResolvePath(datadirTrustedNodes))
}
// parsePersistentNodes parses a list of discovery node URLs loaded from a .json
// file from within the data directory.
-func (c *Config) parsePersistentNodes(path string) []*enode.Node {
+func (c *Config) parsePersistentNodes(w *bool, path string) []*enode.Node {
// Short circuit if no node config is present
if c.DataDir == "" {
return nil
@@ -355,10 +362,12 @@ func (c *Config) parsePersistentNodes(path string) []*enode.Node {
if _, err := os.Stat(path); err != nil {
return nil
}
+ c.warnOnce(w, "Found deprecated node list file %s, please use the TOML config file instead.", path)
+
// Load the nodes from the config file.
var nodelist []string
if err := common.LoadJSON(path, &nodelist); err != nil {
- log.Error(fmt.Sprintf("Can't load node file %s: %v", path, err))
+ log.Error(fmt.Sprintf("Can't load node list file: %v", err))
return nil
}
// Interpret the list as a discovery node array
@@ -440,3 +449,20 @@ func makeAccountManager(conf *Config) (*accounts.Manager, string, error) {
}
return accounts.NewManager(backends...), ephemeral, nil
}
+
+var warnLock sync.Mutex
+
+func (c *Config) warnOnce(w *bool, format string, args ...interface{}) {
+ warnLock.Lock()
+ defer warnLock.Unlock()
+
+ if *w {
+ return
+ }
+ l := c.Logger
+ if l == nil {
+ l = log.Root()
+ }
+ l.Warn(fmt.Sprintf(format, args...))
+ *w = true
+}
diff --git a/node/node.go b/node/node.go
index 0a931a9dd..c35a50972 100644
--- a/node/node.go
+++ b/node/node.go
@@ -287,7 +287,7 @@ func (n *Node) startInProc(apis []rpc.API) error {
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
return err
}
- n.log.Debug("InProc registered", "service", api.Service, "namespace", api.Namespace)
+ n.log.Debug("InProc registered", "namespace", api.Namespace)
}
n.inprocHandler = handler
return nil
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index afd4c9a27..9f7f1d41b 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -434,7 +434,7 @@ func (tab *Table) loadSeedNodes() {
for i := range seeds {
seed := seeds[i]
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
- log.Debug("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
+ log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
tab.add(seed)
}
}
diff --git a/p2p/discv5/net.go b/p2p/discv5/net.go
index a6cabf080..cdeb28dd5 100644
--- a/p2p/discv5/net.go
+++ b/p2p/discv5/net.go
@@ -567,12 +567,11 @@ loop:
net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
if n.state != nil && n.state.canQuery {
return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
- } else {
- if n.state == unknown {
- net.ping(n, n.addr())
- }
- return nil
}
+ if n.state == unknown {
+ net.ping(n, n.addr())
+ }
+ return nil
})
case <-statsDump.C:
diff --git a/p2p/protocols/accounting.go b/p2p/protocols/accounting.go
index 06a1a5845..770406a27 100644
--- a/p2p/protocols/accounting.go
+++ b/p2p/protocols/accounting.go
@@ -16,29 +16,32 @@
package protocols
-import "github.com/ethereum/go-ethereum/metrics"
+import (
+ "time"
+
+ "github.com/ethereum/go-ethereum/metrics"
+)
//define some metrics
var (
- //NOTE: these metrics just define the interfaces and are currently *NOT persisted* over sessions
//All metrics are cumulative
//total amount of units credited
- mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", nil)
+ mBalanceCredit metrics.Counter
//total amount of units debited
- mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", nil)
+ mBalanceDebit metrics.Counter
//total amount of bytes credited
- mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", nil)
+ mBytesCredit metrics.Counter
//total amount of bytes debited
- mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", nil)
+ mBytesDebit metrics.Counter
//total amount of credited messages
- mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", nil)
+ mMsgCredit metrics.Counter
//total amount of debited messages
- mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", nil)
+ mMsgDebit metrics.Counter
//how many times local node had to drop remote peers
- mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", nil)
+ mPeerDrops metrics.Counter
//how many times local node overdrafted and dropped
- mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", nil)
+ mSelfDrops metrics.Counter
)
//Prices defines how prices are being passed on to the accounting instance
@@ -105,6 +108,26 @@ func NewAccounting(balance Balance, po Prices) *Accounting {
return ah
}
+//SetupAccountingMetrics creates a separate registry for p2p accounting metrics;
+//this registry should be independent of any other metrics as it persists at different endpoints.
+//It also instantiates the given metrics and starts the persisting go-routine which
+//at the passed interval writes the metrics to a LevelDB
+func SetupAccountingMetrics(reportInterval time.Duration, path string) *AccountingMetrics {
+ //create an empty registry
+ registry := metrics.NewRegistry()
+ //instantiate the metrics
+ mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", registry)
+ mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", registry)
+ mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", registry)
+ mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", registry)
+ mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", registry)
+ mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", registry)
+ mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", registry)
+ mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", registry)
+ //create the DB and start persisting
+ return NewAccountingMetrics(registry, reportInterval, path)
+}
+
//Implement Hook.Send
// Send takes a peer, a size and a msg and
// - calculates the cost for the local node sending a msg of size to peer using the Prices interface
diff --git a/p2p/protocols/accounting_simulation_test.go b/p2p/protocols/accounting_simulation_test.go
index 65b737abe..e90a1d81d 100644
--- a/p2p/protocols/accounting_simulation_test.go
+++ b/p2p/protocols/accounting_simulation_test.go
@@ -20,7 +20,10 @@ import (
"context"
"flag"
"fmt"
+ "io/ioutil"
"math/rand"
+ "os"
+ "path/filepath"
"reflect"
"sync"
"testing"
@@ -66,6 +69,13 @@ func init() {
func TestAccountingSimulation(t *testing.T) {
//setup the balances objects for every node
bal := newBalances(*nodes)
+ //setup the metrics system or tests will fail trying to write metrics
+ dir, err := ioutil.TempDir("", "account-sim")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+ SetupAccountingMetrics(1*time.Second, filepath.Join(dir, "metrics.db"))
//define the node.Service for this test
services := adapters.Services{
"accounting": func(ctx *adapters.ServiceContext) (node.Service, error) {
diff --git a/p2p/protocols/protocol.go b/p2p/protocols/protocol.go
index 7dddd852f..b16720dd3 100644
--- a/p2p/protocols/protocol.go
+++ b/p2p/protocols/protocol.go
@@ -381,7 +381,7 @@ func (p *Peer) handleIncoming(handle func(ctx context.Context, msg interface{})
// * arguments
// * context
// * the local handshake to be sent to the remote peer
-// * funcion to be called on the remote handshake (can be nil)
+// * function to be called on the remote handshake (can be nil)
// * expects a remote handshake back of the same type
// * the dialing peer needs to send the handshake first and then waits for remote
// * the listening peer waits for the remote handshake and then sends it
diff --git a/p2p/protocols/reporter.go b/p2p/protocols/reporter.go
new file mode 100644
index 000000000..215d4fe31
--- /dev/null
+++ b/p2p/protocols/reporter.go
@@ -0,0 +1,147 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package protocols
+
+import (
+ "encoding/binary"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+//AccountMetrics abstracts away the metrics DB and
+//the reporter to persist metrics
+type AccountingMetrics struct {
+ reporter *reporter
+}
+
+//Close will be called when the node is being shutdown
+//for a graceful cleanup
+func (am *AccountingMetrics) Close() {
+ close(am.reporter.quit)
+ am.reporter.db.Close()
+}
+
+//reporter is an internal structure used to write p2p accounting related
+//metrics to a LevelDB. It will periodically write the accrued metrics to the DB.
+type reporter struct {
+ reg metrics.Registry //the registry for these metrics (independent of other metrics)
+ interval time.Duration //duration at which the reporter will persist metrics
+ db *leveldb.DB //the actual DB
+ quit chan struct{} //quit the reporter loop
+}
+
+//NewMetricsDB creates a new LevelDB instance used to persist metrics defined
+//inside p2p/protocols/accounting.go
+func NewAccountingMetrics(r metrics.Registry, d time.Duration, path string) *AccountingMetrics {
+ var val = make([]byte, 8)
+ var err error
+
+ //Create the LevelDB
+ db, err := leveldb.OpenFile(path, nil)
+ if err != nil {
+ log.Error(err.Error())
+ return nil
+ }
+
+ //Check for all defined metrics that there is a value in the DB
+ //If there is, assign it to the metric. This means that the node
+ //has been running before and that metrics have been persisted.
+ metricsMap := map[string]metrics.Counter{
+ "account.balance.credit": mBalanceCredit,
+ "account.balance.debit": mBalanceDebit,
+ "account.bytes.credit": mBytesCredit,
+ "account.bytes.debit": mBytesDebit,
+ "account.msg.credit": mMsgCredit,
+ "account.msg.debit": mMsgDebit,
+ "account.peerdrops": mPeerDrops,
+ "account.selfdrops": mSelfDrops,
+ }
+ //iterate the map and get the values
+ for key, metric := range metricsMap {
+ val, err = db.Get([]byte(key), nil)
+ //until the first time a value is being written,
+ //this will return an error.
+ //it could be beneficial though to log errors later,
+ //but that would require a different logic
+ if err == nil {
+ metric.Inc(int64(binary.BigEndian.Uint64(val)))
+ }
+ }
+
+ //create the reporter
+ rep := &reporter{
+ reg: r,
+ interval: d,
+ db: db,
+ quit: make(chan struct{}),
+ }
+
+ //run the go routine
+ go rep.run()
+
+ m := &AccountingMetrics{
+ reporter: rep,
+ }
+
+ return m
+}
+
+//run is the goroutine which periodically sends the metrics to the configured LevelDB
+func (r *reporter) run() {
+ intervalTicker := time.NewTicker(r.interval)
+
+ for {
+ select {
+ case <-intervalTicker.C:
+ //at each tick send the metrics
+ if err := r.save(); err != nil {
+ log.Error("unable to send metrics to LevelDB", "err", err)
+ //If there is an error in writing, exit the routine; we assume here that the error is
+ //severe and don't attempt to write again.
+ //Also, this should prevent leaking when the node is stopped
+ return
+ }
+ case <-r.quit:
+ //graceful shutdown
+ return
+ }
+ }
+}
+
+//send the metrics to the DB
+func (r *reporter) save() error {
+ //create a LevelDB Batch
+ batch := leveldb.Batch{}
+ //for each metric in the registry (which is independent)...
+ r.reg.Each(func(name string, i interface{}) {
+ metric, ok := i.(metrics.Counter)
+ if ok {
+ //assuming every metric here to be a Counter (separate registry)
+ //...create a snapshot...
+ ms := metric.Snapshot()
+ byteVal := make([]byte, 8)
+ binary.BigEndian.PutUint64(byteVal, uint64(ms.Count()))
+ //...and save the value to the DB
+ batch.Put([]byte(name), byteVal)
+ }
+ })
+ return r.db.Write(&batch, nil)
+}
diff --git a/p2p/protocols/reporter_test.go b/p2p/protocols/reporter_test.go
new file mode 100644
index 000000000..b9f06e674
--- /dev/null
+++ b/p2p/protocols/reporter_test.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package protocols
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+)
+
+//TestReporter tests that the metrics being collected for p2p accounting
+//are being persisted and available after restart of a node.
+//It simulates restarting by just recreating the DB as if the node had restarted.
+func TestReporter(t *testing.T) {
+ //create a test directory
+ dir, err := ioutil.TempDir("", "reporter-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ //setup the metrics
+ log.Debug("Setting up metrics first time")
+ reportInterval := 5 * time.Millisecond
+ metrics := SetupAccountingMetrics(reportInterval, filepath.Join(dir, "test.db"))
+ log.Debug("Done.")
+
+ //do some metrics
+ mBalanceCredit.Inc(12)
+ mBytesCredit.Inc(34)
+ mMsgDebit.Inc(9)
+
+ //give the reporter time to write the metrics to DB
+ time.Sleep(20 * time.Millisecond)
+
+ //set the metrics to nil - this effectively simulates the node having shut down...
+ mBalanceCredit = nil
+ mBytesCredit = nil
+ mMsgDebit = nil
+ //close the DB also, or we can't create a new one
+ metrics.Close()
+
+ //setup the metrics again
+ log.Debug("Setting up metrics second time")
+ metrics = SetupAccountingMetrics(reportInterval, filepath.Join(dir, "test.db"))
+ defer metrics.Close()
+ log.Debug("Done.")
+
+ //now check the metrics, they should have the same value as before "shutdown"
+ if mBalanceCredit.Count() != 12 {
+ t.Fatalf("Expected counter to be %d, but is %d", 12, mBalanceCredit.Count())
+ }
+ if mBytesCredit.Count() != 34 {
+ t.Fatalf("Expected counter to be %d, but is %d", 23, mBytesCredit.Count())
+ }
+ if mMsgDebit.Count() != 9 {
+ t.Fatalf("Expected counter to be %d, but is %d", 9, mMsgDebit.Count())
+ }
+}
diff --git a/p2p/server.go b/p2p/server.go
index 667860863..566f01ffc 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -22,7 +22,6 @@ import (
"crypto/ecdsa"
"encoding/hex"
"errors"
- "fmt"
"net"
"sort"
"sync"
@@ -391,7 +390,7 @@ type sharedUDPConn struct {
func (s *sharedUDPConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) {
packet, ok := <-s.unhandled
if !ok {
- return 0, nil, fmt.Errorf("Connection was closed")
+ return 0, nil, errors.New("Connection was closed")
}
l := len(packet.Data)
if l > len(b) {
@@ -425,7 +424,7 @@ func (srv *Server) Start() (err error) {
// static fields
if srv.PrivateKey == nil {
- return fmt.Errorf("Server.PrivateKey must be set to a non-nil key")
+ return errors.New("Server.PrivateKey must be set to a non-nil key")
}
if srv.newTransport == nil {
srv.newTransport = newRLPX
@@ -903,7 +902,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
if dialDest != nil {
dialPubkey = new(ecdsa.PublicKey)
if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil {
- return fmt.Errorf("dial destination doesn't have a secp256k1 public key")
+ return errors.New("dial destination doesn't have a secp256k1 public key")
}
}
// Run the encryption handshake.
@@ -937,7 +936,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
return err
}
if id := c.node.ID(); !bytes.Equal(crypto.Keccak256(phs.ID), id[:]) {
- clog.Trace("Wrong devp2p handshake identity", "phsid", fmt.Sprintf("%x", phs.ID))
+ clog.Trace("Wrong devp2p handshake identity", "phsid", hex.EncodeToString(phs.ID))
return DiscUnexpectedIdentity
}
c.caps, c.name = phs.Caps, phs.Name
diff --git a/p2p/simulations/network.go b/p2p/simulations/network.go
index 92ccfde81..ab9f582c5 100644
--- a/p2p/simulations/network.go
+++ b/p2p/simulations/network.go
@@ -20,6 +20,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"sync"
"time"
@@ -705,8 +706,11 @@ func (net *Network) snapshot(addServices []string, removeServices []string) (*Sn
return snap, nil
}
+var snapshotLoadTimeout = 120 * time.Second
+
// Load loads a network snapshot
func (net *Network) Load(snap *Snapshot) error {
+ // Start nodes.
for _, n := range snap.Nodes {
if _, err := net.NewNodeWithConfig(n.Node.Config); err != nil {
return err
@@ -718,6 +722,69 @@ func (net *Network) Load(snap *Snapshot) error {
return err
}
}
+
+ // Prepare connection events counter.
+ allConnected := make(chan struct{}) // closed when all connections are established
+ done := make(chan struct{}) // ensures that the event loop goroutine is terminated
+ defer close(done)
+
+ // Subscribe to event channel.
+ // It needs to be done outside of the event loop goroutine (created below)
+ // to ensure that the event channel is blocking before connect calls are made.
+ events := make(chan *Event)
+ sub := net.Events().Subscribe(events)
+ defer sub.Unsubscribe()
+
+ go func() {
+ // Expected number of connections.
+ total := len(snap.Conns)
+ // Set of all established connections from the snapshot, not other connections.
+ // Key array element 0 is the connection One field value, and element 1 connection Other field.
+ connections := make(map[[2]enode.ID]struct{}, total)
+
+ for {
+ select {
+ case e := <-events:
+ // Ignore control events as they do not represent
+ // connect or disconnect (Up) state change.
+ if e.Control {
+ continue
+ }
+ // Detect only connection events.
+ if e.Type != EventTypeConn {
+ continue
+ }
+ connection := [2]enode.ID{e.Conn.One, e.Conn.Other}
+ // Nodes are still not connected or have been disconnected.
+ if !e.Conn.Up {
+ // Delete the connection from the set of established connections.
+ // This will prevent false positive in case disconnections happen.
+ delete(connections, connection)
+ log.Warn("load snapshot: unexpected disconnection", "one", e.Conn.One, "other", e.Conn.Other)
+ continue
+ }
+ // Check that the connection is from the snapshot.
+ for _, conn := range snap.Conns {
+ if conn.One == e.Conn.One && conn.Other == e.Conn.Other {
+ // Add the connection to the set of established connections.
+ connections[connection] = struct{}{}
+ if len(connections) == total {
+ // Signal that all nodes are connected.
+ close(allConnected)
+ return
+ }
+
+ break
+ }
+ }
+ case <-done:
+ // Load function returned, terminate this goroutine.
+ return
+ }
+ }
+ }()
+
+ // Start connecting.
for _, conn := range snap.Conns {
if !net.GetNode(conn.One).Up || !net.GetNode(conn.Other).Up {
@@ -729,6 +796,14 @@ func (net *Network) Load(snap *Snapshot) error {
return err
}
}
+
+ select {
+ // Wait until all connections from the snapshot are established.
+ case <-allConnected:
+ // Make sure that we do not wait forever.
+ case <-time.After(snapshotLoadTimeout):
+ return errors.New("snapshot connections not established")
+ }
return nil
}
diff --git a/params/config.go b/params/config.go
index 007e4a66d..7734c6e9c 100644
--- a/params/config.go
+++ b/params/config.go
@@ -42,17 +42,17 @@ var (
EIP155Block: big.NewInt(2675000),
EIP158Block: big.NewInt(2675000),
ByzantiumBlock: big.NewInt(4370000),
- ConstantinopleBlock: nil,
+ ConstantinopleBlock: big.NewInt(7080000),
Ethash: new(EthashConfig),
}
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "mainnet",
- SectionIndex: 203,
- SectionHead: common.HexToHash("0xc9e05fc67c6a9815adc8072eb18805b53da53a9a6a273e05541e1b7542cf937a"),
- CHTRoot: common.HexToHash("0xb85f42447d59f7c3e6679b9a37ed983593fd52efd6251b883592662e95769d5b"),
- BloomRoot: common.HexToHash("0xf93d50cb4c49b403c6fd33cd60896d3b36184275be0a51bae4df5e8844ac624c"),
+ SectionIndex: 206,
+ SectionHead: common.HexToHash("0x9fa677c7c0580136f5a86d9b2fd29b112e531f0284396298b8809bcb6787b538"),
+ CHTRoot: common.HexToHash("0x7f32dfb29e341b4c8c10ea2e06a812bcea470366f635b7a8b3d0856684cd76f4"),
+ BloomRoot: common.HexToHash("0x0169e174f0a8172aec217556d8a25c7ba7ca52aacff170325230a75740ff1eaf"),
}
// TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network.
@@ -73,10 +73,10 @@ var (
// TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
TestnetTrustedCheckpoint = &TrustedCheckpoint{
Name: "testnet",
- SectionIndex: 134,
- SectionHead: common.HexToHash("0x17053ecbe045bebefaa01e7716cc85a4e22647e181416cc1098ccbb73a088931"),
- CHTRoot: common.HexToHash("0x4d2b86422e46ed76f0e3f50f06632c409f809c8375e53c8bc0f782bcb93dd49a"),
- BloomRoot: common.HexToHash("0xccba62232ee56c2967afc58f136a47ba7dc545ae586e6be666430d94516306c7"),
+ SectionIndex: 136,
+ SectionHead: common.HexToHash("0xe5d80bb08d92bbc12dfe510c64cba01eafcbb4ba585e7c7ab7f8a93c6f295ab3"),
+ CHTRoot: common.HexToHash("0xe3ca77ab0cb51eec74f4f7458e36aee207c68768387b39cb0bcff0940a6264d8"),
+ BloomRoot: common.HexToHash("0x30c8eeadac5539d3dcd6e88915d1a07cb2f3a1d6ebe7e553e3ee783c04c68c2d"),
}
// RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network.
@@ -90,7 +90,7 @@ var (
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(1035301),
- ConstantinopleBlock: nil,
+ ConstantinopleBlock: big.NewInt(3660663),
Clique: &CliqueConfig{
Period: 15,
Epoch: 30000,
@@ -100,10 +100,10 @@ var (
// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
RinkebyTrustedCheckpoint = &TrustedCheckpoint{
Name: "rinkeby",
- SectionIndex: 100,
- SectionHead: common.HexToHash("0xf18f9b43e16f37b12e68818536ffe455ff18d676274ffdd856a8520ed61bb514"),
- CHTRoot: common.HexToHash("0x473f5d603b1fedad75d97fd58692130b9ac9ade1aca01eb9363d79bd1c43c791"),
- BloomRoot: common.HexToHash("0xa39ced3ddbb87e909c7531df2afb6414bea9c9a60ab94da9c6b467535f05326e"),
+ SectionIndex: 103,
+ SectionHead: common.HexToHash("0x9f38b903852831bf4fa7992f7fd43d8b26da2deb82b421fb845cf6faee54e056"),
+ CHTRoot: common.HexToHash("0x2d710c2cea468d2e604838000d658ee213e4abb07f90c4f71f5cd7f8510aa708"),
+ BloomRoot: common.HexToHash("0xcc401060280c2cc82697ea5ecef8cac61e52063c37533a2e9609332419704d5f"),
}
// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
@@ -111,16 +111,16 @@ var (
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
+ AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
+ AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
- TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
+ TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
TestRules = TestChainConfig.Rules(new(big.Int))
)
diff --git a/params/version.go b/params/version.go
index b9dcc2a84..2291d5f3c 100644
--- a/params/version.go
+++ b/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 8 // Minor version component of the current release
- VersionPatch = 19 // Patch version component of the current release
+ VersionPatch = 20 // Patch version component of the current release
VersionMeta = "unstable" // Version metadata to append to the version string
)
diff --git a/signer/core/api.go b/signer/core/api.go
index 2b96cdb5f..e9a335785 100644
--- a/signer/core/api.go
+++ b/signer/core/api.go
@@ -82,7 +82,7 @@ type SignerUI interface {
// OnSignerStartup is invoked when the signer boots, and tells the UI info about external API location and version
// information
OnSignerStartup(info StartupInfo)
- // OnInputRequried is invoked when clef requires user input, for example master password or
+ // OnInputRequired is invoked when clef requires user input, for example master password or
// pin-code for unlocking hardware wallets
OnInputRequired(info UserInputRequest) (UserInputResponse, error)
}
diff --git a/swarm/OWNERS b/swarm/OWNERS
index d4204e08c..4b9ca96eb 100644
--- a/swarm/OWNERS
+++ b/swarm/OWNERS
@@ -7,7 +7,6 @@ swarm
├── fuse ────────────────── @jmozah, @holisticode
├── grafana_dashboards ──── @nonsense
├── metrics ─────────────── @nonsense, @holisticode
-├── multihash ───────────── @nolash
├── network ─────────────── ethersphere
│ ├── bitvector ───────── @zelig, @janos, @gbalint
│ ├── priorityqueue ───── @zelig, @janos, @gbalint
diff --git a/swarm/api/api.go b/swarm/api/api.go
index 7bb631967..33a8e3539 100644
--- a/swarm/api/api.go
+++ b/swarm/api/api.go
@@ -42,7 +42,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/log"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
@@ -417,7 +416,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
return reader, mimeType, status, nil, err
}
// get the data of the update
- _, rsrcData, err := a.feed.GetContent(entry.Feed)
+ _, contentAddr, err := a.feed.GetContent(entry.Feed)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
@@ -425,23 +424,23 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
return reader, mimeType, status, nil, err
}
- // extract multihash
- decodedMultihash, err := multihash.FromMultihash(rsrcData)
- if err != nil {
+ // extract content hash
+ if len(contentAddr) != storage.AddressLength {
apiGetInvalid.Inc(1)
status = http.StatusUnprocessableEntity
- log.Warn("invalid multihash in feed update", "err", err)
- return reader, mimeType, status, nil, err
+ errorMessage := fmt.Sprintf("invalid swarm hash in feed update. Expected %d bytes. Got %d", storage.AddressLength, len(contentAddr))
+ log.Warn(errorMessage)
+ return reader, mimeType, status, nil, errors.New(errorMessage)
}
- manifestAddr = storage.Address(decodedMultihash)
- log.Trace("feed update contains multihash", "key", manifestAddr)
+ manifestAddr = storage.Address(contentAddr)
+ log.Trace("feed update contains swarm hash", "key", manifestAddr)
- // get the manifest the multihash digest points to
+ // get the manifest the swarm hash points to
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
- log.Warn(fmt.Sprintf("loadManifestTrie (feed update multihash) error: %v", err))
+ log.Warn(fmt.Sprintf("loadManifestTrie (feed update) error: %v", err))
return reader, mimeType, status, nil, err
}
@@ -451,8 +450,8 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
if entry == nil {
status = http.StatusNotFound
apiGetNotFound.Inc(1)
- err = fmt.Errorf("manifest (feed update multihash) entry for '%s' not found", path)
- log.Trace("manifest (feed update multihash) entry not found", "key", manifestAddr, "path", path)
+ err = fmt.Errorf("manifest (feed update) entry for '%s' not found", path)
+ log.Trace("manifest (feed update) entry not found", "key", manifestAddr, "path", path)
return reader, mimeType, status, nil, err
}
}
@@ -472,7 +471,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
// no entry found
status = http.StatusNotFound
apiGetNotFound.Inc(1)
- err = fmt.Errorf("manifest entry for '%s' not found", path)
+ err = fmt.Errorf("Not found: could not find resource '%s'", path)
log.Trace("manifest entry not found", "key", contentAddr, "path", path)
}
return
diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go
index 76b349397..39f6e4797 100644
--- a/swarm/api/client/client_test.go
+++ b/swarm/api/client/client_test.go
@@ -25,13 +25,13 @@ import (
"sort"
"testing"
+ "github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/swarm/api"
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
)
@@ -368,58 +368,99 @@ func newTestSigner() (*feed.GenericSigner, error) {
return feed.NewGenericSigner(privKey), nil
}
-// test the transparent resolving of multihash feed updates with bzz:// scheme
+// Test the transparent resolving of feed updates with bzz:// scheme
//
-// first upload data, and store the multihash to the resulting manifest in a feed update
-// retrieving the update with the multihash should return the manifest pointing directly to the data
+// First upload data to bzz:, and store the Swarm hash to the resulting manifest in a feed update.
+// This effectively uses a feed to store a pointer to content rather than the content itself
+// Retrieving the update with the Swarm hash should return the manifest pointing directly to the data
// and raw retrieve of that hash should return the data
-func TestClientCreateFeedMultihash(t *testing.T) {
+func TestClientBzzWithFeed(t *testing.T) {
signer, _ := newTestSigner()
+ // Initialize a Swarm test server
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
- client := NewClient(srv.URL)
+ swarmClient := NewClient(srv.URL)
defer srv.Close()
- // add the data our multihash aliased manifest will point to
- databytes := []byte("bar")
-
- swarmHash, err := client.UploadRaw(bytes.NewReader(databytes), int64(len(databytes)), false)
+ // put together some data for our test:
+ dataBytes := []byte(`
+ //
+ // Create some data our manifest will point to. Data that could be very big and wouldn't fit in a feed update.
+ // So what we are going to do is upload it to Swarm bzz:// and obtain a **manifest hash** pointing to it:
+ //
+ // MANIFEST HASH --> DATA
+ //
+ // Then, we store that **manifest hash** into a Swarm Feed update. Once we have done this,
+ // we can use the **feed manifest hash** in bzz:// instead, this way: bzz://feed-manifest-hash.
+ //
+ // FEED MANIFEST HASH --> MANIFEST HASH --> DATA
+ //
+ // Given that we can update the feed at any time with a new **manifest hash** but the **feed manifest hash**
+ // stays constant, we have effectively created a fixed address to changing content. (Applause)
+ //
+ // FEED MANIFEST HASH (the same) --> MANIFEST HASH(2) --> DATA(2)
+ //
+ `)
+
+ // Create a virtual File out of memory containing the above data
+ f := &File{
+ ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)),
+ ManifestEntry: api.ManifestEntry{
+ ContentType: "text/plain",
+ Mode: 0660,
+ Size: int64(len(dataBytes)),
+ },
+ }
+
+ // upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
+ manifestAddressHex, err := swarmClient.Upload(f, "", false)
if err != nil {
- t.Fatalf("Error uploading raw test data: %s", err)
+ t.Fatalf("Error creating manifest: %s", err)
}
- s := common.FromHex(swarmHash)
- mh := multihash.ToMultihash(s)
+ // convert the hex-encoded manifest hash to a 32-byte slice
+ manifestAddress := common.FromHex(manifestAddressHex)
+
+ if len(manifestAddress) != storage.AddressLength {
+ t.Fatalf("Something went wrong. Got a hash of an unexpected length. Expected %d bytes. Got %d", storage.AddressLength, len(manifestAddress))
+ }
- // our feed topic
- topic, _ := feed.NewTopic("foo.eth", nil)
+ // Now create a **feed manifest**. For that, we need a topic:
+ topic, _ := feed.NewTopic("interesting topic indeed", nil)
- createRequest := feed.NewFirstRequest(topic)
+ // Build a feed request to update data
+ request := feed.NewFirstRequest(topic)
- createRequest.SetData(mh)
- if err := createRequest.Sign(signer); err != nil {
+ // Put the 32-byte address of the manifest into the feed update
+ request.SetData(manifestAddress)
+
+ // Sign the update
+ if err := request.Sign(signer); err != nil {
t.Fatalf("Error signing update: %s", err)
}
- feedManifestHash, err := client.CreateFeedWithManifest(createRequest)
-
+ // Publish the update and at the same time request a **feed manifest** to be created
+ feedManifestAddressHex, err := swarmClient.CreateFeedWithManifest(request)
if err != nil {
t.Fatalf("Error creating feed manifest: %s", err)
}
- correctManifestAddrHex := "bb056a5264c295c2b0f613c8409b9c87ce9d71576ace02458160df4cc894210b"
- if feedManifestHash != correctManifestAddrHex {
- t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, feedManifestHash)
+ // Check we have received the exact **feed manifest** to be expected
+ // given the topic and user signing the updates:
+ correctFeedManifestAddrHex := "747c402e5b9dc715a25a4393147512167bab018a007fad7cdcd9adc7fce1ced2"
+ if feedManifestAddressHex != correctFeedManifestAddrHex {
+ t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctFeedManifestAddrHex, feedManifestAddressHex)
}
// Check we get a not found error when trying to get feed updates with a made-up manifest
- _, err = client.QueryFeed(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
+ _, err = swarmClient.QueryFeed(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
if err != ErrNoFeedUpdatesFound {
t.Fatalf("Expected to receive ErrNoFeedUpdatesFound error. Got: %s", err)
}
- reader, err := client.QueryFeed(nil, correctManifestAddrHex)
+ // If we query the feed directly we should get **manifest hash** back:
+ reader, err := swarmClient.QueryFeed(nil, correctFeedManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving feed updates: %s", err)
}
@@ -428,10 +469,27 @@ func TestClientCreateFeedMultihash(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(mh, gotData) {
- t.Fatalf("Expected: %v, got %v", mh, gotData)
+
+ //Check that indeed the **manifest hash** is retrieved
+ if !bytes.Equal(manifestAddress, gotData) {
+ t.Fatalf("Expected: %v, got %v", manifestAddress, gotData)
+ }
+
+ // Now the final test we were looking for: Use bzz://<feed-manifest> and that should resolve all manifests
+ // and return the original data directly:
+ f, err = swarmClient.Download(feedManifestAddressHex, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ gotData, err = ioutil.ReadAll(f)
+ if err != nil {
+ t.Fatal(err)
}
+ // Check that we get back the original data:
+ if !bytes.Equal(dataBytes, gotData) {
+ t.Fatalf("Expected: %v, got %v", manifestAddress, gotData)
+ }
}
// TestClientCreateUpdateFeed will check that feeds can be created and updated via the HTTP client.
diff --git a/swarm/api/http/middleware.go b/swarm/api/http/middleware.go
index f5f70138b..115a00856 100644
--- a/swarm/api/http/middleware.go
+++ b/swarm/api/http/middleware.go
@@ -5,6 +5,7 @@ import (
"net/http"
"runtime/debug"
"strings"
+ "time"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
@@ -73,9 +74,13 @@ func ParseURI(h http.Handler) http.Handler {
func InitLoggingResponseWriter(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ startTime := time.Now()
+ defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).UpdateSince(startTime)
+
writer := newLoggingResponseWriter(w)
h.ServeHTTP(writer, r)
log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode)
+ metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.%d.time", r.Method, writer.statusCode), nil).UpdateSince(startTime)
})
}
diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go
index 1ef3deece..e82762ce0 100644
--- a/swarm/api/http/server_test.go
+++ b/swarm/api/http/server_test.go
@@ -45,7 +45,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/ethereum/go-ethereum/swarm/testutil"
@@ -69,60 +68,91 @@ func newTestSigner() (*feed.GenericSigner, error) {
return feed.NewGenericSigner(privKey), nil
}
-// test the transparent resolving of multihash-containing feed updates with bzz:// scheme
+// Test the transparent resolving of feed updates with bzz:// scheme
//
-// first upload data, and store the multihash to the resulting manifest in a feed update
-// retrieving the update with the multihash should return the manifest pointing directly to the data
+// First upload data to bzz:, and store the Swarm hash to the resulting manifest in a feed update.
+// This effectively uses a feed to store a pointer to content rather than the content itself
+// Retrieving the update with the Swarm hash should return the manifest pointing directly to the data
// and raw retrieve of that hash should return the data
-func TestBzzFeedMultihash(t *testing.T) {
+func TestBzzWithFeed(t *testing.T) {
signer, _ := newTestSigner()
+ // Initialize Swarm test server
srv := NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
- // add the data our multihash aliased manifest will point to
- databytes := "bar"
- testBzzUrl := fmt.Sprintf("%s/bzz:/", srv.URL)
- resp, err := http.Post(testBzzUrl, "text/plain", bytes.NewReader([]byte(databytes)))
+ // put together some data for our test:
+ dataBytes := []byte(`
+ //
+ // Create some data our manifest will point to. Data that could be very big and wouldn't fit in a feed update.
+ // So what we are going to do is upload it to Swarm bzz:// and obtain a **manifest hash** pointing to it:
+ //
+ // MANIFEST HASH --> DATA
+ //
+ // Then, we store that **manifest hash** into a Swarm Feed update. Once we have done this,
+ // we can use the **feed manifest hash** in bzz:// instead, this way: bzz://feed-manifest-hash.
+ //
+ // FEED MANIFEST HASH --> MANIFEST HASH --> DATA
+ //
+ // Given that we can update the feed at any time with a new **manifest hash** but the **feed manifest hash**
+ // stays constant, we have effectively created a fixed address to changing content. (Applause)
+ //
+ // FEED MANIFEST HASH (the same) --> MANIFEST HASH(2) --> DATA(2) ...
+ //
+ `)
+
+ // POST data to bzz and get back a content-addressed **manifest hash** pointing to it.
+ resp, err := http.Post(fmt.Sprintf("%s/bzz:/", srv.URL), "text/plain", bytes.NewReader([]byte(dataBytes)))
if err != nil {
t.Fatal(err)
}
+
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
- b, err := ioutil.ReadAll(resp.Body)
+ manifestAddressHex, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
- s := common.FromHex(string(b))
- mh := multihash.ToMultihash(s)
- log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
+ manifestAddress := common.FromHex(string(manifestAddressHex))
- topic, _ := feed.NewTopic("foo.eth", nil)
+ log.Info("added data", "manifest", string(manifestAddressHex))
+
+ // At this point we have uploaded the data and have a manifest pointing to it
+ // Now store that manifest address in a feed update.
+ // We also want a feed manifest, so we can use it to refer to the feed.
+
+ // First, create a topic for our feed:
+ topic, _ := feed.NewTopic("interesting topic indeed", nil)
+
+ // Create a feed update request:
updateRequest := feed.NewFirstRequest(topic)
- updateRequest.SetData(mh)
+ // Store the **manifest address** as data into the feed update.
+ updateRequest.SetData(manifestAddress)
+ // Sign the update
if err := updateRequest.Sign(signer); err != nil {
t.Fatal(err)
}
- log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
+ log.Info("added data", "data", common.ToHex(manifestAddress))
- testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL))
+ // Build the feed update http request:
+ feedUpdateURL, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL))
if err != nil {
t.Fatal(err)
}
- query := testUrl.Query()
+ query := feedUpdateURL.Query()
body := updateRequest.AppendValues(query) // this adds all query parameters and returns the data to be posted
- query.Set("manifest", "1") // indicate we want a manifest back
- testUrl.RawQuery = query.Encode()
+ query.Set("manifest", "1") // indicate we want a feed manifest back
+ feedUpdateURL.RawQuery = query.Encode()
- // create the multihash update
- resp, err = http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body))
+ // submit the feed update request to Swarm
+ resp, err = http.Post(feedUpdateURL.String(), "application/octet-stream", bytes.NewReader(body))
if err != nil {
t.Fatal(err)
}
@@ -130,24 +160,25 @@ func TestBzzFeedMultihash(t *testing.T) {
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
- b, err = ioutil.ReadAll(resp.Body)
+
+ feedManifestAddressHex, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
- rsrcResp := &storage.Address{}
- err = json.Unmarshal(b, rsrcResp)
+ feedManifestAddress := &storage.Address{}
+ err = json.Unmarshal(feedManifestAddressHex, feedManifestAddress)
if err != nil {
- t.Fatalf("data %s could not be unmarshaled: %v", b, err)
+ t.Fatalf("data %s could not be unmarshaled: %v", feedManifestAddressHex, err)
}
- correctManifestAddrHex := "bb056a5264c295c2b0f613c8409b9c87ce9d71576ace02458160df4cc894210b"
- if rsrcResp.Hex() != correctManifestAddrHex {
- t.Fatalf("Response feed manifest address mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
+ correctManifestAddrHex := "747c402e5b9dc715a25a4393147512167bab018a007fad7cdcd9adc7fce1ced2"
+ if feedManifestAddress.Hex() != correctManifestAddrHex {
+ t.Fatalf("Response feed manifest address mismatch, expected '%s', got '%s'", correctManifestAddrHex, feedManifestAddress.Hex())
}
// get bzz manifest transparent feed update resolve
- testBzzUrl = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp)
- resp, err = http.Get(testBzzUrl)
+ getBzzURL := fmt.Sprintf("%s/bzz:/%s", srv.URL, feedManifestAddress)
+ resp, err = http.Get(getBzzURL)
if err != nil {
t.Fatal(err)
}
@@ -155,12 +186,12 @@ func TestBzzFeedMultihash(t *testing.T) {
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
- b, err = ioutil.ReadAll(resp.Body)
+ retrievedData, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
- if !bytes.Equal(b, []byte(databytes)) {
- t.Fatalf("retrieved data mismatch, expected %x, got %x", databytes, b)
+ if !bytes.Equal(retrievedData, []byte(dataBytes)) {
+ t.Fatalf("retrieved data mismatch, expected %x, got %x", dataBytes, retrievedData)
}
}
@@ -245,7 +276,8 @@ func TestBzzFeed(t *testing.T) {
t.Fatalf("Expected manifest Feed '%s', got '%s'", correctFeedHex, manifest.Entries[0].Feed.Hex())
}
- // get bzz manifest transparent feed update resolve
+ // take the chance to have bzz: crash on resolving a feed update that does not contain
+ // a swarm hash:
testBzzUrl := fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp)
resp, err = http.Get(testBzzUrl)
if err != nil {
@@ -253,7 +285,7 @@ func TestBzzFeed(t *testing.T) {
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
- t.Fatal("Expected error status since feed update does not contain multihash. Received 200 OK")
+ t.Fatal("Expected error status since feed update does not contain a Swarm hash. Received 200 OK")
}
_, err = ioutil.ReadAll(resp.Body)
if err != nil {
diff --git a/swarm/grafana_dashboards/ldbstore.json b/swarm/grafana_dashboards/ldbstore.json
deleted file mode 100644
index 2d64380ba..000000000
--- a/swarm/grafana_dashboards/ldbstore.json
+++ /dev/null
@@ -1,2278 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "$$hashKey": "object:325",
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": null,
- "graphTooltip": 1,
- "id": 5,
- "iteration": 1527598894689,
- "links": [],
- "panels": [
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 40,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 42,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.cachehit.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get cachehit",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 43,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.cachemiss.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get cachemiss",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 7
- },
- "id": 44,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total LocalStore.GetOrCreateRequest",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 7
- },
- "id": 47,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.errfetching.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore GetOrCreateRequest ErrFetching",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "id": 45,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.hit.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore.GetOrCreateRequest hit",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 13
- },
- "id": 49,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.getorcreaterequest.miss.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore GetOrCreateRequest miss",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 19
- },
- "id": 48,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.error.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get error",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 19
- },
- "id": 46,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.localstore.get.errfetching.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LocalStore get ErrFetching",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "LocalStore",
- "type": "row"
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 1
- },
- "id": 27,
- "panels": [],
- "title": "LDBStore",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 2
- },
- "id": 29,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.get.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore get",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 2
- },
- "id": 30,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.put.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore put",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 8
- },
- "id": 31,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.synciterator.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore SyncIterator",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 8
- },
- "id": 32,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.synciterator.seek.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore SyncIterator Seek/Next",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 14
- },
- "id": 50,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.collectgarbage.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore Collect Garbage",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 14
- },
- "id": 51,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbstore.collectgarbage.delete.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBStore Collect Garbage - Actual Deletes",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 20
- },
- "id": 34,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 39
- },
- "id": 36,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbdatabase.get.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBDatabase get",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 39
- },
- "id": 37,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbdatabase.write.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBDatabase write",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 45
- },
- "id": 38,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.ldbdatabase.newiterator.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LDBDatabase NewIterator",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "LDBDatabase",
- "type": "row"
- }
- ],
- "refresh": "10s",
- "schemaVersion": 16,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "auto": false,
- "auto_count": 30,
- "auto_min": "10s",
- "current": {
- "text": "10s",
- "value": "10s"
- },
- "hide": 0,
- "label": "resolution",
- "name": "myinterval",
- "options": [
- {
- "selected": false,
- "text": "5s",
- "value": "5s"
- },
- {
- "selected": true,
- "text": "10s",
- "value": "10s"
- },
- {
- "selected": false,
- "text": "30s",
- "value": "30s"
- },
- {
- "selected": false,
- "text": "100s",
- "value": "100s"
- }
- ],
- "query": "5s,10s,30s,100s",
- "refresh": 2,
- "type": "interval"
- },
- {
- "allValue": null,
- "current": {
- "text": "swarm_30399 + swarm_30400 + swarm_30401",
- "value": [
- "swarm_30399",
- "swarm_30400",
- "swarm_30401"
- ]
- },
- "datasource": "metrics",
- "hide": 0,
- "includeAll": true,
- "label": null,
- "multi": true,
- "name": "host",
- "options": [],
- "query": "SHOW TAG VALUES WITH KEY = \"host\"",
- "refresh": 1,
- "regex": "",
- "sort": 1,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "swarm.http.request.GET.time.span",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-15m",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "LDBStore and LDBDatabase",
- "uid": "zS6beG7iz",
- "version": 28
-}
diff --git a/swarm/grafana_dashboards/swarm.json b/swarm/grafana_dashboards/swarm.json
deleted file mode 100644
index 3ee244d15..000000000
--- a/swarm/grafana_dashboards/swarm.json
+++ /dev/null
@@ -1,3198 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "$$hashKey": "object:147",
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": null,
- "graphTooltip": 1,
- "id": 2,
- "iteration": 1527598859072,
- "links": [],
- "panels": [
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 34,
- "panels": [],
- "title": "P2P",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 36,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.send.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P Send() - messages sent",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 37,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "p95($tag_host)",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.send_t.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P Send() timer - 95%ile",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 10
- },
- "id": 38,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "1 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority.1.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- },
- {
- "alias": "2 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority.2.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- },
- {
- "alias": "3 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority.3.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "C",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P SendPriority() - messages sent",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 10
- },
- "id": 39,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "1 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority_t.1.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": []
- },
- {
- "alias": "2 $tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.sendpriority_t.2.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "P2P SendPriority() timer - 95%ile",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 19
- },
- "id": 40,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$__interval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "none"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.registry.peers.gauge",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "last"
- }
- ]
- ],
- "tags": []
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Registry Peers",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 28
- },
- "id": 32,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 2
- },
- "id": 14,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.stack.uptime.gauge",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Uptime",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "Uptime",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 29
- },
- "id": 28,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 7
- },
- "id": 2,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "GET",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "null"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.GET.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- },
- {
- "alias": "POST",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "null"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.POST.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Total HTTP Requests",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 7
- },
- "id": 26,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.GET.time.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "HTTP GET requests 95% timer",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "id": 15,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.GET.time.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p50"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "HTTP GET requests 50% timer",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 13
- },
- "id": 8,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "POST",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.http.request.POST.time.span",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "p95"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "mean"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "HTTP POST requests 95% timer",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "ns",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "HTTP",
- "type": "row"
- },
- {
- "collapsed": true,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 30
- },
- "id": 30,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 0,
- "y": 8
- },
- "id": 16,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.lazychunkreader.read.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LazyChunkReader read() calls",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 12,
- "y": 8
- },
- "id": 18,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.lazychunkreader.read.err.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LazyChunkReader read errors",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "id": 17,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.lazychunkreader.read.bytes.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "LazyChunkReader bytes read",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "decbytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "title": "LazyChunkReader",
- "type": "row"
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 31
- },
- "id": 25,
- "panels": [],
- "title": "All measurements",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 32
- },
- "id": 3,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.api.get.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "API Get (BZZ)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 32
- },
- "id": 13,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.network.stream.request_from_peers.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Request from peers",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 38
- },
- "id": 11,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.network.stream.received_chunks.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Received chunks",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 38
- },
- "id": 12,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.storage.cache.requests.size.gauge",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "max"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Requests cache entries",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 44
- },
- "id": 9,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.network.stream.handle_retrieve_request_msg.count.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Handle retrieve request msg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 44
- },
- "id": 20,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.syncer.setnextbatch.iterator.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "syncer setnextbatch iterator calls",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 50
- },
- "id": 21,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handlewantedhashesmsg.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer HandleWantedHashesMsg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 50
- },
- "id": 22,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handlesubscribemsg.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer HandleSubscribeMsg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 56
- },
- "id": 23,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handlewantedhashesmsg.actualget.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer HandleWantedHashesMsg actual get",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "metrics",
- "fill": 1,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 56
- },
- "id": 19,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": true,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": true,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "$tag_host",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "$myinterval"
- ],
- "type": "time"
- },
- {
- "params": [
- "host"
- ],
- "type": "tag"
- },
- {
- "params": [
- "0"
- ],
- "type": "fill"
- }
- ],
- "measurement": "swarm.peer.handleofferedhashes.count",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "value"
- ],
- "type": "field"
- },
- {
- "params": [],
- "type": "sum"
- }
- ]
- ],
- "tags": [
- {
- "key": "host",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "peer OfferedHashesMsg",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "none",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ]
- }
- ],
- "refresh": "30s",
- "schemaVersion": 16,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "auto": false,
- "auto_count": 30,
- "auto_min": "10s",
- "current": {
- "text": "10s",
- "value": "10s"
- },
- "hide": 0,
- "label": "resolution",
- "name": "myinterval",
- "options": [
- {
- "selected": false,
- "text": "5s",
- "value": "5s"
- },
- {
- "selected": true,
- "text": "10s",
- "value": "10s"
- },
- {
- "selected": false,
- "text": "30s",
- "value": "30s"
- },
- {
- "selected": false,
- "text": "100s",
- "value": "100s"
- }
- ],
- "query": "5s,10s,30s,100s",
- "refresh": 2,
- "type": "interval"
- },
- {
- "allValue": null,
- "current": {
- "text": "swarm_30399 + swarm_30400 + swarm_30401 + swarm_30402",
- "value": [
- "swarm_30399",
- "swarm_30400",
- "swarm_30401",
- "swarm_30402"
- ]
- },
- "datasource": "metrics",
- "hide": 0,
- "includeAll": true,
- "label": null,
- "multi": true,
- "name": "host",
- "options": [],
- "query": "SHOW TAG VALUES WITH KEY = \"host\"",
- "refresh": 1,
- "regex": "",
- "sort": 1,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "swarm.http.request.GET.time.span",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-15m",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "Swarm",
- "uid": "vmEtxxgmz",
- "version": 138
-}
diff --git a/swarm/multihash/multihash.go b/swarm/multihash/multihash.go
deleted file mode 100644
index 3306e3a6d..000000000
--- a/swarm/multihash/multihash.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package multihash
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
-)
-
-const (
- defaultMultihashLength = 32
- defaultMultihashTypeCode = 0x1b
-)
-
-var (
- multihashTypeCode uint8
- MultihashLength = defaultMultihashLength
-)
-
-func init() {
- multihashTypeCode = defaultMultihashTypeCode
- MultihashLength = defaultMultihashLength
-}
-
-// check if valid swarm multihash
-func isSwarmMultihashType(code uint8) bool {
- return code == multihashTypeCode
-}
-
-// GetMultihashLength returns the digest length of the provided multihash
-// It will fail if the multihash is not a valid swarm mulithash
-func GetMultihashLength(data []byte) (int, int, error) {
- cursor := 0
- typ, c := binary.Uvarint(data)
- if c <= 0 {
- return 0, 0, errors.New("unreadable hashtype field")
- }
- if !isSwarmMultihashType(uint8(typ)) {
- return 0, 0, fmt.Errorf("hash code %x is not a swarm hashtype", typ)
- }
- cursor += c
- hashlength, c := binary.Uvarint(data[cursor:])
- if c <= 0 {
- return 0, 0, errors.New("unreadable length field")
- }
- cursor += c
-
- // we cheekily assume hashlength < maxint
- inthashlength := int(hashlength)
- if len(data[c:]) < inthashlength {
- return 0, 0, errors.New("length mismatch")
- }
- return inthashlength, cursor, nil
-}
-
-// FromMulithash returns the digest portion of the multihash
-// It will fail if the multihash is not a valid swarm multihash
-func FromMultihash(data []byte) ([]byte, error) {
- hashLength, _, err := GetMultihashLength(data)
- if err != nil {
- return nil, err
- }
- return data[len(data)-hashLength:], nil
-}
-
-// ToMulithash wraps the provided digest data with a swarm mulithash header
-func ToMultihash(hashData []byte) []byte {
- buf := bytes.NewBuffer(nil)
- b := make([]byte, 8)
- c := binary.PutUvarint(b, uint64(multihashTypeCode))
- buf.Write(b[:c])
- c = binary.PutUvarint(b, uint64(len(hashData)))
- buf.Write(b[:c])
- buf.Write(hashData)
- return buf.Bytes()
-}
diff --git a/swarm/multihash/multihash_test.go b/swarm/multihash/multihash_test.go
deleted file mode 100644
index 85df741dd..000000000
--- a/swarm/multihash/multihash_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package multihash
-
-import (
- "bytes"
- "math/rand"
- "testing"
-)
-
-// parse multihash, and check that invalid multihashes fail
-func TestCheckMultihash(t *testing.T) {
- hashbytes := make([]byte, 32)
- c, err := rand.Read(hashbytes)
- if err != nil {
- t.Fatal(err)
- } else if c < 32 {
- t.Fatal("short read")
- }
-
- expected := ToMultihash(hashbytes)
-
- l, hl, _ := GetMultihashLength(expected)
- if l != 32 {
- t.Fatalf("expected length %d, got %d", 32, l)
- } else if hl != 2 {
- t.Fatalf("expected header length %d, got %d", 2, hl)
- }
- if _, _, err := GetMultihashLength(expected[1:]); err == nil {
- t.Fatal("expected failure on corrupt header")
- }
- if _, _, err := GetMultihashLength(expected[:len(expected)-2]); err == nil {
- t.Fatal("expected failure on short content")
- }
- dh, _ := FromMultihash(expected)
- if !bytes.Equal(dh, hashbytes) {
- t.Fatalf("expected content hash %x, got %x", hashbytes, dh)
- }
-}
diff --git a/swarm/network/hive.go b/swarm/network/hive.go
index 1aa1ae42a..ebef54592 100644
--- a/swarm/network/hive.go
+++ b/swarm/network/hive.go
@@ -165,8 +165,8 @@ func (h *Hive) Run(p *BzzPeer) error {
// otherwise just send depth to new peer
dp.NotifyDepth(depth)
}
+ NotifyPeer(p.BzzAddr, h.Kademlia)
}
- NotifyPeer(p.BzzAddr, h.Kademlia)
defer h.Off(dp)
return dp.Run(dp.HandleMsg)
}
diff --git a/swarm/network/kademlia.go b/swarm/network/kademlia.go
index cd94741be..a8ecaa4be 100644
--- a/swarm/network/kademlia.go
+++ b/swarm/network/kademlia.go
@@ -81,14 +81,15 @@ func NewKadParams() *KadParams {
// Kademlia is a table of live peers and a db of known peers (node records)
type Kademlia struct {
lock sync.RWMutex
- *KadParams // Kademlia configuration parameters
- base []byte // immutable baseaddress of the table
- addrs *pot.Pot // pots container for known peer addresses
- conns *pot.Pot // pots container for live peer connections
- depth uint8 // stores the last current depth of saturation
- nDepth int // stores the last neighbourhood depth
- nDepthC chan int // returned by DepthC function to signal neighbourhood depth change
- addrCountC chan int // returned by AddrCountC function to signal peer count change
+ *KadParams // Kademlia configuration parameters
+ base []byte // immutable baseaddress of the table
+ addrs *pot.Pot // pots container for known peer addresses
+ conns *pot.Pot // pots container for live peer connections
+ depth uint8 // stores the last current depth of saturation
+ nDepth int // stores the last neighbourhood depth
+ nDepthC chan int // returned by DepthC function to signal neighbourhood depth change
+ addrCountC chan int // returned by AddrCountC function to signal peer count change
+ Pof func(pot.Val, pot.Val, int) (int, bool) // function for calculating kademlia routing distance between two addresses
}
// NewKademlia creates a Kademlia table for base address addr
@@ -103,6 +104,7 @@ func NewKademlia(addr []byte, params *KadParams) *Kademlia {
KadParams: params,
addrs: pot.NewPot(nil, 0),
conns: pot.NewPot(nil, 0),
+ Pof: pof,
}
}
@@ -175,7 +177,7 @@ func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) {
k.lock.Lock()
defer k.lock.Unlock()
minsize := k.MinBinSize
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
// if there is a callable neighbour within the current proxBin, connect
// this makes sure nearest neighbour set is fully connected
var ppo int
@@ -289,6 +291,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) {
// neighbourhood depth on each change.
// Not receiving from the returned channel will block On function
// when the neighbourhood depth is changed.
+// TODO: Why is this exported, and if it should be; why can't we have more subscribers than one?
func (k *Kademlia) NeighbourhoodDepthC() <-chan int {
k.lock.Lock()
defer k.lock.Unlock()
@@ -305,7 +308,7 @@ func (k *Kademlia) sendNeighbourhoodDepthChange() {
// It provides signaling of neighbourhood depth change.
// This part of the code is sending new neighbourhood depth to nDepthC if that condition is met.
if k.nDepthC != nil {
- nDepth := k.neighbourhoodDepth()
+ nDepth := depthForPot(k.conns, k.MinProxBinSize, k.base)
if nDepth != k.nDepth {
k.nDepth = nDepth
k.nDepthC <- nDepth
@@ -361,7 +364,7 @@ func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(con
var startPo int
var endPo int
- kadDepth := k.neighbourhoodDepth()
+ kadDepth := depthForPot(k.conns, k.MinProxBinSize, k.base)
k.conns.EachBin(base, pof, o, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
if startPo > 0 && endPo != k.MaxProxDisplay {
@@ -395,7 +398,7 @@ func (k *Kademlia) eachConn(base []byte, o int, f func(*Peer, int, bool) bool) {
if len(base) == 0 {
base = k.base
}
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
k.conns.EachNeighbour(base, pof, func(val pot.Val, po int) bool {
if po > o {
return true
@@ -417,7 +420,7 @@ func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool
if len(base) == 0 {
base = k.base
}
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
k.addrs.EachNeighbour(base, pof, func(val pot.Val, po int) bool {
if po > o {
return true
@@ -426,21 +429,72 @@ func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool
})
}
-// neighbourhoodDepth returns the proximity order that defines the distance of
+func (k *Kademlia) NeighbourhoodDepth() (depth int) {
+ k.lock.RLock()
+ defer k.lock.RUnlock()
+ return depthForPot(k.conns, k.MinProxBinSize, k.base)
+}
+
+// depthForPot returns the proximity order that defines the distance of
// the nearest neighbour set with cardinality >= MinProxBinSize
// if there is altogether less than MinProxBinSize peers it returns 0
// caller must hold the lock
-func (k *Kademlia) neighbourhoodDepth() (depth int) {
- if k.conns.Size() < k.MinProxBinSize {
+func depthForPot(p *pot.Pot, minProxBinSize int, pivotAddr []byte) (depth int) {
+ if p.Size() <= minProxBinSize {
return 0
}
+
+ // total number of peers in iteration
var size int
+
+ // true if iteration has all prox peers
+ var b bool
+
+ // last po recorded in iteration
+ var lastPo int
+
f := func(v pot.Val, i int) bool {
+ // po == 256 means that addr is the pivot address(self)
+ if i == 256 {
+ return true
+ }
size++
- depth = i
- return size < k.MinProxBinSize
+
+ // this means we have all nn-peers.
+ // depth is by default set to the bin of the farthest nn-peer
+ if size == minProxBinSize {
+ b = true
+ depth = i
+ return true
+ }
+
+ // if there are empty bins between farthest nn and current node,
+ // the depth should recalculated to be
+ // the farthest of those empty bins
+ //
+ // 0 abac ccde
+ // 1 2a2a
+ // 2 589f <--- nearest non-nn
+ // ============ DEPTH 3 ===========
+ // 3 <--- don't count as empty bins
+ // 4 <--- don't count as empty bins
+ // 5 cbcb cdcd <---- furthest nn
+ // 6 a1a2 b3c4
+ if b && i < depth {
+ depth = i + 1
+ lastPo = i
+ return false
+ }
+ lastPo = i
+ return true
+ }
+ p.EachNeighbour(pivotAddr, pof, f)
+
+ // cover edge case where more than one farthest nn
+ // AND we only have nn-peers
+ if lastPo == depth {
+ depth = 0
}
- k.conns.EachNeighbour(k.base, pof, f)
return depth
}
@@ -500,7 +554,7 @@ func (k *Kademlia) string() string {
liverows := make([]string, k.MaxProxDisplay)
peersrows := make([]string, k.MaxProxDisplay)
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
rest := k.conns.Size()
k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
var rowlen int
@@ -570,6 +624,7 @@ type PeerPot struct {
// as hexadecimal representations of the address.
// used for testing only
func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot {
+
// create a table of all nodes for health check
np := pot.NewPot(nil, 0)
for _, addr := range addrs {
@@ -578,34 +633,47 @@ func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot {
ppmap := make(map[string]*PeerPot)
for i, a := range addrs {
- pl := 256
- prev := 256
+
+ // actual kademlia depth
+ depth := depthForPot(np, kadMinProxSize, a)
+
+ // upon entering a new iteration
+ // this will hold the value the po should be
+ // if it's one higher than the po in the last iteration
+ prevPo := 256
+
+ // all empty bins which are outside neighbourhood depth
var emptyBins []int
+
+ // all nn-peers
var nns [][]byte
- np.EachNeighbour(addrs[i], pof, func(val pot.Val, po int) bool {
- a := val.([]byte)
+
+ np.EachNeighbour(a, pof, func(val pot.Val, po int) bool {
+ addr := val.([]byte)
+ // po == 256 means that addr is the pivot address(self)
if po == 256 {
return true
}
- if pl == 256 || pl == po {
- nns = append(nns, a)
- }
- if pl == 256 && len(nns) >= kadMinProxSize {
- pl = po
- prev = po
+
+ // iterate through the neighbours, going from the closest to the farthest
+ // we calculate the nearest neighbours that should be in the set
+ // depth in this case equates to:
+ // 1. Within all bins that are higher or equal than depth there are
+ // at least minProxBinSize peers connected
+ // 2. depth-1 bin is not empty
+ if po >= depth {
+ nns = append(nns, addr)
+ prevPo = depth - 1
+ return true
}
- if prev < pl {
- for j := prev; j > po; j-- {
- emptyBins = append(emptyBins, j)
- }
+ for j := prevPo; j > po; j-- {
+ emptyBins = append(emptyBins, j)
}
- prev = po - 1
+ prevPo = po - 1
return true
})
- for j := prev; j >= 0; j-- {
- emptyBins = append(emptyBins, j)
- }
- log.Trace(fmt.Sprintf("%x NNS: %s", addrs[i][:4], LogAddrs(nns)))
+
+ log.Trace(fmt.Sprintf("%x NNS: %s, emptyBins: %s", addrs[i][:4], LogAddrs(nns), logEmptyBins(emptyBins)))
ppmap[common.Bytes2Hex(a)] = &PeerPot{nns, emptyBins}
}
return ppmap
@@ -620,7 +688,7 @@ func (k *Kademlia) saturation(n int) int {
prev++
return prev == po && size >= n
})
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
if depth < prev {
return depth
}
@@ -633,8 +701,11 @@ func (k *Kademlia) full(emptyBins []int) (full bool) {
prev := 0
e := len(emptyBins)
ok := true
- depth := k.neighbourhoodDepth()
+ depth := depthForPot(k.conns, k.MinProxBinSize, k.base)
k.conns.EachBin(k.base, pof, 0, func(po, _ int, _ func(func(val pot.Val, i int) bool) bool) bool {
+ if po >= depth {
+ return false
+ }
if prev == depth+1 {
return true
}
diff --git a/swarm/network/kademlia_test.go b/swarm/network/kademlia_test.go
index d2e051f45..184a2d942 100644
--- a/swarm/network/kademlia_test.go
+++ b/swarm/network/kademlia_test.go
@@ -25,6 +25,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/protocols"
"github.com/ethereum/go-ethereum/swarm/pot"
)
@@ -73,6 +76,76 @@ func Register(k *Kademlia, regs ...string) {
}
}
+// tests the validity of neighborhood depth calculations
+//
+// in particular, it tests that if there are one or more consecutive
+// empty bins above the farthest "nearest neighbor-peer" then
+// the depth should be set at the farthest of those empty bins
+//
+// TODO: Make test adapt to change in MinProxBinSize
+func TestNeighbourhoodDepth(t *testing.T) {
+ baseAddressBytes := RandomAddr().OAddr
+ kad := NewKademlia(baseAddressBytes, NewKadParams())
+
+ baseAddress := pot.NewAddressFromBytes(baseAddressBytes)
+
+ closerAddress := pot.RandomAddressAt(baseAddress, 7)
+ closerPeer := newTestDiscoveryPeer(closerAddress, kad)
+ kad.On(closerPeer)
+ depth := kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("expected depth 0, was %d", depth)
+ }
+
+ sameAddress := pot.RandomAddressAt(baseAddress, 7)
+ samePeer := newTestDiscoveryPeer(sameAddress, kad)
+ kad.On(samePeer)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("expected depth 0, was %d", depth)
+ }
+
+ midAddress := pot.RandomAddressAt(baseAddress, 4)
+ midPeer := newTestDiscoveryPeer(midAddress, kad)
+ kad.On(midPeer)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 5 {
+ t.Fatalf("expected depth 5, was %d", depth)
+ }
+
+ kad.Off(midPeer)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("expected depth 0, was %d", depth)
+ }
+
+ fartherAddress := pot.RandomAddressAt(baseAddress, 1)
+ fartherPeer := newTestDiscoveryPeer(fartherAddress, kad)
+ kad.On(fartherPeer)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 2 {
+ t.Fatalf("expected depth 2, was %d", depth)
+ }
+
+ midSameAddress := pot.RandomAddressAt(baseAddress, 4)
+ midSamePeer := newTestDiscoveryPeer(midSameAddress, kad)
+ kad.Off(closerPeer)
+ kad.On(midPeer)
+ kad.On(midSamePeer)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 2 {
+ t.Fatalf("expected depth 2, was %d", depth)
+ }
+
+ kad.Off(fartherPeer)
+ log.Trace(kad.string())
+ time.Sleep(time.Millisecond)
+ depth = kad.NeighbourhoodDepth()
+ if depth != 0 {
+ t.Fatalf("expected depth 0, was %d", depth)
+ }
+}
+
func testSuggestPeer(k *Kademlia, expAddr string, expPo int, expWant bool) error {
addr, o, want := k.SuggestPeer()
if binStr(addr) != expAddr {
@@ -376,7 +449,7 @@ func TestKademliaHiveString(t *testing.T) {
Register(k, "10000000", "10000001")
k.MaxProxDisplay = 8
h := k.String()
- expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), MinProxBinSize: 2, MinBinSize: 1, MaxBinSize: 4\n000 0 | 2 8100 (0) 8000 (0)\n============ DEPTH: 1 ==========================================\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
+ expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), MinProxBinSize: 2, MinBinSize: 1, MaxBinSize: 4\n============ DEPTH: 0 ==========================================\n000 0 | 2 8100 (0) 8000 (0)\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
if expH[104:] != h[104:] {
t.Fatalf("incorrect hive output. expected %v, got %v", expH, h)
}
@@ -644,3 +717,17 @@ func TestKademliaCase5(t *testing.T) {
"78fafa0809929a1279ece089a51d12457c2d8416dff859aeb2ccc24bb50df5ec", "1dd39b1257e745f147cbbc3cadd609ccd6207c41056dbc4254bba5d2527d3ee5", "5f61dd66d4d94aec8fcc3ce0e7885c7edf30c43143fa730e2841c5d28e3cd081", "8aa8b0472cb351d967e575ad05c4b9f393e76c4b01ef4b3a54aac5283b78abc9", "4502f385152a915b438a6726ce3ea9342e7a6db91a23c2f6bee83a885ed7eb82", "718677a504249db47525e959ef1784bed167e1c46f1e0275b9c7b588e28a3758", "7c54c6ed1f8376323896ed3a4e048866410de189e9599dd89bf312ca4adb96b5", "18e03bd3378126c09e799a497150da5c24c895aedc84b6f0dbae41fc4bac081a", "23db76ac9e6e58d9f5395ca78252513a7b4118b4155f8462d3d5eec62486cadc", "40ae0e8f065e96c7adb7fa39505136401f01780481e678d718b7f6dbb2c906ec", "c1539998b8bae19d339d6bbb691f4e9daeb0e86847545229e80fe0dffe716e92", "ed139d73a2699e205574c08722ca9f030ad2d866c662f1112a276b91421c3cb9", "5bdb19584b7a36d09ca689422ef7e6bb681b8f2558a6b2177a8f7c812f631022", "636c9de7fe234ffc15d67a504c69702c719f626c17461d3f2918e924cd9d69e2", "de4455413ff9335c440d52458c6544191bd58a16d85f700c1de53b62773064ea", "de1963310849527acabc7885b6e345a56406a8f23e35e436b6d9725e69a79a83", "a80a50a467f561210a114cba6c7fb1489ed43a14d61a9edd70e2eb15c31f074d", "7804f12b8d8e6e4b375b242058242068a3809385e05df0e64973cde805cf729c", "60f9aa320c02c6f2e6370aa740cf7cea38083fa95fca8c99552cda52935c1520", "d8da963602390f6c002c00ce62a84b514edfce9ebde035b277a957264bb54d21", "8463d93256e026fe436abad44697152b9a56ac8e06a0583d318e9571b83d073c", "9a3f78fcefb9a05e40a23de55f6153d7a8b9d973ede43a380bf46bb3b3847de1", "e3bb576f4b3760b9ca6bff59326f4ebfc4a669d263fb7d67ab9797adea54ed13", "4d5cdbd6dcca5bdf819a0fe8d175dc55cc96f088d37462acd5ea14bc6296bdbe", "5a0ed28de7b5258c727cb85447071c74c00a5fbba9e6bc0393bc51944d04ab2a", "61e4ddb479c283c638f4edec24353b6cc7a3a13b930824aad016b0996ca93c47", "7e3610868acf714836cafaaa7b8c009a9ac6e3a6d443e5586cf661530a204ee2", "d74b244d4345d2c86e30a097105e4fb133d53c578320285132a952cdaa64416e", "cfeed57d0f935bfab89e3f630a7c97e0b1605f0724d85a008bbfb92cb47863a8", "580837af95055670e20d494978f60c7f1458dc4b9e389fc7aa4982b2aca3bce3", "df55c0c49e6c8a83d82dfa1c307d3bf6a20e18721c80d8ec4f1f68dc0a137ced", "5f149c51ce581ba32a285439a806c063ced01ccd4211cd024e6a615b8f216f95", "1eb76b00aeb127b10dd1b7cd4c3edeb4d812b5a658f0feb13e85c4d2b7c6fe06", "7a56ba7c3fb7cbfb5561a46a75d95d7722096b45771ec16e6fa7bbfab0b35dfe", "4bae85ad88c28470f0015246d530adc0cd1778bdd5145c3c6b538ee50c4e04bd", "afd1892e2a7145c99ec0ebe9ded0d3fec21089b277a68d47f45961ec5e39e7e0", "953138885d7b36b0ef79e46030f8e61fd7037fbe5ce9e0a94d728e8c8d7eab86", "de761613ef305e4f628cb6bf97d7b7dc69a9d513dc233630792de97bcda777a6", "3f3087280063d09504c084bbf7fdf984347a72b50d097fd5b086ffabb5b3fb4c", "7d18a94bb1ebfdef4d3e454d2db8cb772f30ca57920dd1e402184a9e598581a0", "a7d6fbdc9126d9f10d10617f49fb9f5474ffe1b229f76b7dd27cebba30eccb5d", "fad0246303618353d1387ec10c09ee991eb6180697ed3470ed9a6b377695203d", "1cf66e09ea51ee5c23df26615a9e7420be2ac8063f28f60a3bc86020e94fe6f3", "8269cdaa153da7c358b0b940791af74d7c651cd4d3f5ed13acfe6d0f2c539e7f", "90d52eaaa60e74bf1c79106113f2599471a902d7b1c39ac1f55b20604f453c09", "9788fd0c09190a3f3d0541f68073a2f44c2fcc45bb97558a7c319f36c25a75b3", "10b68fc44157ecfdae238ee6c1ce0333f906ad04d1a4cb1505c8e35c3c87fbb0", "e5284117fdf3757920475c786e0004cb00ba0932163659a89b36651a01e57394", "403ad51d911e113dcd5f9ff58c94f6d278886a2a4da64c3ceca2083282c92de3",
)
}
+
+func newTestDiscoveryPeer(addr pot.Address, kad *Kademlia) *Peer {
+ rw := &p2p.MsgPipeRW{}
+ p := p2p.NewPeer(enode.ID{}, "foo", []p2p.Cap{})
+ pp := protocols.NewPeer(p, rw, &protocols.Spec{})
+ bp := &BzzPeer{
+ Peer: pp,
+ BzzAddr: &BzzAddr{
+ OAddr: addr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", addr[:])),
+ },
+ }
+ return NewPeer(bp, kad)
+}
diff --git a/swarm/network/protocol.go b/swarm/network/protocol.go
index 66ae94a88..4b9b28cdc 100644
--- a/swarm/network/protocol.go
+++ b/swarm/network/protocol.go
@@ -44,7 +44,7 @@ const (
// BzzSpec is the spec of the generic swarm handshake
var BzzSpec = &protocols.Spec{
Name: "bzz",
- Version: 7,
+ Version: 8,
MaxMsgSize: 10 * 1024 * 1024,
Messages: []interface{}{
HandshakeMsg{},
@@ -54,7 +54,7 @@ var BzzSpec = &protocols.Spec{
// DiscoverySpec is the spec for the bzz discovery subprotocols
var DiscoverySpec = &protocols.Spec{
Name: "hive",
- Version: 6,
+ Version: 8,
MaxMsgSize: 10 * 1024 * 1024,
Messages: []interface{}{
peersMsg{},
diff --git a/swarm/network/protocol_test.go b/swarm/network/protocol_test.go
index f0d266628..53ceda744 100644
--- a/swarm/network/protocol_test.go
+++ b/swarm/network/protocol_test.go
@@ -31,7 +31,7 @@ import (
)
const (
- TestProtocolVersion = 7
+ TestProtocolVersion = 8
TestProtocolNetworkID = 3
)
diff --git a/swarm/network/simulation/example_test.go b/swarm/network/simulation/example_test.go
index bacc64d53..7b6204617 100644
--- a/swarm/network/simulation/example_test.go
+++ b/swarm/network/simulation/example_test.go
@@ -33,6 +33,10 @@ import (
// BucketKeyKademlia key. This allows to use WaitTillHealthy to block until
// all nodes have the their Kadmlias healthy.
func ExampleSimulation_WaitTillHealthy() {
+
+ log.Error("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
+ return
+
sim := simulation.New(map[string]simulation.ServiceFunc{
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
addr := network.NewAddr(ctx.Config.Node())
diff --git a/swarm/network/simulation/kademlia.go b/swarm/network/simulation/kademlia.go
index f895181d9..7982810ca 100644
--- a/swarm/network/simulation/kademlia.go
+++ b/swarm/network/simulation/kademlia.go
@@ -33,6 +33,7 @@ var BucketKeyKademlia BucketKey = "kademlia"
// WaitTillHealthy is blocking until the health of all kademlias is true.
// If error is not nil, a map of kademlia that was found not healthy is returned.
+// TODO: Check correctness since change in kademlia depth calculation logic
func (s *Simulation) WaitTillHealthy(ctx context.Context, kadMinProxSize int) (ill map[enode.ID]*network.Kademlia, err error) {
// Prepare PeerPot map for checking Kademlia health
var ppmap map[string]*network.PeerPot
diff --git a/swarm/network/simulation/kademlia_test.go b/swarm/network/simulation/kademlia_test.go
index 285644a0f..f02b0e541 100644
--- a/swarm/network/simulation/kademlia_test.go
+++ b/swarm/network/simulation/kademlia_test.go
@@ -28,11 +28,11 @@ import (
)
func TestWaitTillHealthy(t *testing.T) {
+
sim := New(map[string]ServiceFunc{
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
addr := network.NewAddr(ctx.Config.Node())
hp := network.NewHiveParams()
- hp.Discovery = false
config := &network.BzzConfig{
OverlayAddr: addr.Over(),
UnderlayAddr: addr.Under(),
diff --git a/swarm/network/simulation/node_test.go b/swarm/network/simulation/node_test.go
index 086ab606f..01346ef14 100644
--- a/swarm/network/simulation/node_test.go
+++ b/swarm/network/simulation/node_test.go
@@ -160,6 +160,41 @@ func TestAddNodeWithService(t *testing.T) {
}
}
+func TestAddNodeMultipleServices(t *testing.T) {
+ sim := New(map[string]ServiceFunc{
+ "noop1": noopServiceFunc,
+ "noop2": noopService2Func,
+ })
+ defer sim.Close()
+
+ id, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n := sim.Net.GetNode(id).Node.(*adapters.SimNode)
+ if n.Service("noop1") == nil {
+ t.Error("service noop1 not found on node")
+ }
+ if n.Service("noop2") == nil {
+ t.Error("service noop2 not found on node")
+ }
+}
+
+func TestAddNodeDuplicateServiceError(t *testing.T) {
+ sim := New(map[string]ServiceFunc{
+ "noop1": noopServiceFunc,
+ "noop2": noopServiceFunc,
+ })
+ defer sim.Close()
+
+ wantErr := "duplicate service: *simulation.noopService"
+ _, err := sim.AddNode()
+ if err.Error() != wantErr {
+ t.Errorf("got error %q, want %q", err, wantErr)
+ }
+}
+
func TestAddNodes(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()
diff --git a/swarm/network/simulation/simulation.go b/swarm/network/simulation/simulation.go
index f6d3ce229..e5435b9f0 100644
--- a/swarm/network/simulation/simulation.go
+++ b/swarm/network/simulation/simulation.go
@@ -68,6 +68,10 @@ type ServiceFunc func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Se
// New creates a new Simulation instance with new
// simulations.Network initialized with provided services.
+// Services map must have unique keys as service names and
+// every ServiceFunc must return a node.Service of the unique type.
+// This restriction is required by node.Node.Start() function
+// which is used to start node.Service returned by ServiceFunc.
func New(services map[string]ServiceFunc) (s *Simulation) {
s = &Simulation{
buckets: make(map[enode.ID]*sync.Map),
@@ -76,6 +80,9 @@ func New(services map[string]ServiceFunc) (s *Simulation) {
adapterServices := make(map[string]adapters.ServiceFunc, len(services))
for name, serviceFunc := range services {
+ // Scope this variables correctly
+ // as they will be in the adapterServices[name] function accessed later.
+ name, serviceFunc := name, serviceFunc
s.serviceNames = append(s.serviceNames, name)
adapterServices[name] = func(ctx *adapters.ServiceContext) (node.Service, error) {
b := new(sync.Map)
diff --git a/swarm/network/simulation/simulation_test.go b/swarm/network/simulation/simulation_test.go
index eed09bf50..ca8599d7c 100644
--- a/swarm/network/simulation/simulation_test.go
+++ b/swarm/network/simulation/simulation_test.go
@@ -205,3 +205,16 @@ func (t *noopService) Start(server *p2p.Server) error {
func (t *noopService) Stop() error {
return nil
}
+
+// a helper function for most basic noop service
+// of a different type then noopService to test
+// multiple services on one node.
+func noopService2Func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+ return new(noopService2), nil, nil
+}
+
+// noopService2 is the service that does not do anything
+// but implements node.Service interface.
+type noopService2 struct {
+ noopService
+}
diff --git a/swarm/network/simulations/overlay.go b/swarm/network/simulations/overlay.go
index caf7ff1f2..284ae6398 100644
--- a/swarm/network/simulations/overlay.go
+++ b/swarm/network/simulations/overlay.go
@@ -64,12 +64,12 @@ func init() {
type Simulation struct {
mtx sync.Mutex
- stores map[enode.ID]*state.InmemoryStore
+ stores map[enode.ID]state.Store
}
func NewSimulation() *Simulation {
return &Simulation{
- stores: make(map[enode.ID]*state.InmemoryStore),
+ stores: make(map[enode.ID]state.Store),
}
}
diff --git a/swarm/network/stream/common_test.go b/swarm/network/stream/common_test.go
index c5f1fa176..e0a7f7e12 100644
--- a/swarm/network/stream/common_test.go
+++ b/swarm/network/stream/common_test.go
@@ -38,7 +38,6 @@ import (
"github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
- mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
"github.com/ethereum/go-ethereum/swarm/testutil"
colorable "github.com/mattn/go-colorable"
)
@@ -69,21 +68,6 @@ func init() {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
-func createGlobalStore() (string, *mockdb.GlobalStore, error) {
- var globalStore *mockdb.GlobalStore
- globalStoreDir, err := ioutil.TempDir("", "global.store")
- if err != nil {
- log.Error("Error initiating global store temp directory!", "err", err)
- return "", nil, err
- }
- globalStore, err = mockdb.NewGlobalStore(globalStoreDir)
- if err != nil {
- log.Error("Error initiating global store!", "err", err)
- return "", nil, err
- }
- return globalStoreDir, globalStore, nil
-}
-
func newStreamerTester(t *testing.T, registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
// setup
addr := network.RandomAddr() // tested peers peer address
diff --git a/swarm/network/stream/delivery.go b/swarm/network/stream/delivery.go
index 0109fbdef..c73298d9a 100644
--- a/swarm/network/stream/delivery.go
+++ b/swarm/network/stream/delivery.go
@@ -39,6 +39,7 @@ const (
var (
processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil)
handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil)
+ retrieveChunkFail = metrics.NewRegisteredCounter("network.stream.retrieve_chunks_fail.count", nil)
requestFromPeersCount = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil)
requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil)
@@ -169,7 +170,8 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
go func() {
chunk, err := d.chunkStore.Get(ctx, req.Addr)
if err != nil {
- log.Warn("ChunkStore.Get can not retrieve chunk", "err", err)
+ retrieveChunkFail.Inc(1)
+ log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err)
return
}
if req.SkipCheck {
@@ -255,7 +257,7 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
}
sp = d.getPeer(id)
if sp == nil {
- log.Warn("Delivery.RequestFromPeers: peer not found", "id", id)
+ //log.Warn("Delivery.RequestFromPeers: peer not found", "id", id)
return true
}
spID = &id
diff --git a/swarm/network/stream/delivery_test.go b/swarm/network/stream/delivery_test.go
index a6173a389..f69f80499 100644
--- a/swarm/network/stream/delivery_test.go
+++ b/swarm/network/stream/delivery_test.go
@@ -453,6 +453,8 @@ func TestDeliveryFromNodes(t *testing.T) {
}
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
node := ctx.Config.Node()
diff --git a/swarm/network/stream/intervals_test.go b/swarm/network/stream/intervals_test.go
index defb6df50..668cf586c 100644
--- a/swarm/network/stream/intervals_test.go
+++ b/swarm/network/stream/intervals_test.go
@@ -52,6 +52,8 @@ func TestIntervalsLiveAndHistory(t *testing.T) {
}
func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
nodes := 2
chunkCount := dataChunkCount
externalStreamName := "externalStream"
diff --git a/swarm/network/stream/snapshot_retrieval_test.go b/swarm/network/stream/snapshot_retrieval_test.go
index 5ea0b1511..932e28b32 100644
--- a/swarm/network/stream/snapshot_retrieval_test.go
+++ b/swarm/network/stream/snapshot_retrieval_test.go
@@ -246,6 +246,7 @@ simulation's `action` function.
The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(chunkCount int, nodeCount int) error {
+
sim := simulation.New(retrievalSimServiceMap)
defer sim.Close()
diff --git a/swarm/network/stream/snapshot_sync_test.go b/swarm/network/stream/snapshot_sync_test.go
index 6b92c32ae..4a632c8c9 100644
--- a/swarm/network/stream/snapshot_sync_test.go
+++ b/swarm/network/stream/snapshot_sync_test.go
@@ -35,7 +35,8 @@ import (
"github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
- mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
+ "github.com/ethereum/go-ethereum/swarm/storage/mock"
+ mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
@@ -181,6 +182,8 @@ func streamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Servic
}
func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
sim := simulation.New(simServiceMap)
defer sim.Close()
@@ -268,20 +271,9 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
// File retrieval check is repeated until all uploaded files are retrieved from all nodes
// or until the timeout is reached.
- var gDir string
- var globalStore *mockdb.GlobalStore
+ var globalStore mock.GlobalStorer
if *useMockStore {
- gDir, globalStore, err = createGlobalStore()
- if err != nil {
- return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
- }
- defer func() {
- os.RemoveAll(gDir)
- err := globalStore.Close()
- if err != nil {
- log.Error("Error closing global store! %v", "err", err)
- }
- }()
+ globalStore = mockmem.NewGlobalStore()
}
REPEAT:
for {
@@ -339,6 +331,8 @@ assuming that the snapshot file identifies a healthy
kademlia network. The snapshot should have 'streamer' in its service list.
*/
func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
n := ctx.Config.Node()
@@ -476,14 +470,9 @@ func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int)
return err
}
- var gDir string
- var globalStore *mockdb.GlobalStore
+ var globalStore mock.GlobalStorer
if *useMockStore {
- gDir, globalStore, err = createGlobalStore()
- if err != nil {
- return fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
- }
- defer os.RemoveAll(gDir)
+ globalStore = mockmem.NewGlobalStore()
}
// File retrieval check is repeated until all uploaded files are retrieved from all nodes
// or until the timeout is reached.
diff --git a/swarm/network/stream/syncer_test.go b/swarm/network/stream/syncer_test.go
index fe20bab26..3e3cee18d 100644
--- a/swarm/network/stream/syncer_test.go
+++ b/swarm/network/stream/syncer_test.go
@@ -35,7 +35,8 @@ import (
"github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
- mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
+ "github.com/ethereum/go-ethereum/swarm/storage/mock"
+ mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
@@ -48,7 +49,7 @@ func TestSyncerSimulation(t *testing.T) {
testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
}
-func createMockStore(globalStore *mockdb.GlobalStore, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
+func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
address := common.BytesToAddress(id.Bytes())
mockStore := globalStore.NewNodeStore(address)
params := storage.NewDefaultLocalStoreParams()
@@ -67,11 +68,12 @@ func createMockStore(globalStore *mockdb.GlobalStore, id enode.ID, addr *network
}
func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
sim := simulation.New(map[string]simulation.ServiceFunc{
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
var store storage.ChunkStore
- var globalStore *mockdb.GlobalStore
- var gDir, datadir string
+ var datadir string
node := ctx.Config.Node()
addr := network.NewAddr(node)
@@ -79,11 +81,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
addr.OAddr[0] = byte(0)
if *useMockStore {
- gDir, globalStore, err = createGlobalStore()
- if err != nil {
- return nil, nil, fmt.Errorf("Something went wrong; using mockStore enabled but globalStore is nil")
- }
- store, datadir, err = createMockStore(globalStore, node.ID(), addr)
+ store, datadir, err = createMockStore(mockmem.NewGlobalStore(), node.ID(), addr)
} else {
store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
}
@@ -94,13 +92,6 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
cleanup = func() {
store.Close()
os.RemoveAll(datadir)
- if *useMockStore {
- err := globalStore.Close()
- if err != nil {
- log.Error("Error closing global store! %v", "err", err)
- }
- os.RemoveAll(gDir)
- }
}
localStore := store.(*storage.LocalStore)
netStore, err := storage.NewNetStore(localStore, nil)
@@ -243,3 +234,170 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
t.Fatal(result.Error)
}
}
+
+//TestSameVersionID just checks that if the version is not changed,
+//then streamer peers see each other
+func TestSameVersionID(t *testing.T) {
+ //test version ID
+ v := uint(1)
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ var store storage.ChunkStore
+ var datadir string
+
+ node := ctx.Config.Node()
+ addr := network.NewAddr(node)
+
+ store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyStore, store)
+ cleanup = func() {
+ store.Close()
+ os.RemoveAll(datadir)
+ }
+ localStore := store.(*storage.LocalStore)
+ netStore, err := storage.NewNetStore(localStore, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyDB, netStore)
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ delivery := NewDelivery(kad, netStore)
+ netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
+
+ bucket.Store(bucketKeyDelivery, delivery)
+
+ r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
+ Retrieval: RetrievalDisabled,
+ Syncing: SyncingAutoSubscribe,
+ }, nil)
+ //assign to each node the same version ID
+ r.spec.Version = v
+
+ bucket.Store(bucketKeyRegistry, r)
+
+ return r, cleanup, nil
+
+ },
+ })
+ defer sim.Close()
+
+ //connect just two nodes
+ log.Info("Adding nodes to simulation")
+ _, err := sim.AddNodesAndConnectChain(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.Info("Starting simulation")
+ ctx := context.Background()
+ //make sure they have time to connect
+ time.Sleep(200 * time.Millisecond)
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ //get the pivot node's filestore
+ nodes := sim.UpNodeIDs()
+
+ item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
+ if !ok {
+ return fmt.Errorf("No filestore")
+ }
+ registry := item.(*Registry)
+
+ //the peers should connect, thus getting the peer should not return nil
+ if registry.getPeer(nodes[1]) == nil {
+ t.Fatal("Expected the peer to not be nil, but it is")
+ }
+ return nil
+ })
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+ log.Info("Simulation ended")
+}
+
+//TestDifferentVersionID proves that if the streamer protocol version doesn't match,
+//then the peers are not connected at streamer level
+func TestDifferentVersionID(t *testing.T) {
+ //create a variable to hold the version ID
+ v := uint(0)
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ var store storage.ChunkStore
+ var datadir string
+
+ node := ctx.Config.Node()
+ addr := network.NewAddr(node)
+
+ store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyStore, store)
+ cleanup = func() {
+ store.Close()
+ os.RemoveAll(datadir)
+ }
+ localStore := store.(*storage.LocalStore)
+ netStore, err := storage.NewNetStore(localStore, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ bucket.Store(bucketKeyDB, netStore)
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ delivery := NewDelivery(kad, netStore)
+ netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
+
+ bucket.Store(bucketKeyDelivery, delivery)
+
+ r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
+ Retrieval: RetrievalDisabled,
+ Syncing: SyncingAutoSubscribe,
+ }, nil)
+
+ //increase the version ID for each node
+ v++
+ r.spec.Version = v
+
+ bucket.Store(bucketKeyRegistry, r)
+
+ return r, cleanup, nil
+
+ },
+ })
+ defer sim.Close()
+
+ //connect the nodes
+ log.Info("Adding nodes to simulation")
+ _, err := sim.AddNodesAndConnectChain(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.Info("Starting simulation")
+ ctx := context.Background()
+ //make sure they have time to connect
+ time.Sleep(200 * time.Millisecond)
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ //get the pivot node's filestore
+ nodes := sim.UpNodeIDs()
+
+ item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
+ if !ok {
+ return fmt.Errorf("No filestore")
+ }
+ registry := item.(*Registry)
+
+ //getting the other peer should fail due to the different version numbers
+ if registry.getPeer(nodes[1]) != nil {
+ t.Fatal("Expected the peer to be nil, but it is not")
+ }
+ return nil
+ })
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+ log.Info("Simulation ended")
+
+}
diff --git a/swarm/network/stream/visualized_snapshot_sync_sim_test.go b/swarm/network/stream/visualized_snapshot_sync_sim_test.go
index 437c17e5e..f6d618020 100644
--- a/swarm/network/stream/visualized_snapshot_sync_sim_test.go
+++ b/swarm/network/stream/visualized_snapshot_sync_sim_test.go
@@ -84,6 +84,8 @@ func watchSim(sim *simulation.Simulation) (context.Context, context.CancelFunc)
//This test requests bogus hashes into the network
func TestNonExistingHashesWithServer(t *testing.T) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
nodeCount, _, sim := setupSim(retrievalSimServiceMap)
defer sim.Close()
@@ -143,6 +145,7 @@ func sendSimTerminatedEvent(sim *simulation.Simulation) {
//can visualize messages like SendOfferedMsg, WantedHashesMsg, DeliveryMsg
func TestSnapshotSyncWithServer(t *testing.T) {
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
nodeCount, chunkCount, sim := setupSim(simServiceMap)
defer sim.Close()
diff --git a/swarm/network_test.go b/swarm/network_test.go
index d84f28147..41993dfc6 100644
--- a/swarm/network_test.go
+++ b/swarm/network_test.go
@@ -259,6 +259,8 @@ type testSwarmNetworkOptions struct {
// - May wait for Kademlia on every node to be healthy.
// - Checking if a file is retrievable from all nodes.
func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwarmNetworkStep) {
+
+ t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
if o == nil {
o = new(testSwarmNetworkOptions)
}
diff --git a/swarm/pss/api.go b/swarm/pss/api.go
index eba7bb722..587382d72 100644
--- a/swarm/pss/api.go
+++ b/swarm/pss/api.go
@@ -51,7 +51,7 @@ func NewAPI(ps *Pss) *API {
//
// All incoming messages to the node matching this topic will be encapsulated in the APIMsg
// struct and sent to the subscriber
-func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription, error) {
+func (pssapi *API) Receive(ctx context.Context, topic Topic, raw bool, prox bool) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return nil, fmt.Errorf("Subscribe not supported")
@@ -59,7 +59,7 @@ func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription,
psssub := notifier.CreateSubscription()
- handler := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ hndlr := NewHandler(func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
apimsg := &APIMsg{
Msg: hexutil.Bytes(msg),
Asymmetric: asymmetric,
@@ -69,9 +69,15 @@ func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription,
log.Warn(fmt.Sprintf("notification on pss sub topic rpc (sub %v) msg %v failed!", psssub.ID, msg))
}
return nil
+ })
+ if raw {
+ hndlr.caps.raw = true
+ }
+ if prox {
+ hndlr.caps.prox = true
}
- deregf := pssapi.Register(&topic, handler)
+ deregf := pssapi.Register(&topic, hndlr)
go func() {
defer deregf()
select {
@@ -158,6 +164,10 @@ func (pssapi *API) SendSym(symkeyhex string, topic Topic, msg hexutil.Bytes) err
return pssapi.Pss.SendSym(symkeyhex, topic, msg[:])
}
+func (pssapi *API) SendRaw(addr hexutil.Bytes, topic Topic, msg hexutil.Bytes) error {
+ return pssapi.Pss.SendRaw(PssAddress(addr), topic, msg[:])
+}
+
func (pssapi *API) GetPeerTopics(pubkeyhex string) ([]Topic, error) {
topics, _, err := pssapi.Pss.GetPublickeyPeers(pubkeyhex)
return topics, err
diff --git a/swarm/pss/client/client.go b/swarm/pss/client/client.go
index d541081d3..5ee387aa7 100644
--- a/swarm/pss/client/client.go
+++ b/swarm/pss/client/client.go
@@ -236,7 +236,7 @@ func (c *Client) RunProtocol(ctx context.Context, proto *p2p.Protocol) error {
topichex := topicobj.String()
msgC := make(chan pss.APIMsg)
c.peerPool[topicobj] = make(map[string]*pssRPCRW)
- sub, err := c.rpc.Subscribe(ctx, "pss", msgC, "receive", topichex)
+ sub, err := c.rpc.Subscribe(ctx, "pss", msgC, "receive", topichex, false, false)
if err != nil {
return fmt.Errorf("pss event subscription failed: %v", err)
}
diff --git a/swarm/pss/handshake.go b/swarm/pss/handshake.go
index e3ead77d0..5486abafa 100644
--- a/swarm/pss/handshake.go
+++ b/swarm/pss/handshake.go
@@ -486,7 +486,7 @@ func (api *HandshakeAPI) Handshake(pubkeyid string, topic Topic, sync bool, flus
// Activate handshake functionality on a topic
func (api *HandshakeAPI) AddHandshake(topic Topic) error {
- api.ctrl.deregisterFuncs[topic] = api.ctrl.pss.Register(&topic, api.ctrl.handler)
+ api.ctrl.deregisterFuncs[topic] = api.ctrl.pss.Register(&topic, NewHandler(api.ctrl.handler))
return nil
}
diff --git a/swarm/pss/notify/notify.go b/swarm/pss/notify/notify.go
index 3731fb9db..d3c89058b 100644
--- a/swarm/pss/notify/notify.go
+++ b/swarm/pss/notify/notify.go
@@ -113,7 +113,7 @@ func NewController(ps *pss.Pss) *Controller {
notifiers: make(map[string]*notifier),
subscriptions: make(map[string]*subscription),
}
- ctrl.pss.Register(&controlTopic, ctrl.Handler)
+ ctrl.pss.Register(&controlTopic, pss.NewHandler(ctrl.Handler))
return ctrl
}
@@ -336,7 +336,7 @@ func (c *Controller) handleNotifyWithKeyMsg(msg *Msg) error {
// \TODO keep track of and add actual address
updaterAddr := pss.PssAddress([]byte{})
c.pss.SetSymmetricKey(symkey, topic, &updaterAddr, true)
- c.pss.Register(&topic, c.Handler)
+ c.pss.Register(&topic, pss.NewHandler(c.Handler))
return c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload[:len(msg.Payload)-symKeyLength])
}
diff --git a/swarm/pss/notify/notify_test.go b/swarm/pss/notify/notify_test.go
index d4d383a6b..6100195b0 100644
--- a/swarm/pss/notify/notify_test.go
+++ b/swarm/pss/notify/notify_test.go
@@ -121,7 +121,7 @@ func TestStart(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
defer cancel()
rmsgC := make(chan *pss.APIMsg)
- rightSub, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", controlTopic)
+ rightSub, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", controlTopic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -174,7 +174,7 @@ func TestStart(t *testing.T) {
t.Fatalf("expected payload length %d, have %d", len(updateMsg)+symKeyLength, len(dMsg.Payload))
}
- rightSubUpdate, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", rsrcTopic)
+ rightSubUpdate, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", rsrcTopic, false, false)
if err != nil {
t.Fatal(err)
}
diff --git a/swarm/pss/protocol_test.go b/swarm/pss/protocol_test.go
index 4ef3e90a0..520c48a20 100644
--- a/swarm/pss/protocol_test.go
+++ b/swarm/pss/protocol_test.go
@@ -92,7 +92,7 @@ func testProtocol(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -100,7 +100,7 @@ func testProtocol(t *testing.T) {
rmsgC := make(chan APIMsg)
rctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -130,6 +130,7 @@ func testProtocol(t *testing.T) {
log.Debug("lnode ok")
case cerr := <-lctx.Done():
t.Fatalf("test message timed out: %v", cerr)
+ return
}
select {
case <-rmsgC:
diff --git a/swarm/pss/pss.go b/swarm/pss/pss.go
index e1e24e1f5..d0986d280 100644
--- a/swarm/pss/pss.go
+++ b/swarm/pss/pss.go
@@ -23,11 +23,13 @@ import (
"crypto/rand"
"errors"
"fmt"
+ "hash"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -136,10 +138,10 @@ type Pss struct {
symKeyDecryptCacheCapacity int // max amount of symkeys to keep.
// message handling
- handlers map[Topic]map[*Handler]bool // topic and version based pss payload handlers. See pss.Handle()
- handlersMu sync.RWMutex
- allowRaw bool
- hashPool sync.Pool
+ handlers map[Topic]map[*handler]bool // topic and version based pss payload handlers. See pss.Handle()
+ handlersMu sync.RWMutex
+ hashPool sync.Pool
+ topicHandlerCaps map[Topic]*handlerCaps // caches capabilities of each topic's handlers (see handlerCap* consts in types.go)
// process
quitC chan struct{}
@@ -180,11 +182,12 @@ func NewPss(k *network.Kademlia, params *PssParams) (*Pss, error) {
symKeyDecryptCache: make([]*string, params.SymKeyCacheCapacity),
symKeyDecryptCacheCapacity: params.SymKeyCacheCapacity,
- handlers: make(map[Topic]map[*Handler]bool),
- allowRaw: params.AllowRaw,
+ handlers: make(map[Topic]map[*handler]bool),
+ topicHandlerCaps: make(map[Topic]*handlerCaps),
+
hashPool: sync.Pool{
New: func() interface{} {
- return storage.MakeHashFunc(storage.DefaultHash)()
+ return sha3.NewKeccak256()
},
},
}
@@ -313,30 +316,54 @@ func (p *Pss) PublicKey() *ecdsa.PublicKey {
//
// Returns a deregister function which needs to be called to
// deregister the handler,
-func (p *Pss) Register(topic *Topic, handler Handler) func() {
+func (p *Pss) Register(topic *Topic, hndlr *handler) func() {
p.handlersMu.Lock()
defer p.handlersMu.Unlock()
handlers := p.handlers[*topic]
if handlers == nil {
- handlers = make(map[*Handler]bool)
+ handlers = make(map[*handler]bool)
p.handlers[*topic] = handlers
+ log.Debug("registered handler", "caps", hndlr.caps)
+ }
+ if hndlr.caps == nil {
+ hndlr.caps = &handlerCaps{}
+ }
+ handlers[hndlr] = true
+ if _, ok := p.topicHandlerCaps[*topic]; !ok {
+ p.topicHandlerCaps[*topic] = &handlerCaps{}
}
- handlers[&handler] = true
- return func() { p.deregister(topic, &handler) }
+ if hndlr.caps.raw {
+ p.topicHandlerCaps[*topic].raw = true
+ }
+ if hndlr.caps.prox {
+ p.topicHandlerCaps[*topic].prox = true
+ }
+ return func() { p.deregister(topic, hndlr) }
}
-func (p *Pss) deregister(topic *Topic, h *Handler) {
+func (p *Pss) deregister(topic *Topic, hndlr *handler) {
p.handlersMu.Lock()
defer p.handlersMu.Unlock()
handlers := p.handlers[*topic]
- if len(handlers) == 1 {
+ if len(handlers) > 1 {
delete(p.handlers, *topic)
+ // topic caps might have changed now that a handler is gone
+ caps := &handlerCaps{}
+ for h := range handlers {
+ if h.caps.raw {
+ caps.raw = true
+ }
+ if h.caps.prox {
+ caps.prox = true
+ }
+ }
+ p.topicHandlerCaps[*topic] = caps
return
}
- delete(handlers, h)
+ delete(handlers, hndlr)
}
// get all registered handlers for respective topics
-func (p *Pss) getHandlers(topic Topic) map[*Handler]bool {
+func (p *Pss) getHandlers(topic Topic) map[*handler]bool {
p.handlersMu.RLock()
defer p.handlersMu.RUnlock()
return p.handlers[topic]
@@ -348,12 +375,11 @@ func (p *Pss) getHandlers(topic Topic) map[*Handler]bool {
// Only passes error to pss protocol handler if payload is not valid pssmsg
func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
metrics.GetOrRegisterCounter("pss.handlepssmsg", nil).Inc(1)
-
pssmsg, ok := msg.(*PssMsg)
-
if !ok {
return fmt.Errorf("invalid message type. Expected *PssMsg, got %T ", msg)
}
+ log.Trace("handler", "self", label(p.Kademlia.BaseAddr()), "topic", label(pssmsg.Payload.Topic[:]))
if int64(pssmsg.Expire) < time.Now().Unix() {
metrics.GetOrRegisterCounter("pss.expire", nil).Inc(1)
log.Warn("pss filtered expired message", "from", common.ToHex(p.Kademlia.BaseAddr()), "to", common.ToHex(pssmsg.To))
@@ -365,13 +391,34 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
}
p.addFwdCache(pssmsg)
- if !p.isSelfPossibleRecipient(pssmsg) {
- log.Trace("pss was for someone else :'( ... forwarding", "pss", common.ToHex(p.BaseAddr()))
+ psstopic := Topic(pssmsg.Payload.Topic)
+
+ // raw is simplest handler contingency to check, so check that first
+ var isRaw bool
+ if pssmsg.isRaw() {
+ if !p.topicHandlerCaps[psstopic].raw {
+ log.Debug("No handler for raw message", "topic", psstopic)
+ return nil
+ }
+ isRaw = true
+ }
+
+ // check if we can be recipient:
+ // - no prox handler on message and partial address matches
+ // - prox handler on message and we are in prox regardless of partial address match
+ // store this result so we don't calculate again on every handler
+ var isProx bool
+ if _, ok := p.topicHandlerCaps[psstopic]; ok {
+ isProx = p.topicHandlerCaps[psstopic].prox
+ }
+ isRecipient := p.isSelfPossibleRecipient(pssmsg, isProx)
+ if !isRecipient {
+ log.Trace("pss was for someone else :'( ... forwarding", "pss", common.ToHex(p.BaseAddr()), "prox", isProx)
return p.enqueue(pssmsg)
}
- log.Trace("pss for us, yay! ... let's process!", "pss", common.ToHex(p.BaseAddr()))
- if err := p.process(pssmsg); err != nil {
+ log.Trace("pss for us, yay! ... let's process!", "pss", common.ToHex(p.BaseAddr()), "prox", isProx, "raw", isRaw, "topic", label(pssmsg.Payload.Topic[:]))
+ if err := p.process(pssmsg, isRaw, isProx); err != nil {
qerr := p.enqueue(pssmsg)
if qerr != nil {
return fmt.Errorf("process fail: processerr %v, queueerr: %v", err, qerr)
@@ -384,7 +431,7 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
// Entry point to processing a message for which the current node can be the intended recipient.
// Attempts symmetric and asymmetric decryption with stored keys.
// Dispatches message to all handlers matching the message topic
-func (p *Pss) process(pssmsg *PssMsg) error {
+func (p *Pss) process(pssmsg *PssMsg, raw bool, prox bool) error {
metrics.GetOrRegisterCounter("pss.process", nil).Inc(1)
var err error
@@ -397,10 +444,8 @@ func (p *Pss) process(pssmsg *PssMsg) error {
envelope := pssmsg.Payload
psstopic := Topic(envelope.Topic)
- if pssmsg.isRaw() {
- if !p.allowRaw {
- return errors.New("raw message support disabled")
- }
+
+ if raw {
payload = pssmsg.Payload.Data
} else {
if pssmsg.isSym() {
@@ -422,19 +467,27 @@ func (p *Pss) process(pssmsg *PssMsg) error {
return err
}
}
- p.executeHandlers(psstopic, payload, from, asymmetric, keyid)
+ p.executeHandlers(psstopic, payload, from, raw, prox, asymmetric, keyid)
return nil
}
-func (p *Pss) executeHandlers(topic Topic, payload []byte, from *PssAddress, asymmetric bool, keyid string) {
+func (p *Pss) executeHandlers(topic Topic, payload []byte, from *PssAddress, raw bool, prox bool, asymmetric bool, keyid string) {
handlers := p.getHandlers(topic)
peer := p2p.NewPeer(enode.ID{}, fmt.Sprintf("%x", from), []p2p.Cap{})
- for f := range handlers {
- err := (*f)(payload, peer, asymmetric, keyid)
+ for h := range handlers {
+ if !h.caps.raw && raw {
+ log.Warn("norawhandler")
+ continue
+ }
+ if !h.caps.prox && prox {
+ log.Warn("noproxhandler")
+ continue
+ }
+ err := (h.f)(payload, peer, asymmetric, keyid)
if err != nil {
- log.Warn("Pss handler %p failed: %v", f, err)
+ log.Warn("Pss handler failed", "err", err)
}
}
}
@@ -445,9 +498,23 @@ func (p *Pss) isSelfRecipient(msg *PssMsg) bool {
}
// test match of leftmost bytes in given message to node's Kademlia address
-func (p *Pss) isSelfPossibleRecipient(msg *PssMsg) bool {
+func (p *Pss) isSelfPossibleRecipient(msg *PssMsg, prox bool) bool {
local := p.Kademlia.BaseAddr()
- return bytes.Equal(msg.To, local[:len(msg.To)])
+
+ // if a partial address matches we are possible recipient regardless of prox
+ // if not and prox is not set, we are surely not
+ if bytes.Equal(msg.To, local[:len(msg.To)]) {
+
+ return true
+ } else if !prox {
+ return false
+ }
+
+ depth := p.Kademlia.NeighbourhoodDepth()
+ po, _ := p.Kademlia.Pof(p.Kademlia.BaseAddr(), msg.To, 0)
+ log.Trace("selfpossible", "po", po, "depth", depth)
+
+ return depth <= po
}
/////////////////////////////////////////////////////////////////////
@@ -684,9 +751,6 @@ func (p *Pss) enqueue(msg *PssMsg) error {
//
// Will fail if raw messages are disallowed
func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error {
- if !p.allowRaw {
- return errors.New("Raw messages not enabled")
- }
pssMsgParams := &msgParams{
raw: true,
}
@@ -699,7 +763,17 @@ func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error {
pssMsg.Expire = uint32(time.Now().Add(p.msgTTL).Unix())
pssMsg.Payload = payload
p.addFwdCache(pssMsg)
- return p.enqueue(pssMsg)
+ err := p.enqueue(pssMsg)
+ if err != nil {
+ return err
+ }
+
+ // if we have a proxhandler on this topic
+ // also deliver message to ourselves
+ if p.isSelfPossibleRecipient(pssMsg, true) && p.topicHandlerCaps[topic].prox {
+ return p.process(pssMsg, true, true)
+ }
+ return nil
}
// Send a message using symmetric encryption
@@ -800,7 +874,16 @@ func (p *Pss) send(to []byte, topic Topic, msg []byte, asymmetric bool, key []by
pssMsg.To = to
pssMsg.Expire = uint32(time.Now().Add(p.msgTTL).Unix())
pssMsg.Payload = envelope
- return p.enqueue(pssMsg)
+ err = p.enqueue(pssMsg)
+ if err != nil {
+ return err
+ }
+ if _, ok := p.topicHandlerCaps[topic]; ok {
+ if p.isSelfPossibleRecipient(pssMsg, true) && p.topicHandlerCaps[topic].prox {
+ return p.process(pssMsg, true, true)
+ }
+ }
+ return nil
}
// Forwards a pss message to the peer(s) closest to the to recipient address in the PssMsg struct
@@ -895,6 +978,10 @@ func (p *Pss) cleanFwdCache() {
}
}
+func label(b []byte) string {
+ return fmt.Sprintf("%04x", b[:2])
+}
+
// add a message to the cache
func (p *Pss) addFwdCache(msg *PssMsg) error {
metrics.GetOrRegisterCounter("pss.addfwdcache", nil).Inc(1)
@@ -934,10 +1021,14 @@ func (p *Pss) checkFwdCache(msg *PssMsg) bool {
// Digest of message
func (p *Pss) digest(msg *PssMsg) pssDigest {
- hasher := p.hashPool.Get().(storage.SwarmHash)
+ return p.digestBytes(msg.serialize())
+}
+
+func (p *Pss) digestBytes(msg []byte) pssDigest {
+ hasher := p.hashPool.Get().(hash.Hash)
defer p.hashPool.Put(hasher)
hasher.Reset()
- hasher.Write(msg.serialize())
+ hasher.Write(msg)
digest := pssDigest{}
key := hasher.Sum(nil)
copy(digest[:], key[:digestLength])
diff --git a/swarm/pss/pss_test.go b/swarm/pss/pss_test.go
index 66a90be62..72f62acd9 100644
--- a/swarm/pss/pss_test.go
+++ b/swarm/pss/pss_test.go
@@ -48,20 +48,23 @@ import (
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/network"
+ "github.com/ethereum/go-ethereum/swarm/pot"
"github.com/ethereum/go-ethereum/swarm/state"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
)
var (
- initOnce = sync.Once{}
- debugdebugflag = flag.Bool("vv", false, "veryverbose")
- debugflag = flag.Bool("v", false, "verbose")
- longrunning = flag.Bool("longrunning", false, "do run long-running tests")
- w *whisper.Whisper
- wapi *whisper.PublicWhisperAPI
- psslogmain log.Logger
- pssprotocols map[string]*protoCtrl
- useHandshake bool
+ initOnce = sync.Once{}
+ loglevel = flag.Int("loglevel", 2, "logging verbosity")
+ longrunning = flag.Bool("longrunning", false, "do run long-running tests")
+ w *whisper.Whisper
+ wapi *whisper.PublicWhisperAPI
+ psslogmain log.Logger
+ pssprotocols map[string]*protoCtrl
+ useHandshake bool
+ noopHandlerFunc = func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ return nil
+ }
)
func init() {
@@ -75,16 +78,9 @@ func init() {
func initTest() {
initOnce.Do(
func() {
- loglevel := log.LvlInfo
- if *debugflag {
- loglevel = log.LvlDebug
- } else if *debugdebugflag {
- loglevel = log.LvlTrace
- }
-
psslogmain = log.New("psslog", "*")
hs := log.StreamHandler(os.Stderr, log.TerminalFormat(true))
- hf := log.LvlFilterHandler(loglevel, hs)
+ hf := log.LvlFilterHandler(log.Lvl(*loglevel), hs)
h := log.CallerFileHandler(hf)
log.Root().SetHandler(h)
@@ -280,15 +276,14 @@ func TestAddressMatch(t *testing.T) {
}
pssmsg := &PssMsg{
- To: remoteaddr,
- Payload: &whisper.Envelope{},
+ To: remoteaddr,
}
// differ from first byte
if ps.isSelfRecipient(pssmsg) {
t.Fatalf("isSelfRecipient true but %x != %x", remoteaddr, localaddr)
}
- if ps.isSelfPossibleRecipient(pssmsg) {
+ if ps.isSelfPossibleRecipient(pssmsg, false) {
t.Fatalf("isSelfPossibleRecipient true but %x != %x", remoteaddr[:8], localaddr[:8])
}
@@ -297,7 +292,7 @@ func TestAddressMatch(t *testing.T) {
if ps.isSelfRecipient(pssmsg) {
t.Fatalf("isSelfRecipient true but %x != %x", remoteaddr, localaddr)
}
- if !ps.isSelfPossibleRecipient(pssmsg) {
+ if !ps.isSelfPossibleRecipient(pssmsg, false) {
t.Fatalf("isSelfPossibleRecipient false but %x == %x", remoteaddr[:8], localaddr[:8])
}
@@ -306,13 +301,342 @@ func TestAddressMatch(t *testing.T) {
if !ps.isSelfRecipient(pssmsg) {
t.Fatalf("isSelfRecipient false but %x == %x", remoteaddr, localaddr)
}
- if !ps.isSelfPossibleRecipient(pssmsg) {
+ if !ps.isSelfPossibleRecipient(pssmsg, false) {
t.Fatalf("isSelfPossibleRecipient false but %x == %x", remoteaddr[:8], localaddr[:8])
}
+
}
-//
-func TestHandlerConditions(t *testing.T) {
+// test that message is handled by sender if a prox handler exists and sender is in prox of message
+func TestProxShortCircuit(t *testing.T) {
+
+ // sender node address
+ localAddr := network.RandomAddr().Over()
+ localPotAddr := pot.NewAddressFromBytes(localAddr)
+
+ // set up kademlia
+ kadParams := network.NewKadParams()
+ kad := network.NewKademlia(localAddr, kadParams)
+ peerCount := kad.MinBinSize + 1
+
+ // set up pss
+ privKey, err := crypto.GenerateKey()
+ pssp := NewPssParams().WithPrivateKey(privKey)
+ ps, err := NewPss(kad, pssp)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ // create kademlia peers, so we have peers both inside and outside minproxlimit
+ var peers []*network.Peer
+ proxMessageAddress := pot.RandomAddressAt(localPotAddr, peerCount).Bytes()
+ distantMessageAddress := pot.RandomAddressAt(localPotAddr, 0).Bytes()
+
+ for i := 0; i < peerCount; i++ {
+ rw := &p2p.MsgPipeRW{}
+ ptpPeer := p2p.NewPeer(enode.ID{}, "wanna be with me? [ ] yes [ ] no", []p2p.Cap{})
+ protoPeer := protocols.NewPeer(ptpPeer, rw, &protocols.Spec{})
+ peerAddr := pot.RandomAddressAt(localPotAddr, i)
+ bzzPeer := &network.BzzPeer{
+ Peer: protoPeer,
+ BzzAddr: &network.BzzAddr{
+ OAddr: peerAddr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", peerAddr[:])),
+ },
+ }
+ peer := network.NewPeer(bzzPeer, kad)
+ kad.On(peer)
+ peers = append(peers, peer)
+ }
+
+ // register it marking prox capability
+ delivered := make(chan struct{})
+ rawHandlerFunc := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ log.Trace("in allowraw handler")
+ delivered <- struct{}{}
+ return nil
+ }
+ topic := BytesToTopic([]byte{0x2a})
+ hndlrProxDereg := ps.Register(&topic, &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ prox: true,
+ },
+ })
+ defer hndlrProxDereg()
+
+ // send message too far away for sender to be in prox
+ // reception of this message should time out
+ errC := make(chan error)
+ go func() {
+ err := ps.SendRaw(distantMessageAddress, topic, []byte("foo"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+
+ ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ t.Fatal("raw distant message delivered")
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ }
+
+ // send message that should be within sender prox
+ // this message should be delivered
+ go func() {
+ err := ps.SendRaw(proxMessageAddress, topic, []byte("bar"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ t.Fatal("raw timeout")
+ }
+
+ // try the same prox message with sym and asym send
+ proxAddrPss := PssAddress(proxMessageAddress)
+ symKeyId, err := ps.GenerateSymmetricKey(topic, &proxAddrPss, true)
+ go func() {
+ err := ps.SendSym(symKeyId, topic, []byte("baz"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ t.Fatal("sym timeout")
+ }
+
+ err = ps.SetPeerPublicKey(&privKey.PublicKey, topic, &proxAddrPss)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pubKeyId := hexutil.Encode(crypto.FromECDSAPub(&privKey.PublicKey))
+ go func() {
+ err := ps.SendAsym(pubKeyId, topic, []byte("xyzzy"))
+ if err != nil {
+ errC <- err
+ }
+ }()
+ ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
+ defer cancel()
+ select {
+ case <-delivered:
+ case err := <-errC:
+ t.Fatal(err)
+ case <-ctx.Done():
+ t.Fatal("asym timeout")
+ }
+}
+
+// verify that node can be set as recipient regardless of explicit message address match if minimum one handler of a topic is explicitly set to allow it
+// note that in these tests we use the raw capability on handlers for convenience
+func TestAddressMatchProx(t *testing.T) {
+
+ // recipient node address
+ localAddr := network.RandomAddr().Over()
+ localPotAddr := pot.NewAddressFromBytes(localAddr)
+
+ // set up kademlia
+ kadparams := network.NewKadParams()
+ kad := network.NewKademlia(localAddr, kadparams)
+ nnPeerCount := kad.MinBinSize
+ peerCount := nnPeerCount + 2
+
+ // set up pss
+ privKey, err := crypto.GenerateKey()
+ pssp := NewPssParams().WithPrivateKey(privKey)
+ ps, err := NewPss(kad, pssp)
+ if err != nil {
+ t.Fatal(err.Error())
+ }
+
+ // create kademlia peers, so we have peers both inside and outside minproxlimit
+ var peers []*network.Peer
+ for i := 0; i < peerCount; i++ {
+ rw := &p2p.MsgPipeRW{}
+ ptpPeer := p2p.NewPeer(enode.ID{}, "362436 call me anytime", []p2p.Cap{})
+ protoPeer := protocols.NewPeer(ptpPeer, rw, &protocols.Spec{})
+ peerAddr := pot.RandomAddressAt(localPotAddr, i)
+ bzzPeer := &network.BzzPeer{
+ Peer: protoPeer,
+ BzzAddr: &network.BzzAddr{
+ OAddr: peerAddr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", peerAddr[:])),
+ },
+ }
+ peer := network.NewPeer(bzzPeer, kad)
+ kad.On(peer)
+ peers = append(peers, peer)
+ }
+
+ // TODO: create a test in the network package to make a table with n peers where n-m are proxpeers
+ // meanwhile test regression for kademlia since we are compiling the test parameters from different packages
+ var proxes int
+ var conns int
+ kad.EachConn(nil, peerCount, func(p *network.Peer, po int, prox bool) bool {
+ conns++
+ if prox {
+ proxes++
+ }
+ log.Trace("kadconn", "po", po, "peer", p, "prox", prox)
+ return true
+ })
+ if proxes != nnPeerCount {
+ t.Fatalf("expected %d proxpeers, have %d", nnPeerCount, proxes)
+ } else if conns != peerCount {
+ t.Fatalf("expected %d peers total, have %d", peerCount, proxes)
+ }
+
+ // remote address distances from localAddr to try and the expected outcomes if we use prox handler
+ remoteDistances := []int{
+ 255,
+ nnPeerCount + 1,
+ nnPeerCount,
+ nnPeerCount - 1,
+ 0,
+ }
+ expects := []bool{
+ true,
+ true,
+ true,
+ false,
+ false,
+ }
+
+ // first the unit test on the method that calculates possible receipient using prox
+ for i, distance := range remoteDistances {
+ pssMsg := newPssMsg(&msgParams{})
+ pssMsg.To = make([]byte, len(localAddr))
+ copy(pssMsg.To, localAddr)
+ var byteIdx = distance / 8
+ pssMsg.To[byteIdx] ^= 1 << uint(7-(distance%8))
+ log.Trace(fmt.Sprintf("addrmatch %v", bytes.Equal(pssMsg.To, localAddr)))
+ if ps.isSelfPossibleRecipient(pssMsg, true) != expects[i] {
+ t.Fatalf("expected distance %d to be %v", distance, expects[i])
+ }
+ }
+
+ // we move up to higher level and test the actual message handler
+ // for each distance check if we are possible recipient when prox variant is used is set
+
+ // this handler will increment a counter for every message that gets passed to the handler
+ var receives int
+ rawHandlerFunc := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ log.Trace("in allowraw handler")
+ receives++
+ return nil
+ }
+
+ // register it marking prox capability
+ topic := BytesToTopic([]byte{0x2a})
+ hndlrProxDereg := ps.Register(&topic, &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ prox: true,
+ },
+ })
+
+ // test the distances
+ var prevReceive int
+ for i, distance := range remoteDistances {
+ remotePotAddr := pot.RandomAddressAt(localPotAddr, distance)
+ remoteAddr := remotePotAddr.Bytes()
+
+ var data [32]byte
+ rand.Read(data[:])
+ pssMsg := newPssMsg(&msgParams{raw: true})
+ pssMsg.To = remoteAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ Data: data[:],
+ }
+
+ log.Trace("withprox addrs", "local", localAddr, "remote", remoteAddr)
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if (!expects[i] && prevReceive != receives) || (expects[i] && prevReceive == receives) {
+ t.Fatalf("expected distance %d recipient %v when prox is set for handler", distance, expects[i])
+ }
+ prevReceive = receives
+ }
+
+ // now add a non prox-capable handler and test
+ ps.Register(&topic, &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ },
+ })
+ receives = 0
+ prevReceive = 0
+ for i, distance := range remoteDistances {
+ remotePotAddr := pot.RandomAddressAt(localPotAddr, distance)
+ remoteAddr := remotePotAddr.Bytes()
+
+ var data [32]byte
+ rand.Read(data[:])
+ pssMsg := newPssMsg(&msgParams{raw: true})
+ pssMsg.To = remoteAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ Data: data[:],
+ }
+
+ log.Trace("withprox addrs", "local", localAddr, "remote", remoteAddr)
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if (!expects[i] && prevReceive != receives) || (expects[i] && prevReceive == receives) {
+ t.Fatalf("expected distance %d recipient %v when prox is set for handler", distance, expects[i])
+ }
+ prevReceive = receives
+ }
+
+ // now deregister the prox capable handler, now none of the messages will be handled
+ hndlrProxDereg()
+ receives = 0
+
+ for _, distance := range remoteDistances {
+ remotePotAddr := pot.RandomAddressAt(localPotAddr, distance)
+ remoteAddr := remotePotAddr.Bytes()
+
+ pssMsg := newPssMsg(&msgParams{raw: true})
+ pssMsg.To = remoteAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ Data: []byte(remotePotAddr.String()),
+ }
+
+ log.Trace("noprox addrs", "local", localAddr, "remote", remoteAddr)
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives != 0 {
+ t.Fatalf("expected distance %d to not be recipient when prox is not set for handler", distance)
+ }
+
+ }
+}
+
+// verify that message queueing happens when it should, and that expired and corrupt messages are dropped
+func TestMessageProcessing(t *testing.T) {
t.Skip("Disabled due to probable faulty logic for outbox expectations")
// setup
@@ -326,13 +650,12 @@ func TestHandlerConditions(t *testing.T) {
ps := newTestPss(privkey, network.NewKademlia(addr, network.NewKadParams()), NewPssParams())
// message should pass
- msg := &PssMsg{
- To: addr,
- Expire: uint32(time.Now().Add(time.Second * 60).Unix()),
- Payload: &whisper.Envelope{
- Topic: [4]byte{},
- Data: []byte{0x66, 0x6f, 0x6f},
- },
+ msg := newPssMsg(&msgParams{})
+ msg.To = addr
+ msg.Expire = uint32(time.Now().Add(time.Second * 60).Unix())
+ msg.Payload = &whisper.Envelope{
+ Topic: [4]byte{},
+ Data: []byte{0x66, 0x6f, 0x6f},
}
if err := ps.handlePssMsg(context.TODO(), msg); err != nil {
t.Fatal(err.Error())
@@ -498,6 +821,7 @@ func TestKeys(t *testing.T) {
}
}
+// check that we can retrieve previously added public key entires per topic and peer
func TestGetPublickeyEntries(t *testing.T) {
privkey, err := crypto.GenerateKey()
@@ -557,7 +881,7 @@ OUTER:
}
// forwarding should skip peers that do not have matching pss capabilities
-func TestMismatch(t *testing.T) {
+func TestPeerCapabilityMismatch(t *testing.T) {
// create privkey for forwarder node
privkey, err := crypto.GenerateKey()
@@ -615,6 +939,76 @@ func TestMismatch(t *testing.T) {
}
+// verifies that message handlers for raw messages only are invoked when minimum one handler for the topic exists in which raw messages are explicitly allowed
+func TestRawAllow(t *testing.T) {
+
+ // set up pss like so many times before
+ privKey, err := crypto.GenerateKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+ baseAddr := network.RandomAddr()
+ kad := network.NewKademlia((baseAddr).Over(), network.NewKadParams())
+ ps := newTestPss(privKey, kad, nil)
+ topic := BytesToTopic([]byte{0x2a})
+
+ // create handler innards that increments every time a message hits it
+ var receives int
+ rawHandlerFunc := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
+ log.Trace("in allowraw handler")
+ receives++
+ return nil
+ }
+
+ // wrap this handler function with a handler without raw capability and register it
+ hndlrNoRaw := &handler{
+ f: rawHandlerFunc,
+ }
+ ps.Register(&topic, hndlrNoRaw)
+
+ // test it with a raw message, should be poo-poo
+ pssMsg := newPssMsg(&msgParams{
+ raw: true,
+ })
+ pssMsg.To = baseAddr.OAddr
+ pssMsg.Expire = uint32(time.Now().Unix() + 4200)
+ pssMsg.Payload = &whisper.Envelope{
+ Topic: whisper.TopicType(topic),
+ }
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives > 0 {
+ t.Fatalf("Expected handler not to be executed with raw cap off")
+ }
+
+ // now wrap the same handler function with raw capabilities and register it
+ hndlrRaw := &handler{
+ f: rawHandlerFunc,
+ caps: &handlerCaps{
+ raw: true,
+ },
+ }
+ deregRawHandler := ps.Register(&topic, hndlrRaw)
+
+ // should work now
+ pssMsg.Payload.Data = []byte("Raw Deal")
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives == 0 {
+ t.Fatalf("Expected handler to be executed with raw cap on")
+ }
+
+ // now deregister the raw capable handler
+ prevReceives := receives
+ deregRawHandler()
+
+ // check that raw messages fail again
+ pssMsg.Payload.Data = []byte("Raw Trump")
+ ps.handlePssMsg(context.TODO(), pssMsg)
+ if receives != prevReceives {
+ t.Fatalf("Expected handler not to be executed when raw handler is retracted")
+ }
+}
+
+// verifies that nodes can send and receive raw (verbatim) messages
func TestSendRaw(t *testing.T) {
t.Run("32", testSendRaw)
t.Run("8", testSendRaw)
@@ -658,19 +1052,19 @@ func testSendRaw(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, lcancel := context.WithTimeout(context.Background(), time.Second*10)
defer lcancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, true, false)
log.Trace("lsub", "id", lsub)
defer lsub.Unsubscribe()
rmsgC := make(chan APIMsg)
rctx, rcancel := context.WithTimeout(context.Background(), time.Second*10)
defer rcancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, true, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
// send and verify delivery
lmsg := []byte("plugh")
- err = clients[1].Call(nil, "pss_sendRaw", loaddrhex, topic, lmsg)
+ err = clients[1].Call(nil, "pss_sendRaw", loaddrhex, topic, hexutil.Encode(lmsg))
if err != nil {
t.Fatal(err)
}
@@ -683,7 +1077,7 @@ func testSendRaw(t *testing.T) {
t.Fatalf("test message (left) timed out: %v", cerr)
}
rmsg := []byte("xyzzy")
- err = clients[0].Call(nil, "pss_sendRaw", roaddrhex, topic, rmsg)
+ err = clients[0].Call(nil, "pss_sendRaw", roaddrhex, topic, hexutil.Encode(rmsg))
if err != nil {
t.Fatal(err)
}
@@ -757,13 +1151,13 @@ func testSendSym(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, lcancel := context.WithTimeout(context.Background(), time.Second*10)
defer lcancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, false, false)
log.Trace("lsub", "id", lsub)
defer lsub.Unsubscribe()
rmsgC := make(chan APIMsg)
rctx, rcancel := context.WithTimeout(context.Background(), time.Second*10)
defer rcancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
@@ -872,13 +1266,13 @@ func testSendAsym(t *testing.T) {
lmsgC := make(chan APIMsg)
lctx, lcancel := context.WithTimeout(context.Background(), time.Second*10)
defer lcancel()
- lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+ lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic, false, false)
log.Trace("lsub", "id", lsub)
defer lsub.Unsubscribe()
rmsgC := make(chan APIMsg)
rctx, rcancel := context.WithTimeout(context.Background(), time.Second*10)
defer rcancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
@@ -1037,7 +1431,7 @@ func testNetwork(t *testing.T) {
msgC := make(chan APIMsg)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
- sub, err := rpcclient.Subscribe(ctx, "pss", msgC, "receive", topic)
+ sub, err := rpcclient.Subscribe(ctx, "pss", msgC, "receive", topic, false, false)
if err != nil {
t.Fatal(err)
}
@@ -1209,7 +1603,7 @@ func TestDeduplication(t *testing.T) {
rmsgC := make(chan APIMsg)
rctx, cancel := context.WithTimeout(context.Background(), time.Second*1)
defer cancel()
- rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+ rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic, false, false)
log.Trace("rsub", "id", rsub)
defer rsub.Unsubscribe()
@@ -1392,8 +1786,8 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) {
if err != nil {
b.Fatalf("could not generate whisper envelope: %v", err)
}
- ps.Register(&topic, func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
- return nil
+ ps.Register(&topic, &handler{
+ f: noopHandlerFunc,
})
pssmsgs = append(pssmsgs, &PssMsg{
To: to,
@@ -1402,7 +1796,7 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) {
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
- if err := ps.process(pssmsgs[len(pssmsgs)-(i%len(pssmsgs))-1]); err != nil {
+ if err := ps.process(pssmsgs[len(pssmsgs)-(i%len(pssmsgs))-1], false, false); err != nil {
b.Fatalf("pss processing failed: %v", err)
}
}
@@ -1476,15 +1870,15 @@ func benchmarkSymkeyBruteforceSameaddr(b *testing.B) {
if err != nil {
b.Fatalf("could not generate whisper envelope: %v", err)
}
- ps.Register(&topic, func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error {
- return nil
+ ps.Register(&topic, &handler{
+ f: noopHandlerFunc,
})
pssmsg := &PssMsg{
To: addr[len(addr)-1][:],
Payload: env,
}
for i := 0; i < b.N; i++ {
- if err := ps.process(pssmsg); err != nil {
+ if err := ps.process(pssmsg, false, false); err != nil {
b.Fatalf("pss processing failed: %v", err)
}
}
@@ -1581,7 +1975,12 @@ func newServices(allowRaw bool) adapters.Services {
if useHandshake {
SetHandshakeController(ps, NewHandshakeParams())
}
- ps.Register(&PingTopic, pp.Handle)
+ ps.Register(&PingTopic, &handler{
+ f: pp.Handle,
+ caps: &handlerCaps{
+ raw: true,
+ },
+ })
ps.addAPI(rpc.API{
Namespace: "psstest",
Version: "0.3",
diff --git a/swarm/pss/types.go b/swarm/pss/types.go
index 56c2c51dc..ba963067c 100644
--- a/swarm/pss/types.go
+++ b/swarm/pss/types.go
@@ -159,9 +159,39 @@ func (msg *PssMsg) String() string {
}
// Signature for a message handler function for a PssMsg
-//
// Implementations of this type are passed to Pss.Register together with a topic,
-type Handler func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error
+type HandlerFunc func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error
+
+type handlerCaps struct {
+ raw bool
+ prox bool
+}
+
+// Handler defines code to be executed upon reception of content.
+type handler struct {
+ f HandlerFunc
+ caps *handlerCaps
+}
+
+// NewHandler returns a new message handler
+func NewHandler(f HandlerFunc) *handler {
+ return &handler{
+ f: f,
+ caps: &handlerCaps{},
+ }
+}
+
+// WithRaw is a chainable method that allows raw messages to be handled.
+func (h *handler) WithRaw() *handler {
+ h.caps.raw = true
+ return h
+}
+
+// WithProxBin is a chainable method that allows sending messages with full addresses to neighbourhoods using the kademlia depth as reference
+func (h *handler) WithProxBin() *handler {
+ h.caps.prox = true
+ return h
+}
// the stateStore handles saving and loading PSS peers and their corresponding keys
// it is currently unimplemented
diff --git a/swarm/shed/db.go b/swarm/shed/db.go
new file mode 100644
index 000000000..e128b8cbc
--- /dev/null
+++ b/swarm/shed/db.go
@@ -0,0 +1,130 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package shed provides a simple abstraction components to compose
+// more complex operations on storage data organized in fields and indexes.
+//
+// Only type which holds logical information about swarm storage chunks data
+// and metadata is IndexItem. This part is not generalized mostly for
+// performance reasons.
+package shed
+
+import (
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+// The limit for LevelDB OpenFilesCacheCapacity.
+const openFileLimit = 128
+
+// DB provides abstractions over LevelDB in order to
+// implement complex structures using fields and ordered indexes.
+// It provides a schema functionality to store fields and indexes
+// information about naming and types.
+type DB struct {
+ ldb *leveldb.DB
+}
+
+// NewDB constructs a new DB and validates the schema
+// if it exists in database on the given path.
+func NewDB(path string) (db *DB, err error) {
+ ldb, err := leveldb.OpenFile(path, &opt.Options{
+ OpenFilesCacheCapacity: openFileLimit,
+ })
+ if err != nil {
+ return nil, err
+ }
+ db = &DB{
+ ldb: ldb,
+ }
+
+ if _, err = db.getSchema(); err != nil {
+ if err == leveldb.ErrNotFound {
+ // save schema with initialized default fields
+ if err = db.putSchema(schema{
+ Fields: make(map[string]fieldSpec),
+ Indexes: make(map[byte]indexSpec),
+ }); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+ }
+ return db, nil
+}
+
+// Put wraps LevelDB Put method to increment metrics counter.
+func (db *DB) Put(key []byte, value []byte) (err error) {
+ err = db.ldb.Put(key, value, nil)
+ if err != nil {
+ metrics.GetOrRegisterCounter("DB.putFail", nil).Inc(1)
+ return err
+ }
+ metrics.GetOrRegisterCounter("DB.put", nil).Inc(1)
+ return nil
+}
+
+// Get wraps LevelDB Get method to increment metrics counter.
+func (db *DB) Get(key []byte) (value []byte, err error) {
+ value, err = db.ldb.Get(key, nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ metrics.GetOrRegisterCounter("DB.getNotFound", nil).Inc(1)
+ } else {
+ metrics.GetOrRegisterCounter("DB.getFail", nil).Inc(1)
+ }
+ return nil, err
+ }
+ metrics.GetOrRegisterCounter("DB.get", nil).Inc(1)
+ return value, nil
+}
+
+// Delete wraps LevelDB Delete method to increment metrics counter.
+func (db *DB) Delete(key []byte) (err error) {
+ err = db.ldb.Delete(key, nil)
+ if err != nil {
+ metrics.GetOrRegisterCounter("DB.deleteFail", nil).Inc(1)
+ return err
+ }
+ metrics.GetOrRegisterCounter("DB.delete", nil).Inc(1)
+ return nil
+}
+
+// NewIterator wraps LevelDB NewIterator method to increment metrics counter.
+func (db *DB) NewIterator() iterator.Iterator {
+ metrics.GetOrRegisterCounter("DB.newiterator", nil).Inc(1)
+
+ return db.ldb.NewIterator(nil, nil)
+}
+
+// WriteBatch wraps LevelDB Write method to increment metrics counter.
+func (db *DB) WriteBatch(batch *leveldb.Batch) (err error) {
+ err = db.ldb.Write(batch, nil)
+ if err != nil {
+ metrics.GetOrRegisterCounter("DB.writebatchFail", nil).Inc(1)
+ return err
+ }
+ metrics.GetOrRegisterCounter("DB.writebatch", nil).Inc(1)
+ return nil
+}
+
+// Close closes LevelDB database.
+func (db *DB) Close() (err error) {
+ return db.ldb.Close()
+}
diff --git a/swarm/shed/db_test.go b/swarm/shed/db_test.go
new file mode 100644
index 000000000..45325beeb
--- /dev/null
+++ b/swarm/shed/db_test.go
@@ -0,0 +1,110 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+// TestNewDB constructs a new DB
+// and validates if the schema is initialized properly.
+func TestNewDB(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ s, err := db.getSchema()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if s.Fields == nil {
+ t.Error("schema fields are empty")
+ }
+ if len(s.Fields) != 0 {
+ t.Errorf("got schema fields length %v, want %v", len(s.Fields), 0)
+ }
+ if s.Indexes == nil {
+ t.Error("schema indexes are empty")
+ }
+ if len(s.Indexes) != 0 {
+ t.Errorf("got schema indexes length %v, want %v", len(s.Indexes), 0)
+ }
+}
+
+// TestDB_persistence creates one DB, saves a field and closes that DB.
+// Then, it constructs another DB and trues to retrieve the saved value.
+func TestDB_persistence(t *testing.T) {
+ dir, err := ioutil.TempDir("", "shed-test-persistence")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ db, err := NewDB(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ stringField, err := db.NewStringField("preserve-me")
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "persistent value"
+ err = stringField.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = db.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ db2, err := NewDB(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ stringField2, err := db2.NewStringField("preserve-me")
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := stringField2.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+}
+
+// newTestDB is a helper function that constructs a
+// temporary database and returns a cleanup function that must
+// be called to remove the data.
+func newTestDB(t *testing.T) (db *DB, cleanupFunc func()) {
+ t.Helper()
+
+ dir, err := ioutil.TempDir("", "shed-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cleanupFunc = func() { os.RemoveAll(dir) }
+ db, err = NewDB(dir)
+ if err != nil {
+ cleanupFunc()
+ t.Fatal(err)
+ }
+ return db, cleanupFunc
+}
diff --git a/swarm/shed/example_store_test.go b/swarm/shed/example_store_test.go
new file mode 100644
index 000000000..2ed0be141
--- /dev/null
+++ b/swarm/shed/example_store_test.go
@@ -0,0 +1,332 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed_test
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "time"
+
+ "github.com/ethereum/go-ethereum/swarm/shed"
+ "github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// Store holds fields and indexes (including their encoding functions)
+// and defines operations on them by composing data from them.
+// It implements storage.ChunkStore interface.
+// It is just an example without any support for parallel operations
+// or real world implementation.
+type Store struct {
+ db *shed.DB
+
+ // fields and indexes
+ schemaName shed.StringField
+ sizeCounter shed.Uint64Field
+ accessCounter shed.Uint64Field
+ retrievalIndex shed.Index
+ accessIndex shed.Index
+ gcIndex shed.Index
+}
+
+// New returns new Store. All fields and indexes are initialized
+// and possible conflicts with schema from existing database is checked
+// automatically.
+func New(path string) (s *Store, err error) {
+ db, err := shed.NewDB(path)
+ if err != nil {
+ return nil, err
+ }
+ s = &Store{
+ db: db,
+ }
+ // Identify current storage schema by arbitrary name.
+ s.schemaName, err = db.NewStringField("schema-name")
+ if err != nil {
+ return nil, err
+ }
+ // Global ever incrementing index of chunk accesses.
+ s.accessCounter, err = db.NewUint64Field("access-counter")
+ if err != nil {
+ return nil, err
+ }
+ // Index storing actual chunk address, data and store timestamp.
+ s.retrievalIndex, err = db.NewIndex("Address->StoreTimestamp|Data", shed.IndexFuncs{
+ EncodeKey: func(fields shed.IndexItem) (key []byte, err error) {
+ return fields.Address, nil
+ },
+ DecodeKey: func(key []byte) (e shed.IndexItem, err error) {
+ e.Address = key
+ return e, nil
+ },
+ EncodeValue: func(fields shed.IndexItem) (value []byte, err error) {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+ value = append(b, fields.Data...)
+ return value, nil
+ },
+ DecodeValue: func(value []byte) (e shed.IndexItem, err error) {
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
+ e.Data = value[8:]
+ return e, nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ // Index storing access timestamp for a particular address.
+ // It is needed in order to update gc index keys for iteration order.
+ s.accessIndex, err = db.NewIndex("Address->AccessTimestamp", shed.IndexFuncs{
+ EncodeKey: func(fields shed.IndexItem) (key []byte, err error) {
+ return fields.Address, nil
+ },
+ DecodeKey: func(key []byte) (e shed.IndexItem, err error) {
+ e.Address = key
+ return e, nil
+ },
+ EncodeValue: func(fields shed.IndexItem) (value []byte, err error) {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(fields.AccessTimestamp))
+ return b, nil
+ },
+ DecodeValue: func(value []byte) (e shed.IndexItem, err error) {
+ e.AccessTimestamp = int64(binary.BigEndian.Uint64(value))
+ return e, nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ // Index with keys ordered by access timestamp for garbage collection prioritization.
+ s.gcIndex, err = db.NewIndex("AccessTimestamp|StoredTimestamp|Address->nil", shed.IndexFuncs{
+ EncodeKey: func(fields shed.IndexItem) (key []byte, err error) {
+ b := make([]byte, 16, 16+len(fields.Address))
+ binary.BigEndian.PutUint64(b[:8], uint64(fields.AccessTimestamp))
+ binary.BigEndian.PutUint64(b[8:16], uint64(fields.StoreTimestamp))
+ key = append(b, fields.Address...)
+ return key, nil
+ },
+ DecodeKey: func(key []byte) (e shed.IndexItem, err error) {
+ e.AccessTimestamp = int64(binary.BigEndian.Uint64(key[:8]))
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(key[8:16]))
+ e.Address = key[16:]
+ return e, nil
+ },
+ EncodeValue: func(fields shed.IndexItem) (value []byte, err error) {
+ return nil, nil
+ },
+ DecodeValue: func(value []byte) (e shed.IndexItem, err error) {
+ return e, nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// Put stores the chunk and sets it store timestamp.
+func (s *Store) Put(_ context.Context, ch storage.Chunk) (err error) {
+ return s.retrievalIndex.Put(shed.IndexItem{
+ Address: ch.Address(),
+ Data: ch.Data(),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ })
+}
+
+// Get retrieves a chunk with the provided address.
+// It updates access and gc indexes by removing the previous
+// items from them and adding new items as keys of index entries
+// are changed.
+func (s *Store) Get(_ context.Context, addr storage.Address) (c storage.Chunk, err error) {
+ batch := new(leveldb.Batch)
+
+ // Get the chunk data and storage timestamp.
+ item, err := s.retrievalIndex.Get(shed.IndexItem{
+ Address: addr,
+ })
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ return nil, storage.ErrChunkNotFound
+ }
+ return nil, err
+ }
+
+ // Get the chunk access timestamp.
+ accessItem, err := s.accessIndex.Get(shed.IndexItem{
+ Address: addr,
+ })
+ switch err {
+ case nil:
+ // Remove gc index entry if access timestamp is found.
+ err = s.gcIndex.DeleteInBatch(batch, shed.IndexItem{
+ Address: item.Address,
+ StoreTimestamp: accessItem.AccessTimestamp,
+ AccessTimestamp: item.StoreTimestamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+ case leveldb.ErrNotFound:
+ // Access timestamp is not found. Do not do anything.
+ // This is the firs get request.
+ default:
+ return nil, err
+ }
+
+ // Specify new access timestamp
+ accessTimestamp := time.Now().UTC().UnixNano()
+
+ // Put new access timestamp in access index.
+ err = s.accessIndex.PutInBatch(batch, shed.IndexItem{
+ Address: addr,
+ AccessTimestamp: accessTimestamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Put new access timestamp in gc index.
+ err = s.gcIndex.PutInBatch(batch, shed.IndexItem{
+ Address: item.Address,
+ AccessTimestamp: accessTimestamp,
+ StoreTimestamp: item.StoreTimestamp,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Increment access counter.
+ // Currently this information is not used anywhere.
+ _, err = s.accessCounter.IncInBatch(batch)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write the batch.
+ err = s.db.WriteBatch(batch)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the chunk.
+ return storage.NewChunk(item.Address, item.Data), nil
+}
+
+// CollectGarbage is an example of index iteration.
+// It provides no reliable garbage collection functionality.
+func (s *Store) CollectGarbage() (err error) {
+ const maxTrashSize = 100
+ maxRounds := 10 // arbitrary number, needs to be calculated
+
+ // Run a few gc rounds.
+ for roundCount := 0; roundCount < maxRounds; roundCount++ {
+ var garbageCount int
+ // New batch for a new cg round.
+ trash := new(leveldb.Batch)
+ // Iterate through all index items and break when needed.
+ err = s.gcIndex.IterateAll(func(item shed.IndexItem) (stop bool, err error) {
+ // Remove the chunk.
+ err = s.retrievalIndex.DeleteInBatch(trash, item)
+ if err != nil {
+ return false, err
+ }
+ // Remove the element in gc index.
+ err = s.gcIndex.DeleteInBatch(trash, item)
+ if err != nil {
+ return false, err
+ }
+ // Remove the relation in access index.
+ err = s.accessIndex.DeleteInBatch(trash, item)
+ if err != nil {
+ return false, err
+ }
+ garbageCount++
+ if garbageCount >= maxTrashSize {
+ return true, nil
+ }
+ return false, nil
+ })
+ if err != nil {
+ return err
+ }
+ if garbageCount == 0 {
+ return nil
+ }
+ err = s.db.WriteBatch(trash)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSchema is an example of retrieveing the most simple
+// string from a database field.
+func (s *Store) GetSchema() (name string, err error) {
+ name, err = s.schemaName.Get()
+ if err == leveldb.ErrNotFound {
+ return "", nil
+ }
+ return name, err
+}
+
+// GetSchema is an example of storing the most simple
+// string in a database field.
+func (s *Store) PutSchema(name string) (err error) {
+ return s.schemaName.Put(name)
+}
+
+// Close closes the underlying database.
+func (s *Store) Close() error {
+ return s.db.Close()
+}
+
+// Example_store constructs a simple storage implementation using shed package.
+func Example_store() {
+ dir, err := ioutil.TempDir("", "ephemeral")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ s, err := New(dir)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer s.Close()
+
+ ch := storage.GenerateRandomChunk(1024)
+ err = s.Put(context.Background(), ch)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ got, err := s.Get(context.Background(), ch.Address())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(bytes.Equal(got.Data(), ch.Data()))
+
+ //Output: true
+}
diff --git a/swarm/shed/field_string.go b/swarm/shed/field_string.go
new file mode 100644
index 000000000..a7e8f0c75
--- /dev/null
+++ b/swarm/shed/field_string.go
@@ -0,0 +1,66 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// StringField is the most simple field implementation
+// that stores an arbitrary string under a specific LevelDB key.
+type StringField struct {
+ db *DB
+ key []byte
+}
+
+// NewStringField retruns a new Instance of StringField.
+// It validates its name and type against the database schema.
+func (db *DB) NewStringField(name string) (f StringField, err error) {
+ key, err := db.schemaFieldKey(name, "string")
+ if err != nil {
+ return f, err
+ }
+ return StringField{
+ db: db,
+ key: key,
+ }, nil
+}
+
+// Get returns a string value from database.
+// If the value is not found, an empty string is returned
+// an no error.
+func (f StringField) Get() (val string, err error) {
+ b, err := f.db.Get(f.key)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ return "", nil
+ }
+ return "", err
+ }
+ return string(b), nil
+}
+
+// Put stores a string in the database.
+func (f StringField) Put(val string) (err error) {
+ return f.db.Put(f.key, []byte(val))
+}
+
+// PutInBatch stores a string in a batch that can be
+// saved later in database.
+func (f StringField) PutInBatch(batch *leveldb.Batch, val string) {
+ batch.Put(f.key, []byte(val))
+}
diff --git a/swarm/shed/field_string_test.go b/swarm/shed/field_string_test.go
new file mode 100644
index 000000000..4215075bc
--- /dev/null
+++ b/swarm/shed/field_string_test.go
@@ -0,0 +1,110 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "testing"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// TestStringField validates put and get operations
+// of the StringField.
+func TestStringField(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ simpleString, err := db.NewStringField("simple-string")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("get empty", func(t *testing.T) {
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := ""
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+
+ t.Run("put", func(t *testing.T) {
+ want := "simple string value"
+ err = simpleString.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := "overwritten string value"
+ err = simpleString.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := "simple string batch value"
+ simpleString.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := "overwritten string batch value"
+ simpleString.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := simpleString.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+ })
+}
diff --git a/swarm/shed/field_struct.go b/swarm/shed/field_struct.go
new file mode 100644
index 000000000..90daee7fc
--- /dev/null
+++ b/swarm/shed/field_struct.go
@@ -0,0 +1,71 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// StructField is a helper to store complex structure by
+// encoding it in RLP format.
+type StructField struct {
+ db *DB
+ key []byte
+}
+
+// NewStructField returns a new StructField.
+// It validates its name and type against the database schema.
+func (db *DB) NewStructField(name string) (f StructField, err error) {
+ key, err := db.schemaFieldKey(name, "struct-rlp")
+ if err != nil {
+ return f, err
+ }
+ return StructField{
+ db: db,
+ key: key,
+ }, nil
+}
+
+// Get unmarshals data from the database to a provided val.
+// If the data is not found leveldb.ErrNotFound is returned.
+func (f StructField) Get(val interface{}) (err error) {
+ b, err := f.db.Get(f.key)
+ if err != nil {
+ return err
+ }
+ return rlp.DecodeBytes(b, val)
+}
+
+// Put marshals provided val and saves it to the database.
+func (f StructField) Put(val interface{}) (err error) {
+ b, err := rlp.EncodeToBytes(val)
+ if err != nil {
+ return err
+ }
+ return f.db.Put(f.key, b)
+}
+
+// PutInBatch marshals provided val and puts it into the batch.
+func (f StructField) PutInBatch(batch *leveldb.Batch, val interface{}) (err error) {
+ b, err := rlp.EncodeToBytes(val)
+ if err != nil {
+ return err
+ }
+ batch.Put(f.key, b)
+ return nil
+}
diff --git a/swarm/shed/field_struct_test.go b/swarm/shed/field_struct_test.go
new file mode 100644
index 000000000..cc0be0186
--- /dev/null
+++ b/swarm/shed/field_struct_test.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "testing"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// TestStructField validates put and get operations
+// of the StructField.
+func TestStructField(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ complexField, err := db.NewStructField("complex-field")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type complexStructure struct {
+ A string
+ }
+
+ t.Run("get empty", func(t *testing.T) {
+ var s complexStructure
+ err := complexField.Get(&s)
+ if err != leveldb.ErrNotFound {
+ t.Fatalf("got error %v, want %v", err, leveldb.ErrNotFound)
+ }
+ want := ""
+ if s.A != want {
+ t.Errorf("got string %q, want %q", s.A, want)
+ }
+ })
+
+ t.Run("put", func(t *testing.T) {
+ want := complexStructure{
+ A: "simple string value",
+ }
+ err = complexField.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err = complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got.A, want.A)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := complexStructure{
+ A: "overwritten string value",
+ }
+ err = complexField.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err = complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got.A, want.A)
+ }
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := complexStructure{
+ A: "simple string batch value",
+ }
+ complexField.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err := complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ want := complexStructure{
+ A: "overwritten string batch value",
+ }
+ complexField.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var got complexStructure
+ err := complexField.Get(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.A != want.A {
+ t.Errorf("got string %q, want %q", got, want)
+ }
+ })
+ })
+}
diff --git a/swarm/shed/field_uint64.go b/swarm/shed/field_uint64.go
new file mode 100644
index 000000000..80e0069ae
--- /dev/null
+++ b/swarm/shed/field_uint64.go
@@ -0,0 +1,108 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "encoding/binary"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// Uint64Field provides a way to have a simple counter in the database.
+// It transparently encodes uint64 type value to bytes.
+type Uint64Field struct {
+ db *DB
+ key []byte
+}
+
+// NewUint64Field returns a new Uint64Field.
+// It validates its name and type against the database schema.
+func (db *DB) NewUint64Field(name string) (f Uint64Field, err error) {
+ key, err := db.schemaFieldKey(name, "uint64")
+ if err != nil {
+ return f, err
+ }
+ return Uint64Field{
+ db: db,
+ key: key,
+ }, nil
+}
+
+// Get retrieves a uint64 value from the database.
+// If the value is not found in the database a 0 value
+// is returned and no error.
+func (f Uint64Field) Get() (val uint64, err error) {
+ b, err := f.db.Get(f.key)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ return 0, nil
+ }
+ return 0, err
+ }
+ return binary.BigEndian.Uint64(b), nil
+}
+
+// Put encodes uin64 value and stores it in the database.
+func (f Uint64Field) Put(val uint64) (err error) {
+ return f.db.Put(f.key, encodeUint64(val))
+}
+
+// PutInBatch stores a uint64 value in a batch
+// that can be saved later in the database.
+func (f Uint64Field) PutInBatch(batch *leveldb.Batch, val uint64) {
+ batch.Put(f.key, encodeUint64(val))
+}
+
+// Inc increments a uint64 value in the database.
+// This operation is not goroutine save.
+func (f Uint64Field) Inc() (val uint64, err error) {
+ val, err = f.Get()
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ val = 0
+ } else {
+ return 0, err
+ }
+ }
+ val++
+ return val, f.Put(val)
+}
+
+// IncInBatch increments a uint64 value in the batch
+// by retreiving a value from the database, not the same batch.
+// This operation is not goroutine save.
+func (f Uint64Field) IncInBatch(batch *leveldb.Batch) (val uint64, err error) {
+ val, err = f.Get()
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ val = 0
+ } else {
+ return 0, err
+ }
+ }
+ val++
+ f.PutInBatch(batch, val)
+ return val, nil
+}
+
+// encode transforms uint64 to 8 byte long
+// slice in big endian encoding.
+func encodeUint64(val uint64) (b []byte) {
+ b = make([]byte, 8)
+ binary.BigEndian.PutUint64(b, val)
+ return b
+}
diff --git a/swarm/shed/field_uint64_test.go b/swarm/shed/field_uint64_test.go
new file mode 100644
index 000000000..69ade71ba
--- /dev/null
+++ b/swarm/shed/field_uint64_test.go
@@ -0,0 +1,194 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "testing"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// TestUint64Field validates put and get operations
+// of the Uint64Field.
+func TestUint64Field(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("get empty", func(t *testing.T) {
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var want uint64
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ })
+
+ t.Run("put", func(t *testing.T) {
+ var want uint64 = 42
+ err = counter.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ var want uint64 = 84
+ err = counter.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ var want uint64 = 42
+ counter.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ t.Run("overwrite", func(t *testing.T) {
+ batch := new(leveldb.Batch)
+ var want uint64 = 84
+ counter.PutInBatch(batch, want)
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ })
+ })
+}
+
+// TestUint64Field_Inc validates Inc operation
+// of the Uint64Field.
+func TestUint64Field_Inc(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var want uint64 = 1
+ got, err := counter.Inc()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ want = 2
+ got, err = counter.Inc()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+}
+
+// TestUint64Field_IncInBatch validates IncInBatch operation
+// of the Uint64Field.
+func TestUint64Field_IncInBatch(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ counter, err := db.NewUint64Field("counter")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ batch := new(leveldb.Batch)
+ var want uint64 = 1
+ got, err := counter.IncInBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+
+ batch2 := new(leveldb.Batch)
+ want = 2
+ got, err = counter.IncInBatch(batch2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+ err = db.WriteBatch(batch2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err = counter.Get()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Errorf("got uint64 %v, want %v", got, want)
+ }
+}
diff --git a/swarm/shed/index.go b/swarm/shed/index.go
new file mode 100644
index 000000000..ba803e3c2
--- /dev/null
+++ b/swarm/shed/index.go
@@ -0,0 +1,264 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// IndexItem holds fields relevant to Swarm Chunk data and metadata.
+// All information required for swarm storage and operations
+// on that storage must be defined here.
+// This structure is logically connected to swarm storage,
+// the only part of this package that is not generalized,
+// mostly for performance reasons.
+//
+// IndexItem is a type that is used for retrieving, storing and encoding
+// chunk data and metadata. It is passed as an argument to Index encoding
+// functions, get function and put function.
+// But it is also returned with additional data from get function call
+// and as the argument in iterator function definition.
+type IndexItem struct {
+ Address []byte
+ Data []byte
+ AccessTimestamp int64
+ StoreTimestamp int64
+ // UseMockStore is a pointer to identify
+ // an unset state of the field in Join function.
+ UseMockStore *bool
+}
+
+// Merge is a helper method to construct a new
+// IndexItem by filling up fields with default values
+// of a particular IndexItem with values from another one.
+func (i IndexItem) Merge(i2 IndexItem) (new IndexItem) {
+ if i.Address == nil {
+ i.Address = i2.Address
+ }
+ if i.Data == nil {
+ i.Data = i2.Data
+ }
+ if i.AccessTimestamp == 0 {
+ i.AccessTimestamp = i2.AccessTimestamp
+ }
+ if i.StoreTimestamp == 0 {
+ i.StoreTimestamp = i2.StoreTimestamp
+ }
+ if i.UseMockStore == nil {
+ i.UseMockStore = i2.UseMockStore
+ }
+ return i
+}
+
+// Index represents a set of LevelDB key value pairs that have common
+// prefix. It holds functions for encoding and decoding keys and values
+// to provide transparent actions on saved data which inclide:
+// - getting a particular IndexItem
+// - saving a particular IndexItem
+// - iterating over a sorted LevelDB keys
+// It implements IndexIteratorInterface interface.
+type Index struct {
+ db *DB
+ prefix []byte
+ encodeKeyFunc func(fields IndexItem) (key []byte, err error)
+ decodeKeyFunc func(key []byte) (e IndexItem, err error)
+ encodeValueFunc func(fields IndexItem) (value []byte, err error)
+ decodeValueFunc func(value []byte) (e IndexItem, err error)
+}
+
+// IndexFuncs structure defines functions for encoding and decoding
+// LevelDB keys and values for a specific index.
+type IndexFuncs struct {
+ EncodeKey func(fields IndexItem) (key []byte, err error)
+ DecodeKey func(key []byte) (e IndexItem, err error)
+ EncodeValue func(fields IndexItem) (value []byte, err error)
+ DecodeValue func(value []byte) (e IndexItem, err error)
+}
+
+// NewIndex returns a new Index instance with defined name and
+// encoding functions. The name must be unique and will be validated
+// on database schema for a key prefix byte.
+func (db *DB) NewIndex(name string, funcs IndexFuncs) (f Index, err error) {
+ id, err := db.schemaIndexPrefix(name)
+ if err != nil {
+ return f, err
+ }
+ prefix := []byte{id}
+ return Index{
+ db: db,
+ prefix: prefix,
+ // This function adjusts Index LevelDB key
+ // by appending the provided index id byte.
+ // This is needed to avoid collisions between keys of different
+ // indexes as all index ids are unique.
+ encodeKeyFunc: func(e IndexItem) (key []byte, err error) {
+ key, err = funcs.EncodeKey(e)
+ if err != nil {
+ return nil, err
+ }
+ return append(append(make([]byte, 0, len(key)+1), prefix...), key...), nil
+ },
+ // This function reverses the encodeKeyFunc constructed key
+ // to transparently work with index keys without their index ids.
+ // It assumes that index keys are prefixed with only one byte.
+ decodeKeyFunc: func(key []byte) (e IndexItem, err error) {
+ return funcs.DecodeKey(key[1:])
+ },
+ encodeValueFunc: funcs.EncodeValue,
+ decodeValueFunc: funcs.DecodeValue,
+ }, nil
+}
+
+// Get accepts key fields represented as IndexItem to retrieve a
+// value from the index and return maximum available information
+// from the index represented as another IndexItem.
+func (f Index) Get(keyFields IndexItem) (out IndexItem, err error) {
+ key, err := f.encodeKeyFunc(keyFields)
+ if err != nil {
+ return out, err
+ }
+ value, err := f.db.Get(key)
+ if err != nil {
+ return out, err
+ }
+ out, err = f.decodeValueFunc(value)
+ if err != nil {
+ return out, err
+ }
+ return out.Merge(keyFields), nil
+}
+
+// Put accepts IndexItem to encode information from it
+// and save it to the database.
+func (f Index) Put(i IndexItem) (err error) {
+ key, err := f.encodeKeyFunc(i)
+ if err != nil {
+ return err
+ }
+ value, err := f.encodeValueFunc(i)
+ if err != nil {
+ return err
+ }
+ return f.db.Put(key, value)
+}
+
+// PutInBatch is the same as Put method, but it just
+// saves the key/value pair to the batch instead
+// directly to the database.
+func (f Index) PutInBatch(batch *leveldb.Batch, i IndexItem) (err error) {
+ key, err := f.encodeKeyFunc(i)
+ if err != nil {
+ return err
+ }
+ value, err := f.encodeValueFunc(i)
+ if err != nil {
+ return err
+ }
+ batch.Put(key, value)
+ return nil
+}
+
+// Delete accepts IndexItem to remove a key/value pair
+// from the database based on its fields.
+func (f Index) Delete(keyFields IndexItem) (err error) {
+ key, err := f.encodeKeyFunc(keyFields)
+ if err != nil {
+ return err
+ }
+ return f.db.Delete(key)
+}
+
+// DeleteInBatch is the same as Delete just the operation
+// is performed on the batch instead on the database.
+func (f Index) DeleteInBatch(batch *leveldb.Batch, keyFields IndexItem) (err error) {
+ key, err := f.encodeKeyFunc(keyFields)
+ if err != nil {
+ return err
+ }
+ batch.Delete(key)
+ return nil
+}
+
+// IndexIterFunc is a callback on every IndexItem that is decoded
+// by iterating on an Index keys.
+// By returning a true for stop variable, iteration will
+// stop, and by returning the error, that error will be
+// propagated to the called iterator method on Index.
+type IndexIterFunc func(item IndexItem) (stop bool, err error)
+
+// IterateAll iterates over all keys of the Index.
+func (f Index) IterateAll(fn IndexIterFunc) (err error) {
+ it := f.db.NewIterator()
+ defer it.Release()
+
+ for ok := it.Seek(f.prefix); ok; ok = it.Next() {
+ key := it.Key()
+ if key[0] != f.prefix[0] {
+ break
+ }
+ keyIndexItem, err := f.decodeKeyFunc(key)
+ if err != nil {
+ return err
+ }
+ valueIndexItem, err := f.decodeValueFunc(it.Value())
+ if err != nil {
+ return err
+ }
+ stop, err := fn(keyIndexItem.Merge(valueIndexItem))
+ if err != nil {
+ return err
+ }
+ if stop {
+ break
+ }
+ }
+ return it.Error()
+}
+
+// IterateFrom iterates over Index keys starting from the key
+// encoded from the provided IndexItem.
+func (f Index) IterateFrom(start IndexItem, fn IndexIterFunc) (err error) {
+ startKey, err := f.encodeKeyFunc(start)
+ if err != nil {
+ return err
+ }
+ it := f.db.NewIterator()
+ defer it.Release()
+
+ for ok := it.Seek(startKey); ok; ok = it.Next() {
+ key := it.Key()
+ if key[0] != f.prefix[0] {
+ break
+ }
+ keyIndexItem, err := f.decodeKeyFunc(key)
+ if err != nil {
+ return err
+ }
+ valueIndexItem, err := f.decodeValueFunc(it.Value())
+ if err != nil {
+ return err
+ }
+ stop, err := fn(keyIndexItem.Merge(valueIndexItem))
+ if err != nil {
+ return err
+ }
+ if stop {
+ break
+ }
+ }
+ return it.Error()
+}
diff --git a/swarm/shed/index_test.go b/swarm/shed/index_test.go
new file mode 100644
index 000000000..ba82216df
--- /dev/null
+++ b/swarm/shed/index_test.go
@@ -0,0 +1,426 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+// Index functions for the index that is used in tests in this file.
+var retrievalIndexFuncs = IndexFuncs{
+ EncodeKey: func(fields IndexItem) (key []byte, err error) {
+ return fields.Address, nil
+ },
+ DecodeKey: func(key []byte) (e IndexItem, err error) {
+ e.Address = key
+ return e, nil
+ },
+ EncodeValue: func(fields IndexItem) (value []byte, err error) {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+ value = append(b, fields.Data...)
+ return value, nil
+ },
+ DecodeValue: func(value []byte) (e IndexItem, err error) {
+ e.StoreTimestamp = int64(binary.BigEndian.Uint64(value[:8]))
+ e.Data = value[8:]
+ return e, nil
+ },
+}
+
+// TestIndex validates put, get and delete functions of the Index implementation.
+func TestIndex(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("put", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("put-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err := index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("put-hash"),
+ Data: []byte("New DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err = index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+ })
+ })
+
+ t.Run("put in batch", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("put-in-batch-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ batch := new(leveldb.Batch)
+ index.PutInBatch(batch, want)
+ err := db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+
+ t.Run("overwrite", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("put-in-batch-hash"),
+ Data: []byte("New DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ batch := new(leveldb.Batch)
+ index.PutInBatch(batch, want)
+ db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+ })
+ })
+
+ t.Run("put in batch twice", func(t *testing.T) {
+ // ensure that the last item of items with the same db keys
+ // is actually saved
+ batch := new(leveldb.Batch)
+ address := []byte("put-in-batch-twice-hash")
+
+ // put the first item
+ index.PutInBatch(batch, IndexItem{
+ Address: address,
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ })
+
+ want := IndexItem{
+ Address: address,
+ Data: []byte("New DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+ // then put the item that will produce the same key
+ // but different value in the database
+ index.PutInBatch(batch, want)
+ db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+ })
+
+ t.Run("delete", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("delete-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err := index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+
+ err = index.Delete(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wantErr := leveldb.ErrNotFound
+ got, err = index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != wantErr {
+ t.Fatalf("got error %v, want %v", err, wantErr)
+ }
+ })
+
+ t.Run("delete in batch", func(t *testing.T) {
+ want := IndexItem{
+ Address: []byte("delete-in-batch-hash"),
+ Data: []byte("DATA"),
+ StoreTimestamp: time.Now().UTC().UnixNano(),
+ }
+
+ err := index.Put(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkIndexItem(t, got, want)
+
+ batch := new(leveldb.Batch)
+ index.DeleteInBatch(batch, IndexItem{
+ Address: want.Address,
+ })
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wantErr := leveldb.ErrNotFound
+ got, err = index.Get(IndexItem{
+ Address: want.Address,
+ })
+ if err != wantErr {
+ t.Fatalf("got error %v, want %v", err, wantErr)
+ }
+ })
+}
+
+// TestIndex_iterate validates index iterator functions for correctness.
+func TestIndex_iterate(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ index, err := db.NewIndex("retrieval", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ items := []IndexItem{
+ {
+ Address: []byte("iterate-hash-01"),
+ Data: []byte("data80"),
+ },
+ {
+ Address: []byte("iterate-hash-03"),
+ Data: []byte("data22"),
+ },
+ {
+ Address: []byte("iterate-hash-05"),
+ Data: []byte("data41"),
+ },
+ {
+ Address: []byte("iterate-hash-02"),
+ Data: []byte("data84"),
+ },
+ {
+ Address: []byte("iterate-hash-06"),
+ Data: []byte("data1"),
+ },
+ }
+ batch := new(leveldb.Batch)
+ for _, i := range items {
+ index.PutInBatch(batch, i)
+ }
+ err = db.WriteBatch(batch)
+ if err != nil {
+ t.Fatal(err)
+ }
+ item04 := IndexItem{
+ Address: []byte("iterate-hash-04"),
+ Data: []byte("data0"),
+ }
+ err = index.Put(item04)
+ if err != nil {
+ t.Fatal(err)
+ }
+ items = append(items, item04)
+
+ sort.SliceStable(items, func(i, j int) bool {
+ return bytes.Compare(items[i].Address, items[j].Address) < 0
+ })
+
+ t.Run("all", func(t *testing.T) {
+ var i int
+ err := index.IterateAll(func(item IndexItem) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkIndexItem(t, item, want)
+ i++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ t.Run("from", func(t *testing.T) {
+ startIndex := 2
+ i := startIndex
+ err := index.IterateFrom(items[startIndex], func(item IndexItem) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkIndexItem(t, item, want)
+ i++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+
+ t.Run("stop", func(t *testing.T) {
+ var i int
+ stopIndex := 3
+ var count int
+ err := index.IterateAll(func(item IndexItem) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkIndexItem(t, item, want)
+ count++
+ if i == stopIndex {
+ return true, nil
+ }
+ i++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ wantItemsCount := stopIndex + 1
+ if count != wantItemsCount {
+ t.Errorf("got %v items, expected %v", count, wantItemsCount)
+ }
+ })
+
+ t.Run("no overflow", func(t *testing.T) {
+ secondIndex, err := db.NewIndex("second-index", retrievalIndexFuncs)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ secondIndexItem := IndexItem{
+ Address: []byte("iterate-hash-10"),
+ Data: []byte("data-second"),
+ }
+ err = secondIndex.Put(secondIndexItem)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var i int
+ err = index.IterateAll(func(item IndexItem) (stop bool, err error) {
+ if i > len(items)-1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ want := items[i]
+ checkIndexItem(t, item, want)
+ i++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ i = 0
+ err = secondIndex.IterateAll(func(item IndexItem) (stop bool, err error) {
+ if i > 1 {
+ return true, fmt.Errorf("got unexpected index item: %#v", item)
+ }
+ checkIndexItem(t, item, secondIndexItem)
+ i++
+ return false, nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+// checkIndexItem is a test helper function that compares if two Index items are the same.
+func checkIndexItem(t *testing.T, got, want IndexItem) {
+ t.Helper()
+
+ if !bytes.Equal(got.Address, want.Address) {
+ t.Errorf("got hash %q, expected %q", string(got.Address), string(want.Address))
+ }
+ if !bytes.Equal(got.Data, want.Data) {
+ t.Errorf("got data %q, expected %q", string(got.Data), string(want.Data))
+ }
+ if got.StoreTimestamp != want.StoreTimestamp {
+ t.Errorf("got store timestamp %v, expected %v", got.StoreTimestamp, want.StoreTimestamp)
+ }
+ if got.AccessTimestamp != want.AccessTimestamp {
+ t.Errorf("got access timestamp %v, expected %v", got.AccessTimestamp, want.AccessTimestamp)
+ }
+}
diff --git a/swarm/shed/schema.go b/swarm/shed/schema.go
new file mode 100644
index 000000000..cfb7c6d64
--- /dev/null
+++ b/swarm/shed/schema.go
@@ -0,0 +1,134 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+)
+
+var (
+ // LevelDB key value for storing the schema.
+ keySchema = []byte{0}
+ // LevelDB key prefix for all field type.
+ // LevelDB keys will be constructed by appending name values to this prefix.
+ keyPrefixFields byte = 1
+ // LevelDB key prefix from which indexing keys start.
+ // Every index has its own key prefix and this value defines the first one.
+ keyPrefixIndexStart byte = 2 // Q: or maybe a higher number like 7, to have more space for potential specific perfixes
+)
+
+// schema is used to serialize known database structure information.
+type schema struct {
+ Fields map[string]fieldSpec `json:"fields"` // keys are field names
+ Indexes map[byte]indexSpec `json:"indexes"` // keys are index prefix bytes
+}
+
+// fieldSpec holds information about a particular field.
+// It does not need Name field as it is contained in the
+// schema.Field map key.
+type fieldSpec struct {
+ Type string `json:"type"`
+}
+
+// indxSpec holds information about a particular index.
+// It does not contain index type, as indexes do not have type.
+type indexSpec struct {
+ Name string `json:"name"`
+}
+
+// schemaFieldKey retrives the complete LevelDB key for
+// a particular field form the schema definition.
+func (db *DB) schemaFieldKey(name, fieldType string) (key []byte, err error) {
+ if name == "" {
+ return nil, errors.New("field name can not be blank")
+ }
+ if fieldType == "" {
+ return nil, errors.New("field type can not be blank")
+ }
+ s, err := db.getSchema()
+ if err != nil {
+ return nil, err
+ }
+ var found bool
+ for n, f := range s.Fields {
+ if n == name {
+ if f.Type != fieldType {
+ return nil, fmt.Errorf("field %q of type %q stored as %q in db", name, fieldType, f.Type)
+ }
+ break
+ }
+ }
+ if !found {
+ s.Fields[name] = fieldSpec{
+ Type: fieldType,
+ }
+ err := db.putSchema(s)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return append([]byte{keyPrefixFields}, []byte(name)...), nil
+}
+
+// schemaIndexID retrieves the complete LevelDB prefix for
+// a particular index.
+func (db *DB) schemaIndexPrefix(name string) (id byte, err error) {
+ if name == "" {
+ return 0, errors.New("index name can not be blank")
+ }
+ s, err := db.getSchema()
+ if err != nil {
+ return 0, err
+ }
+ nextID := keyPrefixIndexStart
+ for i, f := range s.Indexes {
+ if i >= nextID {
+ nextID = i + 1
+ }
+ if f.Name == name {
+ return i, nil
+ }
+ }
+ id = nextID
+ s.Indexes[id] = indexSpec{
+ Name: name,
+ }
+ return id, db.putSchema(s)
+}
+
+// getSchema retrieves the complete schema from
+// the database.
+func (db *DB) getSchema() (s schema, err error) {
+ b, err := db.Get(keySchema)
+ if err != nil {
+ return s, err
+ }
+ err = json.Unmarshal(b, &s)
+ return s, err
+}
+
+// putSchema stores the complete schema to
+// the database.
+func (db *DB) putSchema(s schema) (err error) {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ return db.Put(keySchema, b)
+}
diff --git a/swarm/shed/schema_test.go b/swarm/shed/schema_test.go
new file mode 100644
index 000000000..a0c1838c8
--- /dev/null
+++ b/swarm/shed/schema_test.go
@@ -0,0 +1,126 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shed
+
+import (
+ "bytes"
+ "testing"
+)
+
+// TestDB_schemaFieldKey validates correctness of schemaFieldKey.
+func TestDB_schemaFieldKey(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ t.Run("empty name or type", func(t *testing.T) {
+ _, err := db.schemaFieldKey("", "")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+ _, err = db.schemaFieldKey("", "type")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+
+ _, err = db.schemaFieldKey("test", "")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+ })
+
+ t.Run("same field", func(t *testing.T) {
+ key1, err := db.schemaFieldKey("test", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ key2, err := db.schemaFieldKey("test", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(key1, key2) {
+ t.Errorf("schema keys for the same field name are not the same: %q, %q", string(key1), string(key2))
+ }
+ })
+
+ t.Run("different fields", func(t *testing.T) {
+ key1, err := db.schemaFieldKey("test1", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ key2, err := db.schemaFieldKey("test2", "undefined")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if bytes.Equal(key1, key2) {
+ t.Error("schema keys for the same field name are the same, but must not be")
+ }
+ })
+
+ t.Run("same field name different types", func(t *testing.T) {
+ _, err := db.schemaFieldKey("the-field", "one-type")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = db.schemaFieldKey("the-field", "another-type")
+ if err == nil {
+ t.Errorf("error not returned, but expected")
+ }
+ })
+}
+
+// TestDB_schemaIndexPrefix validates correctness of schemaIndexPrefix.
+func TestDB_schemaIndexPrefix(t *testing.T) {
+ db, cleanupFunc := newTestDB(t)
+ defer cleanupFunc()
+
+ t.Run("same name", func(t *testing.T) {
+ id1, err := db.schemaIndexPrefix("test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id2, err := db.schemaIndexPrefix("test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if id1 != id2 {
+ t.Errorf("schema keys for the same field name are not the same: %v, %v", id1, id2)
+ }
+ })
+
+ t.Run("different names", func(t *testing.T) {
+ id1, err := db.schemaIndexPrefix("test1")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id2, err := db.schemaIndexPrefix("test2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if id1 == id2 {
+ t.Error("schema ids for the same index name are the same, but must not be")
+ }
+ })
+}
diff --git a/swarm/state/dbstore.go b/swarm/state/dbstore.go
index b0aa92e27..fc5dd8f7c 100644
--- a/swarm/state/dbstore.go
+++ b/swarm/state/dbstore.go
@@ -22,6 +22,7 @@ import (
"errors"
"github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/storage"
)
// ErrNotFound is returned when no results are returned from the database
@@ -30,6 +31,15 @@ var ErrNotFound = errors.New("ErrorNotFound")
// ErrInvalidArgument is returned when the argument type does not match the expected type
var ErrInvalidArgument = errors.New("ErrorInvalidArgument")
+// Store defines methods required to get, set, delete values for different keys
+// and close the underlying resources.
+type Store interface {
+ Get(key string, i interface{}) (err error)
+ Put(key string, i interface{}) (err error)
+ Delete(key string) (err error)
+ Close() error
+}
+
// DBStore uses LevelDB to store values.
type DBStore struct {
db *leveldb.DB
@@ -46,6 +56,17 @@ func NewDBStore(path string) (s *DBStore, err error) {
}, nil
}
+// NewInmemoryStore returns a new instance of DBStore. To be used only in tests and simulations.
+func NewInmemoryStore() *DBStore {
+ db, err := leveldb.Open(storage.NewMemStorage(), nil)
+ if err != nil {
+ panic(err)
+ }
+ return &DBStore{
+ db: db,
+ }
+}
+
// Get retrieves a persisted value for a specific key. If there is no results
// ErrNotFound is returned. The provided parameter should be either a byte slice or
// a struct that implements the encoding.BinaryUnmarshaler interface
diff --git a/swarm/state/inmemorystore.go b/swarm/state/inmemorystore.go
deleted file mode 100644
index 3ba48592b..000000000
--- a/swarm/state/inmemorystore.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package state
-
-import (
- "encoding"
- "encoding/json"
- "sync"
-)
-
-// InmemoryStore is the reference implementation of Store interface that is supposed
-// to be used in tests.
-type InmemoryStore struct {
- db map[string][]byte
- mu sync.RWMutex
-}
-
-// NewInmemoryStore returns a new instance of InmemoryStore.
-func NewInmemoryStore() *InmemoryStore {
- return &InmemoryStore{
- db: make(map[string][]byte),
- }
-}
-
-// Get retrieves a value stored for a specific key. If there is no value found,
-// ErrNotFound is returned.
-func (s *InmemoryStore) Get(key string, i interface{}) (err error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
-
- bytes, ok := s.db[key]
- if !ok {
- return ErrNotFound
- }
-
- unmarshaler, ok := i.(encoding.BinaryUnmarshaler)
- if !ok {
- return json.Unmarshal(bytes, i)
- }
-
- return unmarshaler.UnmarshalBinary(bytes)
-}
-
-// Put stores a value for a specific key.
-func (s *InmemoryStore) Put(key string, i interface{}) (err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- var bytes []byte
-
- marshaler, ok := i.(encoding.BinaryMarshaler)
- if !ok {
- if bytes, err = json.Marshal(i); err != nil {
- return err
- }
- } else {
- if bytes, err = marshaler.MarshalBinary(); err != nil {
- return err
- }
- }
-
- s.db[key] = bytes
- return nil
-}
-
-// Delete removes value stored under a specific key.
-func (s *InmemoryStore) Delete(key string) (err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if _, ok := s.db[key]; !ok {
- return ErrNotFound
- }
- delete(s.db, key)
- return nil
-}
-
-// Close does not do anything.
-func (s *InmemoryStore) Close() error {
- return nil
-}
diff --git a/swarm/state/store.go b/swarm/state/store.go
deleted file mode 100644
index fb7fe258f..000000000
--- a/swarm/state/store.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package state
-
-// Store defines methods required to get, set, delete values for different keys
-// and close the underlying resources.
-type Store interface {
- Get(key string, i interface{}) (err error)
- Put(key string, i interface{}) (err error)
- Delete(key string) (err error)
- Close() error
-}
diff --git a/swarm/storage/mock/db/db.go b/swarm/storage/mock/db/db.go
index 43bfa24f0..73ae199e8 100644
--- a/swarm/storage/mock/db/db.go
+++ b/swarm/storage/mock/db/db.go
@@ -86,6 +86,13 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
return s.db.Write(batch, nil)
}
+// Delete removes the chunk reference to node with address addr.
+func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
+ batch := new(leveldb.Batch)
+ batch.Delete(nodeDBKey(addr, key))
+ return s.db.Write(batch, nil)
+}
+
// HasKey returns whether a node with addr contains the key.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
has, err := s.db.Has(nodeDBKey(addr, key), nil)
diff --git a/swarm/storage/mock/mem/mem.go b/swarm/storage/mock/mem/mem.go
index 8878309d0..3a0a2beb8 100644
--- a/swarm/storage/mock/mem/mem.go
+++ b/swarm/storage/mock/mem/mem.go
@@ -83,6 +83,22 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
return nil
}
+// Delete removes the chunk data for node with address addr.
+func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var count int
+ if _, ok := s.nodes[string(key)]; ok {
+ delete(s.nodes[string(key)], addr)
+ count = len(s.nodes[string(key)])
+ }
+ if count == 0 {
+ delete(s.data, string(key))
+ }
+ return nil
+}
+
// HasKey returns whether a node with addr contains the key.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
s.mu.Lock()
diff --git a/swarm/storage/mock/mock.go b/swarm/storage/mock/mock.go
index 81340f927..1fb71b70a 100644
--- a/swarm/storage/mock/mock.go
+++ b/swarm/storage/mock/mock.go
@@ -70,6 +70,12 @@ func (n *NodeStore) Put(key []byte, data []byte) error {
return n.store.Put(n.addr, key, data)
}
+// Delete removes chunk data for a key for a node that has the address
+// provided on NodeStore initialization.
+func (n *NodeStore) Delete(key []byte) error {
+ return n.store.Delete(n.addr, key)
+}
+
// GlobalStorer defines methods for mock db store
// that stores chunk data for all swarm nodes.
// It is used in tests to construct mock NodeStores
@@ -77,6 +83,7 @@ func (n *NodeStore) Put(key []byte, data []byte) error {
type GlobalStorer interface {
Get(addr common.Address, key []byte) (data []byte, err error)
Put(addr common.Address, key []byte, data []byte) error
+ Delete(addr common.Address, key []byte) error
HasKey(addr common.Address, key []byte) bool
// NewNodeStore creates an instance of NodeStore
// to be used by a single swarm node with
diff --git a/swarm/storage/mock/rpc/rpc.go b/swarm/storage/mock/rpc/rpc.go
index 6e735f698..8cd6c83a7 100644
--- a/swarm/storage/mock/rpc/rpc.go
+++ b/swarm/storage/mock/rpc/rpc.go
@@ -73,6 +73,12 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error {
return err
}
+// Delete calls a Delete method to RPC server.
+func (s *GlobalStore) Delete(addr common.Address, key []byte) error {
+ err := s.client.Call(nil, "mockStore_delete", addr, key)
+ return err
+}
+
// HasKey calls a HasKey method to RPC server.
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
var has bool
diff --git a/swarm/storage/mock/test/test.go b/swarm/storage/mock/test/test.go
index 02da3af55..10180985f 100644
--- a/swarm/storage/mock/test/test.go
+++ b/swarm/storage/mock/test/test.go
@@ -72,6 +72,31 @@ func MockStore(t *testing.T, globalStore mock.GlobalStorer, n int) {
}
}
}
+ t.Run("delete", func(t *testing.T) {
+ chunkAddr := storage.Address([]byte("1234567890abcd"))
+ for _, addr := range addrs {
+ err := globalStore.Put(addr, chunkAddr, []byte("data"))
+ if err != nil {
+ t.Fatalf("put data to store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
+ firstNodeAddr := addrs[0]
+ if err := globalStore.Delete(firstNodeAddr, chunkAddr); err != nil {
+ t.Fatalf("delete from store %s key %s: %v", firstNodeAddr.Hex(), chunkAddr.Hex(), err)
+ }
+ for i, addr := range addrs {
+ _, err := globalStore.Get(addr, chunkAddr)
+ if i == 0 {
+ if err != mock.ErrNotFound {
+ t.Errorf("get data from store %s key %s: expected mock.ErrNotFound error, got %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("get data from store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
+ }
+ })
})
t.Run("NodeStore", func(t *testing.T) {
@@ -114,6 +139,34 @@ func MockStore(t *testing.T, globalStore mock.GlobalStorer, n int) {
}
}
}
+ t.Run("delete", func(t *testing.T) {
+ chunkAddr := storage.Address([]byte("1234567890abcd"))
+ var chosenStore *mock.NodeStore
+ for addr, store := range nodes {
+ if chosenStore == nil {
+ chosenStore = store
+ }
+ err := store.Put(chunkAddr, []byte("data"))
+ if err != nil {
+ t.Fatalf("put data to store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
+ if err := chosenStore.Delete(chunkAddr); err != nil {
+ t.Fatalf("delete key %s: %v", chunkAddr.Hex(), err)
+ }
+ for addr, store := range nodes {
+ _, err := store.Get(chunkAddr)
+ if store == chosenStore {
+ if err != mock.ErrNotFound {
+ t.Errorf("get data from store %s key %s: expected mock.ErrNotFound error, got %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("get data from store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+ }
+ }
+ }
+ })
})
}
diff --git a/swarm/swap/swap.go b/swarm/swap/swap.go
index 137eb141d..5d636dc20 100644
--- a/swarm/swap/swap.go
+++ b/swarm/swap/swap.go
@@ -91,3 +91,8 @@ func (s *Swap) loadState(peer *protocols.Peer) (err error) {
}
return
}
+
+//Clean up Swap
+func (swap *Swap) Close() {
+ swap.stateStore.Close()
+}
diff --git a/swarm/swarm.go b/swarm/swarm.go
index dc3756d3a..a4ff94051 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -66,20 +66,22 @@ var (
// the swarm stack
type Swarm struct {
- config *api.Config // swarm configuration
- api *api.API // high level api layer (fs/manifest)
- dns api.Resolver // DNS registrar
- fileStore *storage.FileStore // distributed preimage archive, the local API to the storage with document level storage/retrieval support
- streamer *stream.Registry
- bzz *network.Bzz // the logistic manager
- backend chequebook.Backend // simple blockchain Backend
- privateKey *ecdsa.PrivateKey
- corsString string
- swapEnabled bool
- netStore *storage.NetStore
- sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit
- ps *pss.Pss
- swap *swap.Swap
+ config *api.Config // swarm configuration
+ api *api.API // high level api layer (fs/manifest)
+ dns api.Resolver // DNS registrar
+ fileStore *storage.FileStore // distributed preimage archive, the local API to the storage with document level storage/retrieval support
+ streamer *stream.Registry
+ bzz *network.Bzz // the logistic manager
+ backend chequebook.Backend // simple blockchain Backend
+ privateKey *ecdsa.PrivateKey
+ corsString string
+ swapEnabled bool
+ netStore *storage.NetStore
+ sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit
+ ps *pss.Pss
+ swap *swap.Swap
+ stateStore *state.DBStore
+ accountingMetrics *protocols.AccountingMetrics
tracerClose io.Closer
}
@@ -134,7 +136,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
LightNode: config.LightNodeEnabled,
}
- stateStore, err := state.NewDBStore(filepath.Join(config.Path, "state-store.db"))
+ self.stateStore, err = state.NewDBStore(filepath.Join(config.Path, "state-store.db"))
if err != nil {
return
}
@@ -179,6 +181,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
return nil, err
}
self.swap = swap.New(balancesStore)
+ self.accountingMetrics = protocols.SetupAccountingMetrics(10*time.Second, filepath.Join(config.Path, "metrics.db"))
}
var nodeID enode.ID
@@ -203,7 +206,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
SyncUpdateDelay: config.SyncUpdateDelay,
MaxPeerServers: config.MaxStreamPeerServers,
}
- self.streamer = stream.NewRegistry(nodeID, delivery, self.netStore, stateStore, registryOptions, self.swap)
+ self.streamer = stream.NewRegistry(nodeID, delivery, self.netStore, self.stateStore, registryOptions, self.swap)
// Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage
self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams)
@@ -226,7 +229,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
log.Debug("Setup local storage")
- self.bzz = network.NewBzz(bzzconfig, to, stateStore, self.streamer.GetSpec(), self.streamer.Run)
+ self.bzz = network.NewBzz(bzzconfig, to, self.stateStore, self.streamer.GetSpec(), self.streamer.Run)
// Pss = postal service over swarm (devp2p over bzz)
self.ps, err = pss.NewPss(to, config.Pss)
@@ -446,14 +449,24 @@ func (self *Swarm) Stop() error {
ch.Stop()
ch.Save()
}
-
+ if self.swap != nil {
+ self.swap.Close()
+ }
+ if self.accountingMetrics != nil {
+ self.accountingMetrics.Close()
+ }
if self.netStore != nil {
self.netStore.Close()
}
self.sfs.Stop()
stopCounter.Inc(1)
self.streamer.Stop()
- return self.bzz.Stop()
+
+ err := self.bzz.Stop()
+ if self.stateStore != nil {
+ self.stateStore.Close()
+ }
+ return err
}
// implements the node.Service interface
diff --git a/swarm/version/version.go b/swarm/version/version.go
index 17ef34f5f..57ac05a86 100644
--- a/swarm/version/version.go
+++ b/swarm/version/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 0 // Major version component of the current release
VersionMinor = 3 // Minor version component of the current release
- VersionPatch = 7 // Patch version component of the current release
+ VersionPatch = 8 // Patch version component of the current release
VersionMeta = "unstable" // Version metadata to append to the version string
)
diff --git a/tests/init.go b/tests/init.go
index f0a4943c1..db0457b6d 100644
--- a/tests/init.go
+++ b/tests/init.go
@@ -86,6 +86,15 @@ var Forks = map[string]*params.ChainConfig{
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(5),
},
+ "ByzantiumToConstantinopleAt5": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(5),
+ },
}
// UnsupportedForkError is returned when a test requests a fork that isn't implemented.
diff --git a/tests/state_test.go b/tests/state_test.go
index ad77e4f33..964405382 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -18,10 +18,12 @@ package tests
import (
"bytes"
+ "flag"
"fmt"
"reflect"
"testing"
+ "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/core/vm"
)
@@ -65,8 +67,17 @@ func TestState(t *testing.T) {
// Transactions with gasLimit above this value will not get a VM trace on failure.
const traceErrorLimit = 400000
+// The VM config for state tests that accepts --vm.* command line arguments.
+var testVMConfig = func() vm.Config {
+ vmconfig := vm.Config{}
+ flag.StringVar(&vmconfig.EVMInterpreter, utils.EVMInterpreterFlag.Name, utils.EVMInterpreterFlag.Value, utils.EVMInterpreterFlag.Usage)
+ flag.StringVar(&vmconfig.EWASMInterpreter, utils.EWASMInterpreterFlag.Name, utils.EWASMInterpreterFlag.Value, utils.EWASMInterpreterFlag.Usage)
+ flag.Parse()
+ return vmconfig
+}()
+
func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) {
- err := test(vm.Config{})
+ err := test(testVMConfig)
if err == nil {
return
}
diff --git a/tests/testdata b/tests/testdata
-Subproject 95a309203890e6244c6d4353ca411671973c13b
+Subproject c02a2a17c0288a255572b37dc7ec1fcb838b9db
diff --git a/trie/database.go b/trie/database.go
index 71190b3f3..739a98add 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -141,7 +141,7 @@ type cachedNode struct {
node node // Cached collapsed trie node, or raw rlp data
size uint16 // Byte size of the useful cached data
- parents uint16 // Number of live nodes referencing this one
+ parents uint32 // Number of live nodes referencing this one
children map[common.Hash]uint16 // External children referenced by this node
flushPrev common.Hash // Previous node in the flush-list
diff --git a/vendor/github.com/karalabe/hid/appveyor.yml b/vendor/github.com/karalabe/hid/appveyor.yml
index f43958747..84b3c95ff 100644
--- a/vendor/github.com/karalabe/hid/appveyor.yml
+++ b/vendor/github.com/karalabe/hid/appveyor.yml
@@ -22,8 +22,8 @@ environment:
install:
- rmdir C:\go /s /q
- - appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.windows-%GOARCH%.zip
- - 7z x go1.8.windows-%GOARCH%.zip -y -oC:\ > NUL
+ - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.1.windows-%GOARCH%.zip
+ - 7z x go1.10.1.windows-%GOARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version
diff --git a/vendor/github.com/karalabe/hid/hid_disabled.go b/vendor/github.com/karalabe/hid/hid_disabled.go
index 1f4026379..0f266ba58 100644
--- a/vendor/github.com/karalabe/hid/hid_disabled.go
+++ b/vendor/github.com/karalabe/hid/hid_disabled.go
@@ -36,7 +36,7 @@ func (info DeviceInfo) Open() (*Device, error) {
// Close releases the HID USB device handle. On platforms that this file implements
// the method is just a noop.
-func (dev *Device) Close() {}
+func (dev *Device) Close() error { return nil }
// Write sends an output report to a HID device. On platforms that this file
// implements the method just returns an error.
diff --git a/vendor/github.com/karalabe/hid/hid_enabled.go b/vendor/github.com/karalabe/hid/hid_enabled.go
index 419273be6..e95e5792d 100644
--- a/vendor/github.com/karalabe/hid/hid_enabled.go
+++ b/vendor/github.com/karalabe/hid/hid_enabled.go
@@ -41,6 +41,7 @@ package hid
#endif
*/
import "C"
+
import (
"errors"
"runtime"
@@ -57,11 +58,6 @@ import (
// > "subsequent calls will cause the hid manager to release previously enumerated devices"
var enumerateLock sync.Mutex
-func init() {
- // Initialize the HIDAPI library
- C.hid_init()
-}
-
// Supported returns whether this platform is supported by the HID library or not.
// The goal of this method is to allow programatically handling platforms that do
// not support USB HID and not having to fall back to build constraints.
@@ -113,6 +109,9 @@ func Enumerate(vendorID uint16, productID uint16) []DeviceInfo {
// Open connects to an HID device by its path name.
func (info DeviceInfo) Open() (*Device, error) {
+ enumerateLock.Lock()
+ defer enumerateLock.Unlock()
+
path := C.CString(info.Path)
defer C.free(unsafe.Pointer(path))
@@ -135,7 +134,7 @@ type Device struct {
}
// Close releases the HID USB device handle.
-func (dev *Device) Close() {
+func (dev *Device) Close() error {
dev.lock.Lock()
defer dev.lock.Unlock()
@@ -143,6 +142,7 @@ func (dev *Device) Close() {
C.hid_close(dev.device)
dev.device = nil
}
+ return nil
}
// Write sends an output report to a HID device.
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
index c5940b232..c36ad3235 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -331,7 +331,6 @@ func (r *Cache) delete(n *Node) bool {
return deleted
}
}
- return false
}
// Nodes returns number of 'cache node' in the map.
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
index 14dddf88d..abf9fb65c 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
@@ -29,7 +29,7 @@ func (bytesComparer) Separator(dst, a, b []byte) []byte {
// Do not shorten if one string is a prefix of the other
} else if c := a[i]; c < 0xff && c+1 < b[i] {
dst = append(dst, a[:i+1]...)
- dst[i]++
+ dst[len(dst)-1]++
return dst
}
return nil
@@ -39,7 +39,7 @@ func (bytesComparer) Successor(dst, b []byte) []byte {
for i, c := range b {
if c != 0xff {
dst = append(dst, b[:i+1]...)
- dst[i]++
+ dst[len(dst)-1]++
return dst
}
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
index 14a28f16f..2c522db23 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
@@ -36,7 +36,7 @@ type Comparer interface {
// by any users of this package.
Name() string
- // Bellow are advanced functions used used to reduce the space requirements
+ // Bellow are advanced functions used to reduce the space requirements
// for internal data structures such as index blocks.
// Separator appends a sequence of bytes x to dst such that a <= x && x < b,
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
index e7ac06541..b27c38d37 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/db.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
@@ -182,7 +182,7 @@ func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
err = s.recover()
if err != nil {
- if !os.IsNotExist(err) || s.o.GetErrorIfMissing() {
+ if !os.IsNotExist(err) || s.o.GetErrorIfMissing() || s.o.GetReadOnly() {
return
}
err = s.create()
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
index 28e50906a..0c1b9a53b 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -663,7 +663,7 @@ type cCmd interface {
}
type cAuto struct {
- // Note for table compaction, an empty ackC represents it's a compaction waiting command.
+ // Note for table compaction, an non-empty ackC represents it's a compaction waiting command.
ackC chan<- error
}
@@ -777,8 +777,8 @@ func (db *DB) mCompaction() {
func (db *DB) tCompaction() {
var (
- x cCmd
- ackQ, waitQ []cCmd
+ x cCmd
+ waitQ []cCmd
)
defer func() {
@@ -787,10 +787,6 @@ func (db *DB) tCompaction() {
panic(x)
}
}
- for i := range ackQ {
- ackQ[i].ack(ErrClosed)
- ackQ[i] = nil
- }
for i := range waitQ {
waitQ[i].ack(ErrClosed)
waitQ[i] = nil
@@ -821,11 +817,6 @@ func (db *DB) tCompaction() {
waitQ = waitQ[:0]
}
} else {
- for i := range ackQ {
- ackQ[i].ack(nil)
- ackQ[i] = nil
- }
- ackQ = ackQ[:0]
for i := range waitQ {
waitQ[i].ack(nil)
waitQ[i] = nil
@@ -844,9 +835,12 @@ func (db *DB) tCompaction() {
switch cmd := x.(type) {
case cAuto:
if cmd.ackC != nil {
- waitQ = append(waitQ, x)
- } else {
- ackQ = append(ackQ, x)
+ // Check the write pause state before caching it.
+ if db.resumeWrite() {
+ x.ack(nil)
+ } else {
+ waitQ = append(waitQ, x)
+ }
}
case cRange:
x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
index 7ecd960d2..3f0654894 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -84,7 +84,7 @@ func (db *DB) checkAndCleanFiles() error {
var mfds []storage.FileDesc
for num, present := range tmap {
if !present {
- mfds = append(mfds, storage.FileDesc{storage.TypeTable, num})
+ mfds = append(mfds, storage.FileDesc{Type: storage.TypeTable, Num: num})
db.logf("db@janitor table missing @%d", num)
}
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
index b16e3a704..96fb0f685 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -40,11 +40,11 @@ type IteratorSeeker interface {
Seek(key []byte) bool
// Next moves the iterator to the next key/value pair.
- // It returns whether the iterator is exhausted.
+ // It returns false if the iterator is exhausted.
Next() bool
// Prev moves the iterator to the previous key/value pair.
- // It returns whether the iterator is exhausted.
+ // It returns false if the iterator is exhausted.
Prev() bool
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
index 44e7d9adc..528b16423 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
@@ -158,6 +158,12 @@ type Options struct {
// The default value is 8MiB.
BlockCacheCapacity int
+ // BlockCacheEvictRemoved allows enable forced-eviction on cached block belonging
+ // to removed 'sorted table'.
+ //
+ // The default if false.
+ BlockCacheEvictRemoved bool
+
// BlockRestartInterval is the number of keys between restart points for
// delta encoding of keys.
//
@@ -384,6 +390,13 @@ func (o *Options) GetBlockCacheCapacity() int {
return o.BlockCacheCapacity
}
+func (o *Options) GetBlockCacheEvictRemoved() bool {
+ if o == nil {
+ return false
+ }
+ return o.BlockCacheEvictRemoved
+}
+
func (o *Options) GetBlockRestartInterval() int {
if o == nil || o.BlockRestartInterval <= 0 {
return DefaultBlockRestartInterval
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
index 92328933c..40cb2cf95 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
@@ -36,7 +36,7 @@ func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf
func (s *session) newTemp() storage.FileDesc {
num := atomic.AddInt64(&s.stTempFileNum, 1) - 1
- return storage.FileDesc{storage.TypeTemp, num}
+ return storage.FileDesc{Type: storage.TypeTemp, Num: num}
}
func (s *session) addFileRef(fd storage.FileDesc, ref int) int {
@@ -190,7 +190,7 @@ func (s *session) recordCommited(rec *sessionRecord) {
// Create a new manifest file; need external synchronization.
func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
- fd := storage.FileDesc{storage.TypeManifest, s.allocFileNum()}
+ fd := storage.FileDesc{Type: storage.TypeManifest, Num: s.allocFileNum()}
writer, err := s.stor.Create(fd)
if err != nil {
return
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
index 81d18a531..1fac60d05 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/table.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
@@ -78,7 +78,7 @@ func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFil
}
func tableFileFromRecord(r atRecord) *tFile {
- return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax)
+ return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax)
}
// tFiles hold multiple tFile.
@@ -290,16 +290,17 @@ func (x *tFilesSortByNum) Less(i, j int) bool {
// Table operations.
type tOps struct {
- s *session
- noSync bool
- cache *cache.Cache
- bcache *cache.Cache
- bpool *util.BufferPool
+ s *session
+ noSync bool
+ evictRemoved bool
+ cache *cache.Cache
+ bcache *cache.Cache
+ bpool *util.BufferPool
}
// Creates an empty table and returns table writer.
func (t *tOps) create() (*tWriter, error) {
- fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()}
+ fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()}
fw, err := t.s.stor.Create(fd)
if err != nil {
return nil, err
@@ -422,7 +423,7 @@ func (t *tOps) remove(f *tFile) {
} else {
t.s.logf("table@remove removed @%d", f.fd.Num)
}
- if t.bcache != nil {
+ if t.evictRemoved && t.bcache != nil {
t.bcache.EvictNS(uint64(f.fd.Num))
}
})
@@ -451,7 +452,7 @@ func newTableOps(s *session) *tOps {
if !s.o.GetDisableBlockCache() {
var bcacher cache.Cacher
if s.o.GetBlockCacheCapacity() > 0 {
- bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
+ bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity())
}
bcache = cache.NewCache(bcacher)
}
@@ -459,11 +460,12 @@ func newTableOps(s *session) *tOps {
bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
}
return &tOps{
- s: s,
- noSync: s.o.GetNoSync(),
- cache: cache.NewCache(cacher),
- bcache: bcache,
- bpool: bpool,
+ s: s,
+ noSync: s.o.GetNoSync(),
+ evictRemoved: s.o.GetBlockCacheEvictRemoved(),
+ cache: cache.NewCache(cacher),
+ bcache: bcache,
+ bpool: bpool,
}
}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 1bfe09da7..883d02097 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -267,10 +267,10 @@
"revisionTime": "2017-04-30T22:20:11Z"
},
{
- "checksumSHA1": "UpjhOUZ1+0zNt+iIvdtECSHXmTs=",
+ "checksumSHA1": "6XsjAARQFvlW6dS15al0ibTFPOQ=",
"path": "github.com/karalabe/hid",
- "revision": "f00545f9f3748e591590be3732d913c77525b10f",
- "revisionTime": "2017-08-21T10:38:37Z",
+ "revision": "d815e0c1a2e2082a287a2806bc90bc8fc7b276a9",
+ "revisionTime": "2018-11-28T19:21:57Z",
"tree": true
},
{
@@ -455,76 +455,76 @@
"revisionTime": "2017-07-05T02:17:15Z"
},
{
- "checksumSHA1": "k6zbR5hiI10hkWtiK91rIY5s5/E=",
+ "checksumSHA1": "LV0VMVON7xY1ttV+s2ph83ntmDQ=",
"path": "github.com/syndtr/goleveldb/leveldb",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
- "checksumSHA1": "EKIow7XkgNdWvR/982ffIZxKG8Y=",
+ "checksumSHA1": "mPNraL2edpk/2FYq26rSXfMHbJg=",
"path": "github.com/syndtr/goleveldb/leveldb/cache",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
- "checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=",
+ "checksumSHA1": "UA+PKDKWlDnE2OZblh23W6wZwbY=",
"path": "github.com/syndtr/goleveldb/leveldb/comparer",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "1DRAxdlWzS4U0xKN/yQ/fdNN7f0=",
"path": "github.com/syndtr/goleveldb/leveldb/errors",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=",
"path": "github.com/syndtr/goleveldb/leveldb/filter",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
- "checksumSHA1": "weSsccMav4BCerDpSLzh3mMxAYo=",
+ "checksumSHA1": "hPyFsMiqZ1OB7MX+6wIAA6nsdtc=",
"path": "github.com/syndtr/goleveldb/leveldb/iterator",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "gJY7bRpELtO0PJpZXgPQ2BYFJ88=",
"path": "github.com/syndtr/goleveldb/leveldb/journal",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "MtYY1b2234y/MlS+djL8tXVAcQs=",
"path": "github.com/syndtr/goleveldb/leveldb/memdb",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
- "checksumSHA1": "UmQeotV+m8/FduKEfLOhjdp18rs=",
+ "checksumSHA1": "o2TorI3z+vc+EBMJ8XeFoUmXBtU=",
"path": "github.com/syndtr/goleveldb/leveldb/opt",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "ZnyuciM+R19NG8L5YS3TIJdo1e8=",
"path": "github.com/syndtr/goleveldb/leveldb/storage",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "gWFPMz8OQeul0t54RM66yMTX49g=",
"path": "github.com/syndtr/goleveldb/leveldb/table",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "V/Dh7NV0/fy/5jX1KaAjmGcNbzI=",
"path": "github.com/syndtr/goleveldb/leveldb/util",
- "revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
- "revisionTime": "2018-07-08T03:05:51Z"
+ "revision": "b001fa50d6b27f3f0bb175a87d0cb55426d0a0ae",
+ "revisionTime": "2018-11-28T10:09:59Z"
},
{
"checksumSHA1": "nD6S4KB0S+YHxVMDDE+w3PyXaMk=",
diff --git a/whisper/mailserver/mailserver.go b/whisper/mailserver/mailserver.go
index af9418d9f..d7af4baae 100644
--- a/whisper/mailserver/mailserver.go
+++ b/whisper/mailserver/mailserver.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+// Package mailserver provides a naive, example mailserver implementation
package mailserver
import (
@@ -26,9 +27,11 @@ import (
"github.com/ethereum/go-ethereum/rlp"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
"github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
+// WMailServer represents the state data of the mailserver.
type WMailServer struct {
db *leveldb.DB
w *whisper.Whisper
@@ -42,6 +45,8 @@ type DBKey struct {
raw []byte
}
+// NewDbKey is a helper function that creates a levelDB
+// key from a hash and an integer.
func NewDbKey(t uint32, h common.Hash) *DBKey {
const sz = common.HashLength + 4
var k DBKey
@@ -53,6 +58,7 @@ func NewDbKey(t uint32, h common.Hash) *DBKey {
return &k
}
+// Init initializes the mail server.
func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, pow float64) error {
var err error
if len(path) == 0 {
@@ -63,7 +69,7 @@ func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, p
return fmt.Errorf("password is not specified")
}
- s.db, err = leveldb.OpenFile(path, nil)
+ s.db, err = leveldb.OpenFile(path, &opt.Options{OpenFilesCacheCapacity: 32})
if err != nil {
return fmt.Errorf("open DB file: %s", err)
}
@@ -82,12 +88,14 @@ func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, p
return nil
}
+// Close cleans up before shutdown.
func (s *WMailServer) Close() {
if s.db != nil {
s.db.Close()
}
}
+// Archive stores the
func (s *WMailServer) Archive(env *whisper.Envelope) {
key := NewDbKey(env.Expiry-env.TTL, env.Hash())
rawEnvelope, err := rlp.EncodeToBytes(env)
@@ -101,6 +109,8 @@ func (s *WMailServer) Archive(env *whisper.Envelope) {
}
}
+// DeliverMail responds with saved messages upon request by the
+// messages' owner.
func (s *WMailServer) DeliverMail(peer *whisper.Peer, request *whisper.Envelope) {
if peer == nil {
log.Error("Whisper peer is nil")
diff --git a/whisper/whisperv6/api_test.go b/whisper/whisperv6/api_test.go
index cdbc7fab5..6d7157f57 100644
--- a/whisper/whisperv6/api_test.go
+++ b/whisper/whisperv6/api_test.go
@@ -18,27 +18,12 @@ package whisperv6
import (
"bytes"
- "crypto/ecdsa"
"testing"
"time"
-
- mapset "github.com/deckarep/golang-set"
- "github.com/ethereum/go-ethereum/common"
)
func TestMultipleTopicCopyInNewMessageFilter(t *testing.T) {
- w := &Whisper{
- privateKeys: make(map[string]*ecdsa.PrivateKey),
- symKeys: make(map[string][]byte),
- envelopes: make(map[common.Hash]*Envelope),
- expirations: make(map[uint32]mapset.Set),
- peers: make(map[*Peer]struct{}),
- messageQueue: make(chan *Envelope, messageQueueLimit),
- p2pMsgQueue: make(chan *Envelope, messageQueueLimit),
- quit: make(chan struct{}),
- syncAllowance: DefaultSyncAllowance,
- }
- w.filters = NewFilters(w)
+ w := New(nil)
keyID, err := w.GenerateSymKey()
if err != nil {