-rw-r--r-- .gitignore | 2
-rw-r--r-- .gitmodules | 2
-rw-r--r-- Godeps/Godeps.json | 41
-rw-r--r-- Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go | 124
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitignore | 5
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitmodules | 3
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/README.md | 12
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/all.cpp | 16
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.cpp | 26
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.h | 14
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent.go | 27
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/.gitignore | 12
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/MANIFEST.in | 5
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/Makefile | 55
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/README.md | 3
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.cpp | 112
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.h | 41
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/cmdline.cpp | 132
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.cpp | 554
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.h | 43
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/example.cpp | 11
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/collatz.se | 11
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/counterparty.se | 274
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/heap.se | 69
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/crowdfund.se | 53
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/futarchy.se | 136
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/heap.se | 55
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/market.se | 117
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/subcurrency.se | 35
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/test.py | 39
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/datafeed.se | 12
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover.se | 40
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover_compiled.evm | 1
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_add.se | 32
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_double.se | 16
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_mul.se | 37
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/modexp.se | 11
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/substitutes.py | 78
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/test.py | 129
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/channel.se | 45
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/map.se | 19
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/multiforward.se | 14
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/shadowchain.se | 166
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/fixedpoint.se | 31
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/long_integer_macros.se | 116
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mul2.se | 2
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mutuala.se | 187
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/namecoin.se | 7
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/peano.se | 43
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/returnten.se | 4
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort.se | 33
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort_pairs.se | 46
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingcoin.se | 94
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingdollar.se | 171
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellinghelper.se | 1
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/short_namecoin.se | 3
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/subcurrency.se | 11
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.cpp | 35
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.h | 35
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.cpp | 203
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.h | 39
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.cpp | 70
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.h | 13
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.cpp | 154
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.h | 45
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.cpp | 98
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.h | 19
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.cpp | 430
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.h | 13
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.cpp | 299
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.h | 58
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.cpp | 173
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.py | 1
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.cpp | 804
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.h | 16
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.cpp | 211
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.h | 51
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/serpent.py | 201
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/setup.py | 46
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.cpp | 115
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.h | 16
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.cpp | 305
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.h | 127
-rw-r--r-- Godeps/_workspace/src/github.com/ethereum/serpent-go/tests/main.go | 21
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go | 228
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go | 26
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go | 58
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go | 15
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go | 30
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go | 713
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go | 564
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go | 246
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go | 195
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go | 354
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go | 40
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go | 76
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go | 574
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go | 767
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go | 108
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go | 148
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go | 207
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go | 959
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go | 51
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go | 180
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go | 10
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go (renamed from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go) | 24
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go | 76
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go | 24
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go | 30
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go | 73
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go | 2
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go | 27
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go | 8
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go | 29
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go | 115
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go | 490
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go | 133
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go | 94
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go | 13
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go | 32
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go | 10
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go | 2
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go | 409
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go | 81
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go | 306
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go | 225
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go | 18
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go | 86
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go | 2
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go | 68
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go | 2
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go | 34
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go | 158
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go | 317
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go | 30
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go | 701
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go | 6
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go | 8
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go | 15
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go | 2
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go | 8
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go | 21
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go | 141
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go | 1
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go | 14
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go | 5
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go | 4
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go | 238
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go | 21
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go | 33
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go | 16
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go | 32
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go | 365
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go | 292
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go (renamed from Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go) | 84
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go (renamed from Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go) | 30
-rw-r--r-- Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go (renamed from Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go) | 201
-rw-r--r-- README.md | 3
-rw-r--r-- cmd/geth/admin.go | 20
-rw-r--r-- cmd/geth/blocktest.go | 2
-rw-r--r-- cmd/geth/js.go | 18
-rw-r--r-- cmd/geth/js_test.go | 16
-rw-r--r-- cmd/geth/main.go | 35
-rw-r--r-- cmd/mist/assets/examples/coin.html | 3
m--------- cmd/mist/assets/ext/ethereum.js | 0
-rw-r--r-- cmd/utils/flags.go | 38
-rw-r--r-- common/natspec/natspec_e2e_test.go | 1
-rw-r--r-- core/chain_makers.go | 6
-rw-r--r-- core/chain_manager.go | 37
-rw-r--r-- core/chain_manager_test.go | 4
-rw-r--r-- core/filter.go | 19
-rw-r--r-- core/transaction_pool.go | 28
-rw-r--r-- core/transaction_pool_test.go | 7
-rw-r--r-- crypto/crypto.go | 13
-rw-r--r-- crypto/key_store_plain.go | 5
-rw-r--r-- eth/backend.go | 13
-rw-r--r-- eth/downloader/downloader.go | 132
-rw-r--r-- eth/downloader/downloader_test.go | 42
-rw-r--r-- eth/downloader/peer.go | 15
-rw-r--r-- eth/downloader/queue.go | 3
-rw-r--r-- eth/downloader/synchronous.go | 79
-rw-r--r-- eth/handler.go | 81
-rw-r--r-- eth/peer.go | 22
-rw-r--r-- jsre/ethereum_js.go | 4
-rw-r--r-- miner/worker.go | 2
-rw-r--r-- rpc/api.go | 44
-rw-r--r-- rpc/args.go | 69
-rw-r--r-- rpc/args_test.go | 2
-rw-r--r-- rpc/http.go | 5
-rw-r--r-- tests/block_test.go | 2
-rw-r--r-- tests/block_test_util.go | 27
-rw-r--r-- tests/files/BlockTests/bcGasPricerTest.json | 1115
-rw-r--r-- tests/files/BlockTests/bcRPC_API_Test.json | 695
-rw-r--r-- tests/files/StateTests/RandomTests/st201504131821CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504150854CPPJIT.json | 72
-rw-r--r-- tests/files/StateTests/RandomTests/st201504151057CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504202124CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504210245CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504210957CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504211739CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504212038CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504230729CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504231639CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504231710CPPJIT.json | 72
-rw-r--r-- tests/files/StateTests/RandomTests/st201504231742CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504232350CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504240140CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504240220CPPJIT.json | 72
-rw-r--r-- tests/files/StateTests/RandomTests/st201504240351CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504240817CPPJIT.json | 71
-rw-r--r-- tests/files/StateTests/RandomTests/st201504241118CPPJIT.json | 71
-rw-r--r-- ui/qt/qwhisper/whisper.go | 2
-rw-r--r-- whisper/envelope.go | 3
-rw-r--r-- whisper/envelope_test.go | 142
-rw-r--r-- whisper/filter.go | 113
-rw-r--r-- whisper/filter_test.go | 199
-rw-r--r-- whisper/message.go | 23
-rw-r--r-- whisper/message_test.go | 4
-rw-r--r-- whisper/peer.go | 11
-rw-r--r-- whisper/topic.go | 79
-rw-r--r-- whisper/topic_test.go | 154
-rw-r--r-- whisper/whisper.go | 57
-rw-r--r-- whisper/whisper_test.go | 2
-rw-r--r-- xeth/whisper.go | 149
-rw-r--r-- xeth/whisper_filter.go | 84
-rw-r--r-- xeth/whisper_message.go | 37
-rw-r--r-- xeth/xeth.go | 76
227 files changed, 11692 insertions, 11012 deletions
diff --git a/.gitignore b/.gitignore
index 43061642a..a90cb225b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,8 +23,6 @@ Godeps/_workspace/bin
.project
.settings
-mist
-cmd/mist/mist
deploy/osx/Mist.app
deploy/osx/Mist\ Installer.dmg
diff --git a/.gitmodules b/.gitmodules
index 3284c329d..219564eb7 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,3 @@
[submodule "cmd/mist/assets/ext/ethereum.js"]
path = cmd/mist/assets/ext/ethereum.js
- url = https://github.com/ethereum/ethereum.js
+ url = https://github.com/ethereum/web3.js
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index bc5e3144a..576efab00 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -11,11 +11,6 @@
"Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
},
{
- "ImportPath": "code.google.com/p/snappy-go/snappy",
- "Comment": "null-15",
- "Rev": "12e4b4183793ac4b061921e7980845e750679fd0"
- },
- {
"ImportPath": "github.com/codegangsta/cli",
"Comment": "1.2.0-95-g9b2bd2b",
"Rev": "9b2bd2b3489748d4d0a204fa4eb2ee9e89e0ebc6"
@@ -26,10 +21,6 @@
"Rev": "908aad345c9fbf3ab9bbb94031dc02d0d90df1b8"
},
{
- "ImportPath": "github.com/ethereum/serpent-go",
- "Rev": "5767a0dbd759d313df3f404dadb7f98d7ab51443"
- },
- {
"ImportPath": "github.com/howeyc/fsnotify",
"Comment": "v0.9.0-11-g6b1ef89",
"Rev": "6b1ef893dc11e0447abda6da20a5203481878dda"
@@ -47,10 +38,6 @@
"Rev": "ccfcd0245381f0c94c68f50626665eed3c6b726a"
},
{
- "ImportPath": "github.com/robertkrimen/otto",
- "Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
- },
- {
"ImportPath": "github.com/obscuren/qml",
"Rev": "c288002b52e905973b131089a8a7c761d4a2c36a"
},
@@ -67,27 +54,7 @@
"Rev": "907cca0f578a5316fb864ec6992dc3d9730ec58c"
},
{
- "ImportPath": "github.com/robertkrimen/otto/ast",
- "Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
- },
- {
- "ImportPath": "github.com/robertkrimen/otto/dbg",
- "Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
- },
- {
- "ImportPath": "github.com/robertkrimen/otto/file",
- "Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
- },
- {
- "ImportPath": "github.com/robertkrimen/otto/parser",
- "Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
- },
- {
- "ImportPath": "github.com/robertkrimen/otto/registry",
- "Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
- },
- {
- "ImportPath": "github.com/robertkrimen/otto/token",
+ "ImportPath": "github.com/robertkrimen/otto",
"Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
},
{
@@ -96,7 +63,11 @@
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
- "Rev": "832fa7ed4d28545eab80f19e1831fc004305cade"
+ "Rev": "4875955338b0a434238a31165cb87255ab6e9e4a"
+ },
+ {
+ "ImportPath": "github.com/syndtr/gosnappy/snappy",
+ "Rev": "156a073208e131d7d2e212cb749feae7c339e846"
},
{
"ImportPath": "golang.org/x/crypto/pbkdf2",
diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go
deleted file mode 100644
index d93c1b9db..000000000
--- a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snappy
-
-import (
- "encoding/binary"
- "errors"
-)
-
-// ErrCorrupt reports that the input is invalid.
-var ErrCorrupt = errors.New("snappy: corrupt input")
-
-// DecodedLen returns the length of the decoded block.
-func DecodedLen(src []byte) (int, error) {
- v, _, err := decodedLen(src)
- return v, err
-}
-
-// decodedLen returns the length of the decoded block and the number of bytes
-// that the length header occupied.
-func decodedLen(src []byte) (blockLen, headerLen int, err error) {
- v, n := binary.Uvarint(src)
- if n == 0 {
- return 0, 0, ErrCorrupt
- }
- if uint64(int(v)) != v {
- return 0, 0, errors.New("snappy: decoded block is too large")
- }
- return int(v), n, nil
-}
-
-// Decode returns the decoded form of src. The returned slice may be a sub-
-// slice of dst if dst was large enough to hold the entire decoded block.
-// Otherwise, a newly allocated slice will be returned.
-// It is valid to pass a nil dst.
-func Decode(dst, src []byte) ([]byte, error) {
- dLen, s, err := decodedLen(src)
- if err != nil {
- return nil, err
- }
- if len(dst) < dLen {
- dst = make([]byte, dLen)
- }
-
- var d, offset, length int
- for s < len(src) {
- switch src[s] & 0x03 {
- case tagLiteral:
- x := uint(src[s] >> 2)
- switch {
- case x < 60:
- s += 1
- case x == 60:
- s += 2
- if s > len(src) {
- return nil, ErrCorrupt
- }
- x = uint(src[s-1])
- case x == 61:
- s += 3
- if s > len(src) {
- return nil, ErrCorrupt
- }
- x = uint(src[s-2]) | uint(src[s-1])<<8
- case x == 62:
- s += 4
- if s > len(src) {
- return nil, ErrCorrupt
- }
- x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
- case x == 63:
- s += 5
- if s > len(src) {
- return nil, ErrCorrupt
- }
- x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
- }
- length = int(x + 1)
- if length <= 0 {
- return nil, errors.New("snappy: unsupported literal length")
- }
- if length > len(dst)-d || length > len(src)-s {
- return nil, ErrCorrupt
- }
- copy(dst[d:], src[s:s+length])
- d += length
- s += length
- continue
-
- case tagCopy1:
- s += 2
- if s > len(src) {
- return nil, ErrCorrupt
- }
- length = 4 + int(src[s-2])>>2&0x7
- offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
-
- case tagCopy2:
- s += 3
- if s > len(src) {
- return nil, ErrCorrupt
- }
- length = 1 + int(src[s-3])>>2
- offset = int(src[s-2]) | int(src[s-1])<<8
-
- case tagCopy4:
- return nil, errors.New("snappy: unsupported COPY_4 tag")
- }
-
- end := d + length
- if offset > d || end > len(dst) {
- return nil, ErrCorrupt
- }
- for ; d < end; d++ {
- dst[d] = dst[d-offset]
- }
- }
- if d != dLen {
- return nil, ErrCorrupt
- }
- return dst[:d], nil
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitignore b/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitignore
deleted file mode 100644
index 5d02b54e5..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-/tmp
-*/**/*un~
-*un~
-.DS_Store
-*/**/.DS_Store
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitmodules b/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitmodules
deleted file mode 100644
index 054c7d628..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "serp"]
- path = serpent
- url = https://github.com/ethereum/serpent.git
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/README.md b/Godeps/_workspace/src/github.com/ethereum/serpent-go/README.md
deleted file mode 100644
index 404f1b380..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-[serpent](https://github.com/ethereum/serpent) go bindings.
-
-## Build instructions
-
-```
-go get -d github.com/ethereum/serpent-go
-cd $GOPATH/src/github.com/ethereum/serpent-go
-git submodule init
-git submodule update
-```
-
-You're now ready to go :-)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/all.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/all.cpp
deleted file mode 100644
index 80032f900..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/all.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-#include "serpent/bignum.cpp"
-#include "serpent/util.cpp"
-#include "serpent/tokenize.cpp"
-#include "serpent/parser.cpp"
-#include "serpent/compiler.cpp"
-#include "serpent/funcs.cpp"
-#include "serpent/lllparser.cpp"
-#include "serpent/rewriter.cpp"
-
-#include "serpent/opcodes.cpp"
-#include "serpent/optimize.cpp"
-#include "serpent/functions.cpp"
-#include "serpent/preprocess.cpp"
-#include "serpent/rewriteutils.cpp"
-
-#include "cpp/api.cpp"
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.cpp
deleted file mode 100644
index bd2c85c7d..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <string>
-
-#include "serpent/lllparser.h"
-#include "serpent/bignum.h"
-#include "serpent/util.h"
-#include "serpent/tokenize.h"
-#include "serpent/parser.h"
-#include "serpent/compiler.h"
-
-#include "cpp/api.h"
-
-const char *compileGo(char *code, int *err)
-{
- try {
- std::string c = binToHex(compile(std::string(code)));
-
- return c.c_str();
- }
- catch(std::string &error) {
- *err = 1;
- return error.c_str();
- }
- catch(...) {
- return "Unknown error";
- }
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.h
deleted file mode 100644
index 235b5eb4a..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef CPP_API_H
-#define CPP_API_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-const char *compileGo(char *code, int *err);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent.go b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent.go
deleted file mode 100644
index 39b60eed7..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package serpent
-
-// #cgo CXXFLAGS: -I. -Ilangs/ -std=c++0x -Wall -fno-strict-aliasing
-// #cgo LDFLAGS: -lstdc++
-//
-// #include "cpp/api.h"
-//
-import "C"
-
-import (
- "encoding/hex"
- "errors"
- "unsafe"
-)
-
-func Compile(str string) ([]byte, error) {
- var err C.int
- out := C.GoString(C.compileGo(C.CString(str), (*C.int)(unsafe.Pointer(&err))))
-
- if err == C.int(1) {
- return nil, errors.New(out)
- }
-
- bytes, _ := hex.DecodeString(out)
-
- return bytes, nil
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/.gitignore b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/.gitignore
deleted file mode 100644
index 72b65e446..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-[._]*.s[a-w][a-z]
-[._]s[a-w][a-z]
-*.un~
-Session.vim
-.netrwhist
-*~
-*.o
-serpent
-libserpent.a
-pyserpent.so
-dist
-*.egg-info
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/MANIFEST.in b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/MANIFEST.in
deleted file mode 100644
index 5f5766ced..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/MANIFEST.in
+++ /dev/null
@@ -1,5 +0,0 @@
-include *.cpp
-include *.h
-include *py
-include README.md
-include Makefile
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/Makefile b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/Makefile
deleted file mode 100644
index 28c38728e..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/Makefile
+++ /dev/null
@@ -1,55 +0,0 @@
-PLATFORM_OPTS =
-PYTHON = /usr/include/python2.7
-CXXFLAGS = -fPIC
-# -g3 -O0
-BOOST_INC = /usr/include
-BOOST_LIB = /usr/lib
-TARGET = pyserpent
-COMMON_OBJS = bignum.o util.o tokenize.o lllparser.o parser.o opcodes.o optimize.o functions.o rewriteutils.o preprocess.o rewriter.o compiler.o funcs.o
-HEADERS = bignum.h util.h tokenize.h lllparser.h parser.h opcodes.h functions.h optimize.h rewriteutils.h preprocess.h rewriter.h compiler.h funcs.h
-PYTHON_VERSION = 2.7
-
-serpent : serpentc lib
-
-lib:
- ar rvs libserpent.a $(COMMON_OBJS)
- g++ $(CXXFLAGS) -shared $(COMMON_OBJS) -o libserpent.so
-
-serpentc: $(COMMON_OBJS) cmdline.o
- rm -rf serpent
- g++ -Wall $(COMMON_OBJS) cmdline.o -o serpent
-
-bignum.o : bignum.cpp bignum.h
-
-opcodes.o : opcodes.cpp opcodes.h
-
-util.o : util.cpp util.h bignum.o
-
-tokenize.o : tokenize.cpp tokenize.h util.o
-
-lllparser.o : lllparser.cpp lllparser.h tokenize.o util.o
-
-parser.o : parser.cpp parser.h tokenize.o util.o
-
-rewriter.o : rewriter.cpp rewriter.h lllparser.o util.o rewriteutils.o preprocess.o opcodes.o functions.o
-
-preprocessor.o: rewriteutils.o functions.o
-
-compiler.o : compiler.cpp compiler.h util.o
-
-funcs.o : funcs.cpp funcs.h
-
-cmdline.o: cmdline.cpp
-
-pyext.o: pyext.cpp
-
-clean:
- rm -f serpent *\.o libserpent.a libserpent.so
-
-install:
- cp serpent /usr/local/bin
- cp libserpent.a /usr/local/lib
- cp libserpent.so /usr/local/lib
- rm -rf /usr/local/include/libserpent
- mkdir -p /usr/local/include/libserpent
- cp $(HEADERS) /usr/local/include/libserpent
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/README.md b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/README.md
deleted file mode 100644
index 03dfcc15f..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-Installation:
-
-```make && sudo make install```
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.cpp
deleted file mode 100644
index 108b1eb04..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "bignum.h"
-
-//Integer to string conversion
-std::string unsignedToDecimal(unsigned branch) {
- if (branch < 10) return nums.substr(branch, 1);
- else return unsignedToDecimal(branch / 10) + nums.substr(branch % 10,1);
-}
-
-//Add two strings representing decimal values
-std::string decimalAdd(std::string a, std::string b) {
- std::string o = a;
- while (b.length() < a.length()) b = "0" + b;
- while (o.length() < b.length()) o = "0" + o;
- bool carry = false;
- for (int i = o.length() - 1; i >= 0; i--) {
- o[i] = o[i] + b[i] - '0';
- if (carry) o[i]++;
- if (o[i] > '9') {
- o[i] -= 10;
- carry = true;
- }
- else carry = false;
- }
- if (carry) o = "1" + o;
- return o;
-}
-
-//Helper function for decimalMul
-std::string decimalDigitMul(std::string a, int dig) {
- if (dig == 0) return "0";
- else return decimalAdd(a, decimalDigitMul(a, dig - 1));
-}
-
-//Multiply two strings representing decimal values
-std::string decimalMul(std::string a, std::string b) {
- std::string o = "0";
- for (unsigned i = 0; i < b.length(); i++) {
- std::string n = decimalDigitMul(a, b[i] - '0');
- if (n != "0") {
- for (unsigned j = i + 1; j < b.length(); j++) n += "0";
- }
- o = decimalAdd(o, n);
- }
- return o;
-}
-
-//Modexp
-std::string decimalModExp(std::string b, std::string e, std::string m) {
- if (e == "0") return "1";
- else if (e == "1") return b;
- else if (decimalMod(e, "2") == "0") {
- std::string o = decimalModExp(b, decimalDiv(e, "2"), m);
- return decimalMod(decimalMul(o, o), m);
- }
- else {
- std::string o = decimalModExp(b, decimalDiv(e, "2"), m);
- return decimalMod(decimalMul(decimalMul(o, o), b), m);
- }
-}
-
-//Is a greater than b? Flag allows equality
-bool decimalGt(std::string a, std::string b, bool eqAllowed) {
- if (a == b) return eqAllowed;
- return (a.length() > b.length()) || (a.length() >= b.length() && a > b);
-}
-
-//Subtract the two strings representing decimal values
-std::string decimalSub(std::string a, std::string b) {
- if (b == "0") return a;
- if (b == a) return "0";
- while (b.length() < a.length()) b = "0" + b;
- std::string c = b;
- for (unsigned i = 0; i < c.length(); i++) c[i] = '0' + ('9' - c[i]);
- std::string o = decimalAdd(decimalAdd(a, c).substr(1), "1");
- while (o.size() > 1 && o[0] == '0') o = o.substr(1);
- return o;
-}
-
-//Divide the two strings representing decimal values
-std::string decimalDiv(std::string a, std::string b) {
- std::string c = b;
- if (decimalGt(c, a)) return "0";
- int zeroes = -1;
- while (decimalGt(a, c, true)) {
- zeroes += 1;
- c = c + "0";
- }
- c = c.substr(0, c.size() - 1);
- std::string quot = "0";
- while (decimalGt(a, c, true)) {
- a = decimalSub(a, c);
- quot = decimalAdd(quot, "1");
- }
- for (int i = 0; i < zeroes; i++) quot += "0";
- return decimalAdd(quot, decimalDiv(a, b));
-}
-
-//Modulo the two strings representing decimal values
-std::string decimalMod(std::string a, std::string b) {
- return decimalSub(a, decimalMul(decimalDiv(a, b), b));
-}
-
-//String to int conversion
-unsigned decimalToUnsigned(std::string a) {
- if (a.size() == 0) return 0;
- else return (a[a.size() - 1] - '0')
- + decimalToUnsigned(a.substr(0,a.size()-1)) * 10;
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.h
deleted file mode 100644
index 99571acd2..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef ETHSERP_BIGNUM
-#define ETHSERP_BIGNUM
-
-const std::string nums = "0123456789";
-
-const std::string tt256 =
-"115792089237316195423570985008687907853269984665640564039457584007913129639936"
-;
-
-const std::string tt256m1 =
-"115792089237316195423570985008687907853269984665640564039457584007913129639935"
-;
-
-const std::string tt255 =
-"57896044618658097711785492504343953926634992332820282019728792003956564819968";
-
-const std::string tt176 =
-"95780971304118053647396689196894323976171195136475136";
-
-std::string unsignedToDecimal(unsigned branch);
-
-std::string decimalAdd(std::string a, std::string b);
-
-std::string decimalMul(std::string a, std::string b);
-
-std::string decimalSub(std::string a, std::string b);
-
-std::string decimalDiv(std::string a, std::string b);
-
-std::string decimalMod(std::string a, std::string b);
-
-std::string decimalModExp(std::string b, std::string e, std::string m);
-
-bool decimalGt(std::string a, std::string b, bool eqAllowed=false);
-
-unsigned decimalToUnsigned(std::string a);
-
-#define utd unsignedToDecimal
-#define dtu decimalToUnsigned
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/cmdline.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/cmdline.cpp
deleted file mode 100644
index fe2560830..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/cmdline.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-#include <stdio.h>
-#include <string>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "funcs.h"
-
-int main(int argv, char** argc) {
- if (argv == 1) {
- std::cerr << "Must provide a command and arguments! Try parse, rewrite, compile, assemble\n";
- return 0;
- }
- if (argv == 2 && std::string(argc[1]) == "--help" || std::string(argc[1]) == "-h" ) {
- std::cout << argc[1] << "\n";
-
- std::cout << "serpent command input\n";
- std::cout << "where input -s for from stdin, a file, or interpreted as serpent code if does not exist as file.";
- std::cout << "where command: \n";
- std::cout << " parse: Just parses and returns s-expression code.\n";
- std::cout << " rewrite: Parse, use rewrite rules print s-expressions of result.\n";
- std::cout << " compile: Return resulting compiled EVM code in hex.\n";
- std::cout << " assemble: Return result from step before compilation.\n";
- return 0;
- }
-
- std::string flag = "";
- std::string command = argc[1];
- std::string input;
- std::string secondInput;
- if (std::string(argc[1]) == "-s") {
- flag = command.substr(1);
- command = argc[2];
- input = "";
- std::string line;
- while (std::getline(std::cin, line)) {
- input += line + "\n";
- }
- secondInput = argv == 3 ? "" : argc[3];
- }
- else {
- if (argv == 2) {
- std::cerr << "Not enough arguments for serpent cmdline\n";
- throw(0);
- }
- input = argc[2];
- secondInput = argv == 3 ? "" : argc[3];
- }
- bool haveSec = secondInput.length() > 0;
- if (command == "parse" || command == "parse_serpent") {
- std::cout << printAST(parseSerpent(input), haveSec) << "\n";
- }
- else if (command == "rewrite") {
- std::cout << printAST(rewrite(parseLLL(input, true)), haveSec) << "\n";
- }
- else if (command == "compile_to_lll") {
- std::cout << printAST(compileToLLL(input), haveSec) << "\n";
- }
- else if (command == "rewrite_chunk") {
- std::cout << printAST(rewriteChunk(parseLLL(input, true)), haveSec) << "\n";
- }
- else if (command == "compile_chunk_to_lll") {
- std::cout << printAST(compileChunkToLLL(input), haveSec) << "\n";
- }
- else if (command == "build_fragtree") {
- std::cout << printAST(buildFragmentTree(parseLLL(input, true))) << "\n";
- }
- else if (command == "compile_lll") {
- std::cout << binToHex(compileLLL(parseLLL(input, true))) << "\n";
- }
- else if (command == "dereference") {
- std::cout << printAST(dereference(parseLLL(input, true)), haveSec) <<"\n";
- }
- else if (command == "pretty_assemble") {
- std::cout << printTokens(prettyAssemble(parseLLL(input, true))) <<"\n";
- }
- else if (command == "pretty_compile_lll") {
- std::cout << printTokens(prettyCompileLLL(parseLLL(input, true))) << "\n";
- }
- else if (command == "pretty_compile") {
- std::cout << printTokens(prettyCompile(input)) << "\n";
- }
- else if (command == "pretty_compile_chunk") {
- std::cout << printTokens(prettyCompileChunk(input)) << "\n";
- }
- else if (command == "assemble") {
- std::cout << assemble(parseLLL(input, true)) << "\n";
- }
- else if (command == "serialize") {
- std::cout << binToHex(serialize(tokenize(input, Metadata(), false))) << "\n";
- }
- else if (command == "flatten") {
- std::cout << printTokens(flatten(parseLLL(input, true))) << "\n";
- }
- else if (command == "deserialize") {
- std::cout << printTokens(deserialize(hexToBin(input))) << "\n";
- }
- else if (command == "compile") {
- std::cout << binToHex(compile(input)) << "\n";
- }
- else if (command == "compile_chunk") {
- std::cout << binToHex(compileChunk(input)) << "\n";
- }
- else if (command == "encode_datalist") {
- std::vector<Node> tokens = tokenize(input);
- std::vector<std::string> o;
- for (int i = 0; i < (int)tokens.size(); i++) {
- o.push_back(tokens[i].val);
- }
- std::cout << binToHex(encodeDatalist(o)) << "\n";
- }
- else if (command == "decode_datalist") {
- std::vector<std::string> o = decodeDatalist(hexToBin(input));
- std::vector<Node> tokens;
- for (int i = 0; i < (int)o.size(); i++)
- tokens.push_back(token(o[i]));
- std::cout << printTokens(tokens) << "\n";
- }
- else if (command == "tokenize") {
- std::cout << printTokens(tokenize(input));
- }
- else if (command == "biject") {
- if (argv == 3)
- std::cerr << "Not enough arguments for biject\n";
- int pos = decimalToUnsigned(secondInput);
- std::vector<Node> n = prettyCompile(input);
- if (pos >= (int)n.size())
- std::cerr << "Code position too high\n";
- Metadata m = n[pos].metadata;
- std::cout << "Opcode: " << n[pos].val << ", file: " << m.file <<
- ", line: " << m.ln << ", char: " << m.ch << "\n";
- }
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.cpp
deleted file mode 100644
index b9281dcbc..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.cpp
+++ /dev/null
@@ -1,554 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "bignum.h"
-#include "opcodes.h"
-
-struct programAux {
- std::map<std::string, std::string> vars;
- int nextVarMem;
- bool allocUsed;
- bool calldataUsed;
- int step;
- int labelLength;
-};
-
-struct programVerticalAux {
- int height;
- std::string innerScopeName;
- std::map<std::string, int> dupvars;
- std::map<std::string, int> funvars;
- std::vector<mss> scopes;
-};
-
-struct programData {
- programAux aux;
- Node code;
- int outs;
-};
-
-programAux Aux() {
- programAux o;
- o.allocUsed = false;
- o.calldataUsed = false;
- o.step = 0;
- o.nextVarMem = 32;
- return o;
-}
-
-programVerticalAux verticalAux() {
- programVerticalAux o;
- o.height = 0;
- o.dupvars = std::map<std::string, int>();
- o.funvars = std::map<std::string, int>();
- o.scopes = std::vector<mss>();
- return o;
-}
-
-programData pd(programAux aux = Aux(), Node code=token("_"), int outs=0) {
- programData o;
- o.aux = aux;
- o.code = code;
- o.outs = outs;
- return o;
-}
-
-Node multiToken(Node nodes[], int len, Metadata met) {
- std::vector<Node> out;
- for (int i = 0; i < len; i++) {
- out.push_back(nodes[i]);
- }
- return astnode("_", out, met);
-}
-
-Node finalize(programData c);
-
-Node popwrap(Node node) {
- Node nodelist[] = {
- node,
- token("POP", node.metadata)
- };
- return multiToken(nodelist, 2, node.metadata);
-}
-
-// Grabs variables
-mss getVariables(Node node, mss cur=mss()) {
- Metadata m = node.metadata;
- // Tokens don't contain any variables
- if (node.type == TOKEN)
- return cur;
- // Don't descend into call fragments
- else if (node.val == "lll")
- return getVariables(node.args[1], cur);
- // At global scope get/set/ref also declare
- else if (node.val == "get" || node.val == "set" || node.val == "ref") {
- if (node.args[0].type != TOKEN)
- err("Variable name must be simple token,"
- " not complex expression!", m);
- if (!cur.count(node.args[0].val)) {
- cur[node.args[0].val] = utd(cur.size() * 32 + 32);
- //std::cerr << node.args[0].val << " " << cur[node.args[0].val] << "\n";
- }
- }
- // Recursively process children
- for (unsigned i = 0; i < node.args.size(); i++) {
- cur = getVariables(node.args[i], cur);
- }
- return cur;
-}
-
-// Turns LLL tree into tree of code fragments
-programData opcodeify(Node node,
- programAux aux=Aux(),
- programVerticalAux vaux=verticalAux()) {
- std::string symb = "_"+mkUniqueToken();
- Metadata m = node.metadata;
- // Get variables
- if (!aux.vars.size()) {
- aux.vars = getVariables(node);
- aux.nextVarMem = aux.vars.size() * 32 + 32;
- }
- // Numbers
- if (node.type == TOKEN) {
- return pd(aux, nodeToNumeric(node), 1);
- }
- else if (node.val == "ref" || node.val == "get" || node.val == "set") {
- std::string varname = node.args[0].val;
- // Determine reference to variable
- Node varNode = tkn(aux.vars[varname], m);
- //std::cerr << varname << " " << printSimple(varNode) << "\n";
- // Set variable
- if (node.val == "set") {
- programData sub = opcodeify(node.args[1], aux, vaux);
- if (!sub.outs)
- err("Value to set variable must have nonzero arity!", m);
- // What if we are setting a stack variable?
- if (vaux.dupvars.count(node.args[0].val)) {
- int h = vaux.height - vaux.dupvars[node.args[0].val];
- if (h > 16) err("Too deep for stack variable (max 16)", m);
- Node nodelist[] = {
- sub.code,
- token("SWAP"+unsignedToDecimal(h), m),
- token("POP", m)
- };
- return pd(sub.aux, multiToken(nodelist, 3, m), 0);
- }
- // Setting a memory variable
- else {
- Node nodelist[] = {
- sub.code,
- varNode,
- token("MSTORE", m),
- };
- return pd(sub.aux, multiToken(nodelist, 3, m), 0);
- }
- }
- // Get variable
- else if (node.val == "get") {
- // Getting a stack variable
- if (vaux.dupvars.count(node.args[0].val)) {
- int h = vaux.height - vaux.dupvars[node.args[0].val];
- if (h > 16) err("Too deep for stack variable (max 16)", m);
- return pd(aux, token("DUP"+unsignedToDecimal(h)), 1);
- }
- // Getting a memory variable
- else {
- Node nodelist[] =
- { varNode, token("MLOAD", m) };
- return pd(aux, multiToken(nodelist, 2, m), 1);
- }
- }
- // Refer variable
- else if (node.val == "ref") {
- if (vaux.dupvars.count(node.args[0].val))
- err("Cannot ref stack variable!", m);
- return pd(aux, varNode, 1);
- }
- }
- // Comments do nothing
- else if (node.val == "comment") {
- Node nodelist[] = { };
- return pd(aux, multiToken(nodelist, 0, m), 0);
- }
- // Custom operation sequence
- // eg. (ops bytez id msize swap1 msize add 0 swap1 mstore) == alloc
- if (node.val == "ops") {
- std::vector<Node> subs2;
- int depth = 0;
- for (unsigned i = 0; i < node.args.size(); i++) {
- std::string op = upperCase(node.args[i].val);
- if (node.args[i].type == ASTNODE || opinputs(op) == -1) {
- programVerticalAux vaux2 = vaux;
- vaux2.height = vaux.height - i - 1 + node.args.size();
- programData sub = opcodeify(node.args[i], aux, vaux2);
- aux = sub.aux;
- depth += sub.outs;
- subs2.push_back(sub.code);
- }
- else {
- subs2.push_back(token(op, m));
- depth += opoutputs(op) - opinputs(op);
- }
- }
- if (depth < 0 || depth > 1) err("Stack depth mismatch", m);
- return pd(aux, astnode("_", subs2, m), 0);
- }
- // Code blocks
- if (node.val == "lll" && node.args.size() == 2) {
- if (node.args[1].val != "0") aux.allocUsed = true;
- std::vector<Node> o;
- o.push_back(finalize(opcodeify(node.args[0])));
- programData sub = opcodeify(node.args[1], aux, vaux);
- Node code = astnode("____CODE", o, m);
- Node nodelist[] = {
- token("$begincode"+symb+".endcode"+symb, m), token("DUP1", m),
- token("$begincode"+symb, m), sub.code, token("CODECOPY", m),
- token("$endcode"+symb, m), token("JUMP", m),
- token("~begincode"+symb, m), code,
- token("~endcode"+symb, m), token("JUMPDEST", m)
- };
- return pd(sub.aux, multiToken(nodelist, 11, m), 1);
- }
- // Stack variables
- if (node.val == "with") {
- programData initial = opcodeify(node.args[1], aux, vaux);
- programVerticalAux vaux2 = vaux;
- vaux2.dupvars[node.args[0].val] = vaux.height;
- vaux2.height += 1;
- if (!initial.outs)
- err("Initial variable value must have nonzero arity!", m);
- programData sub = opcodeify(node.args[2], initial.aux, vaux2);
- Node nodelist[] = {
- initial.code,
- sub.code
- };
- programData o = pd(sub.aux, multiToken(nodelist, 2, m), sub.outs);
- if (sub.outs)
- o.code.args.push_back(token("SWAP1", m));
- o.code.args.push_back(token("POP", m));
- return o;
- }
- // Seq of multiple statements
- if (node.val == "seq") {
- std::vector<Node> children;
- int lastOut = 0;
- for (unsigned i = 0; i < node.args.size(); i++) {
- programData sub = opcodeify(node.args[i], aux, vaux);
- aux = sub.aux;
- if (sub.outs == 1) {
- if (i < node.args.size() - 1) sub.code = popwrap(sub.code);
- else lastOut = 1;
- }
- children.push_back(sub.code);
- }
- return pd(aux, astnode("_", children, m), lastOut);
- }
- // 2-part conditional (if gets rewritten to unless in rewrites)
- else if (node.val == "unless" && node.args.size() == 2) {
- programData cond = opcodeify(node.args[0], aux, vaux);
- programData action = opcodeify(node.args[1], cond.aux, vaux);
- aux = action.aux;
- if (!cond.outs) err("Condition of if/unless statement has arity 0", m);
- if (action.outs) action.code = popwrap(action.code);
- Node nodelist[] = {
- cond.code,
- token("$endif"+symb, m), token("JUMPI", m),
- action.code,
- token("~endif"+symb, m), token("JUMPDEST", m)
- };
- return pd(aux, multiToken(nodelist, 6, m), 0);
- }
- // 3-part conditional
- else if (node.val == "if" && node.args.size() == 3) {
- programData ifd = opcodeify(node.args[0], aux, vaux);
- programData thend = opcodeify(node.args[1], ifd.aux, vaux);
- programData elsed = opcodeify(node.args[2], thend.aux, vaux);
- aux = elsed.aux;
- if (!ifd.outs)
- err("Condition of if/unless statement has arity 0", m);
- // Handle cases where one conditional outputs something
- // and the other does not
- int outs = (thend.outs && elsed.outs) ? 1 : 0;
- if (thend.outs > outs) thend.code = popwrap(thend.code);
- if (elsed.outs > outs) elsed.code = popwrap(elsed.code);
- Node nodelist[] = {
- ifd.code,
- token("ISZERO", m),
- token("$else"+symb, m), token("JUMPI", m),
- thend.code,
- token("$endif"+symb, m), token("JUMP", m),
- token("~else"+symb, m), token("JUMPDEST", m),
- elsed.code,
- token("~endif"+symb, m), token("JUMPDEST", m)
- };
- return pd(aux, multiToken(nodelist, 12, m), outs);
- }
- // While (rewritten to this in rewrites)
- else if (node.val == "until") {
- programData cond = opcodeify(node.args[0], aux, vaux);
- programData action = opcodeify(node.args[1], cond.aux, vaux);
- aux = action.aux;
- if (!cond.outs)
- err("Condition of while/until loop has arity 0", m);
- if (action.outs) action.code = popwrap(action.code);
- Node nodelist[] = {
- token("~beg"+symb, m), token("JUMPDEST", m),
- cond.code,
- token("$end"+symb, m), token("JUMPI", m),
- action.code,
- token("$beg"+symb, m), token("JUMP", m),
- token("~end"+symb, m), token("JUMPDEST", m),
- };
- return pd(aux, multiToken(nodelist, 10, m));
- }
- // Memory allocations
- else if (node.val == "alloc") {
- programData bytez = opcodeify(node.args[0], aux, vaux);
- aux = bytez.aux;
- if (!bytez.outs)
- err("Alloc input has arity 0", m);
- aux.allocUsed = true;
- Node nodelist[] = {
- bytez.code,
- token("MSIZE", m), token("SWAP1", m), token("MSIZE", m),
- token("ADD", m),
- token("0", m), token("SWAP1", m), token("MSTORE", m)
- };
- return pd(aux, multiToken(nodelist, 8, m), 1);
- }
- // All other functions/operators
- else {
- std::vector<Node> subs2;
- int depth = opinputs(upperCase(node.val));
- if (depth == -1)
- err("Not a function or opcode: "+node.val, m);
- if ((int)node.args.size() != depth)
- err("Invalid arity for "+node.val, m);
- for (int i = node.args.size() - 1; i >= 0; i--) {
- programVerticalAux vaux2 = vaux;
- vaux2.height = vaux.height - i - 1 + node.args.size();
- programData sub = opcodeify(node.args[i], aux, vaux2);
- aux = sub.aux;
- if (!sub.outs)
- err("Input "+unsignedToDecimal(i)+" has arity 0", sub.code.metadata);
- subs2.push_back(sub.code);
- }
- subs2.push_back(token(upperCase(node.val), m));
- int outdepth = opoutputs(upperCase(node.val));
- return pd(aux, astnode("_", subs2, m), outdepth);
- }
-}
-
-// Adds necessary wrappers to a program
-Node finalize(programData c) {
- std::vector<Node> bottom;
- Metadata m = c.code.metadata;
- // If we are using both alloc and variables, we need to pre-zfill
- // some memory
- if ((c.aux.allocUsed || c.aux.calldataUsed) && c.aux.vars.size() > 0) {
- Node nodelist[] = {
- token("0", m),
- token(unsignedToDecimal(c.aux.nextVarMem - 1)),
- token("MSTORE8", m)
- };
- bottom.push_back(multiToken(nodelist, 3, m));
- }
- // The actual code
- bottom.push_back(c.code);
- return astnode("_", bottom, m);
-}
-
-//LLL -> code fragment tree
-Node buildFragmentTree(Node node) {
- return finalize(opcodeify(node));
-}
-
-
-// Builds a dictionary mapping labels to variable names
-programAux buildDict(Node program, programAux aux, int labelLength) {
- Metadata m = program.metadata;
- // Token
- if (program.type == TOKEN) {
- if (isNumberLike(program)) {
- aux.step += 1 + toByteArr(program.val, m).size();
- }
- else if (program.val[0] == '~') {
- aux.vars[program.val.substr(1)] = unsignedToDecimal(aux.step);
- }
- else if (program.val[0] == '$') {
- aux.step += labelLength + 1;
- }
- else aux.step += 1;
- }
- // A sub-program (ie. LLL)
- else if (program.val == "____CODE") {
- programAux auks = Aux();
- for (unsigned i = 0; i < program.args.size(); i++) {
- auks = buildDict(program.args[i], auks, labelLength);
- }
- for (std::map<std::string,std::string>::iterator it=auks.vars.begin();
- it != auks.vars.end();
- it++) {
- aux.vars[(*it).first] = (*it).second;
- }
- aux.step += auks.step;
- }
- // Normal sub-block
- else {
- for (unsigned i = 0; i < program.args.size(); i++) {
- aux = buildDict(program.args[i], aux, labelLength);
- }
- }
- return aux;
-}
-
-// Applies that dictionary
-Node substDict(Node program, programAux aux, int labelLength) {
- Metadata m = program.metadata;
- std::vector<Node> out;
- std::vector<Node> inner;
- if (program.type == TOKEN) {
- if (program.val[0] == '$') {
- std::string tokStr = "PUSH"+unsignedToDecimal(labelLength);
- out.push_back(token(tokStr, m));
- int dotLoc = program.val.find('.');
- if (dotLoc == -1) {
- std::string val = aux.vars[program.val.substr(1)];
- inner = toByteArr(val, m, labelLength);
- }
- else {
- std::string start = aux.vars[program.val.substr(1, dotLoc-1)],
- end = aux.vars[program.val.substr(dotLoc + 1)],
- dist = decimalSub(end, start);
- inner = toByteArr(dist, m, labelLength);
- }
- out.push_back(astnode("_", inner, m));
- }
- else if (program.val[0] == '~') { }
- else if (isNumberLike(program)) {
- inner = toByteArr(program.val, m);
- out.push_back(token("PUSH"+unsignedToDecimal(inner.size())));
- out.push_back(astnode("_", inner, m));
- }
- else return program;
- }
- else {
- for (unsigned i = 0; i < program.args.size(); i++) {
- Node n = substDict(program.args[i], aux, labelLength);
- if (n.type == TOKEN || n.args.size()) out.push_back(n);
- }
- }
- return astnode("_", out, m);
-}
-
-// Compiled fragtree -> compiled fragtree without labels
-Node dereference(Node program) {
- int sz = treeSize(program) * 4;
- int labelLength = 1;
- while (sz >= 256) { labelLength += 1; sz /= 256; }
- programAux aux = buildDict(program, Aux(), labelLength);
- return substDict(program, aux, labelLength);
-}
-
-// Dereferenced fragtree -> opcodes
-std::vector<Node> flatten(Node derefed) {
- std::vector<Node> o;
- if (derefed.type == TOKEN) {
- o.push_back(derefed);
- }
- else {
- for (unsigned i = 0; i < derefed.args.size(); i++) {
- std::vector<Node> oprime = flatten(derefed.args[i]);
- for (unsigned j = 0; j < oprime.size(); j++) o.push_back(oprime[j]);
- }
- }
- return o;
-}
-
-// Opcodes -> bin
-std::string serialize(std::vector<Node> codons) {
- std::string o;
- for (unsigned i = 0; i < codons.size(); i++) {
- int v;
- if (isNumberLike(codons[i])) {
- v = decimalToUnsigned(codons[i].val);
- }
- else if (codons[i].val.substr(0,4) == "PUSH") {
- v = 95 + decimalToUnsigned(codons[i].val.substr(4));
- }
- else {
- v = opcode(codons[i].val);
- }
- o += (char)v;
- }
- return o;
-}
-
-// Bin -> opcodes
-std::vector<Node> deserialize(std::string ser) {
- std::vector<Node> o;
- int backCount = 0;
- for (unsigned i = 0; i < ser.length(); i++) {
- unsigned char v = (unsigned char)ser[i];
- std::string oper = op((int)v);
- if (oper != "" && backCount <= 0) o.push_back(token(oper));
- else if (v >= 96 && v < 128 && backCount <= 0) {
- o.push_back(token("PUSH"+unsignedToDecimal(v - 95)));
- }
- else o.push_back(token(unsignedToDecimal(v)));
- if (v >= 96 && v < 128 && backCount <= 0) {
- backCount = v - 95;
- }
- else backCount--;
- }
- return o;
-}
-
-// Fragtree -> bin
-std::string assemble(Node fragTree) {
- return serialize(flatten(dereference(fragTree)));
-}
-
-// Fragtree -> tokens
-std::vector<Node> prettyAssemble(Node fragTree) {
- return flatten(dereference(fragTree));
-}
-
-// LLL -> bin
-std::string compileLLL(Node program) {
- return assemble(buildFragmentTree(program));
-}
-
-// LLL -> tokens
-std::vector<Node> prettyCompileLLL(Node program) {
- return prettyAssemble(buildFragmentTree(program));
-}
-
-// Converts a list of integer values to binary transaction data
-std::string encodeDatalist(std::vector<std::string> vals) {
- std::string o;
- for (unsigned i = 0; i < vals.size(); i++) {
- std::vector<Node> n = toByteArr(strToNumeric(vals[i]), Metadata(), 32);
- for (unsigned j = 0; j < n.size(); j++) {
- int v = decimalToUnsigned(n[j].val);
- o += (char)v;
- }
- }
- return o;
-}
-
-// Converts binary transaction data into a list of integer values
-std::vector<std::string> decodeDatalist(std::string ser) {
- std::vector<std::string> out;
- for (unsigned i = 0; i < ser.length(); i+= 32) {
- std::string o = "0";
- for (unsigned j = i; j < i + 32; j++) {
- int vj = (int)(unsigned char)ser[j];
- o = decimalAdd(decimalMul(o, "256"), unsignedToDecimal(vj));
- }
- out.push_back(o);
- }
- return out;
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.h
deleted file mode 100644
index aecaa3718..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef ETHSERP_COMPILER
-#define ETHSERP_COMPILER
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Compiled fragtree -> compiled fragtree without labels
-Node dereference(Node program);
-
-// LLL -> fragtree
-Node buildFragmentTree(Node program);
-
-// Dereferenced fragtree -> opcodes
-std::vector<Node> flatten(Node derefed);
-
-// opcodes -> bin
-std::string serialize(std::vector<Node> codons);
-
-// Fragtree -> bin
-std::string assemble(Node fragTree);
-
-// Fragtree -> opcodes
-std::vector<Node> prettyAssemble(Node fragTree);
-
-// LLL -> bin
-std::string compileLLL(Node program);
-
-// LLL -> opcodes
-std::vector<Node> prettyCompileLLL(Node program);
-
-// bin -> opcodes
-std::vector<Node> deserialize(std::string ser);
-
-// Converts a list of integer values to binary transaction data
-std::string encodeDatalist(std::vector<std::string> vals);
-
-// Converts binary transaction data into a list of integer values
-std::vector<std::string> decodeDatalist(std::string ser);
-
-#endif
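
encodeDatalist and decodeDatalist declared above are a plain 32-byte big-endian packing of each decimal value, as implemented in compiler.cpp. A small Python sketch of the round trip (an illustration, not the library's API):

    def encode_datalist(vals):
        # Each value becomes one 32-byte big-endian word.
        return b"".join(int(v).to_bytes(32, "big") for v in vals)

    def decode_datalist(ser):
        # Read the words back 32 bytes at a time.
        return [str(int.from_bytes(ser[i:i + 32], "big"))
                for i in range(0, len(ser), 32)]

    assert decode_datalist(encode_datalist(["1", "255", "1000000"])) == ["1", "255", "1000000"]
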
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/example.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/example.cpp
deleted file mode 100644
index 1ce2590d0..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/example.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <libserpent/funcs.h>
-#include <libserpent/bignum.h>
-#include <iostream>
-
-using namespace std;
-
-int main() {
- cout << printAST(compileToLLL(get_file_contents("examples/namecoin.se"))) << "\n";
- cout << decimalSub("10234", "10234") << "\n";
- cout << decimalSub("10234", "10233") << "\n";
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/collatz.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/collatz.se
deleted file mode 100644
index 148b47b59..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/collatz.se
+++ /dev/null
@@ -1,11 +0,0 @@
-x = msg.data[0]
-steps = 0
-
-while x > 1:
- steps += 1
- if (x % 2) == 0:
- x /= 2
- else:
- x = 3 * x + 1
-
-return(steps)
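
The same step count in a few lines of Python, handy as a cross-check when exercising the contract (msg.data[0] is the starting value, the return value is the number of Collatz steps):

    def collatz_steps(x):
        steps = 0
        while x > 1:
            steps += 1
            x = x // 2 if x % 2 == 0 else 3 * x + 1
        return steps

    assert collatz_steps(6) == 8  # 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
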
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/counterparty.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/counterparty.se
deleted file mode 100644
index abec0d102..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/counterparty.se
+++ /dev/null
@@ -1,274 +0,0 @@
-# Ethereum forks Counterparty in 340 lines of serpent
-# Not yet tested
-
-# assets[i] = a registered asset, assets[i].holders[j] = former or current i-holder
-data assets[2^50](creator, name, calldate, callprice, dividend_paid, holders[2^50], holdersCount)
-data nextAssetId
-
-# holdersMap: holdersMap[addr][asset] = 1 if addr holds asset
-data holdersMap[2^160][2^50]
-
-# balances[x][y] = how much of y x holds
-data balances[2^160][2^50]
-
-# orders[a][b] = heap of indices to (c, d, e)
-# = c offers to sell d units of a at a price of e units of b per 10^18 units
-# of a
-data orderbooks[2^50][2^50]
-
-# store of general order data
-data orders[2^50](seller, asset_sold, quantity, price)
-data ordersCount
-
-# data feeds
-data feeds[2^50](owner, value)
-data feedCount
-
-# heap
-data heap
-extern heap: [register, push, pop, top, size]
-
-data cfds[2^50](maker, acceptor, feed, asset, strike, leverage, min, max, maturity)
-data cfdCount
-
-data bets[2^50](maker, acceptor, feed, asset, makerstake, acceptorstake, eqtest, maturity)
-data betCount
-
-def init():
- heap = create('heap.se')
-
-# Add units (internal method)
-def add(to, asset, value):
- assert msg.sender == self
- self.balances[to][asset] += value
- # Add the holder to the holders list
- if not self.holdersMap[to][asset]:
- self.holdersMap[to][asset] = 1
- c = self.assets[asset].holdersCount
- self.assets[asset].holders[c] = to
- self.assets[asset].holdersCount = c + 1
-
-# Register a new asset
-def register_asset(q, name, calldate, callprice):
- newid = self.nextAssetId
- self.assets[newid].creator = msg.sender
- self.assets[newid].name = name
- self.assets[newid].calldate = calldate
- self.assets[newid].callprice = callprice
- self.assets[newid].holders[0] = msg.sender
- self.assets[newid].holdersCount = 1
- self.balances[msg.sender][newid] = q
- self.holdersMap[msg.sender][newid] = 1
-
-# Send
-def send(to, asset, value):
- fromval = self.balances[msg.sender][asset]
- if fromval >= value:
- self.balances[msg.sender][asset] -= value
- self.add(to, asset, value)
-
-# Order
-def mkorder(selling, buying, quantity, price):
- # Make sure you have enough to pay for the order
-    assert self.balances[msg.sender][selling] >= quantity
- # Try to match existing orders
- o = orderbooks[buying][selling]
- if not o:
- o = self.heap.register()
-        orderbooks[buying][selling] = o
- sz = self.heap.size(o)
- invprice = 10^36 / price
- while quantity > 0 and sz > 0:
-        orderid = self.heap.pop(o)
- p = self.orders[orderid].price
- if p > invprice:
- sz = 0
- else:
- q = self.orders[orderid].quantity
- oq = min(q, quantity)
- b = self.orders[orderid].seller
- self.balances[msg.sender][selling] -= oq * p / 10^18
- self.add(msg.sender, buying, oq)
- self.add(b, selling, oq * p / 10^18)
- self.orders[orderid].quantity = q - oq
- if oq == q:
- self.orders[orderid].seller = 0
- self.orders[orderid].price = 0
- self.orders[orderid].asset_sold = 0
- quantity -= oq
- sz -= 1
- assert quantity > 0
- # Make the order
- c = self.ordersCount
- self.orders[c].seller = msg.sender
- self.orders[c].asset_sold = selling
- self.orders[c].quantity = quantity
- self.orders[c].price = price
- self.ordersCount += 1
- # Add it to the heap
- o = orderbooks[selling][buying]
- if not o:
- o = self.heap.register()
- orderbooks[selling][buying] = o
- self.balances[msg.sender][selling] -= quantity
- self.heap.push(o, price, c)
- return(c)
-
-def cancel_order(id):
- if self.orders[id].seller == msg.sender:
- self.orders[id].seller = 0
- self.orders[id].price = 0
- self.balances[msg.sender][self.orders[id].asset_sold] += self.orders[id].quantity
- self.orders[id].quantity = 0
- self.orders[id].asset_sold = 0
-
-def register_feed():
- c = self.feedCount
- self.feeds[c].owner = msg.sender
- self.feedCount = c + 1
- return(c)
-
-def set_feed(id, v):
- if self.feeds[id].owner == msg.sender:
- self.feeds[id].value = v
-
-def mk_cfd_offer(feed, asset, strike, leverage, min, max, maturity):
- b = self.balances[msg.sender][asset]
- req = max((strike - min) * leverage, (strike - max) * leverage)
- assert b >= req
- self.balances[msg.sender][asset] = b - req
- c = self.cfdCount
- self.cfds[c].maker = msg.sender
- self.cfds[c].feed = feed
- self.cfds[c].asset = asset
- self.cfds[c].strike = strike
- self.cfds[c].leverage = leverage
- self.cfds[c].min = min
- self.cfds[c].max = max
- self.cfds[c].maturity = maturity
- self.cfdCount = c + 1
- return(c)
-
-def accept_cfd_offer(c):
- assert not self.cfds[c].acceptor and self.cfds[c].maker
- asset = self.cfds[c].asset
- strike = self.cfds[c].strike
- min = self.cfds[c].min
- max = self.cfds[c].max
- leverage = self.cfds[c].leverage
- b = self.balances[msg.sender][asset]
- req = max((min - strike) * leverage, (max - strike) * leverage)
- assert b >= req
- self.balances[msg.sender][asset] = b - req
- self.cfds[c].acceptor = msg.sender
- self.cfds[c].maturity += block.timestamp
-
-def claim_cfd_offer(c):
- asset = self.cfds[c].asset
- strike = self.cfds[c].strike
- min = self.cfds[c].min
- max = self.cfds[c].max
- leverage = self.cfds[c].leverage
- v = self.feeds[self.cfds[c].feed].value
- assert v <= min or v >= max or block.timestamp >= self.cfds[c].maturity
- maker_req = max((strike - min) * leverage, (strike - max) * leverage)
- acceptor_req = max((min - strike) * leverage, (max - strike) * leverage)
- paydelta = (strike - v) * leverage
- self.add(self.cfds[c].maker, asset, maker_req + paydelta)
- self.add(self.cfds[c].acceptor, asset, acceptor_req - paydelta)
- self.cfds[c].maker = 0
- self.cfds[c].acceptor = 0
- self.cfds[c].feed = 0
- self.cfds[c].asset = 0
- self.cfds[c].strike = 0
- self.cfds[c].leverage = 0
- self.cfds[c].min = 0
- self.cfds[c].max = 0
- self.cfds[c].maturity = 0
-
-def withdraw_cfd_offer(c):
- if self.cfds[c].maker == msg.sender and not self.cfds[c].acceptor:
- asset = self.cfds[c].asset
- strike = self.cfds[c].strike
- min = self.cfds[c].min
- max = self.cfds[c].max
- leverage = self.cfds[c].leverage
- maker_req = max((strike - min) * leverage, (strike - max) * leverage)
- self.balances[self.cfds[c].maker][asset] += maker_req
- self.cfds[c].maker = 0
- self.cfds[c].acceptor = 0
- self.cfds[c].feed = 0
- self.cfds[c].asset = 0
- self.cfds[c].strike = 0
- self.cfds[c].leverage = 0
- self.cfds[c].min = 0
- self.cfds[c].max = 0
- self.cfds[c].maturity = 0
-
-
-def mk_bet_offer(feed, asset, makerstake, acceptorstake, eqtest, maturity):
- assert self.balances[msg.sender][asset] >= makerstake
- c = self.betCount
- self.bets[c].maker = msg.sender
- self.bets[c].feed = feed
- self.bets[c].asset = asset
- self.bets[c].makerstake = makerstake
- self.bets[c].acceptorstake = acceptorstake
- self.bets[c].eqtest = eqtest
- self.bets[c].maturity = maturity
- self.balances[msg.sender][asset] -= makerstake
- self.betCount = c + 1
- return(c)
-
-def accept_bet_offer(c):
- assert self.bets[c].maker and not self.bets[c].acceptor
- asset = self.bets[c].asset
- acceptorstake = self.bets[c].acceptorstake
- assert self.balances[msg.sender][asset] >= acceptorstake
- self.balances[msg.sender][asset] -= acceptorstake
- self.bets[c].acceptor = msg.sender
-
-def claim_bet_offer(c):
- assert block.timestamp >= self.bets[c].maturity
- v = self.feeds[self.bets[c].feed].value
- totalstake = self.bets[c].makerstake + self.bets[c].acceptorstake
- if v == self.bets[c].eqtest:
- self.add(self.bets[c].maker, self.bets[c].asset, totalstake)
- else:
- self.add(self.bets[c].acceptor, self.bets[c].asset, totalstake)
- self.bets[c].maker = 0
- self.bets[c].feed = 0
- self.bets[c].asset = 0
- self.bets[c].makerstake = 0
- self.bets[c].acceptorstake = 0
- self.bets[c].eqtest = 0
- self.bets[c].maturity = 0
-
-def cancel_bet(c):
- assert not self.bets[c].acceptor and msg.sender == self.bets[c].maker
- self.balances[msg.sender][self.bets[c].asset] += self.bets[c].makerstake
- self.bets[c].maker = 0
- self.bets[c].feed = 0
- self.bets[c].asset = 0
- self.bets[c].makerstake = 0
- self.bets[c].acceptorstake = 0
- self.bets[c].eqtest = 0
- self.bets[c].maturity = 0
-
-def dividend(holder_asset, divvying_asset, ratio):
- i = 0
- sz = self.assets[holder_asset].holdersCount
- t = 0
- holders = array(sz)
- payments = array(sz)
- while i < sz:
- holders[i] = self.assets[holder_asset].holders[i]
- payments[i] = self.balances[holders[i]][holder_asset] * ratio / 10^18
- t += payments[i]
- i += 1
- if self.balances[msg.sender][divvying_asset] >= t:
- i = 0
- while i < sz:
- self.add(holders[i], divvying_asset, payments[i])
- i += 1
- self.balances[msg.sender][divvying_asset] -= t
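
One detail of mkorder above that is easy to miss: both sides quote their price as units of the asset they want per 10^18 units of the asset they sell, so an incoming order at price p crosses a resting opposite-side order at price q exactly when q <= 10^36 / p (the invprice computed in the matching loop). A hedged Python sketch of that crossing and fill rule, with hypothetical names:

    def crosses(incoming_price, resting_price):
        # The two quotes are reciprocals scaled by 10^18, so they cross when
        # their product is at most 10^36.
        return resting_price <= 10**36 // incoming_price

    def fill(incoming_qty, resting_qty, resting_price):
        # Quantity matched, and how much of the incoming order's asset goes to the maker.
        oq = min(resting_qty, incoming_qty)
        return oq, oq * resting_price // 10**18

    # Selling at 2.0 (2 * 10^18) crosses a counter-order at 0.5 or better, but not 0.6.
    assert crosses(2 * 10**18, 5 * 10**17)
    assert not crosses(2 * 10**18, 6 * 10**17)
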
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/heap.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/heap.se
deleted file mode 100644
index 4a43a3974..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/heap.se
+++ /dev/null
@@ -1,69 +0,0 @@
-data heaps[2^50](owner, size, nodes[2^50](key, value))
-data heapIndex
-
-def register():
- i = self.heapIndex
- self.heaps[i].owner = msg.sender
- self.heapIndex = i + 1
- return(i)
-
-def push(heap, key, value):
- assert msg.sender == self.heaps[heap].owner
- sz = self.heaps[heap].size
- self.heaps[heap].nodes[sz].key = key
- self.heaps[heap].nodes[sz].value = value
- k = sz + 1
- while k > 1:
- bottom = self.heaps[heap].nodes[k].key
- top = self.heaps[heap].nodes[k/2].key
- if bottom < top:
- tvalue = self.heaps[heap].nodes[k/2].value
- bvalue = self.heaps[heap].nodes[k].value
- self.heaps[heap].nodes[k].key = top
- self.heaps[heap].nodes[k].value = tvalue
- self.heaps[heap].nodes[k/2].key = bottom
- self.heaps[heap].nodes[k/2].value = bvalue
- k /= 2
- else:
- k = 0
- self.heaps[heap].size = sz + 1
-
-def pop(heap):
- sz = self.heaps[heap].size
- assert sz
- prevtop = self.heaps[heap].nodes[1].value
- self.heaps[heap].nodes[1].key = self.heaps[heap].nodes[sz].key
- self.heaps[heap].nodes[1].value = self.heaps[heap].nodes[sz].value
- self.heaps[heap].nodes[sz].key = 0
- self.heaps[heap].nodes[sz].value = 0
- top = self.heaps[heap].nodes[1].key
- k = 1
- while k * 2 < sz:
- bottom1 = self.heaps[heap].nodes[k * 2].key
- bottom2 = self.heaps[heap].nodes[k * 2 + 1].key
- if bottom1 < top and (bottom1 < bottom2 or k * 2 + 1 >= sz):
- tvalue = self.heaps[heap].nodes[1].value
- bvalue = self.heaps[heap].nodes[k * 2].value
- self.heaps[heap].nodes[k].key = bottom1
- self.heaps[heap].nodes[k].value = bvalue
- self.heaps[heap].nodes[k * 2].key = top
- self.heaps[heap].nodes[k * 2].value = tvalue
- k = k * 2
- elif bottom2 < top and bottom2 < bottom1 and k * 2 + 1 < sz:
- tvalue = self.heaps[heap].nodes[1].value
- bvalue = self.heaps[heap].nodes[k * 2 + 1].value
- self.heaps[heap].nodes[k].key = bottom2
- self.heaps[heap].nodes[k].value = bvalue
- self.heaps[heap].nodes[k * 2 + 1].key = top
- self.heaps[heap].nodes[k * 2 + 1].value = tvalue
- k = k * 2 + 1
- else:
- k = sz
- self.heaps[heap].size = sz - 1
- return(prevtop)
-
-def top(heap):
- return(self.heaps[heap].nodes[1].value)
-
-def size(heap):
- return(self.heaps[heap].size)
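
The contract above is a 1-indexed binary min-heap on the key: push sifts the new entry up while it is smaller than its parent, and pop moves the last entry to the root and sifts it down toward the smaller child, which is what lets the market always pull the best-priced order first. A compact Python sketch of the same sift logic (illustrative only):

    class MinHeap:
        def __init__(self):
            self.nodes = [None]  # 1-indexed, as in the contract

        def push(self, key, value):
            self.nodes.append((key, value))
            k = len(self.nodes) - 1
            while k > 1 and self.nodes[k][0] < self.nodes[k // 2][0]:
                self.nodes[k], self.nodes[k // 2] = self.nodes[k // 2], self.nodes[k]
                k //= 2  # sift the smaller key toward the root

        def pop(self):
            top = self.nodes[1][1]
            self.nodes[1] = self.nodes[-1]
            self.nodes.pop()
            k, sz = 1, len(self.nodes)
            while 2 * k < sz:
                child = 2 * k
                if child + 1 < sz and self.nodes[child + 1][0] < self.nodes[child][0]:
                    child += 1  # prefer the smaller child
                if self.nodes[child][0] >= self.nodes[k][0]:
                    break
                self.nodes[k], self.nodes[child] = self.nodes[child], self.nodes[k]
                k = child
            return top

    h = MinHeap()
    for key, val in [(5, "e"), (1, "a"), (3, "c")]:
        h.push(key, val)
    assert [h.pop() for _ in range(3)] == ["a", "c", "e"]
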
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/crowdfund.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/crowdfund.se
deleted file mode 100644
index 9fd1e0643..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/crowdfund.se
+++ /dev/null
@@ -1,53 +0,0 @@
-data campaigns[2^80](recipient, goal, deadline, contrib_total, contrib_count, contribs[2^50](sender, value))
-
-def create_campaign(id, recipient, goal, timelimit):
- if self.campaigns[id].recipient:
- return(0)
- self.campaigns[id].recipient = recipient
- self.campaigns[id].goal = goal
- self.campaigns[id].deadline = block.timestamp + timelimit
-
-def contribute(id):
- # Update contribution total
- total_contributed = self.campaigns[id].contrib_total + msg.value
- self.campaigns[id].contrib_total = total_contributed
-
- # Record new contribution
- sub_index = self.campaigns[id].contrib_count
- self.campaigns[id].contribs[sub_index].sender = msg.sender
- self.campaigns[id].contribs[sub_index].value = msg.value
- self.campaigns[id].contrib_count = sub_index + 1
-
- # Enough funding?
- if total_contributed >= self.campaigns[id].goal:
- send(self.campaigns[id].recipient, total_contributed)
- self.clear(id)
- return(1)
-
- # Expired?
- if block.timestamp > self.campaigns[id].deadline:
- i = 0
- c = self.campaigns[id].contrib_count
- while i < c:
- send(self.campaigns[id].contribs[i].sender, self.campaigns[id].contribs[i].value)
- i += 1
- self.clear(id)
- return(2)
-
-def progress_report(id):
- return(self.campaigns[id].contrib_total)
-
-# Clearing function for internal use
-def clear(id):
- if self == msg.sender:
- self.campaigns[id].recipient = 0
- self.campaigns[id].goal = 0
- self.campaigns[id].deadline = 0
- c = self.campaigns[id].contrib_count
- self.campaigns[id].contrib_count = 0
- self.campaigns[id].contrib_total = 0
- i = 0
- while i < c:
- self.campaigns[id].contribs[i].sender = 0
- self.campaigns[id].contribs[i].value = 0
- i += 1
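
The contribute path above is a three-way decision: keep accumulating, pay the recipient once the goal is met, or refund every contributor once the deadline passes. A Python sketch of just that decision, with hypothetical names, to make the control flow explicit:

    def settle(campaign, now):
        """Return (outcome, payouts) for a campaign dict with keys
        goal, deadline, recipient and contribs = [(sender, value), ...]."""
        total = sum(v for _, v in campaign["contribs"])
        if total >= campaign["goal"]:
            return "funded", [(campaign["recipient"], total)]
        if now > campaign["deadline"]:
            return "refunded", list(campaign["contribs"])
        return "open", []

    camp = {"goal": 100, "deadline": 50, "recipient": "alice",
            "contribs": [("bob", 60), ("carol", 50)]}
    assert settle(camp, now=10) == ("funded", [("alice", 110)])
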
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/futarchy.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/futarchy.se
deleted file mode 100644
index 0d68622ac..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/futarchy.se
+++ /dev/null
@@ -1,136 +0,0 @@
-# 0: current epoch
-# 1: number of proposals
-# 2: master currency
-# 3: last winning market
-# 4: last txid
-# 5: long-term ema currency units purchased
-# 6: last block when currency units purchased
-# 7: ether allocated to last round
-# 8: last block when currency units claimed
-# 9: ether allocated to current round
-# 1000+: [proposal address, market ID, totprice, totvolume]
-
-init:
- # We technically have two levels of epoch here. We have
- # one epoch of 1000, to synchronize with the 1000 epoch
- # of the market, and then 100 of those epochs make a
- # meta-epoch (I'll nominate the term "seculum") over
- # which the futarchy protocol will take place
- contract.storage[0] = block.number / 1000
- # The master currency of the futarchy. The futarchy will
- # assign currency units to whoever the prediction market
- # thinks will best increase the currency's value
- master_currency = create('subcurrency.se')
- contract.storage[2] = master_currency
-code:
- curepoch = block.number / 1000
- prevepoch = contract.storage[0]
- if curepoch > prevepoch:
- if (curepoch % 100) > 50:
- # Collect price data
- # We take an average over 50 subepochs to determine
- # the price of each asset, weighting by volume to
- # prevent abuse
- contract.storage[0] = curepoch
- i = 0
- numprop = contract.storage[1]
- while i < numprop:
- market = contract.storage[1001 + i * 4]
- price = call(market, 2)
- volume = call(market, 3)
- contract.storage[1002 + i * 4] += price
- contract.storage[1003 + i * 4] += volume * price
- i += 1
- if (curepoch / 100) > (prevepoch / 100):
- # If we are entering a new seculum, we determine the
- # market with the highest total average price
- best = 0
- bestmarket = 0
- besti = 0
- i = 0
- while i < numprop:
- curtotprice = contract.storage[1002 + i * 4]
-                curvolume = contract.storage[1003 + i * 4]
- curavgprice = curtotprice / curvolume
- if curavgprice > best:
- best = curavgprice
- besti = i
- bestmarket = contract.storage[1003 + i * 4]
- i += 1
- # Reset the number of proposals to 0
- contract.storage[1] = 0
- # Reward the highest proposal
- call(contract.storage[2], [best, 10^9, 0], 3)
- # Record the winning market so we can later appropriately
- # compensate the participants
- contract.storage[2] = bestmarket
- # The amount of ether allocated to the last round
- contract.storage[7] = contract.storage[9]
- # The amount of ether allocated to the next round
- contract.storage[9] = contract.balance / 2
- # Make a proposal [0, address]
- if msg.data[0] == 0 and curepoch % 100 < 50:
- pid = contract.storage[1]
- market = create('market.se')
- c1 = create('subcurrency.se')
- c2 = create('subcurrency.se')
- call(market, [c1, c2], 2)
- contract.storage[1000 + pid * 4] = msg.data[1]
- contract.storage[1001 + pid * 4] = market
- contract.storage[1] += 1
- # Claim ether [1, address]
- # One unit of the first currency in the last round's winning
- # market entitles you to a quantity of ether that was decided
- # at the start of that epoch
- elif msg.data[0] == 1:
- first_subcurrency = call(contract.storage[2], 3)
- # We ask the first subcurrency contract what the last transaction was. The
- # way to make a claim is to send the amount of first currency units that
- # you wish to claim with, and then immediately call this contract. For security
- # it makes sense to set up a tx which sends both messages in sequence atomically
- data = call(first_subcurrency, [], 0, 4)
- from = data[0]
- to = data[1]
- value = data[2]
- txid = data[3]
- if txid > contract.storage[4] and to == contract.address:
- send(to, contract.storage[7] * value / 10^9)
- contract.storage[4] = txid
- # Claim second currency [2, address]
- # One unit of the second currency in the last round's winning
- # market entitles you to one unit of the futarchy's master
- # currency
- elif msg.data[0] == 2:
- second_subcurrency = call(contract.storage[2], 3)
-        data = call(second_subcurrency, [], 0, 4)
- from = data[0]
- to = data[1]
- value = data[2]
- txid = data[3]
- if txid > contract.storage[4] and to == contract.address:
- call(contract.storage[2], [to, value], 2)
- contract.storage[4] = txid
- # Purchase currency for ether (target releasing 10^9 units per seculum)
- # Price starts off 1 eth for 10^9 units but increases hyperbolically to
- # limit issuance
- elif msg.data[0] == 3:
- pre_ema = contract.storage[5]
- post_ema = pre_ema + msg.value
- pre_reserve = 10^18 / (10^9 + pre_ema / 10^9)
- post_reserve = 10^18 / (10^9 + post_ema / 10^9)
- call(contract.storage[2], [msg.sender, pre_reserve - post_reserve], 2)
- last_sold = contract.storage[6]
- contract.storage[5] = pre_ema * (100000 + last_sold - block.number) + msg.value
- contract.storage[6] = block.number
- # Claim all currencies as the ether miner of the current block
- elif msg.data[0] == 2 and msg.sender == block.coinbase and block.number > contract.storage[8]:
- i = 0
- numproposals = contract.storage[1]
- while i < numproposals:
-            market = contract.storage[1001 + i * 4]
- fc = call(market, 4)
- sc = call(market, 5)
- call(fc, [msg.sender, 1000], 2)
- call(sc, [msg.sender, 1000], 2)
- i += 1
- contract.storage[8] = block.number
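
The purchase branch (msg.data[0] == 3) prices new currency units on a hyperbolic reserve curve: the contract tracks an EMA of ether paid in, and a payment is rewarded with the drop in 10^18 / (10^9 + ema / 10^9) across that payment, so issuance per ether shrinks as cumulative purchases grow. A small Python sketch of that curve (an illustration of the formula, not the contract's exact fixed-point bookkeeping):

    def reserve(ema):
        # Remaining issuable supply as a function of the purchase EMA.
        return 10**18 // (10**9 + ema // 10**9)

    def units_issued(pre_ema, payment):
        # Units handed out for `payment` wei = drop along the reserve curve.
        return reserve(pre_ema) - reserve(pre_ema + payment)

    first = units_issued(0, 10**18)        # 1 ETH into a fresh curve
    later = units_issued(10**20, 10**18)   # the same 1 ETH after 100 ETH absorbed
    assert first > later                   # issuance slows as more ether comes in
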
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/heap.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/heap.se
deleted file mode 100644
index 1bc442e6d..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/heap.se
+++ /dev/null
@@ -1,55 +0,0 @@
-# 0: size
-# 1-n: elements
-
-init:
- contract.storage[1000] = msg.sender
-code:
- # Only owner of the heap is allowed to modify it
- if contract.storage[1000] != msg.sender:
- stop
- # push
- if msg.data[0] == 0:
- sz = contract.storage[0]
- contract.storage[sz + 1] = msg.data[1]
- k = sz + 1
- while k > 1:
- bottom = contract.storage[k]
- top = contract.storage[k/2]
- if bottom < top:
- contract.storage[k] = top
- contract.storage[k/2] = bottom
- k /= 2
- else:
- k = 0
- contract.storage[0] = sz + 1
- # pop
- elif msg.data[0] == 1:
- sz = contract.storage[0]
- if !sz:
- return(0)
- prevtop = contract.storage[1]
- contract.storage[1] = contract.storage[sz]
- contract.storage[sz] = 0
- top = contract.storage[1]
- k = 1
- while k * 2 < sz:
- bottom1 = contract.storage[k * 2]
- bottom2 = contract.storage[k * 2 + 1]
- if bottom1 < top and (bottom1 < bottom2 or k * 2 + 1 >= sz):
- contract.storage[k] = bottom1
- contract.storage[k * 2] = top
- k = k * 2
- elif bottom2 < top and bottom2 < bottom1 and k * 2 + 1 < sz:
- contract.storage[k] = bottom2
- contract.storage[k * 2 + 1] = top
- k = k * 2 + 1
- else:
- k = sz
- contract.storage[0] = sz - 1
- return(prevtop)
- # top
- elif msg.data[0] == 2:
- return(contract.storage[1])
- # size
- elif msg.data[0] == 3:
- return(contract.storage[0])
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/market.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/market.se
deleted file mode 100644
index 2303a0b60..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/market.se
+++ /dev/null
@@ -1,117 +0,0 @@
-# Creates a decentralized market between any two subcurrencies
-
-# Here, the first subcurrency is the base asset and the second
-# subcurrency is the asset priced against the base asset. Hence,
-# "buying" refers to trading the first for the second, and
-# "selling" refers to trading the second for the first
-
-# storage 0: buy orders
-# storage 1: sell orders
-# storage 1000: first subcurrency
-# storage 1001: last first subcurrency txid
-# storage 2000: second subcurrency
-# storage 2001: last second subcurrency txid
-# storage 3000: current epoch
-# storage 4000: price
-# storage 4001: volume
-
-init:
- # Heap for buy orders
- contract.storage[0] = create('heap.se')
- # Heap for sell orders
- contract.storage[1] = create('heap.se')
-code:
- # Initialize with [ first_subcurrency, second_subcurrency ]
- if !contract.storage[1000]:
- contract.storage[1000] = msg.data[0] # First subcurrency
- contract.storage[1001] = -1
- contract.storage[2000] = msg.data[1] # Second subcurrency
- contract.storage[2001] = -1
- contract.storage[3000] = block.number / 1000
- stop
- first_subcurrency = contract.storage[1000]
- second_subcurrency = contract.storage[2000]
- buy_heap = contract.storage[0]
- sell_heap = contract.storage[1]
- # This contract operates in "epochs" of 100 blocks
- # At the end of each epoch, we process all orders
- # simultaneously, independent of order. This algorithm
- # prevents front-running, and generates a profit from
- # the spread. The profit is permanently kept in the
- # market (ie. destroyed), making both subcurrencies
- # more valuable
-
- # Epoch transition code
- if contract.storage[3000] < block.number / 100:
- done = 0
- volume = 0
- while !done:
- # Grab the top buy and sell order from each heap
- topbuy = call(buy_heap, 1)
- topsell = call(sell_heap, 1)
- # An order is recorded in the heap as:
- # Buys: (2^48 - 1 - price) * 2^208 + units of first currency * 2^160 + from
- # Sells: price * 2^208 + units of second currency * 2^160 + from
- buyprice = -(topbuy / 2^208)
- buyfcvalue = (topbuy / 2^160) % 2^48
- buyer = topbuy % 2^160
- sellprice = topsell / 2^208
- sellscvalue = (topsell / 2^160) % 2^48
- seller = topsell % 2^160
- # Heap empty, or no more matching orders
- if not topbuy or not topsell or buyprice < sellprice:
- done = 1
- else:
- # Add to volume counter
- volume += buyfcvalue
- # Calculate how much of the second currency the buyer gets, and
- # how much of the first currency the seller gets
- sellfcvalue = sellscvalue / buyprice
- buyscvalue = buyfcvalue * sellprice
- # Send the currency units along
- call(second_subcurrency, [buyer, buyscvalue], 2)
- call(first_subcurrency, [seller, sellfcvalue], 2)
- if volume:
- contract.storage[4000] = (buyprice + sellprice) / 2
- contract.storage[4001] = volume
- contract.storage[3000] = block.number / 100
- # Make buy order [0, price]
- if msg.data[0] == 0:
- # We ask the first subcurrency contract what the last transaction was. The
- # way to make a buy order is to send the amount of first currency units that
- # you wish to buy with, and then immediately call this contract. For security
- # it makes sense to set up a tx which sends both messages in sequence atomically
- data = call(first_subcurrency, [], 0, 4)
- from = data[0]
- to = data[1]
- value = data[2]
- txid = data[3]
- price = msg.data[1]
- if txid > contract.storage[1001] and to == contract.address:
- contract.storage[1001] = txid
- # Adds the order to the heap
- call(buy_heap, [0, -price * 2^208 + (value % 2^48) * 2^160 + from], 2)
- # Make sell order [1, price]
- elif msg.data[0] == 1:
- # Same mechanics as buying
- data = call(second_subcurrency, [], 0, 4)
- from = data[0]
- to = data[1]
- value = data[2]
- txid = data[3]
- price = msg.data[1]
- if txid > contract.storage[2001] and to == contract.address:
- contract.storage[2001] = txid
- call(sell_heap, [0, price * 2^208 + (value % 2^48) * 2^160 + from], 2)
- # Ask for price
- elif msg.data[0] == 2:
- return(contract.storage[4000])
- # Ask for volume
- elif msg.data[0] == 3:
-        return(contract.storage[4001])
- # Ask for first currency
- elif msg.data[0] == 4:
-        return(contract.storage[1000])
- # Ask for second currency
- elif msg.data[0] == 5:
-        return(contract.storage[2000])
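
Orders above are packed into a single 256-bit heap key: the price in the top 48 bits (negated for buys so the min-heap surfaces the highest bid first), the quantity in the next 48 bits, and the sender address in the low 160 bits. A Python sketch of that packing, with the mod 2^256 wrap standing in for the EVM's arithmetic:

    W = 2**256

    def pack_sell(price, qty, sender):
        # price | 48-bit quantity | 160-bit sender, from high bits to low.
        return (price * 2**208 + (qty % 2**48) * 2**160 + sender) % W

    def pack_buy(price, qty, sender):
        # Buys negate the price so the min-heap pops the highest bid first.
        return (-price * 2**208 + (qty % 2**48) * 2**160 + sender) % W

    def unpack_sell(key):
        return key >> 208, (key >> 160) % 2**48, key % 2**160

    key = pack_sell(600, 1000000, 0xBEEF)
    assert unpack_sell(key) == (600, 1000000, 0xBEEF)
    # A higher bid produces a smaller (wrapped) key, so it sits nearer the heap root.
    assert pack_buy(1400, 1000, 1) < pack_buy(1200, 1000, 1)
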
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/subcurrency.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/subcurrency.se
deleted file mode 100644
index 1501beff7..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/subcurrency.se
+++ /dev/null
@@ -1,35 +0,0 @@
-# Initialization
-# Admin can issue and delete at will
-init:
- contract.storage[0] = msg.sender
-code:
- # If a message with one item is sent, that's a balance query
- if msg.datasize == 1:
- addr = msg.data[0]
- return(contract.storage[addr])
- # If a message with two items [to, value] are sent, that's a transfer request
- elif msg.datasize == 2:
- from = msg.sender
- fromvalue = contract.storage[from]
- to = msg.data[0]
- value = msg.data[1]
- if fromvalue >= value and value > 0 and to > 4:
- contract.storage[from] = fromvalue - value
- contract.storage[to] += value
- contract.storage[2] = from
- contract.storage[3] = to
- contract.storage[4] = value
- contract.storage[5] += 1
- return(1)
- return(0)
- elif msg.datasize == 3 and msg.sender == contract.storage[0]:
- # Admin can issue at will by sending a [to, value, 0] message
- if msg.data[2] == 0:
- contract.storage[msg.data[0]] += msg.data[1]
- # Change admin [ newadmin, 0, 1 ]
- # Set admin to 0 to disable administration
- elif msg.data[2] == 1:
- contract.storage[0] = msg.data[0]
- # Fetch last transaction
- else:
- return([contract.storage[2], contract.storage[3], contract.storage[4], contract.storage[5]], 4)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/test.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/test.py
deleted file mode 100644
index 301a4a845..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/test.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from __future__ import print_function
-import pyethereum
-t = pyethereum.tester
-s = t.state()
-# Create currencies
-c1 = s.contract('subcurrency.se')
-print("First currency: %s" % c1)
-c2 = s.contract('subcurrency.se')
-print("First currency: %s" % c2)
-# Allocate units
-s.send(t.k0, c1, 0, [t.a0, 1000, 0])
-s.send(t.k0, c1, 0, [t.a1, 1000, 0])
-s.send(t.k0, c2, 0, [t.a2, 1000000, 0])
-s.send(t.k0, c2, 0, [t.a3, 1000000, 0])
-print("Allocated units")
-# Market
-m = s.contract('market.se')
-s.send(t.k0, m, 0, [c1, c2])
-# Place orders
-s.send(t.k0, c1, 0, [m, 1000])
-s.send(t.k0, m, 0, [0, 1200])
-s.send(t.k1, c1, 0, [m, 1000])
-s.send(t.k1, m, 0, [0, 1400])
-s.send(t.k2, c2, 0, [m, 1000000])
-s.send(t.k2, m, 0, [1, 800])
-s.send(t.k3, c2, 0, [m, 1000000])
-s.send(t.k3, m, 0, [1, 600])
-print("Orders placed")
-# Next epoch and ping
-s.mine(100)
-print("Mined 100")
-s.send(t.k0, m, 0, [])
-print("Updating")
-# Check
-assert s.send(t.k0, c2, 0, [t.a0]) == [800000]
-assert s.send(t.k0, c2, 0, [t.a1]) == [600000]
-assert s.send(t.k0, c1, 0, [t.a2]) == [833]
-assert s.send(t.k0, c1, 0, [t.a3]) == [714]
-print("Balance checks passed")
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/datafeed.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/datafeed.se
deleted file mode 100644
index 4c4a56de8..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/datafeed.se
+++ /dev/null
@@ -1,12 +0,0 @@
-# Database updateable only by the original creator
-data creator
-
-def init():
- self.creator = msg.sender
-
-def update(k, v):
- if msg.sender == self.creator:
- self.storage[k] = v
-
-def query(k):
- return(self.storage[k])
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover.se
deleted file mode 100644
index ce28f58c2..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover.se
+++ /dev/null
@@ -1,40 +0,0 @@
-# So I looked up on Wikipedia what Jacobian form actually is, and noticed that it's
-# actually a rather different and more clever construction than the naive version
-# that I created. It may be possible to achieve a further 20-50% savings by applying
-# that version.
-
-extern all: [call]
-
-data JORDANMUL
-data JORDANADD
-data EXP
-
-def init():
- self.JORDANMUL = create('jacobian_mul.se')
- self.JORDANADD = create('jacobian_add.se')
- self.EXP = create('modexp.se')
-
-def call(h, v, r, s):
- N = -432420386565659656852420866394968145599
- P = -4294968273
- h = mod(h, N)
- r = mod(r, P)
- s = mod(s, N)
- Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
- Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
- x = r
- xcubed = mulmod(mulmod(x, x, P), x, P)
- beta = self.EXP.call(addmod(xcubed, 7, P), div(P + 1, 4), P)
-
- # Static-gascost ghetto conditional
- y_is_positive = mod(v, 2) xor mod(beta, 2)
- y = beta * y_is_positive + (P - beta) * (1 - y_is_positive)
-
- GZ = self.JORDANMUL.call(Gx, 1, Gy, 1, N - h, outsz=4)
- XY = self.JORDANMUL.call(x, 1, y, 1, s, outsz=4)
- COMB = self.JORDANADD.call(GZ[0], GZ[1], GZ[2], GZ[3], XY[0], XY[1], XY[2], XY[3], 1, outsz=5)
- COMB[4] = self.EXP.call(r, N - 2, N)
- Q = self.JORDANMUL.call(data=COMB, datasz=5, outsz=4)
- ox = mulmod(Q[0], self.EXP.call(Q[1], P - 2, P), P)
- oy = mulmod(Q[2], self.EXP.call(Q[3], P - 2, P), P)
- return([ox, oy], 2)
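
The key step above is rebuilding the full public-key point from r alone: with x = r, beta = (x^3 + 7)^((P+1)/4) mod P is a square root of the curve equation (secp256k1's P is 3 mod 4), and the parity carried in v chooses between beta and P - beta. A short Python sketch of just that y-recovery step:

    P = 2**256 - 4294968273  # the secp256k1 field prime

    def recover_y(x, v):
        # Square root of x^3 + 7 via the (P + 1) / 4 exponent (valid since P % 4 == 3).
        beta = pow((x * x * x + 7) % P, (P + 1) // 4, P)
        # The parity of v picks which of the two roots (beta, P - beta) was meant.
        return beta if (v % 2) ^ (beta % 2) else P - beta

    Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
    Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
    assert recover_y(Gx, 27) == Gy  # Gy is even, so v = 27 selects it
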
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover_compiled.evm b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover_compiled.evm
deleted file mode 100644
index f575fe70f..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover_compiled.evm
+++ /dev/null
@@ -1 +0,0 @@
-6000607f535961071c80610013593961072f566000605f535961013d8061001359396101505661012b8061000e60003961013956600061023f5360003560001a6000141561012a5760806001602037602051151561002c576060511561002f565b60005b156100695760806080599059016000905260a052600060a051526001602060a05101526000604060a05101526001606060a051015260a051f25b6401000003d160000380608051826003846020516020510909098182600260605109836040516040510909828283098382830984858660026020510983098603866040518509088560405183098686888985604051098a038a85602051090809878689846040510909888960605183098a038a6080518509088960805183096080608059905901600090526101e052866101e051528560206101e05101528260406101e05101528160606101e05101526101e051f250505050505050505050505b5b6000f25b816000f090506000555961040680610168593961056e566000603f535961013d8061001359396101505661012b8061000e60003961013956600061023f5360003560001a6000141561012a5760806001602037602051151561002c576060511561002f565b60005b156100695760806080599059016000905260a052600060a051526001602060a05101526000604060a05101526001606060a051015260a051f25b6401000003d160000380608051826003846020516020510909098182600260605109836040516040510909828283098382830984858660026020510983098603866040518509088560405183098686888985604051098a038a85602051090809878689846040510909888960605183098a038a6080518509088960805183096080608059905901600090526101e052866101e051528560206101e05101528260406101e05101528160606101e05101526101e051f250505050505050505050505b5b6000f25b816000f0905060005561029a8061016860003961040256600061043f5360003560001a60001415610299576101006001602037602051151561002d5760605115610030565b60005b1561007657608059905901600090526101405260a051610140515260c051602061014051015260e051604061014051015261010051606061014051015261014051610120525b60a05115156100885760e0511561008b565b60005b156100d0576080599059016000905261016052602051610160515260405160206101605101526060516040610160510152608051606061016051015261016051610120525b61012051156100e157608061012051f25b6401000003d16000036000818260a0516040510983038360c051602051090814156101b1576000818260e051608051098303836101005160605109081415610175576080608080599059016000905260006101c0601f01536020516101e052604051610200526060516102205260805161024052818160816101c0601f01600060005460195a03f1508090509050f26101b0565b608060805990590160009052610280526000610280515260016020610280510152600060406102805101526001606061028051015261028051f25b5b808160405160c051098283610100516060510984038460805160e05109080981828360c0516020510984038460405160a051090883608051610100510909828283098382830984856020518309860386604051850908856040518309868760a051830988038860c0518509088760c051830988898a60405185098b038b8460205109088909898a836040510989098a8b60605183098c038c6080518509088b60805183096080608059905901600090526103e052866103e051528560206103e05101528260406103e05101528160606103e05101526103e051f2505050505050505050505050505b5b6000f25b816000f090506001556101928061058660003961071856600061013f5360003560001a600014156101915760a0600160203770014551231950b75fc4402da1732fc9bebf60000360a0510660a05260a05115606051156020511502011561007e5760806080599059016000905260c052600060c051526001602060c05101526000604060c05101526001606060c051015260c051f25b610120599059016000905260e052600060e051526000602060e05101526001604060e05101526000606060e05101526001608060e0510152600060a060e0510152600060c060e0510152600060e060e0510152600061010060e051015260e0517f80000000000000000000000000000000000000000000000000000000000000005b6000811115610187578060a0511615610165576080602083016081601f85016000600054614e20f15060205160a083015260405160c083015260605160e0830152608051610100830152608060208301610101601f8501
6000600154614e20f161017b565b6080602083016081601f85016000600054614e20f15b50600281049050610100565b608060208301f250505b5b6000f25b816000f0905060005559610406806107475939610b4d566000603f535961013d8061001359396101505661012b8061000e60003961013956600061023f5360003560001a6000141561012a5760806001602037602051151561002c576060511561002f565b60005b156100695760806080599059016000905260a052600060a051526001602060a05101526000604060a05101526001606060a051015260a051f25b6401000003d160000380608051826003846020516020510909098182600260605109836040516040510909828283098382830984858660026020510983098603866040518509088560405183098686888985604051098a038a85602051090809878689846040510909888960605183098a038a6080518509088960805183096080608059905901600090526101e052866101e051528560206101e05101528260406101e05101528160606101e05101526101e051f250505050505050505050505b5b6000f25b816000f0905060005561029a8061016860003961040256600061043f5360003560001a60001415610299576101006001602037602051151561002d5760605115610030565b60005b1561007657608059905901600090526101405260a051610140515260c051602061014051015260e051604061014051015261010051606061014051015261014051610120525b60a05115156100885760e0511561008b565b60005b156100d0576080599059016000905261016052602051610160515260405160206101605101526060516040610160510152608051606061016051015261016051610120525b61012051156100e157608061012051f25b6401000003d16000036000818260a0516040510983038360c051602051090814156101b1576000818260e051608051098303836101005160605109081415610175576080608080599059016000905260006101c0601f01536020516101e052604051610200526060516102205260805161024052818160816101c0601f01600060005460195a03f1508090509050f26101b0565b608060805990590160009052610280526000610280515260016020610280510152600060406102805101526001606061028051015261028051f25b5b808160405160c051098283610100516060510984038460805160e05109080981828360c0516020510984038460405160a051090883608051610100510909828283098382830984856020518309860386604051850908856040518309868760a051830988038860c0518509088760c051830988898a60405185098b038b8460205109088909898a836040510989098a8b60605183098c038c6080518509088b60805183096080608059905901600090526103e052866103e051528560206103e05101528260406103e05101528160606103e05101526103e051f2505050505050505050505050505b5b6000f25b816000f09050600155596100d080610b655939610c35566100be8061000e6000396100cc5660003560001a600014156100bd576060600160203760017f80000000000000000000000000000000000000000000000000000000000000005b60008111156100b157606051816040511615156020510a606051848509099150606051600282046040511615156020510a606051848509099150606051600482046040511615156020510a606051848509099150606051600882046040511615156020510a606051848509099150601081049050610038565b8160c052602060c0f250505b5b6000f25b816000f090506002556103d280610c4d60003961101f56600061095f5360003560001a600014156103d1576080600160203770014551231950b75fc4402da1732fc9bebf60000360a0526401000003d160000360c05260a0516020510660205260c0516060510660605260a051608051066080527f79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179860e0527f483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8610100526060516101205260c0516101205160c05161012051610120510909610140526000610180601f015360c051600761014051086101a0526004600160c05101046101c05260c0516101e05260206102006061610180601f01600060025460195a03f1506102005161016052600261016051066002604051061861022052610220516001036101605160c05103026102205161016051020161024052608080599059016000905260006102a0601f015360e0516102c05260016102e052610100516103005260016103205260205160a0510361034052818160a16102a0601f01600060005460195a03f1508090509050610
26052608080599059016000905260006103c0601f0153610120516103e052600161040052610240516104205260016104405260805161046052818160a16103c0601f01600060005460195a03f15080905090506103805260a080599059016000905260006104e0601f015361026051516105005260206102605101516105205260406102605101516105405260606102605101516105605261038051516105805260206103805101516105a05260406103805101516105c05260606103805101516105e05260016106005281816101216104e0601f01600060015460195a03f15080905090506104a0526000610640601f015360605161066052600260a051036106805260a0516106a05260206106c06061610640601f01600060025460195a03f1506106c05160806104a05101526104a05160208103805160018303608080599059016000905260008353818160a185600060005460195a03f150838552809050905090509050905090506106e05260c05160006107e0601f015360206106e051015161080052600260c051036108205260c05161084052602061086060616107e0601f01600060025460195a03f150610860516106e05151096107c05260c05160006108a0601f015360606106e05101516108c052600260c051036108e05260c05161090052602061092060616108a0601f01600060025460195a03f1506109205160406106e05101510961088052604060405990590160009052610940526107c051610940515261088051602061094051015261094051f25b5b6000f2
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_add.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_add.se
deleted file mode 100644
index 29dc390b2..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_add.se
+++ /dev/null
@@ -1,32 +0,0 @@
-extern all: [call]
-data DOUBLE
-
-def init():
- self.DOUBLE = create('jacobian_double.se')
-
-def call(axn, axd, ayn, ayd, bxn, bxd, byn, byd):
- if !axn and !ayn:
- o = [bxn, bxd, byn, byd]
- if !bxn and !byn:
- o = [axn, axd, ayn, ayd]
- if o:
- return(o, 4)
- with P = -4294968273:
- if addmod(mulmod(axn, bxd, P), P - mulmod(axd, bxn, P), P) == 0:
- if addmod(mulmod(ayn, byd, P), P - mulmod(ayd, byn, P), P) == 0:
- return(self.DOUBLE.call(axn, axd, ayn, ayd, outsz=4), 4)
- else:
- return([0, 1, 0, 1], 4)
- with mn = mulmod(addmod(mulmod(byn, ayd, P), P - mulmod(ayn, byd, P), P), mulmod(bxd, axd, P), P):
- with md = mulmod(mulmod(byd, ayd, P), addmod(mulmod(bxn, axd, P), P - mulmod(axn, bxd, P), P), P):
- with msqn = mulmod(mn, mn, P):
- with msqd = mulmod(md, md, P):
- with msqman = addmod(mulmod(msqn, axd, P), P - mulmod(msqd, axn, P), P):
- with msqmad = mulmod(msqd, axd, P):
- with xn = addmod(mulmod(msqman, bxd, P), P - mulmod(msqmad, bxn, P), P):
- with xd = mulmod(msqmad, bxd, P):
- with mamxn = mulmod(mn, addmod(mulmod(axn, xd, P), P - mulmod(xn, axd, P), P), P):
- with mamxd = mulmod(md, mulmod(axd, xd, P), P):
- with yn = addmod(mulmod(mamxn, ayd, P), P - mulmod(mamxd, ayn, P), P):
- with yd = mulmod(mamxd, ayd, P):
- return([xn, xd, yn, yd], 4)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_double.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_double.se
deleted file mode 100644
index b7d8221a6..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_double.se
+++ /dev/null
@@ -1,16 +0,0 @@
-def call(axn, axd, ayn, ayd):
- if !axn and !ayn:
- return([0, 1, 0, 1], 4)
- with P = -4294968273:
- # No need to add (A, 1) because A = 0 for bitcoin
- with mn = mulmod(mulmod(mulmod(axn, axn, P), 3, P), ayd, P):
- with md = mulmod(mulmod(axd, axd, P), mulmod(ayn, 2, P), P):
- with msqn = mulmod(mn, mn, P):
- with msqd = mulmod(md, md, P):
- with xn = addmod(mulmod(msqn, axd, P), P - mulmod(msqd, mulmod(axn, 2, P), P), P):
- with xd = mulmod(msqd, axd, P):
- with mamxn = mulmod(addmod(mulmod(axn, xd, P), P - mulmod(axd, xn, P), P), mn, P):
- with mamxd = mulmod(mulmod(axd, xd, P), md, P):
- with yn = addmod(mulmod(mamxn, ayd, P), P - mulmod(mamxd, ayn, P), P):
- with yd = mulmod(mamxd, ayd, P):
- return([xn, xd, yn, yd], 4)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_mul.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_mul.se
deleted file mode 100644
index bf5b96bb4..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_mul.se
+++ /dev/null
@@ -1,37 +0,0 @@
-# Expected gas cost
-#
-# def expect(n, point_at_infinity=False):
-# n = n % (2**256 - 432420386565659656852420866394968145599)
-# if point_at_infinity:
-# return 79
-# if n == 0:
-# return 34479
-# L = int(1 + math.log(n) / math.log(2))
-# H = len([x for x in b.encode(n, 2) if x == '1'])
-# return 34221 + 94 * L + 343 * H
-
-data DOUBLE
-data ADD
-
-def init():
- self.DOUBLE = create('jacobian_double.se')
- self.ADD = create('jacobian_add.se')
-
-def call(axn, axd, ayn, ayd, n):
- n = mod(n, -432420386565659656852420866394968145599)
- if !axn * !ayn + !n: # Constant-gas version of !axn and !ayn or !n
- return([0, 1, 0, 1], 4)
- with o = [0, 0, 1, 0, 1, 0, 0, 0, 0]:
- with b = 2 ^ 255:
- while gt(b, 0):
- if n & b:
- ~call(20000, self.DOUBLE, 0, o + 31, 129, o + 32, 128)
- o[5] = axn
- o[6] = axd
- o[7] = ayn
- o[8] = ayd
- ~call(20000, self.ADD, 0, o + 31, 257, o + 32, 128)
- else:
- ~call(20000, self.DOUBLE, 0, o + 31, 129, o + 32, 128)
- b = div(b, 2)
- return(o + 32, 4)
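
The loop above is plain double-and-add over the bits of n from the most significant end: double the accumulator each step and add the base point when the bit is set (the contract runs both operations through ~call so each bit costs roughly the same gas). A generic Python sketch of that scan with the group operations abstracted into parameters:

    def double_and_add(n, identity, double, add, base):
        # Scan n's bits from the top; double every step, add `base` on set bits.
        acc = identity
        bit = 2**255
        while bit > 0:
            acc = double(acc)
            if n & bit:
                acc = add(acc, base)
            bit //= 2
        return acc

    # Sanity check over plain integers, where doubling is *2 and adding is +:
    assert double_and_add(13, 0, lambda a: 2 * a, lambda a, b: a + b, 1) == 13
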
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/modexp.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/modexp.se
deleted file mode 100644
index 687b12a04..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/modexp.se
+++ /dev/null
@@ -1,11 +0,0 @@
-def call(b, e, m):
- with o = 1:
- with bit = 2 ^ 255:
- while gt(bit, 0):
- # A touch of loop unrolling for 20% efficiency gain
- o = mulmod(mulmod(o, o, m), b ^ !(!(e & bit)), m)
- o = mulmod(mulmod(o, o, m), b ^ !(!(e & div(bit, 2))), m)
- o = mulmod(mulmod(o, o, m), b ^ !(!(e & div(bit, 4))), m)
- o = mulmod(mulmod(o, o, m), b ^ !(!(e & div(bit, 8))), m)
- bit = div(bit, 16)
- return(o)
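
modexp.se is square-and-multiply written so every bit does the same work: square the accumulator, then multiply by b^1 or b^0 depending on the bit, with the loop unrolled four bits per iteration purely as an optimisation. The same constant-structure idea in Python, without the unrolling:

    def modexp(b, e, m):
        o = 1
        bit = 2**255
        while bit > 0:
            # b ** (1 if bit set else 0) keeps the multiply in place on every step.
            o = (o * o % m) * (b ** (1 if e & bit else 0)) % m
            bit //= 2
        return o

    assert modexp(3, 45, 101) == pow(3, 45, 101)
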
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/substitutes.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/substitutes.py
deleted file mode 100644
index 0007da0cf..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/substitutes.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import bitcoin as b
-import math
-import sys
-
-
-def signed(o):
- return map(lambda x: x - 2**256 if x >= 2**255 else x, o)
-
-
-def hamming_weight(n):
- return len([x for x in b.encode(n, 2) if x == '1'])
-
-
-def binary_length(n):
- return len(b.encode(n, 2))
-
-
-def jacobian_mul_substitute(A, B, C, D, N):
- if A == 0 and C == 0 or (N % b.N) == 0:
- return {"gas": 86, "output": [0, 1, 0, 1]}
- else:
- output = b.jordan_multiply(((A, B), (C, D)), N)
- return {
- "gas": 35262 + 95 * binary_length(N % b.N) + 355 * hamming_weight(N % b.N),
- "output": signed(list(output[0]) + list(output[1]))
- }
-
-
-def jacobian_add_substitute(A, B, C, D, E, F, G, H):
- if A == 0 or E == 0:
- gas = 149
- elif (A * F - B * E) % b.P == 0:
- if (C * H - D * G) % b.P == 0:
- gas = 442
- else:
- gas = 177
- else:
- gas = 301
- output = b.jordan_add(((A, B), (C, D)), ((E, F), (G, H)))
- return {
- "gas": gas,
- "output": signed(list(output[0]) + list(output[1]))
- }
-
-
-def modexp_substitute(base, exp, mod):
- return {
- "gas": 5150,
- "output": signed([pow(base, exp, mod) if mod > 0 else 0])
- }
-
-
-def ecrecover_substitute(z, v, r, s):
- P, A, B, N, Gx, Gy = b.P, b.A, b.B, b.N, b.Gx, b.Gy
- x = r
- beta = pow(x*x*x+A*x+B, (P + 1) / 4, P)
- BETA_PREMIUM = modexp_substitute(x, (P + 1) / 4, P)["gas"]
- y = beta if v % 2 ^ beta % 2 else (P - beta)
- Gz = b.jordan_multiply(((Gx, 1), (Gy, 1)), (N - z) % N)
- GZ_PREMIUM = jacobian_mul_substitute(Gx, 1, Gy, 1, (N - z) % N)["gas"]
- XY = b.jordan_multiply(((x, 1), (y, 1)), s)
- XY_PREMIUM = jacobian_mul_substitute(x, 1, y, 1, s % N)["gas"]
- Qr = b.jordan_add(Gz, XY)
- QR_PREMIUM = jacobian_add_substitute(Gz[0][0], Gz[0][1], Gz[1][0], Gz[1][1],
- XY[0][0], XY[0][1], XY[1][0], XY[1][1]
- )["gas"]
- Q = b.jordan_multiply(Qr, pow(r, N - 2, N))
- Q_PREMIUM = jacobian_mul_substitute(Qr[0][0], Qr[0][1], Qr[1][0], Qr[1][1],
- pow(r, N - 2, N))["gas"]
- R_PREMIUM = modexp_substitute(r, N - 2, N)["gas"]
- OX_PREMIUM = modexp_substitute(Q[0][1], P - 2, P)["gas"]
- OY_PREMIUM = modexp_substitute(Q[1][1], P - 2, P)["gas"]
- Q = b.from_jordan(Q)
- return {
- "gas": 991 + BETA_PREMIUM + GZ_PREMIUM + XY_PREMIUM + QR_PREMIUM +
- Q_PREMIUM + R_PREMIUM + OX_PREMIUM + OY_PREMIUM,
- "output": signed(Q)
- }
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/test.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/test.py
deleted file mode 100644
index 48d21e32f..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/test.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import bitcoin as b
-import random
-import sys
-import math
-from pyethereum import tester as t
-import substitutes
-import time
-
-vals = [random.randrange(2**256) for i in range(12)]
-
-test_points = [list(p[0]) + list(p[1]) for p in
- [b.jordan_multiply(((b.Gx, 1), (b.Gy, 1)), r) for r in vals]]
-
-G = [b.Gx, 1, b.Gy, 1]
-Z = [0, 1, 0, 1]
-
-
-def neg_point(p):
- return [p[0], b.P - p[1], p[2], b.P - p[3]]
-
-s = t.state()
-s.block.gas_limit = 10000000
-t.gas_limit = 1000000
-
-
-c = s.contract('modexp.se')
-print "Starting modexp tests"
-
-for i in range(0, len(vals) - 2, 3):
- o1 = substitutes.modexp_substitute(vals[i], vals[i+1], vals[i+2])
- o2 = s.profile(t.k0, c, 0, funid=0, abi=vals[i:i+3])
- #assert o1["gas"] == o2["gas"], (o1, o2)
- assert o1["output"] == o2["output"], (o1, o2)
-
-c = s.contract('jacobian_add.se')
-print "Starting addition tests"
-
-for i in range(2):
- P = test_points[i * 2]
- Q = test_points[i * 2 + 1]
- NP = neg_point(P)
-
- o1 = substitutes.jacobian_add_substitute(*(P + Q))
- o2 = s.profile(t.k0, c, 0, funid=0, abi=P + Q)
- #assert o1["gas"] == o2["gas"], (o1, o2)
- assert o1["output"] == o2["output"], (o1, o2)
-
- o1 = substitutes.jacobian_add_substitute(*(P + NP))
- o2 = s.profile(t.k0, c, 0, funid=0, abi=P + NP)
- #assert o1["gas"] == o2["gas"], (o1, o2)
- assert o1["output"] == o2["output"], (o1, o2)
-
- o1 = substitutes.jacobian_add_substitute(*(P + P))
- o2 = s.profile(t.k0, c, 0, funid=0, abi=P + P)
- #assert o1["gas"] == o2["gas"], (o1, o2)
- assert o1["output"] == o2["output"], (o1, o2)
-
- o1 = substitutes.jacobian_add_substitute(*(P + Z))
- o2 = s.profile(t.k0, c, 0, funid=0, abi=P + Z)
- #assert o1["gas"] == o2["gas"], (o1, o2)
- assert o1["output"] == o2["output"], (o1, o2)
-
- o1 = substitutes.jacobian_add_substitute(*(Z + P))
- o2 = s.profile(t.k0, c, 0, funid=0, abi=Z + P)
- #assert o1["gas"] == o2["gas"], (o1, o2)
- assert o1["output"] == o2["output"], (o1, o2)
-
-
-c = s.contract('jacobian_mul.se')
-print "Starting multiplication tests"
-
-
-mul_tests = [
- Z + [0],
- Z + [vals[0]],
- test_points[0] + [0],
- test_points[1] + [b.N],
- test_points[2] + [1],
- test_points[2] + [2],
- test_points[2] + [3],
- test_points[2] + [4],
- test_points[3] + [5],
- test_points[3] + [6],
- test_points[4] + [7],
- test_points[4] + [2**254],
- test_points[4] + [vals[1]],
- test_points[4] + [vals[2]],
- test_points[4] + [vals[3]],
- test_points[5] + [2**256 - 1],
-]
-
-for i, test in enumerate(mul_tests):
- print 'trying mul_test %i' % i, test
- o1 = substitutes.jacobian_mul_substitute(*test)
- o2 = s.profile(t.k0, c, 0, funid=0, abi=test)
- # assert o1["gas"] == o2["gas"], (o1, o2, test)
- assert o1["output"] == o2["output"], (o1, o2, test)
-
-c = s.contract('ecrecover.se')
-print "Starting ecrecover tests"
-
-for i in range(5):
- print 'trying ecrecover_test', vals[i*2], vals[i*2+1]
- k = vals[i*2]
- h = vals[i*2+1]
- V, R, S = b.ecdsa_raw_sign(b.encode(h, 256, 32), k)
- aa = time.time()
- o1 = substitutes.ecrecover_substitute(h, V, R, S)
- print 'sub', time.time() - aa
- a = time.time()
- o2 = s.profile(t.k0, c, 0, funid=0, abi=[h, V, R, S])
- print time.time() - a
- # assert o1["gas"] == o2["gas"], (o1, o2, h, V, R, S)
- assert o1["output"] == o2["output"], (o1, o2, h, V, R, S)
-
-# Explicit tests
-
-data = [[
- 0xf007a9c78a4b2213220adaaf50c89a49d533fbefe09d52bbf9b0da55b0b90b60,
- 0x1b,
- 0x5228fc9e2fabfe470c32f459f4dc17ef6a0a81026e57e4d61abc3bc268fc92b5,
- 0x697d4221cd7bc5943b482173de95d3114b9f54c5f37cc7f02c6910c6dd8bd107
-]]
-
-for datum in data:
- o1 = substitutes.ecrecover_substitute(*datum)
- o2 = s.profile(t.k0, c, 0, funid=0, abi=datum)
- #assert o1["gas"] == o2["gas"], (o1, o2, datum)
- assert o1["output"] == o2["output"], (o1, o2, datum)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/channel.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/channel.se
deleted file mode 100644
index 733f4a95b..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/channel.se
+++ /dev/null
@@ -1,45 +0,0 @@
-if msg.data[0] == 0:
- new_id = contract.storage[-1]
- # store [from, to, value, maxvalue, timeout] in contract storage
- contract.storage[new_id] = msg.sender
- contract.storage[new_id + 1] = msg.data[1]
- contract.storage[new_id + 2] = 0
- contract.storage[new_id + 3] = msg.value
- contract.storage[new_id + 4] = 2^254
- # increment next id
- contract.storage[-1] = new_id + 10
- # return id of this channel
- return(new_id)
-
-# Increase payment on channel: [1, id, value, v, r, s]
-elif msg.data[0] == 1:
- # Ecrecover native extension; will be a different address in testnet and live
- ecrecover = 0x46a8d0b21b1336d83b06829f568d7450df36883f
- # Message data parameters
- id = msg.data[1] % 2^160
- value = msg.data[2]
- # Determine sender from signature
- h = sha3([id, value], 2)
- sender = call(ecrecover, [h, msg.data[3], msg.data[4], msg.data[5]], 4)
- # Check sender matches and new value is greater than old
- if sender == contract.storage[id]:
- if value > contract.storage[id + 2] and value <= contract.storage[id + 3]:
- # Update channel, increasing value and setting timeout
- contract.storage[id + 2] = value
- contract.storage[id + 4] = block.number + 1000
-
-# Cash out channel: [2, id]
-elif msg.data[0] == 2:
- id = msg.data[1] % 2^160
- # Check if timeout has run out
-    if block.number >= contract.storage[id + 4]:
- # Send funds
- send(contract.storage[id + 1], contract.storage[id + 2])
- # Send refund
- send(contract.storage[id], contract.storage[id + 3] - contract.storage[id + 2])
- # Clear storage
- contract.storage[id] = 0
- contract.storage[id + 1] = 0
- contract.storage[id + 2] = 0
- contract.storage[id + 3] = 0
- contract.storage[id + 4] = 0
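
The contract above is the usual unidirectional payment channel: the payer funds it on-chain, signs (id, value) messages off-chain with strictly increasing value, any submitted signature bumps the recorded value and restarts a 1000-block timeout, and after the timeout the recipient gets value and the payer the remainder. A Python sketch of just the update and settlement rules (names are illustrative; signature recovery is elided):

    class Channel:
        def __init__(self, sender, recipient, deposit):
            self.sender, self.recipient = sender, recipient
            self.maxvalue, self.value = deposit, 0
            self.timeout = 2**254  # effectively "not closing yet"

        def update(self, signer, value, block_number):
            # Accept only payer-signed messages that raise the paid amount.
            if signer == self.sender and self.value < value <= self.maxvalue:
                self.value = value
                self.timeout = block_number + 1000

        def cash_out(self, block_number):
            if block_number >= self.timeout:
                return {self.recipient: self.value,
                        self.sender: self.maxvalue - self.value}
            return None

    ch = Channel("payer", "shop", deposit=100)
    ch.update("payer", 40, block_number=5)
    assert ch.cash_out(block_number=5) is None  # timeout not reached yet
    assert ch.cash_out(block_number=1005) == {"shop": 40, "payer": 60}
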
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/map.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/map.se
deleted file mode 100644
index 768dfb9fc..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/map.se
+++ /dev/null
@@ -1,19 +0,0 @@
-# An implementation of a contract for storing a key/value binding
-init:
- # Set owner
- contract.storage[0] = msg.sender
-code:
- # Check ownership
- if msg.sender == contract.storage[0]:
- # Get: returns (found, val)
- if msg.data[0] == 0:
- s = sha3(msg.data[1])
- return([contract.storage[s], contract.storage[s+1]], 2)
- # Set: sets map[k] = v
- elif msg.data[0] == 1:
- s = sha3(msg.data[1])
- contract.storage[s] = 1
- contract.storage[s + 1] = msg.data[2]
- # Suicide
- elif msg.data[2] == 1:
- suicide(0)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/multiforward.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/multiforward.se
deleted file mode 100644
index 577794d97..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/multiforward.se
+++ /dev/null
@@ -1,14 +0,0 @@
-init:
- contract.storage[0] = msg.sender
-code:
- if msg.sender != contract.storage[0]:
- stop
- i = 0
- while i < ~calldatasize():
- to = ~calldataload(i)
- value = ~calldataload(i+20) / 256^12
- datasize = ~calldataload(i+32) / 256^30
- data = alloc(datasize)
- ~calldatacopy(data, i+34, datasize)
- ~call(tx.gas - 25, to, value, data, datasize, 0, 0)
- i += 34 + datasize
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/shadowchain.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/shadowchain.se
deleted file mode 100644
index 1e466a355..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/shadowchain.se
+++ /dev/null
@@ -1,166 +0,0 @@
-# Exists in state:
-# (i) last committed block
-# (ii) chain of uncommitted blocks (linear only)
-# (iii) transactions, each tx with an associated block number
-#
-# Uncommitted block =
-# [ numtxs, numkvs, tx1 (N words), tx2 (N words) ..., [k1, v1], [k2, v2], [k3, v3] ... ]
-#
-# Block checking process
-#
-# Suppose last committed state is m
-# Last uncommitted state is n
-# Contested block is b
-#
-# 1. Temporarily apply all state transitions from
-# m to b
-# 2. Run code, get list of changes
-# 3. Check if the list of changes matches the deltas
-# * if yes, do nothing
-# * if no, set last uncommitted state to pre-b
-#
-# Storage variables:
-#
-# Last committed block: 0
-# Last uncommitted block: 1
-# Contract holding code: 2
-# Uncommitted map: 3
-# Transaction length (parameter): 4
-# Block b: 2^160 + b * 2^40:
-# + 1: submission blknum
-# + 2: submitter
-# + 3: data in uncommitted block format above
-# Last committed storage:
-# sha3(k): index k
-
-# Initialize: [0, c, txlength], set address of the code-holding contract and the transaction
-# length
-if not contract.storage[2]:
- contract.storage[2] = msg.data[1]
- contract.storage[4] = msg.data[2]
- stop
-
-# Sequentially commit all uncommitted blocks that are more than 1000 mainchain-blocks old
-last_committed_block = contract.storage[0]
-last_uncommitted_block = contract.storage[1]
-lcb_storage_index = 2^160 + last_committed_block * 2^40
-while contract.storage[lcb_storage_index + 1] < block.number - 1000 and last_committed_block < last_uncommitted_block:
- kvpairs = contract.storage[lcb_storage_index]
- i = 0
- while i < kvpairs:
- k = contract.storage[lcb_storage_index + 3 + i * 2]
- v = contract.storage[lcb_storage_index + 4 + i * 2]
- contract.storage[sha3(k)] = v
- i += 1
- last_committed_block += 1
- lcb_storage_index += 2^40
-contract.storage[0] = last_committed_block
-
-
-# Propose block: [ 0, block number, data in block format above ... ]
-if msg.data[0] == 0:
- blknumber = msg.data[1]
- # Block number must be correct
- if blknumber != contract.storage[1]:
- stop
- # Deposit requirement
- if msg.value < 10^19:
- stop
- # Store the proposal in storage as
- # [ 0, main-chain block number, sender, block data...]
- start_index = 2^160 + blknumber * 2^40
- numkvs = (msg.datasize - 2) / 2
- contract.storage[start_index + 1] = block.number
-    contract.storage[start_index + 2] = msg.sender
- i = 0
- while i < msg.datasize - 2:
- contract.storage[start_index + 3 + i] = msg.data[2 + i]
- i += 1
- contract.storage[1] = blknumber + 1
-
-# Challenge block: [ 1, b ]
-elif msg.data[0] == 1:
- blknumber = msg.data[1]
- txwidth = contract.storage[4]
- last_uncommitted_block = contract.storage[1]
- last_committed_block = contract.storage[0]
- # Cannot challenge nonexistent or committed blocks
-    if blknumber >= last_uncommitted_block or blknumber < last_committed_block:
- stop
- # Create a contract to serve as a map that maintains keys and values
- # temporarily
- tempstore = create('map.se')
- contract.storage[3] = tempstore
- # Unquestioningly apply the state transitions from the last committed block
- # up to b
- b = last_committed_block
- cur_storage_index = 2^160 + last_committed_block * 2^40
- while b < blknumber:
- numtxs = contract.storage[cur_storage_index + 3]
- numkvs = contract.storage[cur_storage_index + 4]
- kv0index = cur_storage_index + 5 + numtxs * txwidth
- i = 0
- while i < numkvs:
- k = contract.storage[kv0index + i * 2]
-            v = contract.storage[kv0index + i * 2 + 1]
- call(tempstore, [1, k, v], 3)
- i += 1
- b += 1
- cur_storage_index += 2^40
- # Run the actual code, and see what state transitions it outputs
- # The way that the code is expected to work is to:
- #
- # (1) take as input the list of transactions (the contract should
- # use msg.datasize to determine how many txs there are, and it should
- # be aware of the value of txwidth)
- # (2) call this contract with [2, k] to read current state data
- # (3) call this contract with [3, k, v] to write current state data
- # (4) return as output a list of all state transitions that it made
- # in the form [kvcount, k1, v1, k2, v2 ... ]
- #
- # The reason for separating (2) from (3) is that sometimes the state
- # transition may end up changing a given key many times, and we don't
- # need to inefficiently store that in storage
-    numtxs = contract.storage[cur_storage_index + 3]
-    numkvs = contract.storage[cur_storage_index + 4]
- # Populate input array
- inpwidth = numtxs * txwidth
- inp = array(inpwidth)
- i = 0
- while i < inpwidth:
- inp[i] = contract.storage[cur_storage_index + 5 + i]
- i += 1
- out = call(contract.storage[2], inp, inpwidth, numkvs * 2 + 1)
- # Check that the number of state transitions is the same
-    if out[0] != numkvs:
- send(msg.sender, 10^19)
- contract.storage[0] = last_committed_block
- stop
- kv0index = cur_storage_index + 5 + numtxs * txwidth
- i = 0
-    while i < numkvs:
- # Check that each individual state transition matches
- k = contract.storage[kv0index + i * 2 + 1]
- v = contract.storage[kv0index + i * 2 + 2]
- if k != out[i * 2 + 1] or v != out[i * 2 + 2]:
- send(msg.sender, 10^19)
- contract.storage[0] = last_committed_block
- stop
- i += 1
- # Suicide tempstore
- call(tempstore, 2)
-
-
-# Read data [2, k]
-elif msg.data[0] == 2:
- tempstore = contract.storage[3]
- o = call(tempstore, [0, msg.data[1]], 2, 2)
- if o[0]:
- return(o[1])
- else:
- return contract.storage[sha3(msg.data[1])]
-
-# Write data [3, k, v]
-elif msg.data[0] == 3:
- tempstore = contract.storage[3]
- call(tempstore, [1, msg.data[1], msg.data[2]], 3, 2)
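
The header comment above describes the challenge procedure in three steps; the following Python sketch (editor's addition, not from the deleted source) restates it with plain dictionaries. `apply_txs` stands in for the external code-holding contract and is purely illustrative.

    def verify_block(committed_state, prior_deltas, block_txs, claimed_deltas, apply_txs):
        state = dict(committed_state)
        for k, v in prior_deltas:                   # 1. temporarily apply deltas up to b
            state[k] = v
        produced = apply_txs(state, block_txs)      # 2. run the code, collect its changes
        return produced == claimed_deltas           # 3. valid iff the lists match

    # toy state-transition rule: each tx simply sets a key to a value
    toy = lambda state, txs: [(k, v) for k, v in txs]
    assert verify_block({}, [], [("a", 1)], [("a", 1)], toy)
    assert not verify_block({}, [], [("a", 1)], [("a", 2)], toy)       # challenger wins
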
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/fixedpoint.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/fixedpoint.se
deleted file mode 100644
index a8073c685..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/fixedpoint.se
+++ /dev/null
@@ -1,31 +0,0 @@
-type f: [a, b, c, d, e]
-
-macro f($a) + f($b):
- f(add($a, $b))
-
-macro f($a) - f($b):
- f(sub($a, $b))
-
-macro f($a) * f($b):
- f(mul($a, $b) / 10000)
-
-macro f($a) / f($b):
- f(sdiv($a * 10000, $b))
-
-macro f($a) % f($b):
- f(smod($a, $b))
-
-macro f($v) = f($w):
- $v = $w
-
-macro unfify(f($a)):
- $a / 10000
-
-macro fify($a):
- f($a * 10000)
-
-a = fify(5)
-b = fify(2)
-c = a / b
-e = c + (a / b)
-return(unfify(e))
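
The macros above implement fixed-point numbers with an implicit scale of 10000: multiplication divides the raw product by the scale, division pre-scales the numerator. A small Python sketch (editor's addition, not part of the deleted source) of the same arithmetic:

    SCALE = 10000

    fify   = lambda x: x * SCALE            # int   -> fixed
    unfify = lambda f: f // SCALE           # fixed -> int (truncating)
    fmul   = lambda x, y: x * y // SCALE
    fdiv   = lambda x, y: x * SCALE // y

    a, b = fify(5), fify(2)
    c = fdiv(a, b)                          # 2.5 in fixed point (25000)
    e = c + fdiv(a, b)                      # 5.0 in fixed point (50000)
    assert unfify(e) == 5                   # matches the value the example returns
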
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/long_integer_macros.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/long_integer_macros.se
deleted file mode 100644
index 58cdce6ab..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/long_integer_macros.se
+++ /dev/null
@@ -1,116 +0,0 @@
-macro smin($a, $b):
- with $1 = $a:
- with $2 = $b:
- if(slt($1, $2), $1, $2)
-
-macro smax($a, $b):
- with $1 = $a:
- with $2 = $b:
- if(slt($1, $2), $2, $1)
-
-def omul(x, y):
- o = expose(mklong(x) * mklong(y))
- return(slice(o, 1), o[0]+1)
-
-def oadd(x, y):
- o = expose(mklong(x) + mklong(y))
- return(slice(o, 1), o[0]+1)
-
-def osub(x, y):
- o = expose(mklong(x) - mklong(y))
- return(slice(o, 1), o[0]+1)
-
-def odiv(x, y):
- o = expose(mklong(x) / mklong(y))
- return(slice(o, 1), o[0]+1)
-
-def comb(a:a, b:a, sign):
- sz = smax(a[0], b[0])
- msz = smin(a[0], b[0])
- c = array(sz + 2)
- c[0] = sz
- i = 0
- carry = 0
- while i < msz:
- m = a[i + 1] + sign * b[i + 1] + carry
- c[i + 1] = mod(m + 2^127, 2^128) - 2^127
- carry = (div(m + 2^127, 2^128) + 2^127) % 2^128 - 2^127
- i += 1
- u = if(a[0] > msz, a, b)
- s = if(a[0] > msz, 1, sign)
- while i < sz:
- m = s * u[i + 1] + carry
- c[i + 1] = mod(m + 2^127, 2^128) - 2^127
- carry = (div(m + 2^127, 2^128) + 2^127) % 2^128 - 2^127
- i += 1
- if carry:
- c[0] += 1
- c[sz + 1] = carry
- return(c, c[0]+1)
-
-def mul(a:a, b:a):
- c = array(a[0] + b[0] + 2)
- c[0] = a[0] + b[0]
- i = 0
- while i < a[0]:
- j = 0
- carry = 0
- while j < b[0]:
- m = c[i + j + 1] + a[i + 1] * b[j + 1] + carry
- c[i + j + 1] = mod(m + 2^127, 2^128) - 2^127
- carry = (div(m + 2^127, 2^128) + 2^127) % 2^128 - 2^127
- j += 1
- if carry:
- c[0] = a[0] + b[0] + 1
- c[i + j + 1] += carry
- i += 1
- return(c, c[0]+1)
-
-macro long($a) + long($b):
- long(self.comb($a:$a[0]+1, $b:$b[0]+1, 1, outsz=$a[0]+$b[0]+2))
-
-macro long($a) - long($b):
- long(self.comb($a:$a[0]+1, $b:$b[0]+1, -1, outsz=$a[0]+$b[0]+2))
-
-macro long($a) * long($b):
- long(self.mul($a:$a[0]+1, $b:$b[0]+1, outsz=$a[0]+$b[0]+2))
-
-macro long($a) / long($b):
- long(self.div($a:$a[0]+1, $b:$b[0]+1, outsz=$a[0]+$b[0]+2))
-
-macro mulexpand(long($a), $k, $m):
- long:
- with $c = array($a[0]+k+2):
- $c[0] = $a[0]+$k
- with i = 0:
- while i < $a[0]:
- v = $a[i+1] * $m + $c[i+$k+1]
- $c[i+$k+1] = mod(v + 2^127, 2^128) - 2^127
- $c[i+$k+2] = div(v + 2^127, 2^128)
- i += 1
- $c
-
-def div(a:a, b:a):
- asz = a[0]
- bsz = b[0]
- while b[bsz] == 0 and bsz > 0:
- bsz -= 1
- c = array(asz+2)
- c[0] = asz+1
- while 1:
- while a[asz] == 0 and asz > 0:
- asz -= 1
- if asz < bsz:
- return(c, c[0]+1)
- sub = expose(mulexpand(long(b), asz - bsz, a[asz] / b[bsz]))
- c[asz - bsz+1] = a[asz] / b[bsz]
- a = expose(long(a) - long(sub))
- a[asz-1] += 2^128 * a[asz]
- a[asz] = 0
-
-macro mklong($i):
- long([2, mod($i + 2^127, 2^128) - 2^127, div($i + 2^127, 2^128)])
-
-macro expose(long($i)):
- $i
-
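
The macros and functions above represent big integers as arrays of 128-bit limbs prefixed by their length, with comb() propagating a carry limb by limb. The Python sketch below (editor's addition, not from the deleted source) shows the same carry propagation; for simplicity it uses unsigned limbs, whereas the Serpent code keeps each limb in the signed range around zero.

    BASE = 2**128

    def to_limbs(n):
        limbs = []
        while True:
            limbs.append(n % BASE)
            n //= BASE
            if n == 0:
                return limbs

    def add_limbs(a, b):
        out, carry = [], 0
        for i in range(max(len(a), len(b))):
            s = (a[i] if i < len(a) else 0) + (b[i] if i < len(b) else 0) + carry
            out.append(s % BASE)
            carry = s // BASE
        if carry:
            out.append(carry)
        return out

    x, y = 2**200 + 7, 2**130 + 5
    assert add_limbs(to_limbs(x), to_limbs(y)) == to_limbs(x + y)
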
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mul2.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mul2.se
deleted file mode 100644
index 65adff1e6..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mul2.se
+++ /dev/null
@@ -1,2 +0,0 @@
-def double(v):
- return(v*2)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mutuala.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mutuala.se
deleted file mode 100644
index 3efb0edeb..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mutuala.se
+++ /dev/null
@@ -1,187 +0,0 @@
-# mutuala - subcurrency
-
-# We want to issue a currency that reduces in value as you store it through negative interest.
-# That negative interest would be stored in a commons account. It's like the p2p version of a
-# capital tax
-
-# the same thing goes for transactions - you pay as you use the currency. However, the more
-# you pay, the more you get to say about what the tax is used for
-
-# each participant can propose a recipient for a payout to be made out of the commons account,
-# others can vote on it by awarding it tax_credits.
-
-# TODO should proposals have an expiration timestamp, after which the tax_credits are refunded?
-# TODO multiple proposals can claim more credits than are available in the Commons; how to handle this?
-# TODO how to handle lost accounts, after which it is no longer possible to reach a 2/3 majority
-
-shared:
- COMMONS = 42
- ADMIN = 666
- CAPITAL_TAX_PER_DAY = 7305 # 5% per year
- PAYMENT_TAX = 20 # 5%
-
- ACCOUNT_LIST_OFFSET = 2^160
- ACCOUNT_MAP_OFFSET = 2^161
- PROPOSAL_LIST_OFFSET = 2^162
- PROPOSAL_MAP_OFFSET = 2^163
-
-init:
- contract.storage[ADMIN] = msg.sender
- contract.storage[ACCOUNT_LIST_OFFSET - 1] = 1
- contract.storage[ACCOUNT_LIST_OFFSET] = msg.sender
- contract.storage[ACCOUNT_MAP_OFFSET + msg.sender] = 10^12
- contract.storage[ACCOUNT_MAP_OFFSET + msg.sender + 1] = block.timestamp
-
-# contract.storage[COMMONS] = balance commons
-
-# contract.storage[ACCOUNT_LIST_OFFSET - 1] = number of accounts
-# contract.storage[ACCOUNT_LIST_OFFSET + n] = account n
-
-# contract.storage[PROPOSAL_LIST_OFFSET - 1] contains the number of proposals
-# contract.storage[PROPOSAL_LIST_OFFSET + n] = proposal n
-
-# per account:
-# contract.storage[ACCOUNT_MAP_OFFSET + account] = balance
-# contract.storage[ACCOUNT_MAP_OFFSET + account+1] = timestamp_last_transaction
-# contract.storage[ACCOUNT_MAP_OFFSET + account+2] = tax_credits
-
-# per proposal:
-# contract.storage[PROPOSAL_MAP_OFFSET + proposal_id] = recipient
-# contract.storage[PROPOSAL_MAP_OFFSET + proposal_id+1] = amount
-# contract.storage[PROPOSAL_MAP_OFFSET + proposal_id+2] = total vote credits
-
-code:
- if msg.data[0] == "suicide" and msg.sender == contract.storage[ADMIN]:
- suicide(msg.sender)
-
- elif msg.data[0] == "balance":
- addr = msg.data[1]
- return(contract.storage[ACCOUNT_MAP_OFFSET + addr])
-
- elif msg.data[0] == "pay":
- from = msg.sender
- fromvalue = contract.storage[ACCOUNT_MAP_OFFSET + from]
- to = msg.data[1]
- if to == 0 or to >= 2^160:
- return([0, "invalid address"], 2)
- value = msg.data[2]
- tax = value / PAYMENT_TAX
-
- if fromvalue >= value + tax:
- contract.storage[ACCOUNT_MAP_OFFSET + from] = fromvalue - (value + tax)
- contract.storage[ACCOUNT_MAP_OFFSET + to] += value
- # tax
- contract.storage[COMMONS] += tax
- contract.storage[ACCOUNT_MAP_OFFSET + from + 2] += tax
-
- # check timestamp field to see if target account exists
- if contract.storage[ACCOUNT_MAP_OFFSET + to + 1] == 0:
- # register new account
- nr_accounts = contract.storage[ACCOUNT_LIST_OFFSET - 1]
- contract.storage[ACCOUNT_LIST_OFFSET + nr_accounts] = to
- contract.storage[ACCOUNT_LIST_OFFSET - 1] += 1
- contract.storage[ACCOUNT_MAP_OFFSET + to + 1] = block.timestamp
-
- return(1)
- else:
- return([0, "insufficient balance"], 2)
-
- elif msg.data[0] == "hash":
- proposal_id = sha3(msg.data[1])
- return(proposal_id)
-
- elif msg.data[0] == "propose":
- from = msg.sender
- # check if sender has an account and has tax credits
- if contract.storage[ACCOUNT_MAP_OFFSET + from + 2] == 0:
- return([0, "sender has no tax credits"], 2)
-
- proposal_id = sha3(msg.data[1])
- # check if proposal doesn't already exist
- if contract.storage[PROPOSAL_MAP_OFFSET + proposal_id]:
-            return([0, "proposal already exists"], 2)
-
- to = msg.data[2]
- # check if recipient is a valid address and has an account (with timestamp)
- if to == 0 or to >= 2^160:
- return([0, "invalid address"], 2)
- if contract.storage[ACCOUNT_MAP_OFFSET + to + 1] == 0:
- return([0, "invalid to account"], 2)
-
- value = msg.data[3]
- # check if there is enough money in the commons account
- if value > contract.storage[COMMONS]:
- return([0, "not enough credits in commons"], 2)
-
- # record proposal in list
- nr_proposals = contract.storage[PROPOSAL_LIST_OFFSET - 1]
- contract.storage[PROPOSAL_LIST_OFFSET + nr_proposals] = proposal_id
- contract.storage[PROPOSAL_LIST_OFFSET - 1] += 1
-
- # record proposal in map
- contract.storage[PROPOSAL_MAP_OFFSET + proposal_id] = to
- contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 1] = value
-
- return(proposal_id)
-
- elif msg.data[0] == "vote":
- from = msg.sender
- proposal_id = sha3(msg.data[1])
- value = msg.data[2]
- # check if sender has an account and has tax credits
-        if value > contract.storage[ACCOUNT_MAP_OFFSET + from + 2]:
- return([0, "sender doesn't have enough tax credits"], 2)
-
- # check if proposal exist
- if contract.storage[PROPOSAL_MAP_OFFSET + proposal_id] == 0:
- return([0, "proposal doesn't exist"], 2)
-
- # increase votes
- contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 2] += value
- # withdraw tax credits
- contract.storage[ACCOUNT_MAP_OFFSET + from + 2] -= value
-
- # did we reach 2/3 threshold?
- if contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 2] >= contract.storage[COMMONS] * 2 / 3:
- # got majority
- to = contract.storage[PROPOSAL_MAP_OFFSET + proposal_id]
- amount = contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 1]
-
- # adjust balances
- contract.storage[ACCOUNT_MAP_OFFSET + to] += amount
- contract.storage[COMMONS] -= amount
-
- # reset proposal
- contract.storage[PROPOSAL_MAP_OFFSET + proposal_id] = 0
- contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 1] = 0
- contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 2] = 0
- return(1)
-
- return(proposal_id)
-
- elif msg.data[0] == "tick":
- nr_accounts = contract.storage[ACCOUNT_LIST_OFFSET - 1]
- account_idx = 0
- tax_paid = 0
- # process all accounts and see if they have to pay their daily capital tax
- while account_idx < nr_accounts:
- cur_account = contract.storage[ACCOUNT_LIST_OFFSET + account_idx]
- last_timestamp = contract.storage[ACCOUNT_MAP_OFFSET + cur_account + 1]
- time_diff = block.timestamp - last_timestamp
- if time_diff >= 86400:
- tax_days = time_diff / 86400
- balance = contract.storage[ACCOUNT_MAP_OFFSET + cur_account]
- tax = tax_days * (balance / CAPITAL_TAX_PER_DAY)
- if tax > 0:
- # charge capital tax, but give tax credits in return
- contract.storage[ACCOUNT_MAP_OFFSET + cur_account] -= tax
- contract.storage[ACCOUNT_MAP_OFFSET + cur_account + 1] += tax_days * 86400
- contract.storage[ACCOUNT_MAP_OFFSET + cur_account + 2] += tax
-
- contract.storage[COMMONS] += tax
- tax_paid += 1
- account_idx += 1
- return(tax_paid) # how many accounts did we charge tax on
-
- else:
- return([0, "unknown command"], 2)
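
The "tick" branch above charges demurrage: roughly 5% per year, taken as 1/7305 of the balance for each whole elapsed day, moved into the commons pool and mirrored as tax credits for the payer. A Python sketch (editor's addition, not from the deleted source) of that rule:

    CAPITAL_TAX_PER_DAY = 7305   # ~5% per year, as in the contract constants
    DAY = 86400

    def tick(account, commons, now):
        days = (now - account["last_ts"]) // DAY
        tax = days * (account["balance"] // CAPITAL_TAX_PER_DAY)
        if days > 0 and tax > 0:
            account["balance"] -= tax
            account["tax_credits"] += tax
            account["last_ts"] += days * DAY      # advance only by whole taxed days
            commons["balance"] += tax
        return tax

    acct = {"balance": 10**12, "tax_credits": 0, "last_ts": 0}
    commons = {"balance": 0}
    tick(acct, commons, 3 * DAY)
    assert commons["balance"] == 3 * (10**12 // CAPITAL_TAX_PER_DAY)
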
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/namecoin.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/namecoin.se
deleted file mode 100644
index 11d6274ae..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/namecoin.se
+++ /dev/null
@@ -1,7 +0,0 @@
-def register(k, v):
- if !self.storage[k]: # Is the key not yet taken?
- # Then take it!
- self.storage[k] = v
- return(1)
- else:
-        return(0) # Otherwise do nothing
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/peano.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/peano.se
deleted file mode 100644
index 979854444..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/peano.se
+++ /dev/null
@@ -1,43 +0,0 @@
-macro padd($x, psuc($y)):
- psuc(padd($x, $y))
-
-macro padd($x, z()):
- $x
-
-macro dec(psuc($x)):
- dec($x) + 1
-
-macro dec(z()):
- 0
-
-macro pmul($x, z()):
- z()
-
-macro pmul($x, psuc($y)):
- padd(pmul($x, $y), $x)
-
-macro pexp($x, z()):
- one()
-
-macro pexp($x, psuc($y)):
- pmul($x, pexp($x, $y))
-
-macro fac(z()):
- one()
-
-macro fac(psuc($x)):
- pmul(psuc($x), fac($x))
-
-macro one():
- psuc(z())
-
-macro two():
- psuc(psuc(z()))
-
-macro three():
- psuc(psuc(psuc(z())))
-
-macro five():
- padd(three(), two())
-
-return([dec(pmul(three(), pmul(three(), three()))), dec(fac(five()))], 2)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/returnten.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/returnten.se
deleted file mode 100644
index 7969c9eb8..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/returnten.se
+++ /dev/null
@@ -1,4 +0,0 @@
-extern mul2: [double]
-
-x = create("mul2.se")
-return(x.double(5))
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort.se
deleted file mode 100644
index be5d97fc7..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort.se
+++ /dev/null
@@ -1,33 +0,0 @@
-def kall():
- argcount = ~calldatasize() / 32
- if argcount == 1:
- return(~calldataload(1))
-
- args = array(argcount)
- ~calldatacopy(args, 1, argcount * 32)
- low = array(argcount)
- lsz = 0
- high = array(argcount)
- hsz = 0
- i = 1
- while i < argcount:
- if args[i] < args[0]:
- low[lsz] = args[i]
- lsz += 1
- else:
- high[hsz] = args[i]
- hsz += 1
- i += 1
- low = self.kall(data=low, datasz=lsz, outsz=lsz)
- high = self.kall(data=high, datasz=hsz, outsz=hsz)
- o = array(argcount)
- i = 0
- while i < lsz:
- o[i] = low[i]
- i += 1
- o[lsz] = args[0]
- j = 0
- while j < hsz:
- o[lsz + 1 + j] = high[j]
- j += 1
- return(o, argcount)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort_pairs.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort_pairs.se
deleted file mode 100644
index 0e603a238..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort_pairs.se
+++ /dev/null
@@ -1,46 +0,0 @@
-# Quicksort pairs
-# eg. input of the form [ 30, 1, 90, 2, 70, 3, 50, 4]
-# outputs [ 30, 1, 50, 4, 70, 3, 90, 2 ]
-#
-# Note: this can be used as a generalized sorting algorithm:
-# map every object to [ key, ref ] where `ref` is the index
-# in memory to all of the properties and `key` is the key to
-# sort by
-
-
-def kall():
- argcount = ~calldatasize() / 64
- if argcount == 1:
- return([~calldataload(1), ~calldataload(33)], 2)
-
- args = array(argcount * 2)
- ~calldatacopy(args, 1, argcount * 64)
- low = array(argcount * 2)
- lsz = 0
- high = array(argcount * 2)
- hsz = 0
- i = 2
- while i < argcount * 2:
- if args[i] < args[0]:
- low[lsz] = args[i]
- low[lsz + 1] = args[i + 1]
- lsz += 2
- else:
- high[hsz] = args[i]
- high[hsz + 1] = args[i + 1]
- hsz += 2
- i = i + 2
- low = self.kall(data=low, datasz=lsz, outsz=lsz)
- high = self.kall(data=high, datasz=hsz, outsz=hsz)
- o = array(argcount * 2)
- i = 0
- while i < lsz:
- o[i] = low[i]
- i += 1
- o[lsz] = args[0]
- o[lsz + 1] = args[1]
- j = 0
- while j < hsz:
- o[lsz + 2 + j] = high[j]
- j += 1
- return(o, argcount * 2)
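
As the header comment says, the contract sorts (key, ref) pairs by key, partitioning on the first pair. The Python sketch below (editor's addition, not from the deleted source) computes the same result without the recursive self-calls:

    def quicksort_pairs(pairs):
        if len(pairs) <= 1:
            return pairs
        pivot = pairs[0]
        low  = [p for p in pairs[1:] if p[0] <  pivot[0]]
        high = [p for p in pairs[1:] if p[0] >= pivot[0]]
        return quicksort_pairs(low) + [pivot] + quicksort_pairs(high)

    assert quicksort_pairs([(30, 1), (90, 2), (70, 3), (50, 4)]) == \
           [(30, 1), (50, 4), (70, 3), (90, 2)]        # the example from the comment
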
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingcoin.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingcoin.se
deleted file mode 100644
index a7d7da9c5..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingcoin.se
+++ /dev/null
@@ -1,94 +0,0 @@
-# SchellingCoin implementation
-#
-# Epoch length: 100 blocks
-# Target savings depletion rate: 0.1% per epoch
-
-data epoch
-data hashes_submitted
-data output
-data quicksort_pairs
-data accounts[2^160]
-data submissions[2^80](hash, deposit, address, value)
-extern any: [call]
-
-
-def init():
- self.epoch = block.number / 100
- self.quicksort_pairs = create('quicksort_pairs.se')
-
-def any():
-    if block.number / 100 > self.epoch:
- # Sort all values submitted
- N = self.hashes_submitted
- o = array(N * 2)
- i = 0
- j = 0
- while i < N:
- v = self.submissions[i].value
- if v:
- o[j] = v
- o[j + 1] = i
- j += 2
- i += 1
- values = self.quicksort_pairs.call(data=o, datasz=j, outsz=j)
-
- # Calculate total deposit, refund non-submitters and
- # cleanup
-
- deposits = array(j / 2)
- addresses = array(j / 2)
-
- i = 0
- total_deposit = 0
- while i < j / 2:
- base_index = HASHES + values[i * 2 + 1] * 3
- deposits[i] = self.submissions[i].deposit
- addresses[i] = self.submissions[i].address
- if self.submissions[values[i * 2 + 1]].value:
- total_deposit += deposits[i]
- else:
- send(addresses[i], deposits[i] * 999 / 1000)
- i += 1
-
- inverse_profit_ratio = total_deposit / (contract.balance / 1000) + 1
-
- # Reward everyone
- i = 0
- running_deposit_sum = 0
- halfway_passed = 0
- while i < j / 2:
- new_deposit_sum = running_deposit_sum + deposits[i]
- if new_deposit_sum > total_deposit / 4 and running_deposit_sum < total_deposit * 3 / 4:
- send(addresses[i], deposits[i] + deposits[i] / inverse_profit_ratio * 2)
- else:
- send(addresses[i], deposits[i] - deposits[i] / inverse_profit_ratio)
-
- if not halfway_passed and new_deposit_sum > total_deposit / 2:
- self.output = self.submissions[i].value
- halfway_passed = 1
- self.submissions[i].value = 0
- running_deposit_sum = new_deposit_sum
- i += 1
- self.epoch = block.number / 100
- self.hashes_submitted = 0
-
-def submit_hash(h):
- if block.number % 100 < 50:
- cur = self.hashes_submitted
- pos = HASHES + cur * 3
- self.submissions[cur].hash = h
- self.submissions[cur].deposit = msg.value
- self.submissions[cur].address = msg.sender
- self.hashes_submitted = cur + 1
- return(cur)
-
-def submit_value(index, v):
- if sha3([msg.sender, v], 2) == self.submissions[index].hash:
- self.submissions[index].value = v
- return(1)
-
-def request_balance():
- return(contract.balance)
-
-def request_output():
- return(self.output)
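
The epoch-rollover logic above sorts the revealed values, takes the deposit-weighted median as the oracle output, and treats submitters in the middle half of the deposit distribution as the ones rewarded. The Python sketch below (editor's addition, not from the deleted source) shows only that selection; deposit refunds and the bonus arithmetic are omitted.

    def resolve(submissions):
        """submissions: list of (value, deposit). Returns (median_value, rewarded_indices)."""
        order = sorted(range(len(submissions)), key=lambda i: submissions[i][0])
        total = sum(d for _, d in submissions)
        median, rewarded, running = None, [], 0
        for i in order:
            value, deposit = submissions[i]
            new_running = running + deposit
            if new_running > total // 4 and running < total * 3 // 4:
                rewarded.append(i)                    # middle band earns the bonus
            if median is None and new_running > total // 2:
                median = value                        # deposit-weighted median
            running = new_running
        return median, rewarded

    value, rewarded = resolve([(100, 10), (105, 10), (300, 1)])
    assert value == 105 and rewarded == [0, 1]
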
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingdollar.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingdollar.se
deleted file mode 100644
index a34f42ce2..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingdollar.se
+++ /dev/null
@@ -1,171 +0,0 @@
-# Hedged zero-supply dollar implementation
-# Uses SchellingCoin as price-determining backend
-#
-# Stored variables:
-#
-# 0: Schelling coin contract
-# 1: Last epoch
-# 2: Genesis block of contract
-# 3: USD exposure
-# 4: ETH exposure
-# 5: Cached price
-# 6: Last interest rate
-# 2^160 + k: interest rate accumulator at k epochs
-# 2^161 + ADDR * 3: eth-balance of a particular address
-# 2^161 + ADDR * 3 + 1: usd-balance of a particular address
-# 2^161 + ADDR * 3 + 2: last accessed epoch of a particular address
-#
-# Transaction types:
-#
-# [1, to, val]: send ETH
-# [2, to, val]: send USD
-# [3, wei_amount]: convert ETH to USD
-# [4, usd_amount]: converts USD to ETH
-# [5]: deposit
-# [6, amount]: withdraw
-# [7]: my balance query
-# [7, acct]: balance query for any acct
-# [8]: global state query
-# [9]: liquidation test any account
-#
-# The purpose of the contract is to serve as a sort of cryptographic
-# bank account where users can store both ETH and USD. ETH must be
-# stored in zero or positive quantities, but USD balances can be
-# positive or negative. If the USD balance is negative, the invariant
-# usdbal * 10 >= ethbal * 9 must be satisfied; if any account falls
-# below this value, then that account's balances are zeroed. Note
-# that there is a 2% bounty to ping the app if an account does go
-# below zero; one weakness is that if no one does ping then it is
-# quite possible for accounts to go negative-net-worth, then zero
-# themselves out, draining the reserves of the "bank" and potentially
-# bankrupting it. A 0.1% fee on ETH <-> USD trade is charged to
-# minimize this risk. Additionally, the bank itself will inevitably
-# end up with positive or negative USD exposure; to mitigate this,
-# it automatically updates interest rates on USD to keep exposure
-# near zero.
-
-data schelling_coin
-data last_epoch
-data starting_block
-data usd_exposure
-data eth_exposure
-data price
-data last_interest_rate
-data interest_rate_accum[2^50]
-data accounts[2^160](eth, usd, last_epoch)
-
-extern sc: [submit_hash, submit_value, request_balance, request_output]
-
-def init():
- self.schelling_coin = create('schellingcoin.se')
- self.price = self.schelling_coin.request_output()
- self.interest_rate_accum[0] = 10^18
- self.starting_block = block.number
-
-def any():
- sender = msg.sender
- epoch = (block.number - self.starting_block) / 100
- last_epoch = self.last_epoch
- usdprice = self.price
-
- # Update contract epochs
- if epoch > last_epoch:
- delta = epoch - last_epoch
- last_interest_rate = self.last_interest_rate
-        usd_exposure = self.usd_exposure
- last_accum = self.interest_rate_accum[last_epoch]
-
- if usd_exposure < 0:
- self.last_interest_rate = last_interest_rate - 10000 * delta
- elif usd_exposure > 0:
- self.last_interest_rate = last_interest_rate + 10000 * delta
-
- self.interest_rate_accum[epoch] = last_accum + last_accum * last_interest_rate * delta / 10^9
-
-        # Proceeds go to support the SchellingCoin that feeds it price data, ultimately providing
-        # the depositors of the SchellingCoin with an interest rate
- bal = max(self.balance - self.eth_exposure, 0) / 10000
- usdprice = self.schelling_coin.request_output()
- self.price = usdprice
- self.last_epoch = epoch
-
- ethbal = self.accounts[msg.sender].eth
- usdbal = self.accounts[msg.sender].usd
-
- # Apply interest rates to sender and liquidation-test self
- if msg.sender != self:
- self.ping(self)
-
-def send_eth(to, value):
- if value > 0 and value <= ethbal and usdbal * usdprice * 2 + (ethbal - value) >= 0:
- self.accounts[msg.sender].eth = ethbal - value
- self.ping(to)
- self.accounts[to].eth += value
- return(1)
-
-def send_usd(to, value):
- if value > 0 and value <= usdbal and (usdbal - value) * usdprice * 2 + ethbal >= 0:
- self.accounts[msg.sender].usd = usdbal - value
- self.ping(to)
- self.accounts[to].usd += value
- return(1)
-
-def convert_to_eth(usdvalue):
- ethplus = usdvalue * usdprice * 999 / 1000
- if usdvalue > 0 and (usdbal - usdvalue) * usdprice * 2 + (ethbal + ethplus) >= 0:
- self.accounts[msg.sender].eth = ethbal + ethplus
- self.accounts[msg.sender].usd = usdbal - usdvalue
- self.eth_exposure += ethplus
- self.usd_exposure -= usdvalue
- return([ethbal + ethplus, usdbal - usdvalue], 2)
-
-def convert_to_usd(ethvalue):
- usdplus = ethvalue / usdprice * 999 / 1000
- if ethvalue > 0 and (usdbal + usdplus) * usdprice * 2 + (ethbal - ethvalue) >= 0:
- self.accounts[msg.sender].eth = ethbal - ethvalue
- self.accounts[msg.sender].usd = usdbal + usdplus
- self.eth_exposure -= ethvalue
- self.usd_exposure += usdplus
- return([ethbal - ethvalue, usdbal + usdplus], 2)
-
-def deposit():
- self.accounts[msg.sender].eth = ethbal + msg.value
- self.eth_exposure += msg.value
- return(ethbal + msg.value)
-
-def withdraw(value):
- if value > 0 and value <= ethbal and usdbal * usdprice * 2 + (ethbal - value) >= 0:
- self.accounts[msg.sender].eth -= value
- self.eth_exposure -= value
- return(ethbal - value)
-
-def balance(acct):
- self.ping(acct)
- return([self.accounts[acct].eth, self.accounts[acct].usd], 2)
-
-def global_state_query(acct):
- interest = self.last_interest_rate
- usd_exposure = self.usd_exposure
- eth_exposure = self.eth_exposure
- eth_balance = self.balance
- return([epoch, usdprice, interest, usd_exposure, eth_exposure, eth_balance], 6)
-
-def ping(acct):
- account_last_epoch = self.accounts[acct].last_epoch
- if account_last_epoch != epoch:
- cur_usd_balance = self.accounts[acct].usd
- new_usd_balance = cur_usd_balance * self.interest_rate_accum[epoch] / self.interest_rate_accum[account_last_epoch]
- self.accounts[acct].usd = new_usd_balance
- self.accounts[acct].last_epoch = epoch
- self.usd_exposure += new_usd_balance - cur_usd_balance
-
- ethbal = self.accounts[acct].eth
-
- if new_usd_balance * usdval * 10 + ethbal * 9 < 0:
- self.accounts[acct].eth = 0
- self.accounts[acct].usd = 0
- self.accounts[msg.sender].eth += ethbal / 50
- self.eth_exposure += -ethbal + ethbal / 50
- self.usd_exposure += new_usd_balance
- return(1)
- return(0)
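
Interest on USD balances is applied lazily through the accumulator: each epoch the accumulator grows by the current rate, and ping() rescales a balance by the ratio of accumulators between the last-touched epoch and now. A Python sketch (editor's addition, not from the deleted source; the per-epoch rate is expressed in parts per 10^9, as in the contract):

    ONE = 10**18                      # accumulator unit, as in interest_rate_accum[0]

    def advance(accums, rate_ppb, epochs):
        for _ in range(epochs):
            accums.append(accums[-1] + accums[-1] * rate_ppb // 10**9)
        return accums

    def ping(balance, last_epoch, epoch, accums):
        return balance * accums[epoch] // accums[last_epoch]

    accums = advance([ONE], 10000, 3)               # +0.001% per epoch, three epochs
    assert ping(1_000_000, 0, 3, accums) == 1_000_030
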
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellinghelper.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellinghelper.se
deleted file mode 100644
index 0e522d6e8..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellinghelper.se
+++ /dev/null
@@ -1 +0,0 @@
-return(sha3([msg.sender, msg.data[0]], 2))
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/short_namecoin.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/short_namecoin.se
deleted file mode 100644
index db327a77d..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/short_namecoin.se
+++ /dev/null
@@ -1,3 +0,0 @@
-def register(k, v):
- if !self.storage[k]:
- self.storage[k] = v
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/subcurrency.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/subcurrency.se
deleted file mode 100644
index fbda822b6..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/subcurrency.se
+++ /dev/null
@@ -1,11 +0,0 @@
-def init():
- self.storage[msg.sender] = 1000000
-
-def balance_query(k):
-    return(self.storage[k])
-
-def send(to, value):
- fromvalue = self.storage[msg.sender]
- if fromvalue >= value:
-        self.storage[msg.sender] = fromvalue - value
- self.storage[to] += value
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.cpp
deleted file mode 100644
index ea9be14a6..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include "funcs.h"
-#include "bignum.h"
-#include "util.h"
-#include "parser.h"
-#include "lllparser.h"
-#include "compiler.h"
-#include "rewriter.h"
-#include "tokenize.h"
-
-Node compileToLLL(std::string input) {
- return rewrite(parseSerpent(input));
-}
-
-Node compileChunkToLLL(std::string input) {
- return rewriteChunk(parseSerpent(input));
-}
-
-std::string compile(std::string input) {
- return compileLLL(compileToLLL(input));
-}
-
-std::vector<Node> prettyCompile(std::string input) {
- return prettyCompileLLL(compileToLLL(input));
-}
-
-std::string compileChunk(std::string input) {
- return compileLLL(compileChunkToLLL(input));
-}
-
-std::vector<Node> prettyCompileChunk(std::string input) {
- return prettyCompileLLL(compileChunkToLLL(input));
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.h
deleted file mode 100644
index d9bf44549..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include "bignum.h"
-#include "util.h"
-#include "parser.h"
-#include "lllparser.h"
-#include "compiler.h"
-#include "rewriter.h"
-#include "tokenize.h"
-
-// Function listing:
-//
-// parseSerpent (serpent -> AST) std::string -> Node
-// parseLLL (LLL -> AST) std::string -> Node
-// rewrite (apply rewrite rules) Node -> Node
-// compileToLLL (serpent -> LLL) std::string -> Node
-// compileLLL (LLL -> EVMhex) Node -> std::string
-// prettyCompileLLL (LLL -> EVMasm) Node -> std::vector<Node>
-// prettyCompile (serpent -> EVMasm) std::string -> std::vector<Node>
-// compile (serpent -> EVMhex) std::string -> std::string
-// get_file_contents (filename -> file) std::string -> std::string
-// exists (does file exist?) std::string -> bool
-
-Node compileToLLL(std::string input);
-
-Node compileChunkToLLL(std::string input);
-
-std::string compile(std::string input);
-
-std::vector<Node> prettyCompile(std::string input);
-
-std::string compileChunk(std::string input);
-
-std::vector<Node> prettyCompileChunk(std::string input);
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.cpp
deleted file mode 100644
index 78e12e84a..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-#include "optimize.h"
-#include "rewriteutils.h"
-#include "preprocess.h"
-#include "functions.h"
-
-std::string getSignature(std::vector<Node> args) {
- std::string o;
- for (unsigned i = 0; i < args.size(); i++) {
- if (args[i].val == ":" && args[i].args[1].val == "s")
- o += "s";
- else if (args[i].val == ":" && args[i].args[1].val == "a")
- o += "a";
- else
- o += "i";
- }
- return o;
-}
-
-// Convert a list of arguments into a node containing a
-// < datastart, datasz > pair
-
-Node packArguments(std::vector<Node> args, std::string sig,
- int funId, Metadata m) {
- // Plain old 32 byte arguments
- std::vector<Node> nargs;
- // Variable-sized arguments
- std::vector<Node> vargs;
- // Variable sizes
- std::vector<Node> sizes;
- // Is a variable an array?
- std::vector<bool> isArray;
- // Fill up above three argument lists
- int argCount = 0;
- for (unsigned i = 0; i < args.size(); i++) {
- Metadata m = args[i].metadata;
- if (args[i].val == "=") {
- // do nothing
- }
- else {
- // Determine the correct argument type
- char argType;
- if (sig.size() > 0) {
- if (argCount >= (signed)sig.size())
- err("Too many args", m);
- argType = sig[argCount];
- }
- else argType = 'i';
- // Integer (also usable for short strings)
- if (argType == 'i') {
- if (args[i].val == ":")
- err("Function asks for int, provided string or array", m);
- nargs.push_back(args[i]);
- }
- // Long string
- else if (argType == 's') {
- if (args[i].val != ":")
- err("Must specify string length", m);
- vargs.push_back(args[i].args[0]);
- sizes.push_back(args[i].args[1]);
- isArray.push_back(false);
- }
- // Array
- else if (argType == 'a') {
- if (args[i].val != ":")
- err("Must specify array length", m);
- vargs.push_back(args[i].args[0]);
- sizes.push_back(args[i].args[1]);
- isArray.push_back(true);
- }
- else err("Invalid arg type in signature", m);
- argCount++;
- }
- }
- int static_arg_size = 1 + (vargs.size() + nargs.size()) * 32;
- // Start off by saving the size variables and calculating the total
- msn kwargs;
- kwargs["funid"] = tkn(utd(funId), m);
- std::string pattern =
- "(with _sztot "+utd(static_arg_size)+" "
- " (with _sizes (alloc "+utd(sizes.size() * 32)+") "
- " (seq ";
- for (unsigned i = 0; i < sizes.size(); i++) {
- std::string sizeIncrement =
- isArray[i] ? "(mul 32 _x)" : "_x";
- pattern +=
- "(with _x $sz"+utd(i)+"(seq "
- " (mstore (add _sizes "+utd(i * 32)+") _x) "
- " (set _sztot (add _sztot "+sizeIncrement+" )))) ";
- kwargs["sz"+utd(i)] = sizes[i];
- }
- // Allocate memory, and set first data byte
- pattern +=
- "(with _datastart (alloc (add _sztot 32)) (seq "
- " (mstore8 _datastart $funid) ";
- // Copy over size variables
- for (unsigned i = 0; i < sizes.size(); i++) {
- int v = 1 + i * 32;
- pattern +=
- " (mstore "
- " (add _datastart "+utd(v)+") "
- " (mload (add _sizes "+utd(v-1)+"))) ";
- }
- // Store normal arguments
- for (unsigned i = 0; i < nargs.size(); i++) {
- int v = 1 + (i + sizes.size()) * 32;
- pattern +=
- " (mstore (add _datastart "+utd(v)+") $"+utd(i)+") ";
- kwargs[utd(i)] = nargs[i];
- }
- // Loop through variable-sized arguments, store them
- pattern +=
- " (with _pos (add _datastart "+utd(static_arg_size)+") (seq";
- for (unsigned i = 0; i < vargs.size(); i++) {
- std::string copySize =
- isArray[i] ? "(mul 32 (mload (add _sizes "+utd(i * 32)+")))"
- : "(mload (add _sizes "+utd(i * 32)+"))";
- pattern +=
- " (unsafe_mcopy _pos $vl"+utd(i)+" "+copySize+") "
- " (set _pos (add _pos "+copySize+")) ";
- kwargs["vl"+utd(i)] = vargs[i];
- }
- // Return a 2-item array containing the start and size
- pattern += " (array_lit _datastart _sztot))))))))";
- std::string prefix = "_temp_"+mkUniqueToken();
- // Fill in pattern, return triple
- return subst(parseLLL(pattern), kwargs, prefix, m);
-}
-
-// Create a node for argument unpacking
-Node unpackArguments(std::vector<Node> vars, Metadata m) {
- std::vector<std::string> varNames;
- std::vector<std::string> longVarNames;
- std::vector<bool> longVarIsArray;
- // Fill in variable and long variable names, as well as which
- // long variables are arrays and which are strings
- for (unsigned i = 0; i < vars.size(); i++) {
- if (vars[i].val == ":") {
- if (vars[i].args.size() != 2)
- err("Malformed def!", m);
- longVarNames.push_back(vars[i].args[0].val);
- std::string tag = vars[i].args[1].val;
- if (tag == "s")
- longVarIsArray.push_back(false);
- else if (tag == "a")
- longVarIsArray.push_back(true);
- else
- err("Function value can only be string or array", m);
- }
- else {
- varNames.push_back(vars[i].val);
- }
- }
- std::vector<Node> sub;
- if (!varNames.size() && !longVarNames.size()) {
- // do nothing if we have no arguments
- }
- else {
- std::vector<Node> varNodes;
- for (unsigned i = 0; i < longVarNames.size(); i++)
- varNodes.push_back(token(longVarNames[i], m));
- for (unsigned i = 0; i < varNames.size(); i++)
- varNodes.push_back(token(varNames[i], m));
- // Copy over variable lengths and short variables
- for (unsigned i = 0; i < varNodes.size(); i++) {
- int pos = 1 + i * 32;
- std::string prefix = (i < longVarNames.size()) ? "_len_" : "";
- sub.push_back(asn("untyped", asn("set",
- token(prefix+varNodes[i].val, m),
- asn("calldataload", tkn(utd(pos), m), m),
- m)));
- }
- // Copy over long variables
- if (longVarNames.size() > 0) {
- std::vector<Node> sub2;
- int pos = varNodes.size() * 32 + 1;
- Node tot = tkn("_tot", m);
- for (unsigned i = 0; i < longVarNames.size(); i++) {
- Node var = tkn(longVarNames[i], m);
- Node varlen = longVarIsArray[i]
- ? asn("mul", tkn("32", m), tkn("_len_"+longVarNames[i], m))
- : tkn("_len_"+longVarNames[i], m);
- sub2.push_back(asn("untyped",
- asn("set", var, asn("alloc", varlen))));
- sub2.push_back(asn("calldatacopy", var, tot, varlen));
- sub2.push_back(asn("set", tot, asn("add", tot, varlen)));
- }
- std::string prefix = "_temp_"+mkUniqueToken();
- sub.push_back(subst(
- astnode("with", tot, tkn(utd(pos), m), asn("seq", sub2)),
- msn(),
- prefix,
- m));
- }
- }
- return asn("seq", sub, m);
-}
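
packArguments() above lays calldata out as one function-id byte, then a 32-byte size word per variable-length argument, then the fixed 32-byte arguments, then the raw variable payloads. A Python sketch (editor's addition, not from the deleted source) of that layout; for simplicity the sizes here are byte lengths, whereas the compiler stores element counts for array arguments:

    def pack_arguments(fun_id, int_args, var_args):
        """fun_id: 0..255; int_args: list of ints; var_args: list of bytes payloads."""
        data = bytes([fun_id])
        for payload in var_args:                          # size words first
            data += len(payload).to_bytes(32, "big")
        for value in int_args:                            # then the static arguments
            data += value.to_bytes(32, "big", signed=True)
        for payload in var_args:                          # then the variable payloads
            data += payload
        return data

    blob = pack_arguments(7, [42], [b"hello"])
    assert blob[0] == 7 and len(blob) == 1 + 32 + 32 + 5
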
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.h
deleted file mode 100644
index 68a1c69ce..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef ETHSERP_FUNCTIONS
-#define ETHSERP_FUNCTIONS
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-#include "optimize.h"
-#include "rewriteutils.h"
-#include "preprocess.h"
-
-
-class argPack {
- public:
- argPack(Node a, Node b, Node c) {
- pre = a;
- datastart = b;
- datasz = c;
- }
- Node pre;
- Node datastart;
- Node datasz;
-};
-
-// Get a signature from a function
-std::string getSignature(std::vector<Node> args);
-
-// Convert a list of arguments into a <pre, mstart, msize> node
-// triple, given the signature of a function
-Node packArguments(std::vector<Node> args, std::string sig,
- int funId, Metadata m);
-
-// Create a node for argument unpacking
-Node unpackArguments(std::vector<Node> vars, Metadata m);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.cpp
deleted file mode 100644
index ad4fbd52d..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "tokenize.h"
-
-struct _parseOutput {
- Node node;
- int newpos;
-};
-
-// Helper, returns subtree and position of start of next node
-_parseOutput _parse(std::vector<Node> inp, int pos) {
- Metadata met = inp[pos].metadata;
- _parseOutput o;
- // Bracket: keep grabbing tokens until we get to the
- // corresponding closing bracket
- if (inp[pos].val == "(" || inp[pos].val == "[") {
- std::string fun, rbrack;
- std::vector<Node> args;
- pos += 1;
- if (inp[pos].val == "[") {
- fun = "access";
- rbrack = "]";
- }
- else rbrack = ")";
- // First argument is the function
-        while (inp[pos].val != rbrack) {
- _parseOutput po = _parse(inp, pos);
- if (fun.length() == 0 && po.node.type == 1) {
- std::cerr << "Error: first arg must be function\n";
- fun = po.node.val;
- }
- else if (fun.length() == 0) {
- fun = po.node.val;
- }
- else {
- args.push_back(po.node);
- }
- pos = po.newpos;
- }
- o.newpos = pos + 1;
- o.node = astnode(fun, args, met);
- }
- // Normal token, return it and advance to next token
- else {
- o.newpos = pos + 1;
- o.node = token(inp[pos].val, met);
- }
- return o;
-}
-
-// stream of tokens -> lisp parse tree
-Node parseLLLTokenStream(std::vector<Node> inp) {
- _parseOutput o = _parse(inp, 0);
- return o.node;
-}
-
-// Parses LLL
-Node parseLLL(std::string s, bool allowFileRead) {
- std::string input = s;
- std::string file = "main";
- if (exists(s) && allowFileRead) {
- file = s;
- input = get_file_contents(s);
- }
-    return parseLLLTokenStream(tokenize(input, Metadata(file, 0, 0), true));
-}
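
LLL source is an s-expression language, so the parser above simply recurses until the matching closing bracket and treats the first element as the operator. A compact Python sketch (editor's addition, not from the deleted source) of the same recursion; bracket access syntax and metadata handling are left out:

    def parse(tokens, pos=0):
        """Returns (node, next_pos); a node is a token string or (op, [children])."""
        if tokens[pos] == "(":
            pos += 1
            op, children = None, []
            while tokens[pos] != ")":
                node, pos = parse(tokens, pos)
                if op is None:
                    op = node                 # first element names the operation
                else:
                    children.append(node)
            return (op, children), pos + 1
        return tokens[pos], pos + 1           # plain token

    tree, _ = parse("( add 1 ( mul 2 3 ) )".split())
    assert tree == ("add", ["1", ("mul", ["2", "3"])])
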
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.h
deleted file mode 100644
index 4bfa7b82e..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef ETHSERP_LLLPARSER
-#define ETHSERP_LLLPARSER
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// LLL text -> parse tree
-Node parseLLL(std::string s, bool allowFileRead=false);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.cpp
deleted file mode 100644
index b24144e46..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "opcodes.h"
-#include "util.h"
-#include "bignum.h"
-
-Mapping mapping[] = {
- Mapping("STOP", 0x00, 0, 0),
- Mapping("ADD", 0x01, 2, 1),
- Mapping("MUL", 0x02, 2, 1),
- Mapping("SUB", 0x03, 2, 1),
- Mapping("DIV", 0x04, 2, 1),
- Mapping("SDIV", 0x05, 2, 1),
- Mapping("MOD", 0x06, 2, 1),
- Mapping("SMOD", 0x07, 2, 1),
- Mapping("ADDMOD", 0x08, 3, 1),
- Mapping("MULMOD", 0x09, 3, 1),
- Mapping("EXP", 0x0a, 2, 1),
- Mapping("SIGNEXTEND", 0x0b, 2, 1),
- Mapping("LT", 0x10, 2, 1),
- Mapping("GT", 0x11, 2, 1),
- Mapping("SLT", 0x12, 2, 1),
- Mapping("SGT", 0x13, 2, 1),
- Mapping("EQ", 0x14, 2, 1),
- Mapping("ISZERO", 0x15, 1, 1),
- Mapping("AND", 0x16, 2, 1),
- Mapping("OR", 0x17, 2, 1),
- Mapping("XOR", 0x18, 2, 1),
- Mapping("NOT", 0x19, 1, 1),
- Mapping("BYTE", 0x1a, 2, 1),
- Mapping("SHA3", 0x20, 2, 1),
- Mapping("ADDRESS", 0x30, 0, 1),
- Mapping("BALANCE", 0x31, 1, 1),
- Mapping("ORIGIN", 0x32, 0, 1),
- Mapping("CALLER", 0x33, 0, 1),
- Mapping("CALLVALUE", 0x34, 0, 1),
- Mapping("CALLDATALOAD", 0x35, 1, 1),
- Mapping("CALLDATASIZE", 0x36, 0, 1),
- Mapping("CALLDATACOPY", 0x37, 3, 0),
- Mapping("CODESIZE", 0x38, 0, 1),
- Mapping("CODECOPY", 0x39, 3, 0),
- Mapping("GASPRICE", 0x3a, 0, 1),
- Mapping("EXTCODESIZE", 0x3b, 1, 1),
- Mapping("EXTCODECOPY", 0x3c, 4, 0),
- Mapping("PREVHASH", 0x40, 0, 1),
- Mapping("COINBASE", 0x41, 0, 1),
- Mapping("TIMESTAMP", 0x42, 0, 1),
- Mapping("NUMBER", 0x43, 0, 1),
- Mapping("DIFFICULTY", 0x44, 0, 1),
- Mapping("GASLIMIT", 0x45, 0, 1),
- Mapping("POP", 0x50, 1, 0),
- Mapping("MLOAD", 0x51, 1, 1),
- Mapping("MSTORE", 0x52, 2, 0),
- Mapping("MSTORE8", 0x53, 2, 0),
- Mapping("SLOAD", 0x54, 1, 1),
- Mapping("SSTORE", 0x55, 2, 0),
- Mapping("JUMP", 0x56, 1, 0),
- Mapping("JUMPI", 0x57, 2, 0),
- Mapping("PC", 0x58, 0, 1),
- Mapping("MSIZE", 0x59, 0, 1),
- Mapping("GAS", 0x5a, 0, 1),
- Mapping("JUMPDEST", 0x5b, 0, 0),
- Mapping("LOG0", 0xa0, 2, 0),
- Mapping("LOG1", 0xa1, 3, 0),
- Mapping("LOG2", 0xa2, 4, 0),
- Mapping("LOG3", 0xa3, 5, 0),
- Mapping("LOG4", 0xa4, 6, 0),
- Mapping("CREATE", 0xf0, 3, 1),
- Mapping("CALL", 0xf1, 7, 1),
- Mapping("CALLCODE", 0xf2, 7, 1),
- Mapping("RETURN", 0xf3, 2, 0),
- Mapping("SUICIDE", 0xff, 1, 0),
- Mapping("---END---", 0x00, 0, 0),
-};
-
-std::map<std::string, std::vector<int> > opcodes;
-std::map<int, std::string> reverseOpcodes;
-
-// Fetches everything EXCEPT PUSH1..32
-std::pair<std::string, std::vector<int> > _opdata(std::string ops, int opi) {
- if (!opcodes.size()) {
- int i = 0;
- while (mapping[i].op != "---END---") {
- Mapping mi = mapping[i];
- opcodes[mi.op] = triple(mi.opcode, mi.in, mi.out);
- i++;
- }
- for (i = 1; i <= 16; i++) {
- opcodes["DUP"+unsignedToDecimal(i)] = triple(0x7f + i, i, i+1);
- opcodes["SWAP"+unsignedToDecimal(i)] = triple(0x8f + i, i+1, i+1);
- }
- for (std::map<std::string, std::vector<int> >::iterator it=opcodes.begin();
- it != opcodes.end();
- it++) {
- reverseOpcodes[(*it).second[0]] = (*it).first;
- }
- }
- ops = upperCase(ops);
- std::string op;
- std::vector<int> opdata;
- op = reverseOpcodes.count(opi) ? reverseOpcodes[opi] : "";
- opdata = opcodes.count(ops) ? opcodes[ops] : triple(-1, -1, -1);
- return std::pair<std::string, std::vector<int> >(op, opdata);
-}
-
-int opcode(std::string op) {
- return _opdata(op, -1).second[0];
-}
-
-int opinputs(std::string op) {
- return _opdata(op, -1).second[1];
-}
-
-int opoutputs(std::string op) {
- return _opdata(op, -1).second[2];
-}
-
-std::string op(int opcode) {
- return _opdata("", opcode).first;
-}
-
-std::string lllSpecials[][3] = {
- { "ref", "1", "1" },
- { "get", "1", "1" },
- { "set", "2", "2" },
- { "with", "3", "3" },
- { "comment", "0", "2147483647" },
- { "ops", "0", "2147483647" },
- { "lll", "2", "2" },
- { "seq", "0", "2147483647" },
- { "if", "3", "3" },
- { "unless", "2", "2" },
- { "until", "2", "2" },
- { "alloc", "1", "1" },
- { "---END---", "0", "0" },
-};
-
-std::map<std::string, std::pair<int, int> > lllMap;
-
-// Is a function name one of the valid functions above?
-bool isValidLLLFunc(std::string f, int argc) {
- if (lllMap.size() == 0) {
- for (int i = 0; ; i++) {
- if (lllSpecials[i][0] == "---END---") break;
- lllMap[lllSpecials[i][0]] = std::pair<int, int>(
- dtu(lllSpecials[i][1]), dtu(lllSpecials[i][2]));
- }
- }
- return lllMap.count(f)
- && argc >= lllMap[f].first
- && argc <= lllMap[f].second;
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.h
deleted file mode 100644
index 41423c169..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef ETHSERP_OPCODES
-#define ETHSERP_OPCODES
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-class Mapping {
- public:
- Mapping(std::string Op, int Opcode, int In, int Out) {
- op = Op;
- opcode = Opcode;
- in = In;
- out = Out;
- }
- std::string op;
- int opcode;
- int in;
- int out;
-};
-
-extern Mapping mapping[];
-
-extern std::map<std::string, std::vector<int> > opcodes;
-extern std::map<int, std::string> reverseOpcodes;
-
-std::pair<std::string, std::vector<int> > _opdata(std::string ops, int opi);
-
-int opcode(std::string op);
-
-int opinputs(std::string op);
-
-int opoutputs(std::string op);
-
-std::string op(int opcode);
-
-extern std::string lllSpecials[][3];
-
-extern std::map<std::string, std::pair<int, int> > lllMap;
-
-bool isValidLLLFunc(std::string f, int argc);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.cpp
deleted file mode 100644
index e689fcb69..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-
-// Compile-time arithmetic calculations
-Node optimize(Node inp) {
- if (inp.type == TOKEN) {
- Node o = tryNumberize(inp);
- if (decimalGt(o.val, tt256, true))
- err("Value too large (exceeds 32 bytes or 2^256)", inp.metadata);
- return o;
- }
- for (unsigned i = 0; i < inp.args.size(); i++) {
- inp.args[i] = optimize(inp.args[i]);
- }
- // Arithmetic-specific transform
- if (inp.val == "+") inp.val = "add";
- if (inp.val == "*") inp.val = "mul";
- if (inp.val == "-") inp.val = "sub";
- if (inp.val == "/") inp.val = "sdiv";
- if (inp.val == "^") inp.val = "exp";
- if (inp.val == "**") inp.val = "exp";
- if (inp.val == "%") inp.val = "smod";
- // Degenerate cases for add and mul
- if (inp.args.size() == 2) {
- if (inp.val == "add" && inp.args[0].type == TOKEN &&
- inp.args[0].val == "0") {
- Node x = inp.args[1];
- inp = x;
- }
- if (inp.val == "add" && inp.args[1].type == TOKEN &&
- inp.args[1].val == "0") {
- Node x = inp.args[0];
- inp = x;
- }
- if (inp.val == "mul" && inp.args[0].type == TOKEN &&
- inp.args[0].val == "1") {
- Node x = inp.args[1];
- inp = x;
- }
- if (inp.val == "mul" && inp.args[1].type == TOKEN &&
- inp.args[1].val == "1") {
- Node x = inp.args[0];
- inp = x;
- }
- }
- // Arithmetic computation
- if (inp.args.size() == 2
- && inp.args[0].type == TOKEN
- && inp.args[1].type == TOKEN) {
- std::string o;
- if (inp.val == "add") {
- o = decimalMod(decimalAdd(inp.args[0].val, inp.args[1].val), tt256);
- }
- else if (inp.val == "sub") {
- if (decimalGt(inp.args[0].val, inp.args[1].val, true))
- o = decimalSub(inp.args[0].val, inp.args[1].val);
- }
- else if (inp.val == "mul") {
- o = decimalMod(decimalMul(inp.args[0].val, inp.args[1].val), tt256);
- }
- else if (inp.val == "div" && inp.args[1].val != "0") {
- o = decimalDiv(inp.args[0].val, inp.args[1].val);
- }
- else if (inp.val == "sdiv" && inp.args[1].val != "0"
- && decimalGt(tt255, inp.args[0].val)
- && decimalGt(tt255, inp.args[1].val)) {
- o = decimalDiv(inp.args[0].val, inp.args[1].val);
- }
- else if (inp.val == "mod" && inp.args[1].val != "0") {
- o = decimalMod(inp.args[0].val, inp.args[1].val);
- }
- else if (inp.val == "smod" && inp.args[1].val != "0"
- && decimalGt(tt255, inp.args[0].val)
- && decimalGt(tt255, inp.args[1].val)) {
- o = decimalMod(inp.args[0].val, inp.args[1].val);
- }
- else if (inp.val == "exp") {
- o = decimalModExp(inp.args[0].val, inp.args[1].val, tt256);
- }
- if (o.length()) return token(o, inp.metadata);
- }
- return inp;
-}
-
-// Is a node degenerate (ie. trivial to calculate) ?
-bool isDegenerate(Node n) {
- return optimize(n).type == TOKEN;
-}
-
-// Is a node purely arithmetic?
-bool isPureArithmetic(Node n) {
- return isNumberLike(optimize(n));
-}
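
optimize() folds arithmetic whose operands are both literal tokens, reducing results modulo 2^256 to match EVM word arithmetic and leaving anything non-constant untouched. A Python sketch (editor's addition, not from the deleted source) of the same constant folding over a toy tree shape:

    TT256 = 2**256

    def fold(node):
        """node: int literal, symbol string, or (op, left, right) tuple."""
        if not isinstance(node, tuple):
            return node
        op, a, b = node[0], fold(node[1]), fold(node[2])
        if isinstance(a, int) and isinstance(b, int):
            if op == "add":
                return (a + b) % TT256
            if op == "mul":
                return (a * b) % TT256
            if op == "sub" and a >= b:
                return a - b
            if op == "div" and b != 0:
                return a // b
        return (op, a, b)                     # leave non-constant subtrees alone

    assert fold(("add", ("mul", 3, 4), 5)) == 17
    assert fold(("add", "x", 5)) == ("add", "x", 5)
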
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.h
deleted file mode 100644
index 06ea3bba1..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef ETHSERP_OPTIMIZER
-#define ETHSERP_OPTIMIZER
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Compile-time arithmetic calculations
-Node optimize(Node inp);
-
-// Is a node degenerate (ie. trivial to calculate) ?
-bool isDegenerate(Node n);
-
-// Is a node purely arithmetic?
-bool isPureArithmetic(Node n);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.cpp
deleted file mode 100644
index 5e8c459c3..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.cpp
+++ /dev/null
@@ -1,430 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "parser.h"
-#include "tokenize.h"
-
-// Extended BEDMAS precedence order
-int precedence(Node tok) {
- std::string v = tok.val;
- if (v == ".") return -1;
- else if (v == "!" || v == "not") return 1;
- else if (v=="^" || v == "**") return 2;
- else if (v=="*" || v=="/" || v=="%") return 3;
- else if (v=="+" || v=="-") return 4;
- else if (v=="<" || v==">" || v=="<=" || v==">=") return 5;
- else if (v=="&" || v=="|" || v=="xor" || v=="==" || v == "!=") return 6;
- else if (v=="&&" || v=="and") return 7;
- else if (v=="||" || v=="or") return 8;
- else if (v=="=") return 10;
- else if (v=="+=" || v=="-=" || v=="*=" || v=="/=" || v=="%=") return 10;
- else if (v==":" || v == "::") return 11;
- else return 0;
-}
-
-// Token classification for shunting-yard purposes
-int toktype(Node tok) {
- if (tok.type == ASTNODE) return COMPOUND;
- std::string v = tok.val;
- if (v == "(" || v == "[" || v == "{") return LPAREN;
- else if (v == ")" || v == "]" || v == "}") return RPAREN;
- else if (v == ",") return COMMA;
- else if (v == "!" || v == "~" || v == "not") return UNARY_OP;
- else if (precedence(tok) > 0) return BINARY_OP;
- else if (precedence(tok) < 0) return TOKEN_SPLITTER;
- if (tok.val[0] != '"' && tok.val[0] != '\'') {
- for (unsigned i = 0; i < tok.val.length(); i++) {
- if (chartype(tok.val[i]) == SYMB) {
- err("Invalid symbol: "+tok.val, tok.metadata);
- }
- }
- }
- return ALPHANUM;
-}
-
-
-// Converts to reverse polish notation
-std::vector<Node> shuntingYard(std::vector<Node> tokens) {
- std::vector<Node> iq;
- for (int i = tokens.size() - 1; i >= 0; i--) {
- iq.push_back(tokens[i]);
- }
- std::vector<Node> oq;
- std::vector<Node> stack;
- Node prev, tok;
- int prevtyp = 0, toktyp = 0;
-
- while (iq.size()) {
- prev = tok;
- prevtyp = toktyp;
- tok = iq.back();
- toktyp = toktype(tok);
- iq.pop_back();
- // Alphanumerics go straight to output queue
- if (toktyp == ALPHANUM) {
- oq.push_back(tok);
- }
- // Left parens go on stack and output queue
- else if (toktyp == LPAREN) {
- while (stack.size() && toktype(stack.back()) == TOKEN_SPLITTER) {
- oq.push_back(stack.back());
- stack.pop_back();
- }
- if (prevtyp != ALPHANUM && prevtyp != RPAREN) {
- oq.push_back(token("id", tok.metadata));
- }
- stack.push_back(tok);
- oq.push_back(tok);
- }
- // If rparen, keep moving from stack to output queue until lparen
- else if (toktyp == RPAREN) {
- while (stack.size() && toktype(stack.back()) != LPAREN) {
- oq.push_back(stack.back());
- stack.pop_back();
- }
- if (stack.size()) {
- stack.pop_back();
- }
- oq.push_back(tok);
- }
- else if (toktyp == UNARY_OP) {
- stack.push_back(tok);
- }
- // If token splitter, just push it to the stack
- else if (toktyp == TOKEN_SPLITTER) {
- while (stack.size() && toktype(stack.back()) == TOKEN_SPLITTER) {
- oq.push_back(stack.back());
- stack.pop_back();
- }
- stack.push_back(tok);
- }
- // If binary op, keep popping from stack while higher bedmas precedence
- else if (toktyp == BINARY_OP) {
- if (tok.val == "-" && prevtyp != ALPHANUM && prevtyp != RPAREN) {
- stack.push_back(tok);
- oq.push_back(token("0", tok.metadata));
- }
- else {
- int prec = precedence(tok);
- while (stack.size()
- && (toktype(stack.back()) == BINARY_OP
- || toktype(stack.back()) == UNARY_OP
- || toktype(stack.back()) == TOKEN_SPLITTER)
- && precedence(stack.back()) <= prec) {
- oq.push_back(stack.back());
- stack.pop_back();
- }
- stack.push_back(tok);
- }
- }
- // Comma means finish evaluating the argument
- else if (toktyp == COMMA) {
- while (stack.size() && toktype(stack.back()) != LPAREN) {
- oq.push_back(stack.back());
- stack.pop_back();
- }
- }
- }
- while (stack.size()) {
- oq.push_back(stack.back());
- stack.pop_back();
- }
- return oq;
-}
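shuntingYard() is not declared in parser.h, so the quickest way to inspect its output is a debug hook in this translation unit. A hypothetical helper (tokenize() comes from tokenize.h, Metadata from util.h); for 2 + 3 * 5 the postfix order should be 2 3 5 * +.

    // Hypothetical debug helper, appended to parser.cpp.
    void debugShuntingYard() {
        std::vector<Node> toks = tokenize("2 + 3 * 5", Metadata("demo", 0, 0));
        std::vector<Node> rpn = shuntingYard(toks);
        for (unsigned i = 0; i < rpn.size(); i++)
            std::cerr << rpn[i].val << " ";    // prints: 2 3 5 * +
        std::cerr << "\n";
    }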
-
-// Converts reverse polish notation into tree
-Node treefy(std::vector<Node> stream) {
- std::vector<Node> iq;
- for (int i = stream.size() -1; i >= 0; i--) {
- iq.push_back(stream[i]);
- }
- std::vector<Node> oq;
- while (iq.size()) {
- Node tok = iq.back();
- iq.pop_back();
- int typ = toktype(tok);
- // If unary, take node off end of oq and wrap it with the operator
- // If binary, do the same with two nodes
- if (typ == UNARY_OP || typ == BINARY_OP || typ == TOKEN_SPLITTER) {
- std::vector<Node> args;
- int rounds = (typ == UNARY_OP) ? 1 : 2;
- for (int i = 0; i < rounds; i++) {
- if (oq.size() == 0) {
- err("Line malformed, not enough args for "+tok.val,
- tok.metadata);
- }
- args.push_back(oq.back());
- oq.pop_back();
- }
- std::vector<Node> args2;
- while (args.size()) {
- args2.push_back(args.back());
- args.pop_back();
- }
- oq.push_back(astnode(tok.val, args2, tok.metadata));
- }
- // If rparen, keep grabbing until we get to an lparen
- else if (typ == RPAREN) {
- std::vector<Node> args;
- while (1) {
- if (toktype(oq.back()) == LPAREN) break;
- args.push_back(oq.back());
- oq.pop_back();
- if (!oq.size()) err("Bracket without matching", tok.metadata);
- }
- oq.pop_back();
- args.push_back(oq.back());
- oq.pop_back();
- // We represent a[b] as (access a b)
- if (tok.val == "]")
- args.push_back(token("access", tok.metadata));
- if (args.back().type == ASTNODE)
- args.push_back(token("fun", tok.metadata));
- std::string fun = args.back().val;
- args.pop_back();
- // We represent [1,2,3] as (array_lit 1 2 3)
- if (fun == "access" && args.size() && args.back().val == "id") {
- fun = "array_lit";
- args.pop_back();
- }
- std::vector<Node> args2;
- while (args.size()) {
- args2.push_back(args.back());
- args.pop_back();
- }
-            // When evaluating 2 + (3 * 5), the shunting yard algo turns that
-            // into 2 id ( 3 5 * ) +, effectively inserting "id" as a dummy
-            // function name where the algo was expecting a function to call
-            // on the thing inside the brackets. This reverses that step
- if (fun == "id" && args2.size() == 1) {
- oq.push_back(args2[0]);
- }
- else {
- oq.push_back(astnode(fun, args2, tok.metadata));
- }
- }
- else oq.push_back(tok);
- // This is messy, but has to be done. Import/inset other files here
- std::string v = oq.back().val;
- if ((v == "inset" || v == "import" || v == "create")
- && oq.back().args.size() == 1
- && oq.back().args[0].type == TOKEN) {
- int lastSlashPos = tok.metadata.file.rfind("/");
- std::string root;
- if (lastSlashPos >= 0)
- root = tok.metadata.file.substr(0, lastSlashPos) + "/";
- else
- root = "";
- std::string filename = oq.back().args[0].val;
- filename = filename.substr(1, filename.length() - 2);
- if (!exists(root + filename))
- err("File does not exist: "+root + filename, tok.metadata);
- oq.back().args.pop_back();
- oq.back().args.push_back(parseSerpent(root + filename));
- }
- //Useful for debugging
- //for (int i = 0; i < oq.size(); i++) {
- // std::cerr << printSimple(oq[i]) << " ";
- //}
- //std::cerr << " <-\n";
- }
- // Output must have one argument
- if (oq.size() == 0) {
- err("Output blank", Metadata());
- }
- else if (oq.size() > 1) {
- return asn("multi", oq, oq[0].metadata);
- }
-
- return oq[0];
-}
-
-
-// Parses one line of serpent
-Node parseSerpentTokenStream(std::vector<Node> s) {
- return treefy(shuntingYard(s));
-}
-
-
-// Count spaces at beginning of line
-int spaceCount(std::string s) {
- unsigned pos = 0;
- while (pos < s.length() && (s[pos] == ' ' || s[pos] == '\t'))
- pos++;
- return pos;
-}
-
-// Is this a command that takes an argument on the same line?
-bool bodied(std::string tok) {
- return tok == "if" || tok == "elif" || tok == "while"
- || tok == "with" || tok == "def" || tok == "extern"
- || tok == "data" || tok == "assert" || tok == "return"
- || tok == "fun" || tok == "scope" || tok == "macro"
- || tok == "type";
-}
-
-// Are the two commands meant to continue each other?
-bool bodiedContinued(std::string prev, std::string tok) {
- return (prev == "if" && tok == "elif")
- || (prev == "elif" && tok == "else")
- || (prev == "elif" && tok == "elif")
- || (prev == "if" && tok == "else");
-}
-
-// Is a line of code empty?
-bool isLineEmpty(std::string line) {
- std::vector<Node> tokens = tokenize(line);
- if (!tokens.size() || tokens[0].val == "#" || tokens[0].val == "//")
- return true;
- return false;
-}
-
-// Parse lines of serpent (helper function)
-Node parseLines(std::vector<std::string> lines, Metadata metadata, int sp) {
- std::vector<Node> o;
- int origLine = metadata.ln;
- unsigned i = 0;
- while (i < lines.size()) {
- metadata.ln = origLine + i;
- std::string main = lines[i];
- if (isLineEmpty(main)) {
- i += 1;
- continue;
- }
- int spaces = spaceCount(main);
- if (spaces != sp) {
- err("Indent mismatch", metadata);
- }
- // Tokenize current line
- std::vector<Node> tokens = tokenize(main.substr(sp), metadata);
- // Remove comments
- std::vector<Node> tokens2;
- for (unsigned j = 0; j < tokens.size(); j++) {
- if (tokens[j].val == "#" || tokens[j].val == "//") break;
- tokens2.push_back(tokens[j]);
- }
- bool expectingChildBlock = false;
- if (tokens2.size() > 0 && tokens2.back().val == ":") {
- tokens2.pop_back();
- expectingChildBlock = true;
- }
- // Parse current line
- Node out = parseSerpentTokenStream(tokens2);
- // Parse child block
- int childIndent = 999999;
- std::vector<std::string> childBlock;
- while (1) {
- i++;
- if (i >= lines.size())
- break;
- bool ile = isLineEmpty(lines[i]);
- if (!ile) {
- int spaces = spaceCount(lines[i]);
- if (spaces <= sp) break;
- childBlock.push_back(lines[i]);
- if (spaces < childIndent) childIndent = spaces;
- }
- else childBlock.push_back("");
- }
- // Child block empty?
- bool cbe = true;
- for (unsigned i = 0; i < childBlock.size(); i++) {
- if (childBlock[i].length() > 0) { cbe = false; break; }
- }
- // Add child block to AST
- if (expectingChildBlock) {
- if (cbe)
- err("Expected indented child block!", out.metadata);
- out.type = ASTNODE;
- metadata.ln += 1;
- out.args.push_back(parseLines(childBlock, metadata, childIndent));
- metadata.ln -= 1;
- }
- else if (!cbe)
- err("Did not expect indented child block!", out.metadata);
- else if (out.args.size() && out.args[out.args.size() - 1].val == ":") {
- Node n = out.args[out.args.size() - 1];
- out.args.pop_back();
- out.args.push_back(n.args[0]);
- out.args.push_back(n.args[1]);
- }
- // Bring back if / elif into AST
- if (bodied(tokens[0].val)) {
- if (out.val != "multi") {
- // token not being used in bodied form
- }
- else if (out.args[0].val == "id")
- out = astnode(tokens[0].val, out.args[1].args, out.metadata);
- else if (out.args[0].type == TOKEN) {
- std::vector<Node> out2;
- for (unsigned i = 1; i < out.args.size(); i++)
- out2.push_back(out.args[i]);
- out = astnode(tokens[0].val, out2, out.metadata);
- }
- else
- out = astnode("fun", out.args, out.metadata);
- }
- // Multi not supported
- if (out.val == "multi")
- err("Multiple expressions or unclosed bracket", out.metadata);
- // Convert top-level colon expressions into non-colon expressions;
- // makes if statements and the like equivalent indented or not
- //if (out.val == ":" && out.args[0].type == TOKEN)
- // out = asn(out.args[0].val, out.args[1], out.metadata);
- //if (bodied(tokens[0].val) && out.args[0].val == ":")
- // out = asn(tokens[0].val, out.args[0].args);
- if (o.size() == 0 || o.back().type == TOKEN) {
- o.push_back(out);
- continue;
- }
- // This is a little complicated. Basically, the idea here is to build
- // constructions like [if [< x 5] [a] [elif [< x 10] [b] [else [c]]]]
- std::vector<Node> u;
- u.push_back(o.back());
- if (bodiedContinued(o.back().val, out.val)) {
- while (1) {
- if (!bodiedContinued(u.back().val, out.val)) {
- u.pop_back();
- break;
- }
- if (!u.back().args.size()
- || !bodiedContinued(u.back().val, u.back().args.back().val)) {
- break;
- }
- u.push_back(u.back().args.back());
- }
- u.back().args.push_back(out);
- while (u.size() > 1) {
- Node v = u.back();
- u.pop_back();
- u.back().args.pop_back();
- u.back().args.push_back(v);
- }
- o.pop_back();
- o.push_back(u[0]);
- }
- else o.push_back(out);
- }
- if (o.size() == 1)
- return o[0];
- else if (o.size())
- return astnode("seq", o, o[0].metadata);
- else
- return astnode("seq", o, Metadata());
-}
-
-// Parses serpent code
-Node parseSerpent(std::string s) {
- std::string input = s;
- std::string file = "main";
- if (exists(s)) {
- file = s;
- input = get_file_contents(s);
- }
- return parseLines(splitLines(input), Metadata(file, 0, 0), 0);
-}
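A minimal caller for the exported entry point; as the exists() check shows, parseSerpent() accepts either a filename or raw source text. This is only a sketch: printAST is assumed from util.h and the expected output is approximate.

    #include <iostream>
    #include <string>
    #include "parser.h"
    #include "util.h"   // printAST (assumed)

    int main() {
        std::string src =
            "if x < 5:\n"
            "    y = 1\n"
            "else:\n"
            "    y = 2\n";
        Node ast = parseSerpent(src);        // not a file, so metadata uses "main"
        std::cout << printAST(ast) << "\n";  // roughly (if (< x 5) (= y 1) (else (= y 2)))
        return 0;
    }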
-
-
-using namespace std;
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.h
deleted file mode 100644
index e3632220a..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef ETHSERP_PARSER
-#define ETHSERP_PARSER
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Serpent text -> parse tree
-Node parseSerpent(std::string s);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.cpp
deleted file mode 100644
index 3f08ea8b1..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.cpp
+++ /dev/null
@@ -1,299 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-#include "rewriteutils.h"
-#include "optimize.h"
-#include "preprocess.h"
-#include "functions.h"
-#include "opcodes.h"
-
-// Convert a function of the form (def (f x y z) (do stuff)) into
-// (if (first byte of ABI is correct) (seq (setup x y z) (do stuff)))
-Node convFunction(Node node, int functionCount) {
- std::string prefix = "_temp"+mkUniqueToken()+"_";
- Metadata m = node.metadata;
-
- if (node.args.size() != 2)
- err("Malformed def!", m);
- // Collect the list of variable names and variable byte counts
- Node unpack = unpackArguments(node.args[0].args, m);
- // And the actual code
- Node body = node.args[1];
- // Main LLL-based function body
- return astnode("if",
- astnode("eq",
- astnode("get", token("__funid", m), m),
- token(unsignedToDecimal(functionCount), m),
- m),
- astnode("seq", unpack, body, m));
-}
-
-// Populate an svObj with the arguments needed to determine
-// the storage position of a node
-svObj getStorageVars(svObj pre, Node node, std::string prefix,
- int index) {
- Metadata m = node.metadata;
- if (!pre.globalOffset.size()) pre.globalOffset = "0";
- std::vector<Node> h;
- std::vector<std::string> coefficients;
- // Array accesses or atoms
- if (node.val == "access" || node.type == TOKEN) {
- std::string tot = "1";
- h = listfyStorageAccess(node);
- coefficients.push_back("1");
- for (unsigned i = h.size() - 1; i >= 1; i--) {
- // Array sizes must be constant or at least arithmetically
- // evaluable at compile time
- if (!isPureArithmetic(h[i]))
- err("Array size must be fixed value", m);
- // Create a list of the coefficient associated with each
- // array index
- coefficients.push_back(decimalMul(coefficients.back(), h[i].val));
- }
- }
- // Tuples
- else {
- int startc;
- // Handle the (fun <fun_astnode> args...) case
- if (node.val == "fun") {
- startc = 1;
- h = listfyStorageAccess(node.args[0]);
- }
- // Handle the (<fun_name> args...) case, which
- // the serpent parser produces when the function
- // is a simple name and not a complex astnode
- else {
- startc = 0;
- h = listfyStorageAccess(token(node.val, m));
- }
- svObj sub = pre;
- sub.globalOffset = "0";
- // Evaluate tuple elements recursively
- for (unsigned i = startc; i < node.args.size(); i++) {
- sub = getStorageVars(sub,
- node.args[i],
- prefix+h[0].val.substr(2)+".",
- i-startc);
- }
- coefficients.push_back(sub.globalOffset);
- for (unsigned i = h.size() - 1; i >= 1; i--) {
- // Array sizes must be constant or at least arithmetically
- // evaluable at compile time
- if (!isPureArithmetic(h[i]))
- err("Array size must be fixed value", m);
- // Create a list of the coefficient associated with each
- // array index
- coefficients.push_back(decimalMul(coefficients.back(), h[i].val));
- }
- pre.offsets = sub.offsets;
- pre.coefficients = sub.coefficients;
- pre.nonfinal = sub.nonfinal;
- pre.nonfinal[prefix+h[0].val.substr(2)] = true;
- }
- pre.coefficients[prefix+h[0].val.substr(2)] = coefficients;
- pre.offsets[prefix+h[0].val.substr(2)] = pre.globalOffset;
- pre.indices[prefix+h[0].val.substr(2)] = index;
- if (decimalGt(tt176, coefficients.back()))
- pre.globalOffset = decimalAdd(pre.globalOffset, coefficients.back());
- return pre;
-}
-
-// Preprocess input containing functions
-//
-// localExterns is a map of the form, eg,
-//
-// { x: { foo: 0, bar: 1, baz: 2 }, y: { qux: 0, foo: 1 } ... }
-//
-// localExternSigs is a map of the form, eg,
-//
-// { x : { foo: iii, bar: iis, baz: ia }, y: { qux: i, foo: as } ... }
-//
-// Signifying that x.foo = 0, x.baz = 2, y.foo = 1, etc
-// and that x.foo has three integers as arguments, x.bar has two
-// integers and a variable-length string, and baz has an integer
-// and an array
-//
-// globalExterns is a one-level map, eg from above
-//
-// { foo: 1, bar: 1, baz: 2, qux: 0 }
-//
-// globalExternSigs is a one-level map, eg from above
-//
-// { foo: as, bar: iis, baz: ia, qux: i}
-//
-// Note that globalExterns and globalExternSigs may be ambiguous
-// Also, a null signature implies an infinite tail of integers
-preprocessResult preprocessInit(Node inp) {
- Metadata m = inp.metadata;
- if (inp.val != "seq")
- inp = astnode("seq", inp, m);
- std::vector<Node> empty = std::vector<Node>();
- Node init = astnode("seq", empty, m);
- Node shared = astnode("seq", empty, m);
- std::vector<Node> any;
- std::vector<Node> functions;
- preprocessAux out = preprocessAux();
- out.localExterns["self"] = std::map<std::string, int>();
- int functionCount = 0;
- int storageDataCount = 0;
- for (unsigned i = 0; i < inp.args.size(); i++) {
- Node obj = inp.args[i];
- // Functions
- if (obj.val == "def") {
- if (obj.args.size() == 0)
- err("Empty def", m);
- std::string funName = obj.args[0].val;
- // Init, shared and any are special functions
- if (funName == "init" || funName == "shared" || funName == "any") {
- if (obj.args[0].args.size())
- err(funName+" cannot have arguments", m);
- }
- if (funName == "init") init = obj.args[1];
- else if (funName == "shared") shared = obj.args[1];
- else if (funName == "any") any.push_back(obj.args[1]);
- else {
- // Other functions
- functions.push_back(convFunction(obj, functionCount));
- out.localExterns["self"][obj.args[0].val] = functionCount;
- out.localExternSigs["self"][obj.args[0].val]
- = getSignature(obj.args[0].args);
- functionCount++;
- }
- }
- // Extern declarations
- else if (obj.val == "extern") {
- std::string externName = obj.args[0].val;
- Node al = obj.args[1];
- if (!out.localExterns.count(externName))
- out.localExterns[externName] = std::map<std::string, int>();
- for (unsigned i = 0; i < al.args.size(); i++) {
- if (al.args[i].val == ":") {
- std::string v = al.args[i].args[0].val;
- std::string sig = al.args[i].args[1].val;
- out.globalExterns[v] = i;
- out.globalExternSigs[v] = sig;
- out.localExterns[externName][v] = i;
- out.localExternSigs[externName][v] = sig;
- }
- else {
- std::string v = al.args[i].val;
- out.globalExterns[v] = i;
- out.globalExternSigs[v] = "";
- out.localExterns[externName][v] = i;
- out.localExternSigs[externName][v] = "";
- }
- }
- }
- // Custom macros
- else if (obj.val == "macro") {
- // Rules for valid macros:
- //
- // There are only four categories of valid macros:
- //
- // 1. a macro where the outer function is something
- // which is NOT an existing valid function/extern/datum
- // 2. a macro of the form set(c(x), d) where c must NOT
- // be an existing valid function/extern/datum
- // 3. something of the form access(c(x)), where c must NOT
- // be an existing valid function/extern/datum
- // 4. something of the form set(access(c(x)), d) where c must
- // NOT be an existing valid function/extern/datum
- bool valid = false;
- Node pattern = obj.args[0];
- Node substitution = obj.args[1];
- if (opcode(pattern.val) < 0 && !isValidFunctionName(pattern.val))
- valid = true;
- if (pattern.val == "set" &&
- opcode(pattern.args[0].val) < 0 &&
- !isValidFunctionName(pattern.args[0].val))
- valid = true;
- if (pattern.val == "access" &&
- opcode(pattern.args[0].val) < 0 &&
- !isValidFunctionName(pattern.args[0].val))
- if (pattern.val == "set" &&
- pattern.args[0].val == "access" &&
- opcode(pattern.args[0].args[0].val) < 0 &&
- !isValidFunctionName(pattern.args[0].args[0].val))
- valid = true;
- if (valid) {
- out.customMacros.push_back(rewriteRule(pattern, substitution));
- }
- }
- // Variable types
- else if (obj.val == "type") {
- std::string typeName = obj.args[0].val;
- std::vector<Node> vars = obj.args[1].args;
- for (unsigned i = 0; i < vars.size(); i++)
- out.types[vars[i].val] = typeName;
- }
- // Storage variables/structures
- else if (obj.val == "data") {
- out.storageVars = getStorageVars(out.storageVars,
- obj.args[0],
- "",
- storageDataCount);
- storageDataCount += 1;
- }
- else any.push_back(obj);
- }
- std::vector<Node> main;
- if (shared.args.size()) main.push_back(shared);
- if (init.args.size()) main.push_back(init);
-
- std::vector<Node> code;
- if (shared.args.size()) code.push_back(shared);
- for (unsigned i = 0; i < any.size(); i++)
- code.push_back(any[i]);
- for (unsigned i = 0; i < functions.size(); i++)
- code.push_back(functions[i]);
- Node codeNode;
- if (functions.size() > 0) {
- codeNode = astnode("with",
- token("__funid", m),
- astnode("byte",
- token("0", m),
- astnode("calldataload", token("0", m), m),
- m),
- astnode("seq", code, m),
- m);
- }
- else codeNode = astnode("seq", code, m);
- main.push_back(astnode("~return",
- token("0", m),
- astnode("lll",
- codeNode,
- token("0", m),
- m),
- m));
-
-
- Node result;
- if (main.size() == 1) result = main[0];
- else result = astnode("seq", main, inp.metadata);
- return preprocessResult(result, out);
-}
-
-preprocessResult processTypes (preprocessResult pr) {
- preprocessAux aux = pr.second;
- Node node = pr.first;
- if (node.type == TOKEN && aux.types.count(node.val)) {
- node = asn(aux.types[node.val], node, node.metadata);
- }
- else if (node.val == "untyped")
- return preprocessResult(node.args[0], aux);
- else {
- for (unsigned i = 0; i < node.args.size(); i++) {
- node.args[i] =
- processTypes(preprocessResult(node.args[i], aux)).first;
- }
- }
- return preprocessResult(node, aux);
-}
-
-preprocessResult preprocess(Node n) {
- return processTypes(preprocessInit(n));
-}
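To see the function-table bookkeeping described in the long comment above, a small driver can parse two defs and dump localExterns["self"]. parseSerpent comes from parser.h in this same tree, so this is a sketch rather than a definitive test.

    #include <iostream>
    #include <map>
    #include <string>
    #include "parser.h"
    #include "preprocess.h"

    int main() {
        // Two ordinary functions are assigned __funid 0 and 1 by preprocessInit.
        std::string src =
            "def register(key, value):\n"
            "    self.storage[key] = value\n"
            "def lookup(key):\n"
            "    return(self.storage[key])\n";
        preprocessResult pr = preprocess(parseSerpent(src));
        std::map<std::string, int> ids = pr.second.localExterns["self"];
        for (std::map<std::string, int>::iterator it = ids.begin(); it != ids.end(); ++it)
            std::cout << it->first << " -> " << it->second << "\n";
        // expected: lookup -> 1, register -> 0 (std::map iterates alphabetically)
        return 0;
    }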
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.h
deleted file mode 100644
index 944436aef..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef ETHSERP_PREPROCESSOR
-#define ETHSERP_PREPROCESSOR
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Storage variable index storing object
-struct svObj {
- std::map<std::string, std::string> offsets;
- std::map<std::string, int> indices;
- std::map<std::string, std::vector<std::string> > coefficients;
- std::map<std::string, bool> nonfinal;
- std::string globalOffset;
-};
-
-class rewriteRule {
- public:
- rewriteRule(Node p, Node s) {
- pattern = p;
- substitution = s;
- }
- Node pattern;
- Node substitution;
-};
-
-
-// Preprocessing result storing object
-class preprocessAux {
- public:
- preprocessAux() {
- globalExterns = std::map<std::string, int>();
- localExterns = std::map<std::string, std::map<std::string, int> >();
- localExterns["self"] = std::map<std::string, int>();
- }
- std::map<std::string, int> globalExterns;
- std::map<std::string, std::string> globalExternSigs;
- std::map<std::string, std::map<std::string, int> > localExterns;
- std::map<std::string, std::map<std::string, std::string> > localExternSigs;
- std::vector<rewriteRule> customMacros;
- std::map<std::string, std::string> types;
- svObj storageVars;
-};
-
-#define preprocessResult std::pair<Node, preprocessAux>
-
-// Populate an svObj with the arguments needed to determine
-// the storage position of a node
-svObj getStorageVars(svObj pre, Node node, std::string prefix="",
- int index=0);
-
-// Preprocess a function (see cpp for details)
-preprocessResult preprocess(Node inp);
-
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.cpp
deleted file mode 100644
index 38398aa46..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.cpp
+++ /dev/null
@@ -1,173 +0,0 @@
-#include <Python.h>
-#include "structmember.h"
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <iostream>
-#include "funcs.h"
-
-#define PYMETHOD(name, FROM, method, TO) \
- static PyObject * name(PyObject *, PyObject *args) { \
- try { \
- FROM(med) \
- return TO(method(med)); \
- } \
- catch (std::string e) { \
- PyErr_SetString(PyExc_Exception, e.c_str()); \
- return NULL; \
- } \
- }
-
-#define FROMSTR(v) \
- const char *command; \
- int len; \
- if (!PyArg_ParseTuple(args, "s#", &command, &len)) \
- return NULL; \
- std::string v = std::string(command, len); \
-
-#define FROMNODE(v) \
- PyObject *node; \
- if (!PyArg_ParseTuple(args, "O", &node)) \
- return NULL; \
- Node v = cppifyNode(node);
-
-#define FROMLIST(v) \
- PyObject *node; \
- if (!PyArg_ParseTuple(args, "O", &node)) \
- return NULL; \
- std::vector<Node> v = cppifyNodeList(node);
-
-// Convert metadata into python wrapper form [file, ln, ch]
-PyObject* pyifyMetadata(Metadata m) {
- PyObject* a = PyList_New(0);
- PyList_Append(a, Py_BuildValue("s#", m.file.c_str(), m.file.length()));
- PyList_Append(a, Py_BuildValue("i", m.ln));
- PyList_Append(a, Py_BuildValue("i", m.ch));
- return a;
-}
-
-// Convert node into python wrapper form
-// [token=0/astnode=1, val, metadata, args]
-PyObject* pyifyNode(Node n) {
- PyObject* a = PyList_New(0);
- PyList_Append(a, Py_BuildValue("i", n.type == ASTNODE));
- PyList_Append(a, Py_BuildValue("s#", n.val.c_str(), n.val.length()));
- PyList_Append(a, pyifyMetadata(n.metadata));
- for (unsigned i = 0; i < n.args.size(); i++)
- PyList_Append(a, pyifyNode(n.args[i]));
- return a;
-}
-
-// Convert string into python wrapper form
-PyObject* pyifyString(std::string s) {
- return Py_BuildValue("s#", s.c_str(), s.length());
-}
-
-// Convert list of nodes into python wrapper form
-PyObject* pyifyNodeList(std::vector<Node> n) {
- PyObject* a = PyList_New(0);
- for (unsigned i = 0; i < n.size(); i++)
- PyList_Append(a, pyifyNode(n[i]));
- return a;
-}
-
-// Convert pyobject int into normal form
-int cppifyInt(PyObject* o) {
- int out;
- if (!PyArg_Parse(o, "i", &out))
- err("Argument should be integer", Metadata());
- return out;
-}
-
-// Convert pyobject string into normal form
-std::string cppifyString(PyObject* o) {
- const char *command;
- if (!PyArg_Parse(o, "s", &command))
- err("Argument should be string", Metadata());
- return std::string(command);
-}
-
-// Convert metadata from python wrapper form
-Metadata cppifyMetadata(PyObject* o) {
- std::string file = cppifyString(PyList_GetItem(o, 0));
- int ln = cppifyInt(PyList_GetItem(o, 1));
- int ch = cppifyInt(PyList_GetItem(o, 2));
- return Metadata(file, ln, ch);
-}
-
-// Convert node from python wrapper form
-Node cppifyNode(PyObject* o) {
- Node n;
- int isAstNode = cppifyInt(PyList_GetItem(o, 0));
- n.type = isAstNode ? ASTNODE : TOKEN;
- n.val = cppifyString(PyList_GetItem(o, 1));
- n.metadata = cppifyMetadata(PyList_GetItem(o, 2));
- std::vector<Node> args;
- for (int i = 3; i < PyList_Size(o); i++) {
- args.push_back(cppifyNode(PyList_GetItem(o, i)));
- }
- n.args = args;
- return n;
-}
-
-//Convert list of nodes into normal form
-std::vector<Node> cppifyNodeList(PyObject* o) {
- std::vector<Node> out;
- for (int i = 0; i < PyList_Size(o); i++) {
- out.push_back(cppifyNode(PyList_GetItem(o,i)));
- }
- return out;
-}
-
-PYMETHOD(ps_compile, FROMSTR, compile, pyifyString)
-PYMETHOD(ps_compile_chunk, FROMSTR, compileChunk, pyifyString)
-PYMETHOD(ps_compile_to_lll, FROMSTR, compileToLLL, pyifyNode)
-PYMETHOD(ps_compile_chunk_to_lll, FROMSTR, compileChunkToLLL, pyifyNode)
-PYMETHOD(ps_compile_lll, FROMNODE, compileLLL, pyifyString)
-PYMETHOD(ps_parse, FROMSTR, parseSerpent, pyifyNode)
-PYMETHOD(ps_rewrite, FROMNODE, rewrite, pyifyNode)
-PYMETHOD(ps_rewrite_chunk, FROMNODE, rewriteChunk, pyifyNode)
-PYMETHOD(ps_pretty_compile, FROMSTR, prettyCompile, pyifyNodeList)
-PYMETHOD(ps_pretty_compile_chunk, FROMSTR, prettyCompileChunk, pyifyNodeList)
-PYMETHOD(ps_pretty_compile_lll, FROMNODE, prettyCompileLLL, pyifyNodeList)
-PYMETHOD(ps_serialize, FROMLIST, serialize, pyifyString)
-PYMETHOD(ps_deserialize, FROMSTR, deserialize, pyifyNodeList)
-PYMETHOD(ps_parse_lll, FROMSTR, parseLLL, pyifyNode)
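For readers not fond of macro soup, this is roughly what a single instantiation, PYMETHOD(ps_parse, FROMSTR, parseSerpent, pyifyNode), expands to after preprocessing; a hand expansion for illustration only, not code from the removed file.

    static PyObject * ps_parse(PyObject *, PyObject *args) {
        try {
            const char *command;
            int len;
            if (!PyArg_ParseTuple(args, "s#", &command, &len))
                return NULL;
            std::string med = std::string(command, len);
            return pyifyNode(parseSerpent(med));
        }
        catch (std::string e) {
            PyErr_SetString(PyExc_Exception, e.c_str());
            return NULL;
        }
    }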
-
-
-static PyMethodDef PyextMethods[] = {
- {"compile", ps_compile, METH_VARARGS,
- "Compile code."},
- {"compile_chunk", ps_compile_chunk, METH_VARARGS,
- "Compile code chunk (no wrappers)."},
- {"compile_to_lll", ps_compile_to_lll, METH_VARARGS,
- "Compile code to LLL."},
- {"compile_chunk_to_lll", ps_compile_chunk_to_lll, METH_VARARGS,
- "Compile code chunk to LLL (no wrappers)."},
- {"compile_lll", ps_compile_lll, METH_VARARGS,
- "Compile LLL to EVM."},
- {"parse", ps_parse, METH_VARARGS,
- "Parse serpent"},
- {"rewrite", ps_rewrite, METH_VARARGS,
- "Rewrite parsed serpent to LLL"},
- {"rewrite_chunk", ps_rewrite_chunk, METH_VARARGS,
- "Rewrite parsed serpent to LLL (no wrappers)"},
- {"pretty_compile", ps_pretty_compile, METH_VARARGS,
- "Compile to EVM opcodes"},
- {"pretty_compile_chunk", ps_pretty_compile_chunk, METH_VARARGS,
- "Compile chunk to EVM opcodes (no wrappers)"},
- {"pretty_compile_lll", ps_pretty_compile_lll, METH_VARARGS,
- "Compile LLL to EVM opcodes"},
- {"serialize", ps_serialize, METH_VARARGS,
- "Convert EVM opcodes to bin"},
- {"deserialize", ps_deserialize, METH_VARARGS,
- "Convert EVM bin to opcodes"},
- {"parse_lll", ps_parse_lll, METH_VARARGS,
- "Parse LLL"},
- {NULL, NULL, 0, NULL} /* Sentinel */
-};
-
-PyMODINIT_FUNC initserpent_pyext(void)
-{
- Py_InitModule( "serpent_pyext", PyextMethods );
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.py
deleted file mode 100644
index 2103b48fe..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.py
+++ /dev/null
@@ -1 +0,0 @@
-from serpent import *
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.cpp
deleted file mode 100644
index 4cdce4f0a..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.cpp
+++ /dev/null
@@ -1,804 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-#include "optimize.h"
-#include "rewriteutils.h"
-#include "preprocess.h"
-#include "functions.h"
-#include "opcodes.h"
-
-// Rewrite rules
-std::string macros[][2] = {
- {
- "(seq $x)",
- "$x"
- },
- {
- "(seq (seq) $x)",
- "$x"
- },
- {
- "(+= $a $b)",
- "(set $a (+ $a $b))"
- },
- {
- "(*= $a $b)",
- "(set $a (* $a $b))"
- },
- {
- "(-= $a $b)",
- "(set $a (- $a $b))"
- },
- {
- "(/= $a $b)",
- "(set $a (/ $a $b))"
- },
- {
- "(%= $a $b)",
- "(set $a (% $a $b))"
- },
- {
- "(^= $a $b)",
- "(set $a (^ $a $b))"
- },
- {
- "(!= $a $b)",
- "(iszero (eq $a $b))"
- },
- {
- "(assert $x)",
- "(unless $x (stop))"
- },
- {
- "(min $a $b)",
- "(with $1 $a (with $2 $b (if (lt $1 $2) $1 $2)))"
- },
- {
- "(max $a $b)",
- "(with $1 $a (with $2 $b (if (lt $1 $2) $2 $1)))"
- },
- {
- "(smin $a $b)",
- "(with $1 $a (with $2 $b (if (slt $1 $2) $1 $2)))"
- },
- {
- "(smax $a $b)",
- "(with $1 $a (with $2 $b (if (slt $1 $2) $2 $1)))"
- },
- {
- "(if $cond $do (else $else))",
- "(if $cond $do $else)"
- },
- {
- "(code $code)",
- "$code"
- },
- {
- "(slice $arr $pos)",
- "(add $arr (mul 32 $pos))",
- },
- {
- "(array $len)",
- "(alloc (mul 32 $len))"
- },
- {
- "(while $cond $do)",
- "(until (iszero $cond) $do)",
- },
- {
- "(while (iszero $cond) $do)",
- "(until $cond $do)",
- },
- {
- "(if $cond $do)",
- "(unless (iszero $cond) $do)",
- },
- {
- "(if (iszero $cond) $do)",
- "(unless $cond $do)",
- },
- {
- "(access (. self storage) $ind)",
- "(sload $ind)"
- },
- {
- "(access $var $ind)",
- "(mload (add $var (mul 32 $ind)))"
- },
- {
- "(set (access (. self storage) $ind) $val)",
- "(sstore $ind $val)"
- },
- {
- "(set (access $var $ind) $val)",
- "(mstore (add $var (mul 32 $ind)) $val)"
- },
- {
- "(getch $var $ind)",
- "(mod (mload (sub (add $var $ind) 31)) 256)"
- },
- {
- "(setch $var $ind $val)",
- "(mstore8 (add $var $ind) $val)",
- },
- {
- "(send $to $value)",
- "(~call (sub (gas) 25) $to $value 0 0 0 0)"
- },
- {
- "(send $gas $to $value)",
- "(~call $gas $to $value 0 0 0 0)"
- },
- {
- "(sha3 $x)",
- "(seq (set $1 $x) (~sha3 (ref $1) 32))"
- },
- {
- "(sha3 $mstart (= chars $msize))",
- "(~sha3 $mstart $msize)"
- },
- {
- "(sha3 $mstart $msize)",
- "(~sha3 $mstart (mul 32 $msize))"
- },
- {
- "(id $0)",
- "$0"
- },
- {
- "(return $x)",
- "(seq (set $1 $x) (~return (ref $1) 32))"
- },
- {
- "(return $mstart (= chars $msize))",
- "(~return $mstart $msize)"
- },
- {
- "(return $start $len)",
- "(~return $start (mul 32 $len))"
- },
- {
- "(&& $x $y)",
- "(if $x $y 0)"
- },
- {
- "(|| $x $y)",
- "(with $1 $x (if $1 $1 $y))"
- },
- {
- "(>= $x $y)",
- "(iszero (slt $x $y))"
- },
- {
- "(<= $x $y)",
- "(iszero (sgt $x $y))"
- },
- {
- "(create $code)",
- "(create 0 $code)"
- },
- {
- "(create $endowment $code)",
- "(with $1 (msize) (create $endowment (get $1) (lll (outer $code) (msize))))"
- },
- {
- "(sha256 $x)",
- "(with $1 (alloc 64) (seq (mstore (add (get $1) 32) $x) (pop (~call 101 2 0 (add (get $1) 32) 32 (get $1) 32)) (mload (get $1))))"
- },
- {
- "(sha256 $arr (= chars $sz))",
- "(with $1 (alloc 32) (seq (pop (~call 101 2 0 $arr $sz (get $1) 32)) (mload (get $1))))"
- },
- {
- "(sha256 $arr $sz)",
- "(with $1 (alloc 32) (seq (pop (~call 101 2 0 $arr (mul 32 $sz) (get $1) 32)) (mload (get $1))))"
- },
- {
- "(ripemd160 $x)",
- "(with $1 (alloc 64) (seq (mstore (add (get $1) 32) $x) (pop (~call 101 3 0 (add (get $1) 32) 32 (get $1) 32)) (mload (get $1))))"
- },
- {
- "(ripemd160 $arr (= chars $sz))",
- "(with $1 (alloc 32) (seq (pop (~call 101 3 0 $arr $sz (mload $1) 32)) (mload (get $1))))"
- },
- {
- "(ripemd160 $arr $sz)",
- "(with $1 (alloc 32) (seq (pop (~call 101 3 0 $arr (mul 32 $sz) (get $1) 32)) (mload (get $1))))"
- },
- {
- "(ecrecover $h $v $r $s)",
- "(with $1 (alloc 160) (seq (mstore (get $1) $h) (mstore (add (get $1) 32) $v) (mstore (add (get $1) 64) $r) (mstore (add (get $1) 96) $s) (pop (~call 101 1 0 (get $1) 128 (add (get $1 128)) 32)) (mload (add (get $1) 128))))"
- },
- {
- "(inset $x)",
- "$x"
- },
- {
- "(create $x)",
- "(with $1 (msize) (create $val (get $1) (lll $code (get $1))))"
- },
- {
- "(with (= $var $val) $cond)",
- "(with $var $val $cond)"
- },
- {
- "(log $t1)",
- "(~log1 0 0 $t1)"
- },
- {
- "(log $t1 $t2)",
- "(~log2 0 0 $t1 $t2)"
- },
- {
- "(log $t1 $t2 $t3)",
- "(~log3 0 0 $t1 $t2 $t3)"
- },
- {
- "(log $t1 $t2 $t3 $t4)",
- "(~log4 0 0 $t1 $t2 $t3 $t4)"
- },
- {
- "(logarr $a $sz)",
- "(~log0 $a (mul 32 $sz))"
- },
- {
- "(logarr $a $sz $t1)",
- "(~log1 $a (mul 32 $sz) $t1)"
- },
- {
- "(logarr $a $sz $t1 $t2)",
- "(~log2 $a (mul 32 $sz) $t1 $t2)"
- },
- {
- "(logarr $a $sz $t1 $t2 $t3)",
- "(~log3 $a (mul 32 $sz) $t1 $t2 $t3)"
- },
- {
- "(logarr $a $sz $t1 $t2 $t3 $t4)",
- "(~log4 $a (mul 32 $sz) $t1 $t2 $t3 $t4)"
- },
- {
- "(save $loc $array (= chars $count))",
- "(with $location (ref $loc) (with $c $count (with $end (div $c 32) (with $i 0 (seq (while (slt $i $end) (seq (sstore (add $i $location) (access $array $i)) (set $i (add $i 1)))) (sstore (add $i $location) (~and (access $array $i) (sub 0 (exp 256 (sub 32 (mod $c 32)))))))))))"
- },
- {
- "(save $loc $array $count)",
- "(with $location (ref $loc) (with $end $count (with $i 0 (while (slt $i $end) (seq (sstore (add $i $location) (access $array $i)) (set $i (add $i 1)))))))"
- },
- {
- "(load $loc (= chars $count))",
- "(with $location (ref $loc) (with $c $count (with $a (alloc $c) (with $i 0 (seq (while (slt $i (div $c 32)) (seq (set (access $a $i) (sload (add $location $i))) (set $i (add $i 1)))) (set (access $a $i) (~and (sload (add $location $i)) (sub 0 (exp 256 (sub 32 (mod $c 32)))))) $a)))))"
- },
- {
- "(load $loc $count)",
- "(with $location (ref $loc) (with $c $count (with $a (alloc $c) (with $i 0 (seq (while (slt $i $c) (seq (set (access $a $i) (sload (add $location $i))) (set $i (add $i 1)))) $a)))))"
- },
- {
- "(unsafe_mcopy $to $from $sz)",
- "(with _sz $sz (with _from $from (with _to $to (seq (comment STARTING UNSAFE MCOPY) (with _i 0 (while (lt _i _sz) (seq (mstore (add $to _i) (mload (add _from _i))) (set _i (add _i 32)))))))))"
- },
- {
- "(mcopy $to $from $_sz)",
- "(with _to $to (with _from $from (with _sz $sz (seq (comment STARTING MCOPY (with _i 0 (seq (while (lt (add _i 31) _sz) (seq (mstore (add _to _i) (mload (add _from _i))) (set _i (add _i 32)))) (with _mask (exp 256 (sub 32 (mod _sz 32))) (mstore (add $to _i) (add (mod (mload (add $to _i)) _mask) (and (mload (add $from _i)) (sub 0 _mask))))))))))))"
- },
- { "(. msg sender)", "(caller)" },
- { "(. msg value)", "(callvalue)" },
- { "(. tx gasprice)", "(gasprice)" },
- { "(. tx origin)", "(origin)" },
- { "(. tx gas)", "(gas)" },
- { "(. $x balance)", "(balance $x)" },
- { "self", "(address)" },
- { "(. block prevhash)", "(prevhash)" },
- { "(. block coinbase)", "(coinbase)" },
- { "(. block timestamp)", "(timestamp)" },
- { "(. block number)", "(number)" },
- { "(. block difficulty)", "(difficulty)" },
- { "(. block gaslimit)", "(gaslimit)" },
- { "stop", "(stop)" },
- { "---END---", "" } //Keep this line at the end of the list
-};
-
-std::vector<rewriteRule> nodeMacros;
-
-// Token synonyms
-std::string synonyms[][2] = {
- { "or", "||" },
- { "and", "&&" },
- { "|", "~or" },
- { "&", "~and" },
- { "elif", "if" },
- { "!", "iszero" },
- { "~", "~not" },
- { "not", "iszero" },
- { "string", "alloc" },
- { "+", "add" },
- { "-", "sub" },
- { "*", "mul" },
- { "/", "sdiv" },
- { "^", "exp" },
- { "**", "exp" },
- { "%", "smod" },
- { "<", "slt" },
- { ">", "sgt" },
- { "=", "set" },
- { "==", "eq" },
- { ":", "kv" },
- { "---END---", "" } //Keep this line at the end of the list
-};
-
-// Custom setters (need to be registered separately
-// for use with managed storage)
-std::string setters[][2] = {
- { "+=", "+" },
- { "-=", "-" },
- { "*=", "*" },
- { "/=", "/" },
- { "%=", "%" },
- { "^=", "^" },
- { "---END---", "" } //Keep this line at the end of the list
-};
-
-// Processes mutable array literals
-Node array_lit_transform(Node node) {
- std::string prefix = "_temp"+mkUniqueToken() + "_";
- Metadata m = node.metadata;
- std::map<std::string, Node> d;
- std::string o = "(seq (set $arr (alloc "+utd(node.args.size()*32)+"))";
- for (unsigned i = 0; i < node.args.size(); i++) {
- o += " (mstore (add (get $arr) "+utd(i * 32)+") $"+utd(i)+")";
- d[utd(i)] = node.args[i];
- }
- o += " (get $arr))";
- return subst(parseLLL(o), d, prefix, m);
-}
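Like the parser helpers, array_lit_transform() is local to this file, so a hypothetical debug hook appended to rewriter.cpp is the easiest way to see its output (printAST assumed from util.h).

    // Hypothetical debug helper, appended to rewriter.cpp.
    void debugArrayLit() {
        Metadata m("demo", 0, 0);
        std::vector<Node> elems;
        elems.push_back(token("7", m));
        elems.push_back(token("8", m));
        elems.push_back(token("9", m));
        Node out = array_lit_transform(astnode("array_lit", elems, m));
        std::cerr << printAST(out) << "\n";
        // roughly: (seq (set _temp..._arr (alloc 96)) (mstore ...) ... (get _temp..._arr))
    }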
-
-
-Node apply_rules(preprocessResult pr);
-
-// Transform "<variable>.<fun>(args...)" into
-// a call
-Node dotTransform(Node node, preprocessAux aux) {
- Metadata m = node.metadata;
- // We're gonna make lots of temporary variables,
- // so set up a unique flag for them
- std::string prefix = "_temp"+mkUniqueToken()+"_";
- // Check that the function name is a token
- if (node.args[0].args[1].type == ASTNODE)
- err("Function name must be static", m);
-
- Node dotOwner = node.args[0].args[0];
- std::string dotMember = node.args[0].args[1].val;
- // kwargs = map of special arguments
- std::map<std::string, Node> kwargs;
- kwargs["value"] = token("0", m);
- kwargs["gas"] = subst(parseLLL("(- (gas) 25)"), msn(), prefix, m);
- // Search for as=? and call=code keywords, and isolate the actual
- // function arguments
- std::vector<Node> fnargs;
- std::string as = "";
- std::string op = "call";
- for (unsigned i = 1; i < node.args.size(); i++) {
- fnargs.push_back(node.args[i]);
- Node arg = fnargs.back();
- if (arg.val == "=" || arg.val == "set") {
- if (arg.args[0].val == "as")
- as = arg.args[1].val;
- if (arg.args[0].val == "call" && arg.args[1].val == "code")
- op = "callcode";
- if (arg.args[0].val == "gas")
- kwargs["gas"] = arg.args[1];
- if (arg.args[0].val == "value")
- kwargs["value"] = arg.args[1];
- if (arg.args[0].val == "outsz")
- kwargs["outsz"] = arg.args[1];
- }
- }
- if (dotOwner.val == "self") {
- if (as.size()) err("Cannot use \"as\" when calling self!", m);
- as = dotOwner.val;
- }
- // Determine the funId and sig assuming the "as" keyword was used
- int funId = 0;
- std::string sig;
- if (as.size() > 0 && aux.localExterns.count(as)) {
- if (!aux.localExterns[as].count(dotMember))
- err("Invalid call: "+printSimple(dotOwner)+"."+dotMember, m);
- funId = aux.localExterns[as][dotMember];
- sig = aux.localExternSigs[as][dotMember];
- }
- // Determine the funId and sig otherwise
- else if (!as.size()) {
- if (!aux.globalExterns.count(dotMember))
- err("Invalid call: "+printSimple(dotOwner)+"."+dotMember, m);
- std::string key = unsignedToDecimal(aux.globalExterns[dotMember]);
- funId = aux.globalExterns[dotMember];
- sig = aux.globalExternSigs[dotMember];
- }
- else err("Invalid call: "+printSimple(dotOwner)+"."+dotMember, m);
- // Pack arguments
- kwargs["data"] = packArguments(fnargs, sig, funId, m);
- kwargs["to"] = dotOwner;
- Node main;
- // Pack output
- if (!kwargs.count("outsz")) {
- main = parseLLL(
- "(with _data $data (seq "
- "(pop (~"+op+" $gas $to $value (access _data 0) (access _data 1) (ref $dataout) 32))"
- "(get $dataout)))");
- }
- else {
- main = parseLLL(
- "(with _data $data (with _outsz (mul 32 $outsz) (with _out (alloc _outsz) (seq "
- "(pop (~"+op+" $gas $to $value (access _data 0) (access _data 1) _out _outsz))"
- "(get _out)))))");
- }
- // Set up main call
-
- Node o = subst(main, kwargs, prefix, m);
- return o;
-}
-
-// Transform an access of the form self.bob, self.users[5], etc into
-// a storage access
-//
-// There exist two types of objects: finite objects, and infinite
-// objects. Finite objects are packed optimally tightly into storage
-// accesses; for example:
-//
-// data obj[100](a, b[2][4], c)
-//
-// obj[0].a -> 0
-// obj[0].b[0][0] -> 1
-// obj[0].b[1][3] -> 8
-// obj[45].c -> 459
-//
-// Infinite objects are accessed by sha3([v1, v2, v3 ... ]), where
-// the values are a list of array indices and keyword indices, for
-// example:
-// data obj[](a, b[2][4], c)
-// data obj2[](a, b[][], c)
-//
-// obj[0].a -> sha3([0, 0, 0])
-// obj[5].b[1][3] -> sha3([0, 5, 1, 1, 3])
-// obj[45].c -> sha3([0, 45, 2])
-// obj2[0].a -> sha3([1, 0, 0])
-// obj2[5].b[1][3] -> sha3([1, 5, 1, 1, 3])
-// obj2[45].c -> sha3([1, 45, 2])
-Node storageTransform(Node node, preprocessAux aux,
- bool mapstyle=false, bool ref=false) {
- Metadata m = node.metadata;
- // Get a list of all of the "access parameters" used in order
- // eg. self.users[5].cow[4][m[2]][woof] ->
- // [--self, --users, 5, --cow, 4, m[2], woof]
- std::vector<Node> hlist = listfyStorageAccess(node);
- // For infinite arrays, the terms array will just provide a list
- // of indices. For finite arrays, it's a list of index*coefficient
- std::vector<Node> terms;
- std::string offset = "0";
- std::string prefix = "";
- std::string varPrefix = "_temp"+mkUniqueToken()+"_";
- int c = 0;
- std::vector<std::string> coefficients;
- coefficients.push_back("");
- for (unsigned i = 1; i < hlist.size(); i++) {
- // We pre-add the -- flag to parameter-like terms. For example,
- // self.users[m] -> [--self, --users, m]
- // self.users.m -> [--self, --users, --m]
- if (hlist[i].val.substr(0, 2) == "--") {
- prefix += hlist[i].val.substr(2) + ".";
- std::string tempPrefix = prefix.substr(0, prefix.size()-1);
- if (!aux.storageVars.offsets.count(tempPrefix))
- return node;
- if (c < (signed)coefficients.size() - 1)
- err("Too few array index lookups", m);
- if (c > (signed)coefficients.size() - 1)
- err("Too many array index lookups", m);
- coefficients = aux.storageVars.coefficients[tempPrefix];
- // If the size of an object exceeds 2^176, we make it an infinite
- // array
- if (decimalGt(coefficients.back(), tt176) && !mapstyle)
- return storageTransform(node, aux, true, ref);
- offset = decimalAdd(offset, aux.storageVars.offsets[tempPrefix]);
- c = 0;
- if (mapstyle)
- terms.push_back(token(unsignedToDecimal(
- aux.storageVars.indices[tempPrefix])));
- }
- else if (mapstyle) {
- terms.push_back(hlist[i]);
- c += 1;
- }
- else {
- if (c > (signed)coefficients.size() - 2)
- err("Too many array index lookups", m);
- terms.push_back(
- astnode("mul",
- hlist[i],
- token(coefficients[coefficients.size() - 2 - c], m),
- m));
-
- c += 1;
- }
- }
- if (aux.storageVars.nonfinal.count(prefix.substr(0, prefix.size()-1)))
- err("Storage variable access not deep enough", m);
- if (c < (signed)coefficients.size() - 1) {
- err("Too few array index lookups", m);
- }
- if (c > (signed)coefficients.size() - 1) {
- err("Too many array index lookups", m);
- }
- Node o;
- if (mapstyle) {
- std::string t = "_temp_"+mkUniqueToken();
- std::vector<Node> sub;
- for (unsigned i = 0; i < terms.size(); i++)
- sub.push_back(asn("mstore",
- asn("add",
- tkn(utd(i * 32), m),
- asn("get", tkn(t+"pos", m), m),
- m),
- terms[i],
- m));
- sub.push_back(tkn(t+"pos", m));
- Node main = asn("with",
- tkn(t+"pos", m),
- asn("alloc", tkn(utd(terms.size() * 32), m), m),
- asn("seq", sub, m),
- m);
- Node sz = token(utd(terms.size() * 32), m);
- o = astnode("~sha3",
- main,
- sz,
- m);
- }
- else {
- // We add up all the index*coefficients
- Node out = token(offset, node.metadata);
- for (unsigned i = 0; i < terms.size(); i++) {
- std::vector<Node> temp;
- temp.push_back(out);
- temp.push_back(terms[i]);
- out = astnode("add", temp, node.metadata);
- }
- o = out;
- }
- if (ref) return o;
- else return astnode("sload", o, node.metadata);
-}
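The finite-object case above reduces to an affine sum of index*coefficient terms plus a base offset. A self-contained check of the numbers quoted in the comment (data obj[100](a, b[2][4], c) packs 10 storage slots per element):

    #include <iostream>

    // Slot formulas implied by the layout in the comment above:
    //   a       -> 10*i
    //   b[x][y] -> 10*i + 1 + 4*x + y
    //   c       -> 10*i + 9
    int slotA(int i)               { return 10 * i; }
    int slotB(int i, int x, int y) { return 10 * i + 1 + 4 * x + y; }
    int slotC(int i)               { return 10 * i + 9; }

    int main() {
        std::cout << slotA(0)       << "\n";   // 0
        std::cout << slotB(0, 1, 3) << "\n";   // 8
        std::cout << slotC(45)      << "\n";   // 459
        return 0;
    }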
-
-
-// Recursively applies rewrite rules
-std::pair<Node, bool> apply_rules_iter(preprocessResult pr) {
- bool changed = false;
- Node node = pr.first;
- // If the rewrite rules have not yet been parsed, parse them
- if (!nodeMacros.size()) {
- for (int i = 0; i < 9999; i++) {
- std::vector<Node> o;
- if (macros[i][0] == "---END---") break;
- nodeMacros.push_back(rewriteRule(
- parseLLL(macros[i][0]),
- parseLLL(macros[i][1])
- ));
- }
- }
- // Assignment transformations
- for (int i = 0; i < 9999; i++) {
- if (setters[i][0] == "---END---") break;
- if (node.val == setters[i][0]) {
- node = astnode("=",
- node.args[0],
- astnode(setters[i][1],
- node.args[0],
- node.args[1],
- node.metadata),
- node.metadata);
- }
- }
- // Do nothing to macros
- if (node.val == "macro") {
- return std::pair<Node, bool>(node, changed);
- }
- // Ignore comments
- if (node.val == "comment") {
- return std::pair<Node, bool>(node, changed);
- }
- // Special storage transformation
- if (isNodeStorageVariable(node)) {
- node = storageTransform(node, pr.second);
- changed = true;
- }
- if (node.val == "ref" && isNodeStorageVariable(node.args[0])) {
- node = storageTransform(node.args[0], pr.second, false, true);
- changed = true;
- }
- if (node.val == "=" && isNodeStorageVariable(node.args[0])) {
- Node t = storageTransform(node.args[0], pr.second);
- if (t.val == "sload") {
- std::vector<Node> o;
- o.push_back(t.args[0]);
- o.push_back(node.args[1]);
- node = astnode("sstore", o, node.metadata);
- }
- changed = true;
- }
- // Main code
- unsigned pos = 0;
- std::string prefix = "_temp"+mkUniqueToken()+"_";
- while(1) {
- if (synonyms[pos][0] == "---END---") {
- break;
- }
- else if (node.type == ASTNODE && node.val == synonyms[pos][0]) {
- node.val = synonyms[pos][1];
- changed = true;
- }
- pos++;
- }
- for (pos = 0; pos < nodeMacros.size() + pr.second.customMacros.size(); pos++) {
- rewriteRule macro = pos < nodeMacros.size()
- ? nodeMacros[pos]
- : pr.second.customMacros[pos - nodeMacros.size()];
- matchResult mr = match(macro.pattern, node);
- if (mr.success) {
- node = subst(macro.substitution, mr.map, prefix, node.metadata);
- std::pair<Node, bool> o =
- apply_rules_iter(preprocessResult(node, pr.second));
- o.second = true;
- return o;
- }
- }
- // Special transformations
- if (node.val == "outer") {
- node = apply_rules(preprocess(node.args[0]));
- changed = true;
- }
- if (node.val == "array_lit") {
- node = array_lit_transform(node);
- changed = true;
- }
- if (node.val == "fun" && node.args[0].val == ".") {
- node = dotTransform(node, pr.second);
- changed = true;
- }
- if (node.type == ASTNODE) {
- unsigned i = 0;
- if (node.val == "set" || node.val == "ref"
- || node.val == "get" || node.val == "with") {
- if (node.args[0].val.size() > 0 && node.args[0].val[0] != '\''
- && node.args[0].type == TOKEN && node.args[0].val[0] != '$') {
- node.args[0].val = "'" + node.args[0].val;
- changed = true;
- }
- i = 1;
- }
- else if (node.val == "arglen") {
- node.val = "get";
- node.args[0].val = "'_len_" + node.args[0].val;
- i = 1;
- changed = true;
- }
- for (; i < node.args.size(); i++) {
- std::pair<Node, bool> r =
- apply_rules_iter(preprocessResult(node.args[i], pr.second));
- node.args[i] = r.first;
- changed = changed || r.second;
- }
- }
- else if (node.type == TOKEN && !isNumberLike(node)) {
- if (node.val.size() >= 2
- && node.val[0] == '"'
- && node.val[node.val.size() - 1] == '"') {
- std::string bin = node.val.substr(1, node.val.size() - 2);
- unsigned sz = bin.size();
- std::vector<Node> o;
- for (unsigned i = 0; i < sz; i += 32) {
- std::string t = binToNumeric(bin.substr(i, 32));
- if ((sz - i) < 32 && (sz - i) > 0) {
- while ((sz - i) < 32) {
- t = decimalMul(t, "256");
- i--;
- }
- i = sz;
- }
- o.push_back(token(t, node.metadata));
- }
- node = astnode("array_lit", o, node.metadata);
- std::pair<Node, bool> r =
- apply_rules_iter(preprocessResult(node, pr.second));
- node = r.first;
- changed = true;
- }
- else if (node.val.size() && node.val[0] != '\'' && node.val[0] != '$') {
- node.val = "'" + node.val;
- std::vector<Node> args;
- args.push_back(node);
- std::string v = node.val.substr(1);
- node = astnode("get", args, node.metadata);
- changed = true;
- }
- }
- return std::pair<Node, bool>(node, changed);
-}
-
-Node apply_rules(preprocessResult pr) {
- for (unsigned i = 0; i < pr.second.customMacros.size(); i++) {
- pr.second.customMacros[i].pattern =
- apply_rules(preprocessResult(pr.second.customMacros[i].pattern, preprocessAux()));
- }
- while (1) {
- //std::cerr << printAST(pr.first) <<
- // " " << pr.second.customMacros.size() << "\n";
- std::pair<Node, bool> r = apply_rules_iter(pr);
- if (!r.second) {
- return r.first;
- }
- pr.first = r.first;
- }
-}
-
-Node validate(Node inp) {
- Metadata m = inp.metadata;
- if (inp.type == ASTNODE) {
- int i = 0;
- while(validFunctions[i][0] != "---END---") {
- if (inp.val == validFunctions[i][0]) {
- std::string sz = unsignedToDecimal(inp.args.size());
- if (decimalGt(validFunctions[i][1], sz)) {
- err("Too few arguments for "+inp.val, inp.metadata);
- }
- if (decimalGt(sz, validFunctions[i][2])) {
- err("Too many arguments for "+inp.val, inp.metadata);
- }
- }
- i++;
- }
- }
- for (unsigned i = 0; i < inp.args.size(); i++) validate(inp.args[i]);
- return inp;
-}
-
-Node postValidate(Node inp) {
- // This allows people to use ~x as a way of having functions with the same
- // name and arity as macros; the idea is that ~x is a "final" form, and
- // should not be remacroed, but it is converted back at the end
- if (inp.val.size() > 0 && inp.val[0] == '~') {
- inp.val = inp.val.substr(1);
- }
- if (inp.type == ASTNODE) {
- if (inp.val == ".")
- err("Invalid object member (ie. a foo.bar not mapped to anything)",
- inp.metadata);
- else if (opcode(inp.val) >= 0) {
- if ((signed)inp.args.size() < opinputs(inp.val))
- err("Too few arguments for "+inp.val, inp.metadata);
- if ((signed)inp.args.size() > opinputs(inp.val))
- err("Too many arguments for "+inp.val, inp.metadata);
- }
- else if (isValidLLLFunc(inp.val, inp.args.size())) {
- // do nothing
- }
- else err ("Invalid argument count or LLL function: "+inp.val, inp.metadata);
- for (unsigned i = 0; i < inp.args.size(); i++) {
- inp.args[i] = postValidate(inp.args[i]);
- }
- }
- return inp;
-}
-
-Node rewrite(Node inp) {
- return postValidate(optimize(apply_rules(preprocess(inp))));
-}
-
-Node rewriteChunk(Node inp) {
- return postValidate(optimize(apply_rules(
- preprocessResult(
- validate(inp), preprocessAux()))));
-}
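Putting the file together: the whole serpent-to-LLL pass is just rewrite(parseSerpent(src)). A throwaway driver, with printAST again assumed from util.h:

    #include <iostream>
    #include <string>
    #include "parser.h"
    #include "rewriter.h"
    #include "util.h"   // printAST (assumed)

    int main() {
        // parse -> preprocess -> macro/synonym rewriting -> optimize -> validate
        std::string src =
            "def double(x):\n"
            "    return(x * 2)\n";
        Node lll = rewrite(parseSerpent(src));
        std::cout << printAST(lll) << "\n";   // the LLL program fed to the compiler stage
        return 0;
    }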
-
-using namespace std;
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.h
deleted file mode 100644
index 716815cee..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef ETHSERP_REWRITER
-#define ETHSERP_REWRITER
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Applies rewrite rules
-Node rewrite(Node inp);
-
-// Applies rewrite rules without adding the wrapper
-Node rewriteChunk(Node inp);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.cpp
deleted file mode 100644
index 0d810bdbc..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.cpp
+++ /dev/null
@@ -1,211 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-#include "rewriteutils.h"
-#include "optimize.h"
-
-// Valid functions and their min and max argument counts
-std::string validFunctions[][3] = {
- { "if", "2", "3" },
- { "unless", "2", "2" },
- { "while", "2", "2" },
- { "until", "2", "2" },
- { "alloc", "1", "1" },
- { "array", "1", "1" },
- { "call", "2", tt256 },
- { "callcode", "2", tt256 },
- { "create", "1", "4" },
- { "getch", "2", "2" },
- { "setch", "3", "3" },
- { "sha3", "1", "2" },
- { "return", "1", "2" },
- { "inset", "1", "1" },
- { "min", "2", "2" },
- { "max", "2", "2" },
- { "array_lit", "0", tt256 },
- { "seq", "0", tt256 },
- { "log", "1", "6" },
- { "outer", "1", "1" },
- { "set", "2", "2" },
- { "get", "1", "1" },
- { "ref", "1", "1" },
- { "declare", "1", tt256 },
- { "with", "3", "3" },
- { "outer", "1", "1" },
- { "mcopy", "3", "3" },
- { "unsafe_mcopy", "3", "3" },
- { "save", "3", "3" },
- { "load", "2", "2" },
- { "---END---", "", "" } //Keep this line at the end of the list
-};
-
-std::map<std::string, bool> vfMap;
-
-// Is a function name one of the valid functions above?
-bool isValidFunctionName(std::string f) {
- if (vfMap.size() == 0) {
- for (int i = 0; ; i++) {
- if (validFunctions[i][0] == "---END---") break;
- vfMap[validFunctions[i][0]] = true;
- }
- }
- return vfMap.count(f);
-}
-
-// Cool function for debug purposes (named cerrStringList to make
-// all prints searchable via 'cerr')
-void cerrStringList(std::vector<std::string> s, std::string suffix) {
- for (unsigned i = 0; i < s.size(); i++) std::cerr << s[i] << " ";
- std::cerr << suffix << "\n";
-}
-
-// Convert:
-// self.cow -> ["cow"]
-// self.horse[0] -> ["horse", "0"]
-// self.a[6][7][self.storage[3]].chicken[9] ->
-// ["6", "7", (sload 3), "chicken", "9"]
-std::vector<Node> listfyStorageAccess(Node node) {
- std::vector<Node> out;
- std::vector<Node> nodez;
- nodez.push_back(node);
- while (1) {
- if (nodez.back().type == TOKEN) {
- out.push_back(token("--" + nodez.back().val, node.metadata));
- std::vector<Node> outrev;
- for (int i = (signed)out.size() - 1; i >= 0; i--) {
- outrev.push_back(out[i]);
- }
- return outrev;
- }
- if (nodez.back().val == ".")
- nodez.back().args[1].val = "--" + nodez.back().args[1].val;
- if (nodez.back().args.size() == 0)
- err("Error parsing storage variable statement", node.metadata);
- if (nodez.back().args.size() == 1)
- out.push_back(token(tt256m1, node.metadata));
- else
- out.push_back(nodez.back().args[1]);
- nodez.push_back(nodez.back().args[0]);
- }
-}
-
-// Is the given node something of the form
-// self.cow
-// self.horse[0]
-// self.a[6][7][self.storage[3]].chicken[9]
-bool isNodeStorageVariable(Node node) {
- std::vector<Node> nodez;
- nodez.push_back(node);
- while (1) {
- if (nodez.back().type == TOKEN) return false;
- if (nodez.back().args.size() == 0) return false;
- if (nodez.back().val != "." && nodez.back().val != "access")
- return false;
- if (nodez.back().args[0].val == "self") return true;
- nodez.push_back(nodez.back().args[0]);
- }
-}
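A small sketch of how these two helpers behave on a parsed expression; parseSerpent comes from parser.h in this tree, and the "--" prefixes follow the flag convention consumed by storageTransform in rewriter.cpp.

    #include <iostream>
    #include <vector>
    #include "parser.h"
    #include "rewriteutils.h"

    void demoStorageAccessPath() {
        Node n = parseSerpent("self.horse[0]");
        std::cout << isNodeStorageVariable(n) << "\n";   // 1: it is a storage access
        std::vector<Node> path = listfyStorageAccess(n);
        for (unsigned i = 0; i < path.size(); i++)
            std::cout << path[i].val << " ";             // --self --horse 0
        std::cout << "\n";
    }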
-
-// Main pattern matching routine, for those patterns that can be expressed
-// using our standard mini-language above
-//
-// Returns two values. First, a boolean to determine whether the node matches
-// the pattern, second, if the node does match then a map mapping variables
-// in the pattern to nodes
-matchResult match(Node p, Node n) {
- matchResult o;
- o.success = false;
- if (p.type == TOKEN) {
- if (p.val == n.val && n.type == TOKEN) o.success = true;
- else if (p.val[0] == '$' || p.val[0] == '@') {
- o.success = true;
- o.map[p.val.substr(1)] = n;
- }
- }
- else if (n.type==TOKEN || p.val!=n.val || p.args.size()!=n.args.size()) {
- // do nothing
- }
- else {
- for (unsigned i = 0; i < p.args.size(); i++) {
- matchResult oPrime = match(p.args[i], n.args[i]);
- if (!oPrime.success) {
- o.success = false;
- return o;
- }
- for (std::map<std::string, Node>::iterator it = oPrime.map.begin();
- it != oPrime.map.end();
- it++) {
- o.map[(*it).first] = (*it).second;
- }
- }
- o.success = true;
- }
- return o;
-}
-
-
-// Fills in the pattern with a dictionary mapping variable names to
-// nodes (these dicts are generated by match). Match and subst together
-// create a full pattern-matching engine.
-Node subst(Node pattern,
- std::map<std::string, Node> dict,
- std::string varflag,
- Metadata m) {
- // Swap out patterns at the token level
- if (pattern.metadata.ln == -1)
- pattern.metadata = m;
- if (pattern.type == TOKEN &&
- pattern.val[0] == '$') {
- if (dict.count(pattern.val.substr(1))) {
- return dict[pattern.val.substr(1)];
- }
- else {
- return token(varflag + pattern.val.substr(1), m);
- }
- }
- // Other tokens are untouched
- else if (pattern.type == TOKEN) {
- return pattern;
- }
- // Substitute recursively for ASTs
- else {
- std::vector<Node> args;
- for (unsigned i = 0; i < pattern.args.size(); i++) {
- args.push_back(subst(pattern.args[i], dict, varflag, m));
- }
- return asn(pattern.val, args, m);
- }
-}
-
-// Transforms a sequence containing two-argument with statements
-// into a statement containing those statements in nested form
-Node withTransform (Node source) {
- Node o = token("--");
- Metadata m = source.metadata;
- std::vector<Node> args;
- for (int i = source.args.size() - 1; i >= 0; i--) {
- Node a = source.args[i];
- if (a.val == "with" && a.args.size() == 2) {
- std::vector<Node> flipargs;
- for (int j = args.size() - 1; j >= 0; j--)
- flipargs.push_back(args[j]);
- if (o.val != "--")
- flipargs.push_back(o);
- o = asn("with", a.args[0], a.args[1], asn("seq", flipargs, m), m);
- args = std::vector<Node>();
- }
- else {
- args.push_back(a);
- }
- }
- std::vector<Node> flipargs;
- for (int j = args.size() - 1; j >= 0; j--)
- flipargs.push_back(args[j]);
- if (o.val != "--")
- flipargs.push_back(o);
- return asn("seq", flipargs, m);
-}
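
The match/subst pair deleted above is serpent's pattern-matching engine: pattern tokens beginning with '$' (or '@') capture the corresponding subtree of the input, and subst re-expands a template from those captures. As an illustration of the algorithm only (not the serpent implementation), here is a minimal Go sketch with a simplified, hypothetical Node type:

package main

import "fmt"

// Node is a simplified stand-in for serpent's token/AST node: a node with
// no Args plays the role of a TOKEN.
type Node struct {
	Val  string
	Args []Node
}

// match reports whether n matches pattern p; tokens starting with '$' in p
// bind the corresponding subtree of n, mirroring the matchResult map above.
func match(p, n Node) (map[string]Node, bool) {
	binds := map[string]Node{}
	if len(p.Args) == 0 {
		if p.Val != "" && p.Val[0] == '$' {
			binds[p.Val[1:]] = n
			return binds, true
		}
		return binds, len(n.Args) == 0 && p.Val == n.Val
	}
	if p.Val != n.Val || len(p.Args) != len(n.Args) {
		return nil, false
	}
	for i := range p.Args {
		sub, ok := match(p.Args[i], n.Args[i])
		if !ok {
			return nil, false
		}
		for k, v := range sub {
			binds[k] = v
		}
	}
	return binds, true
}

// subst fills '$x' tokens in a template with the bound subtrees.
func subst(tmpl Node, binds map[string]Node) Node {
	if len(tmpl.Args) == 0 {
		if tmpl.Val != "" && tmpl.Val[0] == '$' {
			if v, ok := binds[tmpl.Val[1:]]; ok {
				return v
			}
		}
		return tmpl
	}
	args := make([]Node, len(tmpl.Args))
	for i, a := range tmpl.Args {
		args[i] = subst(a, binds)
	}
	return Node{Val: tmpl.Val, Args: args}
}

func main() {
	// Rewrite rule: (add $x 0) -> $x, applied to (add y 0).
	pat := Node{Val: "add", Args: []Node{{Val: "$x"}, {Val: "0"}}}
	in := Node{Val: "add", Args: []Node{{Val: "y"}, {Val: "0"}}}
	if binds, ok := match(pat, in); ok {
		fmt.Println(subst(Node{Val: "$x"}, binds).Val) // y
	}
}
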
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.h
deleted file mode 100644
index 8abf44a9f..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef ETHSERP_REWRITEUTILS
-#define ETHSERP_REWRITEUTILS
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Valid functions and their min and max argument counts
-extern std::string validFunctions[][3];
-
-extern std::map<std::string, bool> vfMap;
-
-bool isValidFunctionName(std::string f);
-
-// Converts deep array access into ordered list of the arguments
-// along the descent
-std::vector<Node> listfyStorageAccess(Node node);
-
-// Cool function for debug purposes (named cerrStringList to make
-// all prints searchable via 'cerr')
-void cerrStringList(std::vector<std::string> s, std::string suffix="");
-
-// Is the given node something of the form
-// self.cow
-// self.horse[0]
-// self.a[6][7][self.storage[3]].chicken[9]
-bool isNodeStorageVariable(Node node);
-
-// Applies rewrite rules adding without wrapper
-Node rewriteChunk(Node inp);
-
-// Match result storing object
-struct matchResult {
- bool success;
- std::map<std::string, Node> map;
-};
-
-// Match node to pattern
-matchResult match(Node p, Node n);
-
-// Substitute node using pattern
-Node subst(Node pattern,
- std::map<std::string, Node> dict,
- std::string varflag,
- Metadata m);
-
-Node withTransform(Node source);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/serpent.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/serpent.py
deleted file mode 100644
index 8d6bedfe3..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/serpent.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import serpent_pyext as pyext
-import sys
-import re
-
-VERSION = '1.7.7'
-
-
-class Metadata(object):
- def __init__(self, li):
- self.file = li[0]
- self.ln = li[1]
- self.ch = li[2]
-
- def out(self):
- return [self.file, self.ln, self.ch]
-
-
-class Token(object):
- def __init__(self, val, metadata):
- self.val = val
- self.metadata = Metadata(metadata)
-
- def out(self):
- return [0, self.val, self.metadata.out()]
-
- def __repr__(self):
- return str(self.val)
-
-
-class Astnode(object):
- def __init__(self, val, args, metadata):
- self.val = val
- self.args = map(node, args)
- self.metadata = Metadata(metadata)
-
- def out(self):
- o = [1, self.val, self.metadata.out()]+[x.out() for x in self.args]
- return o
-
- def __repr__(self):
- o = '(' + self.val
- subs = map(repr, self.args)
- k = 0
- out = " "
- while k < len(subs) and o != "(seq":
- if '\n' in subs[k] or len(out + subs[k]) >= 80:
- break
- out += subs[k] + " "
- k += 1
- if k < len(subs):
- o += out + "\n "
- o += '\n '.join('\n'.join(subs[k:]).split('\n'))
- o += '\n)'
- else:
- o += out[:-1] + ')'
- return o
-
-
-def node(li):
- if li[0]:
- return Astnode(li[1], li[3:], li[2])
- else:
- return Token(li[1], li[2])
-
-
-def take(x):
- return pyext.parse_lll(x) if isinstance(x, (str, unicode)) else x.out()
-
-
-def takelist(x):
- return map(take, parse(x).args if isinstance(x, (str, unicode)) else x)
-
-
-compile = lambda x: pyext.compile(x)
-compile_chunk = lambda x: pyext.compile_chunk(x)
-compile_to_lll = lambda x: node(pyext.compile_to_lll(x))
-compile_chunk_to_lll = lambda x: node(pyext.compile_chunk_to_lll(x))
-compile_lll = lambda x: pyext.compile_lll(take(x))
-parse = lambda x: node(pyext.parse(x))
-rewrite = lambda x: node(pyext.rewrite(take(x)))
-rewrite_chunk = lambda x: node(pyext.rewrite_chunk(take(x)))
-pretty_compile = lambda x: map(node, pyext.pretty_compile(x))
-pretty_compile_chunk = lambda x: map(node, pyext.pretty_compile_chunk(x))
-pretty_compile_lll = lambda x: map(node, pyext.pretty_compile_lll(take(x)))
-serialize = lambda x: pyext.serialize(takelist(x))
-deserialize = lambda x: map(node, pyext.deserialize(x))
-
-is_numeric = lambda x: isinstance(x, (int, long))
-is_string = lambda x: isinstance(x, (str, unicode))
-tobytearr = lambda n, L: [] if L == 0 else tobytearr(n / 256, L - 1)+[n % 256]
-
-
-# A set of methods for detecting raw values (numbers and strings) and
-# converting them to integers
-def frombytes(b):
- return 0 if len(b) == 0 else ord(b[-1]) + 256 * frombytes(b[:-1])
-
-
-def fromhex(b):
- hexord = lambda x: '0123456789abcdef'.find(x)
- return 0 if len(b) == 0 else hexord(b[-1]) + 16 * fromhex(b[:-1])
-
-
-def numberize(b):
- if is_numeric(b):
- return b
- elif b[0] in ["'", '"']:
- return frombytes(b[1:-1])
- elif b[:2] == '0x':
- return fromhex(b[2:])
- elif re.match('^[0-9]*$', b):
- return int(b)
- elif len(b) == 40:
- return fromhex(b)
- else:
- raise Exception("Cannot identify data type: %r" % b)
-
-
-def enc(n):
- if is_numeric(n):
- return ''.join(map(chr, tobytearr(n, 32)))
- elif is_string(n) and len(n) == 40:
- return '\x00' * 12 + n.decode('hex')
- elif is_string(n):
- return '\x00' * (32 - len(n)) + n
- elif n is True:
- return 1
- elif n is False or n is None:
- return 0
-
-
-def encode_datalist(*args):
- if isinstance(args, (tuple, list)):
- return ''.join(map(enc, args))
- elif not len(args) or args[0] == '':
- return ''
- else:
- # Assume you're getting in numbers or addresses or 0x...
- return ''.join(map(enc, map(numberize, args)))
-
-
-def decode_datalist(arr):
- if isinstance(arr, list):
- arr = ''.join(map(chr, arr))
- o = []
- for i in range(0, len(arr), 32):
- o.append(frombytes(arr[i:i + 32]))
- return o
-
-
-def encode_abi(funid, *args):
- len_args = ''
- normal_args = ''
- var_args = ''
- for arg in args:
- if isinstance(arg, str) and len(arg) and \
- arg[0] == '"' and arg[-1] == '"':
- len_args += enc(numberize(len(arg[1:-1])))
- var_args += arg[1:-1]
- elif isinstance(arg, list):
- for a in arg:
- var_args += enc(numberize(a))
- len_args += enc(numberize(len(arg)))
- else:
- normal_args += enc(numberize(arg))
- return chr(int(funid)) + len_args + normal_args + var_args
-
-
-def decode_abi(arr, *lens):
- o = []
- pos = 1
- i = 0
- if len(lens) == 1 and isinstance(lens[0], list):
- lens = lens[0]
- while pos < len(arr):
- bytez = int(lens[i]) if i < len(lens) else 32
- o.append(frombytes(arr[pos: pos + bytez]))
- i, pos = i + 1, pos + bytez
- return o
-
-
-def main():
- if len(sys.argv) == 1:
- print "serpent <command> <arg1> <arg2> ..."
- else:
- cmd = sys.argv[2] if sys.argv[1] == '-s' else sys.argv[1]
- if sys.argv[1] == '-s':
- args = [sys.stdin.read()] + sys.argv[3:]
- elif sys.argv[1] == '-v':
- print VERSION
- sys.exit()
- else:
- cmd = sys.argv[1]
- args = sys.argv[2:]
- if cmd in ['deserialize', 'decode_datalist', 'decode_abi']:
- args[0] = args[0].strip().decode('hex')
- o = globals()[cmd](*args)
- if isinstance(o, (Token, Astnode, list)):
- print repr(o)
- else:
- print o.encode('hex')
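
enc (together with tobytearr, which it relies on) encodes every numeric argument as a 32-byte big-endian word, left-padding addresses and shorter strings with zero bytes. A rough Go equivalent of the numeric case, for illustration only (enc32 is a hypothetical helper, not part of the deleted module):

package main

import (
	"fmt"
	"math/big"
)

// enc32 left-pads the big-endian bytes of n to a 32-byte word, mirroring
// serpent.py's tobytearr(n, 32). It assumes 0 <= n < 2**256.
func enc32(n *big.Int) [32]byte {
	var word [32]byte
	b := n.Bytes() // big-endian, no leading zeros
	copy(word[32-len(b):], b)
	return word
}

func main() {
	fmt.Printf("%x\n", enc32(big.NewInt(1000)))
	// 00000000000000000000000000000000000000000000000000000000000003e8
}
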
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/setup.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/setup.py
deleted file mode 100644
index 5fdc1c16a..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/setup.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from setuptools import setup, Extension
-
-import os
-from distutils.sysconfig import get_config_vars
-
-(opt,) = get_config_vars('OPT')
-os.environ['OPT'] = " ".join(
- flag for flag in opt.split() if flag != '-Wstrict-prototypes'
-)
-
-setup(
- # Name of this package
- name="ethereum-serpent",
-
- # Package version
- version='1.7.7',
-
- description='Serpent compiler',
- maintainer='Vitalik Buterin',
- maintainer_email='v@buterin.com',
- license='WTFPL',
- url='http://www.ethereum.org/',
-
- # Describes how to build the actual extension module from C source files.
- ext_modules=[
- Extension(
- 'serpent_pyext', # Python name of the module
- ['bignum.cpp', 'util.cpp', 'tokenize.cpp',
- 'lllparser.cpp', 'parser.cpp', 'functions.cpp',
- 'optimize.cpp', 'opcodes.cpp',
- 'rewriteutils.cpp', 'preprocess.cpp', 'rewriter.cpp',
- 'compiler.cpp', 'funcs.cpp', 'pyserpent.cpp']
- )],
- py_modules=[
- 'serpent',
- 'pyserpent'
- ],
- scripts=[
- 'serpent.py'
- ],
- entry_points={
- 'console_scripts': [
- 'serpent = serpent:main',
- ],
- }
- ),
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.cpp
deleted file mode 100644
index b60cc8a44..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// These appear as independent tokens even if inside a stream of symbols
-const std::string atoms[] = { "#", "//", "(", ")", "[", "]", "{", "}" };
-const int numAtoms = 8;
-
-// Is the char alphanumeric, a space, a bracket, a quote, a symbol?
-int chartype(char c) {
- if (c >= '0' && c <= '9') return ALPHANUM;
- else if (c >= 'a' && c <= 'z') return ALPHANUM;
- else if (c >= 'A' && c <= 'Z') return ALPHANUM;
- else if (std::string("~_$@").find(c) != std::string::npos) return ALPHANUM;
- else if (c == '\t' || c == ' ' || c == '\n' || c == '\r') return SPACE;
- else if (std::string("()[]{}").find(c) != std::string::npos) return BRACK;
- else if (c == '"') return DQUOTE;
- else if (c == '\'') return SQUOTE;
- else return SYMB;
-}
-
-// "y = f(45,124)/3" -> [ "y", "f", "(", "45", ",", "124", ")", "/", "3"]
-std::vector<Node> tokenize(std::string inp, Metadata metadata, bool lispMode) {
- int curtype = SPACE;
- unsigned pos = 0;
- int lastNewline = 0;
- metadata.ch = 0;
- std::string cur;
- std::vector<Node> out;
-
- inp += " ";
- while (pos < inp.length()) {
- int headtype = chartype(inp[pos]);
- if (lispMode) {
- if (inp[pos] == '\'') headtype = ALPHANUM;
- }
- // Are we inside a quote?
- if (curtype == SQUOTE || curtype == DQUOTE) {
- // Close quote
- if (headtype == curtype) {
- cur += inp[pos];
- out.push_back(token(cur, metadata));
- cur = "";
- metadata.ch = pos - lastNewline;
- curtype = SPACE;
- pos += 1;
- }
- // eg. \xc3
- else if (inp.length() >= pos + 4 && inp.substr(pos, 2) == "\\x") {
- cur += (std::string("0123456789abcdef").find(inp[pos+2]) * 16
- + std::string("0123456789abcdef").find(inp[pos+3]));
- pos += 4;
- }
- // Newline
- else if (inp.substr(pos, 2) == "\\n") {
- cur += '\n';
- pos += 2;
- }
- // Backslash escape
- else if (inp.length() >= pos + 2 && inp[pos] == '\\') {
- cur += inp[pos + 1];
- pos += 2;
- }
- // Normal character
- else {
- cur += inp[pos];
- pos += 1;
- }
- }
- else {
- // Handle atoms ( '//', '#', brackets )
- for (int i = 0; i < numAtoms; i++) {
- int split = cur.length() - atoms[i].length();
- if (split >= 0 && cur.substr(split) == atoms[i]) {
- if (split > 0) {
- out.push_back(token(cur.substr(0, split), metadata));
- }
- metadata.ch += split;
- out.push_back(token(cur.substr(split), metadata));
- metadata.ch = pos - lastNewline;
- cur = "";
- curtype = SPACE;
- }
- }
- // Special case the minus sign
- if (cur.length() > 1 && (cur.substr(cur.length() - 1) == "-"
- || cur.substr(cur.length() - 1) == "!")) {
- out.push_back(token(cur.substr(0, cur.length() - 1), metadata));
- out.push_back(token(cur.substr(cur.length() - 1), metadata));
- cur = "";
- }
- // Boundary between different char types
- if (headtype != curtype) {
- if (curtype != SPACE && cur != "") {
- out.push_back(token(cur, metadata));
- }
- metadata.ch = pos - lastNewline;
- cur = "";
- }
- cur += inp[pos];
- curtype = headtype;
- pos += 1;
- }
- if (inp[pos] == '\n') {
- lastNewline = pos;
- metadata.ch = 0;
- metadata.ln += 1;
- }
- }
- return out;
-}
-
-
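
chartype drives the tokenizer deleted above: characters accumulate into the current token while their class stays the same, and a token boundary is emitted when the class changes, with quotes, brackets, '#' and '//' handled specially. A small Go sketch of just the classifier, assuming the same character classes (the names are illustrative, not serpent's):

package main

import (
	"fmt"
	"strings"
)

type charClass int

const (
	alphanum charClass = iota // letters, digits and ~_$@
	space                     // whitespace
	brack                     // ()[]{}
	dquote                    // "
	squote                    // '
	symb                      // everything else
)

// classify mirrors chartype above.
func classify(c rune) charClass {
	switch {
	case c >= '0' && c <= '9', c >= 'a' && c <= 'z', c >= 'A' && c <= 'Z',
		strings.ContainsRune("~_$@", c):
		return alphanum
	case c == ' ', c == '\t', c == '\n', c == '\r':
		return space
	case strings.ContainsRune("()[]{}", c):
		return brack
	case c == '"':
		return dquote
	case c == '\'':
		return squote
	default:
		return symb
	}
}

func main() {
	for _, c := range "y = f(45)" {
		fmt.Printf("%q -> %d\n", c, classify(c))
	}
}
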
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.h
deleted file mode 100644
index 04a42f3c6..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef ETHSERP_TOKENIZE
-#define ETHSERP_TOKENIZE
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-int chartype(char c);
-
-std::vector<Node> tokenize(std::string inp,
- Metadata meta=Metadata(),
- bool lispMode=false);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.cpp
deleted file mode 100644
index 56f642fc8..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "bignum.h"
-#include <fstream>
-#include <cerrno>
-
-//Token or value node constructor
-Node token(std::string val, Metadata met) {
- Node o;
- o.type = 0;
- o.val = val;
- o.metadata = met;
- return o;
-}
-
-//AST node constructor
-Node astnode(std::string val, std::vector<Node> args, Metadata met) {
- Node o;
- o.type = 1;
- o.val = val;
- o.args = args;
- o.metadata = met;
- return o;
-}
-
-//AST node constructors for a specific number of children
-Node astnode(std::string val, Metadata met) {
- std::vector<Node> args;
- return astnode(val, args, met);
-}
-
-Node astnode(std::string val, Node a, Metadata met) {
- std::vector<Node> args;
- args.push_back(a);
- return astnode(val, args, met);
-}
-
-Node astnode(std::string val, Node a, Node b, Metadata met) {
- std::vector<Node> args;
- args.push_back(a);
- args.push_back(b);
- return astnode(val, args, met);
-}
-
-Node astnode(std::string val, Node a, Node b, Node c, Metadata met) {
- std::vector<Node> args;
- args.push_back(a);
- args.push_back(b);
- args.push_back(c);
- return astnode(val, args, met);
-}
-
-Node astnode(std::string val, Node a, Node b, Node c, Node d, Metadata met) {
- std::vector<Node> args;
- args.push_back(a);
- args.push_back(b);
- args.push_back(c);
- args.push_back(d);
- return astnode(val, args, met);
-}
-
-
-// Print token list
-std::string printTokens(std::vector<Node> tokens) {
- std::string s = "";
- for (unsigned i = 0; i < tokens.size(); i++) {
- s += tokens[i].val + " ";
- }
- return s;
-}
-
-// Prints a lisp AST on one line
-std::string printSimple(Node ast) {
- if (ast.type == TOKEN) return ast.val;
- std::string o = "(" + ast.val;
- std::vector<std::string> subs;
- for (unsigned i = 0; i < ast.args.size(); i++) {
- o += " " + printSimple(ast.args[i]);
- }
- return o + ")";
-}
-
-// Number of tokens in a tree
-int treeSize(Node prog) {
- if (prog.type == TOKEN) return 1;
- int o = 0;
- for (unsigned i = 0; i < prog.args.size(); i++) o += treeSize(prog.args[i]);
- return o;
-}
-
-// Pretty-prints a lisp AST
-std::string printAST(Node ast, bool printMetadata) {
- if (ast.type == TOKEN) return ast.val;
- std::string o = "(";
- if (printMetadata) {
- o += ast.metadata.file + " ";
- o += unsignedToDecimal(ast.metadata.ln) + " ";
- o += unsignedToDecimal(ast.metadata.ch) + ": ";
- }
- o += ast.val;
- std::vector<std::string> subs;
- for (unsigned i = 0; i < ast.args.size(); i++) {
- subs.push_back(printAST(ast.args[i], printMetadata));
- }
- unsigned k = 0;
- std::string out = " ";
- // As many arguments as possible go on the same line as the function,
- // except when seq is used
- while (k < subs.size() && o != "(seq") {
- if (subs[k].find("\n") != std::string::npos || (out + subs[k]).length() >= 80) break;
- out += subs[k] + " ";
- k += 1;
- }
- // All remaining arguments go on their own lines
- if (k < subs.size()) {
- o += out + "\n";
- std::vector<std::string> subsSliceK;
- for (unsigned i = k; i < subs.size(); i++) subsSliceK.push_back(subs[i]);
- o += indentLines(joinLines(subsSliceK));
- o += "\n)";
- }
- else {
- o += out.substr(0, out.size() - 1) + ")";
- }
- return o;
-}
-
-// Splits text by line
-std::vector<std::string> splitLines(std::string s) {
- unsigned pos = 0;
- int lastNewline = 0;
- std::vector<std::string> o;
- while (pos < s.length()) {
- if (s[pos] == '\n') {
- o.push_back(s.substr(lastNewline, pos - lastNewline));
- lastNewline = pos + 1;
- }
- pos = pos + 1;
- }
- o.push_back(s.substr(lastNewline));
- return o;
-}
-
-// Inverse of splitLines
-std::string joinLines(std::vector<std::string> lines) {
- std::string o = "\n";
- for (unsigned i = 0; i < lines.size(); i++) {
- o += lines[i] + "\n";
- }
- return o.substr(1, o.length() - 2);
-}
-
-// Indent all lines by 4 spaces
-std::string indentLines(std::string inp) {
- std::vector<std::string> lines = splitLines(inp);
- for (unsigned i = 0; i < lines.size(); i++) lines[i] = " "+lines[i];
- return joinLines(lines);
-}
-
-// Binary to hexadecimal
-std::string binToNumeric(std::string inp) {
- std::string o = "0";
- for (unsigned i = 0; i < inp.length(); i++) {
- o = decimalAdd(decimalMul(o,"256"), unsignedToDecimal((unsigned char)inp[i]));
- }
- return o;
-}
-
-// Converts string to simple numeric format
-std::string strToNumeric(std::string inp) {
- std::string o = "0";
- if (inp == "") {
- o = "";
- }
- else if (inp.substr(0,2) == "0x") {
- for (unsigned i = 2; i < inp.length(); i++) {
- int dig = std::string("0123456789abcdef0123456789ABCDEF").find(inp[i]) % 16;
- if (dig < 0) return "";
- o = decimalAdd(decimalMul(o,"16"), unsignedToDecimal(dig));
- }
- }
- else {
- bool isPureNum = true;
- for (unsigned i = 0; i < inp.length(); i++) {
- isPureNum = isPureNum && inp[i] >= '0' && inp[i] <= '9';
- }
- o = isPureNum ? inp : "";
- }
- return o;
-}
-
-// Does the node contain a number (eg. 124, 0xf012c, "george")
-bool isNumberLike(Node node) {
- if (node.type == ASTNODE) return false;
- return strToNumeric(node.val) != "";
-}
-
-//Normalizes number representations
-Node nodeToNumeric(Node node) {
- std::string o = strToNumeric(node.val);
- return token(o == "" ? node.val : o, node.metadata);
-}
-
-Node tryNumberize(Node node) {
- if (node.type == TOKEN && isNumberLike(node)) return nodeToNumeric(node);
- return node;
-}
-
-//Converts a value to an array of byte number nodes
-std::vector<Node> toByteArr(std::string val, Metadata metadata, int minLen) {
- std::vector<Node> o;
- int L = 0;
- while (val != "0" || L < minLen) {
- o.push_back(token(decimalMod(val, "256"), metadata));
- val = decimalDiv(val, "256");
- L++;
- }
- std::vector<Node> o2;
- for (int i = o.size() - 1; i >= 0; i--) o2.push_back(o[i]);
- return o2;
-}
-
-int counter = 0;
-
-//Makes a unique token
-std::string mkUniqueToken() {
- counter++;
- return unsignedToDecimal(counter);
-}
-
-//Does a file exist? http://stackoverflow.com/questions/12774207
-bool exists(std::string fileName) {
- std::ifstream infile(fileName.c_str());
- return infile.good();
-}
-
-//Reads a file: http://stackoverflow.com/questions/2602013
-std::string get_file_contents(std::string filename)
-{
- std::ifstream in(filename.c_str(), std::ios::in | std::ios::binary);
- if (in)
- {
- std::string contents;
- in.seekg(0, std::ios::end);
- contents.resize(in.tellg());
- in.seekg(0, std::ios::beg);
- in.read(&contents[0], contents.size());
- in.close();
- return(contents);
- }
- throw(errno);
-}
-
-//Report error
-void err(std::string errtext, Metadata met) {
- std::string err = "Error (file \"" + met.file + "\", line " +
- unsignedToDecimal(met.ln + 1) + ", char " + unsignedToDecimal(met.ch) +
- "): " + errtext;
- std::cerr << err << "\n";
- throw(err);
-}
-
-//Bin to hex
-std::string binToHex(std::string inp) {
- std::string o = "";
- for (unsigned i = 0; i < inp.length(); i++) {
- unsigned char v = inp[i];
- o += std::string("0123456789abcdef").substr(v/16, 1)
- + std::string("0123456789abcdef").substr(v%16, 1);
- }
- return o;
-}
-
-//Hex to bin
-std::string hexToBin(std::string inp) {
- std::string o = "";
- for (unsigned i = 0; i+1 < inp.length(); i+=2) {
- char v = (char)(std::string("0123456789abcdef").find(inp[i]) * 16 +
- std::string("0123456789abcdef").find(inp[i+1]));
- o += v;
- }
- return o;
-}
-
-//Lower to upper
-std::string upperCase(std::string inp) {
- std::string o = "";
- for (unsigned i = 0; i < inp.length(); i++) {
- if (inp[i] >= 97 && inp[i] <= 122) o += inp[i] - 32;
- else o += inp[i];
- }
- return o;
-}
-
-//Three-int vector
-std::vector<int> triple(int a, int b, int c) {
- std::vector<int> v;
- v.push_back(a);
- v.push_back(b);
- v.push_back(c);
- return v;
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.h
deleted file mode 100644
index f7d6744f9..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.h
+++ /dev/null
@@ -1,127 +0,0 @@
-#ifndef ETHSERP_UTIL
-#define ETHSERP_UTIL
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include <fstream>
-#include <cerrno>
-
-const int TOKEN = 0,
- ASTNODE = 1,
- SPACE = 2,
- BRACK = 3,
- SQUOTE = 4,
- DQUOTE = 5,
- SYMB = 6,
- ALPHANUM = 7,
- LPAREN = 8,
- RPAREN = 9,
- COMMA = 10,
- COLON = 11,
- UNARY_OP = 12,
- BINARY_OP = 13,
- COMPOUND = 14,
- TOKEN_SPLITTER = 15;
-
-// Stores metadata about each token
-class Metadata {
- public:
- Metadata(std::string File="main", int Ln=-1, int Ch=-1) {
- file = File;
- ln = Ln;
- ch = Ch;
- fixed = false;
- }
- std::string file;
- int ln;
- int ch;
- bool fixed;
-};
-
-std::string mkUniqueToken();
-
-// type can be TOKEN or ASTNODE
-struct Node {
- int type;
- std::string val;
- std::vector<Node> args;
- Metadata metadata;
-};
-Node token(std::string val, Metadata met=Metadata());
-Node astnode(std::string val, std::vector<Node> args, Metadata met=Metadata());
-Node astnode(std::string val, Metadata met=Metadata());
-Node astnode(std::string val, Node a, Metadata met=Metadata());
-Node astnode(std::string val, Node a, Node b, Metadata met=Metadata());
-Node astnode(std::string val, Node a, Node b, Node c, Metadata met=Metadata());
-Node astnode(std::string val, Node a, Node b,
- Node c, Node d, Metadata met=Metadata());
-
-// Number of tokens in a tree
-int treeSize(Node prog);
-
-// Print token list
-std::string printTokens(std::vector<Node> tokens);
-
-// Prints a lisp AST on one line
-std::string printSimple(Node ast);
-
-// Pretty-prints a lisp AST
-std::string printAST(Node ast, bool printMetadata=false);
-
-// Splits text by line
-std::vector<std::string> splitLines(std::string s);
-
-// Inverse of splitLines
-std::string joinLines(std::vector<std::string> lines);
-
-// Indent all lines by 4 spaces
-std::string indentLines(std::string inp);
-
-// Converts binary to simple numeric format
-std::string binToNumeric(std::string inp);
-
-// Converts string to simple numeric format
-std::string strToNumeric(std::string inp);
-
-// Does the node contain a number (eg. 124, 0xf012c, "george")
-bool isNumberLike(Node node);
-
-//Normalizes number representations
-Node nodeToNumeric(Node node);
-
-//If a node is numeric, normalize its representation
-Node tryNumberize(Node node);
-
-//Converts a value to an array of byte number nodes
-std::vector<Node> toByteArr(std::string val, Metadata metadata, int minLen=1);
-
-//Reads a file
-std::string get_file_contents(std::string filename);
-
-//Does a file exist?
-bool exists(std::string fileName);
-
-//Report error
-void err(std::string errtext, Metadata met);
-
-//Bin to hex
-std::string binToHex(std::string inp);
-
-//Hex to bin
-std::string hexToBin(std::string inp);
-
-//Lower to upper
-std::string upperCase(std::string inp);
-
-//Three-int vector
-std::vector<int> triple(int a, int b, int c);
-
-#define asn astnode
-#define tkn token
-#define msi std::map<std::string, int>
-#define msn std::map<std::string, Node>
-#define mss std::map<std::string, std::string>
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/tests/main.go b/Godeps/_workspace/src/github.com/ethereum/serpent-go/tests/main.go
deleted file mode 100644
index 2f2d17784..000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/tests/main.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package main
-
-import (
- "fmt"
-
- "github.com/ethereum/serpent-go"
-)
-
-func main() {
- out, _ := serpent.Compile(`
-// Namecoin
-if !contract.storage[msg.data[0]]: # Is the key not yet taken?
- # Then take it!
- contract.storage[msg.data[0]] = msg.data[1]
- return(1)
-else:
- return(0) // Otherwise do nothing
- `)
-
- fmt.Printf("%x\n", out)
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
index 0d7911eca..ccf390c9c 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
@@ -8,65 +8,84 @@ package leveldb
import (
"encoding/binary"
- "errors"
+ "fmt"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
)
-var (
- errBatchTooShort = errors.New("leveldb: batch is too short")
- errBatchBadRecord = errors.New("leveldb: bad record in batch")
-)
+type ErrBatchCorrupted struct {
+ Reason string
+}
+
+func (e *ErrBatchCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
+}
+
+func newErrBatchCorrupted(reason string) error {
+ return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
+}
-const kBatchHdrLen = 8 + 4
+const (
+ batchHdrLen = 8 + 4
+ batchGrowRec = 3000
+)
-type batchReplay interface {
- put(key, value []byte, seq uint64)
- delete(key []byte, seq uint64)
+type BatchReplay interface {
+ Put(key, value []byte)
+ Delete(key []byte)
}
// Batch is a write batch.
type Batch struct {
- buf []byte
+ data []byte
rLen, bLen int
seq uint64
sync bool
}
func (b *Batch) grow(n int) {
- off := len(b.buf)
+ off := len(b.data)
if off == 0 {
- // include headers
- off = kBatchHdrLen
- n += off
+ off = batchHdrLen
+ if b.data != nil {
+ b.data = b.data[:off]
+ }
}
- if cap(b.buf)-off >= n {
- return
+ if cap(b.data)-off < n {
+ if b.data == nil {
+ b.data = make([]byte, off, off+n)
+ } else {
+ odata := b.data
+ div := 1
+ if b.rLen > batchGrowRec {
+ div = b.rLen / batchGrowRec
+ }
+ b.data = make([]byte, off, off+n+(off-batchHdrLen)/div)
+ copy(b.data, odata)
+ }
}
- buf := make([]byte, 2*cap(b.buf)+n)
- copy(buf, b.buf)
- b.buf = buf[:off]
}
-func (b *Batch) appendRec(t vType, key, value []byte) {
+func (b *Batch) appendRec(kt kType, key, value []byte) {
n := 1 + binary.MaxVarintLen32 + len(key)
- if t == tVal {
+ if kt == ktVal {
n += binary.MaxVarintLen32 + len(value)
}
b.grow(n)
- off := len(b.buf)
- buf := b.buf[:off+n]
- buf[off] = byte(t)
+ off := len(b.data)
+ data := b.data[:off+n]
+ data[off] = byte(kt)
off += 1
- off += binary.PutUvarint(buf[off:], uint64(len(key)))
- copy(buf[off:], key)
+ off += binary.PutUvarint(data[off:], uint64(len(key)))
+ copy(data[off:], key)
off += len(key)
- if t == tVal {
- off += binary.PutUvarint(buf[off:], uint64(len(value)))
- copy(buf[off:], value)
+ if kt == ktVal {
+ off += binary.PutUvarint(data[off:], uint64(len(value)))
+ copy(data[off:], value)
off += len(value)
}
- b.buf = buf[:off]
+ b.data = data[:off]
b.rLen++
// Include 8-byte ikey header
b.bLen += len(key) + len(value) + 8
@@ -75,18 +94,51 @@ func (b *Batch) appendRec(t vType, key, value []byte) {
// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns.
func (b *Batch) Put(key, value []byte) {
- b.appendRec(tVal, key, value)
+ b.appendRec(ktVal, key, value)
}
// Delete appends 'delete operation' of the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns.
func (b *Batch) Delete(key []byte) {
- b.appendRec(tDel, key, nil)
+ b.appendRec(ktDel, key, nil)
+}
+
+// Dump dumps the batch contents. The returned slice can be loaded back
+// into a batch using the Load method.
+// The returned slice is not a copy, so its contents should not be
+// modified.
+func (b *Batch) Dump() []byte {
+ return b.encode()
+}
+
+// Load loads the given slice into the batch. Previous contents of the
+// batch will be discarded.
+// The given slice is not copied and is used as the batch buffer, so it is
+// not safe to modify its contents afterwards.
+func (b *Batch) Load(data []byte) error {
+ return b.decode(0, data)
+}
+
+// Replay replays batch contents.
+func (b *Batch) Replay(r BatchReplay) error {
+ return b.decodeRec(func(i int, kt kType, key, value []byte) {
+ switch kt {
+ case ktVal:
+ r.Put(key, value)
+ case ktDel:
+ r.Delete(key)
+ }
+ })
+}
+
+// Len returns the number of records in the batch.
+func (b *Batch) Len() int {
+ return b.rLen
}
// Reset resets the batch.
func (b *Batch) Reset() {
- b.buf = nil
+ b.data = b.data[:0]
b.seq = 0
b.rLen = 0
b.bLen = 0
@@ -97,24 +149,10 @@ func (b *Batch) init(sync bool) {
b.sync = sync
}
-func (b *Batch) put(key, value []byte, seq uint64) {
- if b.rLen == 0 {
- b.seq = seq
- }
- b.Put(key, value)
-}
-
-func (b *Batch) delete(key []byte, seq uint64) {
- if b.rLen == 0 {
- b.seq = seq
- }
- b.Delete(key)
-}
-
func (b *Batch) append(p *Batch) {
if p.rLen > 0 {
- b.grow(len(p.buf) - kBatchHdrLen)
- b.buf = append(b.buf, p.buf[kBatchHdrLen:]...)
+ b.grow(len(p.data) - batchHdrLen)
+ b.data = append(b.data, p.data[batchHdrLen:]...)
b.rLen += p.rLen
}
if p.sync {
@@ -122,95 +160,93 @@ func (b *Batch) append(p *Batch) {
}
}
-func (b *Batch) len() int {
- return b.rLen
-}
-
+// size returns the sum of key/value pair lengths plus an 8-byte ikey overhead per record.
func (b *Batch) size() int {
return b.bLen
}
func (b *Batch) encode() []byte {
b.grow(0)
- binary.LittleEndian.PutUint64(b.buf, b.seq)
- binary.LittleEndian.PutUint32(b.buf[8:], uint32(b.rLen))
+ binary.LittleEndian.PutUint64(b.data, b.seq)
+ binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen))
- return b.buf
+ return b.data
}
-func (b *Batch) decode(buf []byte) error {
- if len(buf) < kBatchHdrLen {
- return errBatchTooShort
+func (b *Batch) decode(prevSeq uint64, data []byte) error {
+ if len(data) < batchHdrLen {
+ return newErrBatchCorrupted("too short")
}
- b.seq = binary.LittleEndian.Uint64(buf)
- b.rLen = int(binary.LittleEndian.Uint32(buf[8:]))
+ b.seq = binary.LittleEndian.Uint64(data)
+ if b.seq < prevSeq {
+ return newErrBatchCorrupted("invalid sequence number")
+ }
+ b.rLen = int(binary.LittleEndian.Uint32(data[8:]))
+ if b.rLen < 0 {
+ return newErrBatchCorrupted("invalid records length")
+ }
// No need to be precise at this point, it won't be used anyway
- b.bLen = len(buf) - kBatchHdrLen
- b.buf = buf
+ b.bLen = len(data) - batchHdrLen
+ b.data = data
return nil
}
-func (b *Batch) decodeRec(f func(i int, t vType, key, value []byte)) error {
- off := kBatchHdrLen
+func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) {
+ off := batchHdrLen
for i := 0; i < b.rLen; i++ {
- if off >= len(b.buf) {
- return errors.New("leveldb: invalid batch record length")
+ if off >= len(b.data) {
+ return newErrBatchCorrupted("invalid records length")
}
- t := vType(b.buf[off])
- if t > tVal {
- return errors.New("leveldb: invalid batch record type in batch")
+ kt := kType(b.data[off])
+ if kt > ktVal {
+ return newErrBatchCorrupted("bad record: invalid type")
}
off += 1
- x, n := binary.Uvarint(b.buf[off:])
+ x, n := binary.Uvarint(b.data[off:])
off += n
- if n <= 0 || off+int(x) > len(b.buf) {
- return errBatchBadRecord
+ if n <= 0 || off+int(x) > len(b.data) {
+ return newErrBatchCorrupted("bad record: invalid key length")
}
- key := b.buf[off : off+int(x)]
+ key := b.data[off : off+int(x)]
off += int(x)
-
var value []byte
- if t == tVal {
- x, n := binary.Uvarint(b.buf[off:])
+ if kt == ktVal {
+ x, n := binary.Uvarint(b.data[off:])
off += n
- if n <= 0 || off+int(x) > len(b.buf) {
- return errBatchBadRecord
+ if n <= 0 || off+int(x) > len(b.data) {
+ return newErrBatchCorrupted("bad record: invalid value length")
}
- value = b.buf[off : off+int(x)]
+ value = b.data[off : off+int(x)]
off += int(x)
}
- f(i, t, key, value)
+ f(i, kt, key, value)
}
return nil
}
-func (b *Batch) replay(to batchReplay) error {
- return b.decodeRec(func(i int, t vType, key, value []byte) {
- switch t {
- case tVal:
- to.put(key, value, b.seq+uint64(i))
- case tDel:
- to.delete(key, b.seq+uint64(i))
- }
- })
-}
-
func (b *Batch) memReplay(to *memdb.DB) error {
- return b.decodeRec(func(i int, t vType, key, value []byte) {
- ikey := newIKey(key, b.seq+uint64(i), t)
+ return b.decodeRec(func(i int, kt kType, key, value []byte) {
+ ikey := newIkey(key, b.seq+uint64(i), kt)
to.Put(ikey, value)
})
}
+func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error {
+ if err := b.decode(prevSeq, data); err != nil {
+ return err
+ }
+ return b.memReplay(to)
+}
+
func (b *Batch) revertMemReplay(to *memdb.DB) error {
- return b.decodeRec(func(i int, t vType, key, value []byte) {
- ikey := newIKey(key, b.seq+uint64(i), t)
+ return b.decodeRec(func(i int, kt kType, key, value []byte) {
+ ikey := newIkey(key, b.seq+uint64(i), kt)
to.Delete(ikey)
})
}
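
The batch changes above replace the unexported replay/put/delete hooks with an exported surface: callers implement BatchReplay and pass it to Replay, while Dump and Load round-trip the raw batch encoding. A usage sketch against the vendored package (error handling kept minimal):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// printReplay implements leveldb.BatchReplay by printing each record.
type printReplay struct{}

func (printReplay) Put(key, value []byte) { fmt.Printf("put %s=%s\n", key, value) }
func (printReplay) Delete(key []byte)     { fmt.Printf("del %s\n", key) }

func main() {
	b := new(leveldb.Batch)
	b.Put([]byte("k1"), []byte("v1"))
	b.Delete([]byte("k2"))

	// Dump returns the batch's internal encoding; Load restores it into
	// another batch without copying the slice.
	restored := new(leveldb.Batch)
	if err := restored.Load(b.Dump()); err != nil {
		panic(err)
	}

	fmt.Println("records:", restored.Len()) // records: 2
	if err := restored.Replay(printReplay{}); err != nil {
		panic(err)
	}
}
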
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
index 19b749b8f..7fc842f4f 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
@@ -15,7 +15,7 @@ import (
)
type tbRec struct {
- t vType
+ kt kType
key, value []byte
}
@@ -23,39 +23,39 @@ type testBatch struct {
rec []*tbRec
}
-func (p *testBatch) put(key, value []byte, seq uint64) {
- p.rec = append(p.rec, &tbRec{tVal, key, value})
+func (p *testBatch) Put(key, value []byte) {
+ p.rec = append(p.rec, &tbRec{ktVal, key, value})
}
-func (p *testBatch) delete(key []byte, seq uint64) {
- p.rec = append(p.rec, &tbRec{tDel, key, nil})
+func (p *testBatch) Delete(key []byte) {
+ p.rec = append(p.rec, &tbRec{ktDel, key, nil})
}
func compareBatch(t *testing.T, b1, b2 *Batch) {
if b1.seq != b2.seq {
t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq)
}
- if b1.len() != b2.len() {
- t.Fatalf("invalid record length want %d, got %d", b1.len(), b2.len())
+ if b1.Len() != b2.Len() {
+ t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len())
}
p1, p2 := new(testBatch), new(testBatch)
- err := b1.replay(p1)
+ err := b1.Replay(p1)
if err != nil {
t.Fatal("error when replaying batch 1: ", err)
}
- err = b2.replay(p2)
+ err = b2.Replay(p2)
if err != nil {
t.Fatal("error when replaying batch 2: ", err)
}
for i := range p1.rec {
r1, r2 := p1.rec[i], p2.rec[i]
- if r1.t != r2.t {
- t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.t, r2.t)
+ if r1.kt != r2.kt {
+ t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt)
}
if !bytes.Equal(r1.key, r2.key) {
t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
}
- if r1.t == tVal {
+ if r1.kt == ktVal {
if !bytes.Equal(r1.value, r2.value) {
t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
}
@@ -75,7 +75,7 @@ func TestBatch_EncodeDecode(t *testing.T) {
b1.Delete([]byte("k"))
buf := b1.encode()
b2 := new(Batch)
- err := b2.decode(buf)
+ err := b2.decode(0, buf)
if err != nil {
t.Error("error when decoding batch: ", err)
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go
new file mode 100644
index 000000000..0dd60fd82
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build !go1.2
+
+package leveldb
+
+import (
+ "sync/atomic"
+ "testing"
+)
+
+func BenchmarkDBReadConcurrent(b *testing.B) {
+ p := openDBBench(b, false)
+ p.populate(b.N)
+ p.fill()
+ p.gc()
+ defer p.close()
+
+ b.ResetTimer()
+ b.SetBytes(116)
+
+ b.RunParallel(func(pb *testing.PB) {
+ iter := p.newIter()
+ defer iter.Release()
+ for pb.Next() && iter.Next() {
+ }
+ })
+}
+
+func BenchmarkDBReadConcurrent2(b *testing.B) {
+ p := openDBBench(b, false)
+ p.populate(b.N)
+ p.fill()
+ p.gc()
+ defer p.close()
+
+ b.ResetTimer()
+ b.SetBytes(116)
+
+ var dir uint32
+ b.RunParallel(func(pb *testing.PB) {
+ iter := p.newIter()
+ defer iter.Release()
+ if atomic.AddUint32(&dir, 1)%2 == 0 {
+ for pb.Next() && iter.Next() {
+ }
+ } else {
+ if pb.Next() && iter.Last() {
+ for pb.Next() && iter.Prev() {
+ }
+ }
+ }
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
index ea6801a89..91b426709 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
@@ -170,7 +170,7 @@ func (p *dbBench) writes(perBatch int) {
b.SetBytes(116)
}
-func (p *dbBench) drop() {
+func (p *dbBench) gc() {
p.keys, p.values = nil, nil
runtime.GC()
}
@@ -249,6 +249,9 @@ func (p *dbBench) newIter() iterator.Iterator {
}
func (p *dbBench) close() {
+ if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
+ p.b.Log("Block pool stats: ", bp)
+ }
p.db.Close()
p.stor.Close()
os.RemoveAll(benchDB)
@@ -331,7 +334,7 @@ func BenchmarkDBRead(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
- p.drop()
+ p.gc()
iter := p.newIter()
b.ResetTimer()
@@ -362,7 +365,7 @@ func BenchmarkDBReadUncompressed(b *testing.B) {
p := openDBBench(b, true)
p.populate(b.N)
p.fill()
- p.drop()
+ p.gc()
iter := p.newIter()
b.ResetTimer()
@@ -379,7 +382,7 @@ func BenchmarkDBReadTable(b *testing.B) {
p.populate(b.N)
p.fill()
p.reopen()
- p.drop()
+ p.gc()
iter := p.newIter()
b.ResetTimer()
@@ -395,7 +398,7 @@ func BenchmarkDBReadReverse(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
- p.drop()
+ p.gc()
iter := p.newIter()
b.ResetTimer()
@@ -413,7 +416,7 @@ func BenchmarkDBReadReverseTable(b *testing.B) {
p.populate(b.N)
p.fill()
p.reopen()
- p.drop()
+ p.gc()
iter := p.newIter()
b.ResetTimer()
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
new file mode 100644
index 000000000..175e22203
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build !go1.2
+
+package cache
+
+import (
+	"math/rand"
+	"testing"
+	"time"
+)
+
+func BenchmarkLRUCache(b *testing.B) {
+ c := NewCache(NewLRU(10000))
+
+ b.SetParallelism(10)
+ b.RunParallel(func(pb *testing.PB) {
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+ for pb.Next() {
+ key := uint64(r.Intn(1000000))
+ c.Get(0, key, func() (int, Value) {
+ return 1, key
+ }).Release()
+ }
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
index 9b6a74977..c9670de5d 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -8,118 +8,669 @@
package cache
import (
+ "sync"
"sync/atomic"
+ "unsafe"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
)
-// SetFunc used by Namespace.Get method to create a cache object. SetFunc
-// may return ok false, in that case the cache object will not be created.
-type SetFunc func() (ok bool, value interface{}, charge int, fin SetFin)
+// Cacher provides an interface for implementing caching functionality.
+// An implementation must be goroutine-safe.
+type Cacher interface {
+ // Capacity returns cache capacity.
+ Capacity() int
-// SetFin will be called when corresponding cache object are released.
-type SetFin func()
+ // SetCapacity sets cache capacity.
+ SetCapacity(capacity int)
-// DelFin will be called when corresponding cache object are released.
-// DelFin will be called after SetFin. The exist is true if the corresponding
-// cache object is actually exist in the cache tree.
-type DelFin func(exist bool)
+ // Promote promotes the 'cache node'.
+ Promote(n *Node)
-// PurgeFin will be called when corresponding cache object are released.
-// PurgeFin will be called after SetFin. If PurgeFin present DelFin will
-// not be executed but passed to the PurgeFin, it is up to the caller
-// to call it or not.
-type PurgeFin func(ns, key uint64, delfin DelFin)
+	// Ban evicts the 'cache node' and prevents subsequent 'promote'.
+ Ban(n *Node)
-// Cache is a cache tree.
-type Cache interface {
- // SetCapacity sets cache capacity.
- SetCapacity(capacity int)
+ // Evict evicts the 'cache node'.
+ Evict(n *Node)
- // GetNamespace gets or creates a cache namespace for the given id.
- GetNamespace(id uint64) Namespace
+ // EvictNS evicts 'cache node' with the given namespace.
+ EvictNS(ns uint64)
- // Purge purges all cache namespaces, read Namespace.Purge method documentation.
- Purge(fin PurgeFin)
+ // EvictAll evicts all 'cache node'.
+ EvictAll()
+
+ // Close closes the 'cache tree'
+ Close() error
+}
+
+// Value is a 'cacheable object'. It may implement util.Releaser; if so,
+// the Release method will be called once the object is released.
+type Value interface{}
+
+type CacheGetter struct {
+ Cache *Cache
+ NS uint64
+}
- // Zap zaps all cache namespaces, read Namespace.Zap method documentation.
- Zap(closed bool)
+func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+ return g.Cache.Get(g.NS, key, setFunc)
}
-// Namespace is a cache namespace.
-type Namespace interface {
- // Get gets cache object for the given key. The given SetFunc (if not nil) will
- // be called if the given key does not exist.
- // If the given key does not exist, SetFunc is nil or SetFunc return ok false, Get
- // will return ok false.
- Get(key uint64, setf SetFunc) (obj Object, ok bool)
+// The hash tables implementation is based on:
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
- // Get deletes cache object for the given key. If exist the cache object will
- // be deleted later when all of its handles have been released (i.e. no one use
- // it anymore) and the given DelFin (if not nil) will finally be executed. If
- // such cache object does not exist the given DelFin will be executed anyway.
- //
- // Delete returns true if such cache object exist.
- Delete(key uint64, fin DelFin) bool
+const (
+ mInitialSize = 1 << 4
+ mOverflowThreshold = 1 << 5
+ mOverflowGrowThreshold = 1 << 7
+)
- // Purge deletes all cache objects, read Delete method documentation.
- Purge(fin PurgeFin)
+type mBucket struct {
+ mu sync.Mutex
+ node []*Node
+ frozen bool
+}
- // Zap detaches the namespace from the cache tree and delete all its cache
- // objects. The cache objects deletion and finalizers execution are happen
- // immediately, even if its existing handles haven't yet been released.
- // A zapped namespace can't never be filled again.
- // If closed is false then the Get function will always call the given SetFunc
- // if it is not nil, but resultant of the SetFunc will not be cached.
- Zap(closed bool)
+func (b *mBucket) freeze() []*Node {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if !b.frozen {
+ b.frozen = true
+ }
+ return b.node
}
-// Object is a cache object.
-type Object interface {
- // Release releases the cache object. Other methods should not be called
- // after the cache object has been released.
- Release()
+func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
+ b.mu.Lock()
+
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
+
+ // Scan the node.
+ for _, n := range b.node {
+ if n.hash == hash && n.ns == ns && n.key == key {
+ atomic.AddInt32(&n.ref, 1)
+ b.mu.Unlock()
+ return true, false, n
+ }
+ }
+
+ // Get only.
+ if noset {
+ b.mu.Unlock()
+ return true, false, nil
+ }
+
+ // Create node.
+ n = &Node{
+ r: r,
+ hash: hash,
+ ns: ns,
+ key: key,
+ ref: 1,
+ }
+ // Add node to bucket.
+ b.node = append(b.node, n)
+ bLen := len(b.node)
+ b.mu.Unlock()
+
+ // Update counter.
+ grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
+ if bLen > mOverflowThreshold {
+ grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
+ }
- // Value returns value of the cache object.
- Value() interface{}
+ // Grow.
+ if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) << 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+
+ return true, true, n
}
-// Namespace state.
-type nsState int
+func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
+ b.mu.Lock()
-const (
- nsEffective nsState = iota
- nsZapped
- nsClosed
-)
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
-// Node state.
-type nodeState int
+ // Scan the node.
+ var (
+ n *Node
+ bLen int
+ )
+ for i := range b.node {
+ n = b.node[i]
+ if n.ns == ns && n.key == key {
+ if atomic.LoadInt32(&n.ref) == 0 {
+ deleted = true
-const (
- nodeEffective nodeState = iota
- nodeEvicted
- nodeRemoved
-)
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Remove node from bucket.
+ b.node = append(b.node[:i], b.node[i+1:]...)
+ bLen = len(b.node)
+ }
+ break
+ }
+ }
+ b.mu.Unlock()
-// Fake object.
-type fakeObject struct {
- value interface{}
- fin func()
- once uint32
+ if deleted {
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+
+ // Update counter.
+ atomic.AddInt32(&r.size, int32(n.size)*-1)
+ shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
+ if bLen >= mOverflowThreshold {
+ atomic.AddInt32(&h.overflow, -1)
+ }
+
+ // Shrink.
+ if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) >> 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+ }
+
+ return true, deleted
}
-func (o *fakeObject) Value() interface{} {
- if atomic.LoadUint32(&o.once) == 0 {
- return o.value
+type mNode struct {
+ buckets []unsafe.Pointer // []*mBucket
+ mask uint32
+ pred unsafe.Pointer // *mNode
+ resizeInProgess int32
+
+ overflow int32
+ growThreshold int32
+ shrinkThreshold int32
+}
+
+func (n *mNode) initBucket(i uint32) *mBucket {
+ if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
+ return b
+ }
+
+ p := (*mNode)(atomic.LoadPointer(&n.pred))
+ if p != nil {
+ var node []*Node
+ if n.mask > p.mask {
+ // Grow.
+ pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
+ if pb == nil {
+ pb = p.initBucket(i & p.mask)
+ }
+ m := pb.freeze()
+ // Split nodes.
+ for _, x := range m {
+ if x.hash&n.mask == i {
+ node = append(node, x)
+ }
+ }
+ } else {
+ // Shrink.
+ pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
+ if pb0 == nil {
+ pb0 = p.initBucket(i)
+ }
+ pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
+ if pb1 == nil {
+ pb1 = p.initBucket(i + uint32(len(n.buckets)))
+ }
+ m0 := pb0.freeze()
+ m1 := pb1.freeze()
+ // Merge nodes.
+ node = make([]*Node, 0, len(m0)+len(m1))
+ node = append(node, m0...)
+ node = append(node, m1...)
+ }
+ b := &mBucket{node: node}
+ if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
+ if len(node) > mOverflowThreshold {
+ atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
+ }
+ return b
+ }
+ }
+
+ return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
+}
+
+func (n *mNode) initBuckets() {
+ for i := range n.buckets {
+ n.initBucket(uint32(i))
+ }
+ atomic.StorePointer(&n.pred, nil)
+}
+
+// Cache is a 'cache map'.
+type Cache struct {
+ mu sync.RWMutex
+ mHead unsafe.Pointer // *mNode
+ nodes int32
+ size int32
+ cacher Cacher
+ closed bool
+}
+
+// NewCache creates a new 'cache map'. The cacher is optional and
+// may be nil.
+func NewCache(cacher Cacher) *Cache {
+ h := &mNode{
+ buckets: make([]unsafe.Pointer, mInitialSize),
+ mask: mInitialSize - 1,
+ growThreshold: int32(mInitialSize * mOverflowThreshold),
+ shrinkThreshold: 0,
+ }
+ for i := range h.buckets {
+ h.buckets[i] = unsafe.Pointer(&mBucket{})
+ }
+ r := &Cache{
+ mHead: unsafe.Pointer(h),
+ cacher: cacher,
+ }
+ return r
+}
+
+func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
+ h := (*mNode)(atomic.LoadPointer(&r.mHead))
+ i := hash & h.mask
+ b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
+ if b == nil {
+ b = h.initBucket(i)
+ }
+ return h, b
+}
+
+func (r *Cache) delete(n *Node) bool {
+ for {
+ h, b := r.getBucket(n.hash)
+ done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
+ if done {
+ return deleted
+ }
+ }
+ return false
+}
+
+// Nodes returns the number of 'cache nodes' in the map.
+func (r *Cache) Nodes() int {
+ return int(atomic.LoadInt32(&r.nodes))
+}
+
+// Size returns the sum of 'cache node' sizes in the map.
+func (r *Cache) Size() int {
+ return int(atomic.LoadInt32(&r.size))
+}
+
+// Capacity returns cache capacity.
+func (r *Cache) Capacity() int {
+ if r.cacher == nil {
+ return 0
+ }
+ return r.cacher.Capacity()
+}
+
+// SetCapacity sets cache capacity.
+func (r *Cache) SetCapacity(capacity int) {
+ if r.cacher != nil {
+ r.cacher.SetCapacity(capacity)
+ }
+}
+
+// Get gets the 'cache node' with the given namespace and key.
+// If the cache node is not found and setFunc is not nil, Get will atomically
+// create the 'cache node' by calling setFunc. Otherwise Get returns nil.
+//
+// The returned 'cache handle' should be released after use by calling its
+// Release method.
+func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return nil
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
+ if done {
+ if n != nil {
+ n.mu.Lock()
+ if n.value == nil {
+ if setFunc == nil {
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+
+ n.size, n.value = setFunc()
+ if n.value == nil {
+ n.size = 0
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+ atomic.AddInt32(&r.size, int32(n.size))
+ }
+ n.mu.Unlock()
+ if r.cacher != nil {
+ r.cacher.Promote(n)
+ }
+ return &Handle{unsafe.Pointer(n)}
+ }
+
+ break
+ }
}
return nil
}
-func (o *fakeObject) Release() {
- if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
+// Delete removes and bans the 'cache node' with the given namespace and key.
+// A banned 'cache node' will never be inserted into the 'cache tree'. The ban
+// applies only to that particular 'cache node', so a recreated 'cache node'
+// will not be banned.
+//
+// If onDel is not nil, it will be executed if such a 'cache node' doesn't
+// exist or once the 'cache node' is released.
+//
+// Delete returns true if such a 'cache node' exists.
+func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if onDel != nil {
+ n.mu.Lock()
+ n.onDel = append(n.onDel, onDel)
+ n.mu.Unlock()
+ }
+ if r.cacher != nil {
+ r.cacher.Ban(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ if onDel != nil {
+ onDel()
+ }
+
+ return false
+}
+
+// Evict evicts the 'cache node' with the given namespace and key. This
+// simply calls Cacher.Evict.
+//
+// Evict returns true if such a 'cache node' exists.
+func (r *Cache) Evict(ns, key uint64) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if r.cacher != nil {
+ r.cacher.Evict(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ return false
+}
+
+// EvictNS evicts 'cache node' with the given namespace. This will
+// simply call Cacher.EvictNS.
+func (r *Cache) EvictNS(ns uint64) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictNS(ns)
+ }
+}
+
+// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll.
+func (r *Cache) EvictAll() {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
return
}
- if o.fin != nil {
- o.fin()
- o.fin = nil
+
+ if r.cacher != nil {
+ r.cacher.EvictAll()
+ }
+}
+
+// Close closes the 'cache map' and releases all 'cache node'.
+func (r *Cache) Close() error {
+ r.mu.Lock()
+ if !r.closed {
+ r.closed = true
+
+ if r.cacher != nil {
+ if err := r.cacher.Close(); err != nil {
+ return err
+ }
+ }
+
+ h := (*mNode)(r.mHead)
+ h.initBuckets()
+
+ for i := range h.buckets {
+ b := (*mBucket)(h.buckets[i])
+ for _, n := range b.node {
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+ }
+ }
}
+ r.mu.Unlock()
+ return nil
+}
+
+// Node is a 'cache node'.
+type Node struct {
+ r *Cache
+
+ hash uint32
+ ns, key uint64
+
+ mu sync.Mutex
+ size int
+ value Value
+
+ ref int32
+ onDel []func()
+
+ CacheData unsafe.Pointer
+}
+
+// NS returns this 'cache node' namespace.
+func (n *Node) NS() uint64 {
+ return n.ns
+}
+
+// Key returns this 'cache node' key.
+func (n *Node) Key() uint64 {
+ return n.key
+}
+
+// Size returns this 'cache node' size.
+func (n *Node) Size() int {
+ return n.size
+}
+
+// Value returns this 'cache node' value.
+func (n *Node) Value() Value {
+ return n.value
+}
+
+// Ref returns this 'cache node' ref counter.
+func (n *Node) Ref() int32 {
+ return atomic.LoadInt32(&n.ref)
+}
+
+// GetHandle returns a handle for this 'cache node'.
+func (n *Node) GetHandle() *Handle {
+ if atomic.AddInt32(&n.ref, 1) <= 1 {
+ panic("BUG: Node.GetHandle on zero ref")
+ }
+ return &Handle{unsafe.Pointer(n)}
+}
+
+func (n *Node) unref() {
+ if atomic.AddInt32(&n.ref, -1) == 0 {
+ n.r.delete(n)
+ }
+}
+
+func (n *Node) unrefLocked() {
+ if atomic.AddInt32(&n.ref, -1) == 0 {
+ n.r.mu.RLock()
+ if !n.r.closed {
+ n.r.delete(n)
+ }
+ n.r.mu.RUnlock()
+ }
+}
+
+type Handle struct {
+ n unsafe.Pointer // *Node
+}
+
+func (h *Handle) Value() Value {
+ n := (*Node)(atomic.LoadPointer(&h.n))
+ if n != nil {
+ return n.value
+ }
+ return nil
+}
+
+func (h *Handle) Release() {
+ nPtr := atomic.LoadPointer(&h.n)
+ if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
+ n := (*Node)(nPtr)
+ n.unrefLocked()
+ }
+}
+
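+// exampleGetAndRelease is a hypothetical usage sketch (illustrative only):
+// Get creates the 'cache node' on a miss using the supplied setFunc and
+// returns a Handle that pins it; the handle must be released when the value
+// is no longer needed, otherwise the node can never be deleted.
+func exampleGetAndRelease(c *Cache, ns, key uint64) {
+	h := c.Get(ns, key, func() (int, Value) {
+		return 1, "some value" // size and value for a newly created node
+	})
+	if h != nil {
+		_ = h.Value() // use the cached value while the handle is held
+		h.Release()   // allow the node to be deleted or evicted again
+	}
+}
+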
+func murmur32(ns, key uint64, seed uint32) uint32 {
+ const (
+ m = uint32(0x5bd1e995)
+ r = 24
+ )
+
+ k1 := uint32(ns >> 32)
+ k2 := uint32(ns)
+ k3 := uint32(key >> 32)
+ k4 := uint32(key)
+
+ k1 *= m
+ k1 ^= k1 >> r
+ k1 *= m
+
+ k2 *= m
+ k2 ^= k2 >> r
+ k2 *= m
+
+ k3 *= m
+ k3 ^= k3 >> r
+ k3 *= m
+
+ k4 *= m
+ k4 ^= k4 >> r
+ k4 *= m
+
+ h := seed
+
+ h *= m
+ h ^= k1
+ h *= m
+ h ^= k2
+ h *= m
+ h ^= k3
+ h *= m
+ h ^= k4
+
+ h ^= h >> 13
+ h *= m
+ h ^= h >> 15
+
+ return h
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
index 07a9939b2..c2a50156f 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
@@ -8,17 +8,289 @@ package cache
import (
"math/rand"
+ "runtime"
+ "sync"
+ "sync/atomic"
"testing"
+ "time"
+ "unsafe"
)
-func set(ns Namespace, key uint64, value interface{}, charge int, fin func()) Object {
- obj, _ := ns.Get(key, func() (bool, interface{}, int, SetFin) {
- return true, value, charge, fin
+type int32o int32
+
+func (o *int32o) acquire() {
+ if atomic.AddInt32((*int32)(o), 1) != 1 {
+ panic("BUG: invalid ref")
+ }
+}
+
+func (o *int32o) Release() {
+ if atomic.AddInt32((*int32)(o), -1) != 0 {
+ panic("BUG: invalid ref")
+ }
+}
+
+type releaserFunc struct {
+ fn func()
+ value Value
+}
+
+func (r releaserFunc) Release() {
+ if r.fn != nil {
+ r.fn()
+ }
+}
+
+func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
+ return c.Get(ns, key, func() (int, Value) {
+ if relf != nil {
+ return charge, releaserFunc{relf, value}
+ } else {
+ return charge, value
+ }
+ })
+}
+
+func TestCacheMap(t *testing.T) {
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ nsx := []struct {
+ nobjects, nhandles, concurrent, repeat int
+ }{
+ {10000, 400, 50, 3},
+ {100000, 1000, 100, 10},
+ }
+
+ var (
+ objects [][]int32o
+ handles [][]unsafe.Pointer
+ )
+
+ for _, x := range nsx {
+ objects = append(objects, make([]int32o, x.nobjects))
+ handles = append(handles, make([]unsafe.Pointer, x.nhandles))
+ }
+
+ c := NewCache(nil)
+
+ wg := new(sync.WaitGroup)
+ var done int32
+
+ for ns, x := range nsx {
+ for i := 0; i < x.concurrent; i++ {
+ wg.Add(1)
+ go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
+ defer wg.Done()
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+ for j := len(objects) * repeat; j >= 0; j-- {
+ key := uint64(r.Intn(len(objects)))
+ h := c.Get(uint64(ns), key, func() (int, Value) {
+ o := &objects[key]
+ o.acquire()
+ return 1, o
+ })
+ if v := h.Value().(*int32o); v != &objects[key] {
+ t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
+ }
+ if objects[key] != 1 {
+ t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
+ }
+ if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
+ h.Release()
+ }
+ }
+ }(ns, i, x.repeat, objects[ns], handles[ns])
+ }
+
+ go func(handles []unsafe.Pointer) {
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+ for atomic.LoadInt32(&done) == 0 {
+ i := r.Intn(len(handles))
+ h := (*Handle)(atomic.LoadPointer(&handles[i]))
+ if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
+ h.Release()
+ }
+ time.Sleep(time.Millisecond)
+ }
+ }(handles[ns])
+ }
+
+ go func() {
+ handles := make([]*Handle, 100000)
+ for atomic.LoadInt32(&done) == 0 {
+ for i := range handles {
+ handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
+ return 1, 1
+ })
+ }
+ for _, h := range handles {
+ h.Release()
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ atomic.StoreInt32(&done, 1)
+
+ for _, handles0 := range handles {
+ for i := range handles0 {
+ h := (*Handle)(atomic.LoadPointer(&handles0[i]))
+ if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
+ h.Release()
+ }
+ }
+ }
+
+ for ns, objects0 := range objects {
+ for i, o := range objects0 {
+ if o != 0 {
+ t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
+ }
+ }
+ }
+}
+
+func TestCacheMap_NodesAndSize(t *testing.T) {
+ c := NewCache(nil)
+ if c.Nodes() != 0 {
+ t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
+ }
+ if c.Size() != 0 {
+ t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
+ }
+ set(c, 0, 1, 1, 1, nil)
+ set(c, 0, 2, 2, 2, nil)
+ set(c, 1, 1, 3, 3, nil)
+ set(c, 2, 1, 4, 1, nil)
+ if c.Nodes() != 4 {
+ t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
+ }
+ if c.Size() != 7 {
+		t.Errorf("invalid size counter: want=%d got=%d", 7, c.Size())
+ }
+}
+
+func TestLRUCache_Capacity(t *testing.T) {
+ c := NewCache(NewLRU(10))
+ if c.Capacity() != 10 {
+ t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
+ }
+ set(c, 0, 1, 1, 1, nil).Release()
+ set(c, 0, 2, 2, 2, nil).Release()
+ set(c, 1, 1, 3, 3, nil).Release()
+ set(c, 2, 1, 4, 1, nil).Release()
+ set(c, 2, 2, 5, 1, nil).Release()
+ set(c, 2, 3, 6, 1, nil).Release()
+ set(c, 2, 4, 7, 1, nil).Release()
+ set(c, 2, 5, 8, 1, nil).Release()
+ if c.Nodes() != 7 {
+ t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
+ }
+ if c.Size() != 10 {
+ t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
+ }
+ c.SetCapacity(9)
+ if c.Capacity() != 9 {
+ t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
+ }
+ if c.Nodes() != 6 {
+ t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
+ }
+ if c.Size() != 8 {
+ t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
+ }
+}
+
+func TestCacheMap_NilValue(t *testing.T) {
+ c := NewCache(NewLRU(10))
+ h := c.Get(0, 0, func() (size int, value Value) {
+ return 1, nil
})
- return obj
+ if h != nil {
+ t.Error("cache handle is non-nil")
+ }
+ if c.Nodes() != 0 {
+ t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
+ }
+ if c.Size() != 0 {
+ t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
+ }
}
-func TestCache_HitMiss(t *testing.T) {
+func TestLRUCache_GetLatency(t *testing.T) {
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ const (
+ concurrentSet = 30
+ concurrentGet = 3
+ duration = 3 * time.Second
+ delay = 3 * time.Millisecond
+ maxkey = 100000
+ )
+
+ var (
+ set, getHit, getAll int32
+ getMaxLatency, getDuration int64
+ )
+
+ c := NewCache(NewLRU(5000))
+ wg := &sync.WaitGroup{}
+ until := time.Now().Add(duration)
+ for i := 0; i < concurrentSet; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for time.Now().Before(until) {
+ c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
+ time.Sleep(delay)
+ atomic.AddInt32(&set, 1)
+ return 1, 1
+ }).Release()
+ }
+ }(i)
+ }
+ for i := 0; i < concurrentGet; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for {
+ mark := time.Now()
+ if mark.Before(until) {
+ h := c.Get(0, uint64(r.Intn(maxkey)), nil)
+ latency := int64(time.Now().Sub(mark))
+ m := atomic.LoadInt64(&getMaxLatency)
+ if latency > m {
+ atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
+ }
+ atomic.AddInt64(&getDuration, latency)
+ if h != nil {
+ atomic.AddInt32(&getHit, 1)
+ h.Release()
+ }
+ atomic.AddInt32(&getAll, 1)
+ } else {
+ break
+ }
+ }
+ }(i)
+ }
+
+ wg.Wait()
+ getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
+ t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
+ set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
+
+ if getAvglatency > delay/3 {
+ t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
+ }
+}
+
+func TestLRUCache_HitMiss(t *testing.T) {
cases := []struct {
key uint64
value string
@@ -36,36 +308,37 @@ func TestCache_HitMiss(t *testing.T) {
}
setfin := 0
- c := NewLRUCache(1000)
- ns := c.GetNamespace(0)
+ c := NewCache(NewLRU(1000))
for i, x := range cases {
- set(ns, x.key, x.value, len(x.value), func() {
+ set(c, 0, x.key, x.value, len(x.value), func() {
setfin++
}).Release()
for j, y := range cases {
- r, ok := ns.Get(y.key, nil)
+ h := c.Get(0, y.key, nil)
if j <= i {
// should hit
- if !ok {
+ if h == nil {
t.Errorf("case '%d' iteration '%d' is miss", i, j)
- } else if r.Value().(string) != y.value {
- t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value)
+ } else {
+ if x := h.Value().(releaserFunc).value.(string); x != y.value {
+ t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
+ }
}
} else {
// should miss
- if ok {
- t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, r.Value().(string))
+ if h != nil {
+ t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
}
}
- if ok {
- r.Release()
+ if h != nil {
+ h.Release()
}
}
}
for i, x := range cases {
finalizerOk := false
- ns.Delete(x.key, func(exist bool) {
+ c.Delete(0, x.key, func() {
finalizerOk = true
})
@@ -74,22 +347,24 @@ func TestCache_HitMiss(t *testing.T) {
}
for j, y := range cases {
- r, ok := ns.Get(y.key, nil)
+ h := c.Get(0, y.key, nil)
if j > i {
// should hit
- if !ok {
+ if h == nil {
t.Errorf("case '%d' iteration '%d' is miss", i, j)
- } else if r.Value().(string) != y.value {
- t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value)
+ } else {
+ if x := h.Value().(releaserFunc).value.(string); x != y.value {
+ t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
+ }
}
} else {
// should miss
- if ok {
- t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, r.Value().(string))
+ if h != nil {
+ t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
}
}
- if ok {
- r.Release()
+ if h != nil {
+ h.Release()
}
}
}
@@ -100,137 +375,180 @@ func TestCache_HitMiss(t *testing.T) {
}
func TestLRUCache_Eviction(t *testing.T) {
- c := NewLRUCache(12)
- ns := c.GetNamespace(0)
- o1 := set(ns, 1, 1, 1, nil)
- set(ns, 2, 2, 1, nil).Release()
- set(ns, 3, 3, 1, nil).Release()
- set(ns, 4, 4, 1, nil).Release()
- set(ns, 5, 5, 1, nil).Release()
- if r, ok := ns.Get(2, nil); ok { // 1,3,4,5,2
- r.Release()
- }
- set(ns, 9, 9, 10, nil).Release() // 5,2,9
-
- for _, x := range []uint64{9, 2, 5, 1} {
- r, ok := ns.Get(x, nil)
- if !ok {
- t.Errorf("miss for key '%d'", x)
+ c := NewCache(NewLRU(12))
+ o1 := set(c, 0, 1, 1, 1, nil)
+ set(c, 0, 2, 2, 1, nil).Release()
+ set(c, 0, 3, 3, 1, nil).Release()
+ set(c, 0, 4, 4, 1, nil).Release()
+ set(c, 0, 5, 5, 1, nil).Release()
+ if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
+ h.Release()
+ }
+ set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
+
+ for _, key := range []uint64{9, 2, 5, 1} {
+ h := c.Get(0, key, nil)
+ if h == nil {
+ t.Errorf("miss for key '%d'", key)
} else {
- if r.Value().(int) != int(x) {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+ if x := h.Value().(int); x != int(key) {
+ t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
- r.Release()
+ h.Release()
}
}
o1.Release()
- for _, x := range []uint64{1, 2, 5} {
- r, ok := ns.Get(x, nil)
- if !ok {
- t.Errorf("miss for key '%d'", x)
+ for _, key := range []uint64{1, 2, 5} {
+ h := c.Get(0, key, nil)
+ if h == nil {
+ t.Errorf("miss for key '%d'", key)
} else {
- if r.Value().(int) != int(x) {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+ if x := h.Value().(int); x != int(key) {
+ t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
- r.Release()
+ h.Release()
}
}
- for _, x := range []uint64{3, 4, 9} {
- r, ok := ns.Get(x, nil)
- if ok {
- t.Errorf("hit for key '%d'", x)
- if r.Value().(int) != int(x) {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+ for _, key := range []uint64{3, 4, 9} {
+ h := c.Get(0, key, nil)
+ if h != nil {
+ t.Errorf("hit for key '%d'", key)
+ if x := h.Value().(int); x != int(key) {
+ t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
- r.Release()
+ h.Release()
}
}
}
-func TestLRUCache_SetGet(t *testing.T) {
- c := NewLRUCache(13)
- ns := c.GetNamespace(0)
- for i := 0; i < 200; i++ {
- n := uint64(rand.Intn(99999) % 20)
- set(ns, n, n, 1, nil).Release()
- if p, ok := ns.Get(n, nil); ok {
- if p.Value() == nil {
- t.Errorf("key '%d' contains nil value", n)
+func TestLRUCache_Evict(t *testing.T) {
+ c := NewCache(NewLRU(6))
+ set(c, 0, 1, 1, 1, nil).Release()
+ set(c, 0, 2, 2, 1, nil).Release()
+ set(c, 1, 1, 4, 1, nil).Release()
+ set(c, 1, 2, 5, 1, nil).Release()
+ set(c, 2, 1, 6, 1, nil).Release()
+ set(c, 2, 2, 7, 1, nil).Release()
+
+ for ns := 0; ns < 3; ns++ {
+ for key := 1; key < 3; key++ {
+ if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
+ h.Release()
} else {
- got := p.Value().(uint64)
- if got != n {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, got)
- }
+ t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
}
- p.Release()
- } else {
- t.Errorf("key '%d' doesn't exist", n)
}
}
-}
-func TestLRUCache_Purge(t *testing.T) {
- c := NewLRUCache(3)
- ns1 := c.GetNamespace(0)
- o1 := set(ns1, 1, 1, 1, nil)
- o2 := set(ns1, 2, 2, 1, nil)
- ns1.Purge(nil)
- set(ns1, 3, 3, 1, nil).Release()
- for _, x := range []uint64{1, 2, 3} {
- r, ok := ns1.Get(x, nil)
- if !ok {
- t.Errorf("miss for key '%d'", x)
- } else {
- if r.Value().(int) != int(x) {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+ if ok := c.Evict(0, 1); !ok {
+ t.Error("first Cache.Evict on #0.1 return false")
+ }
+ if ok := c.Evict(0, 1); ok {
+ t.Error("second Cache.Evict on #0.1 return true")
+ }
+ if h := c.Get(0, 1, nil); h != nil {
+ t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
+ }
+
+ c.EvictNS(1)
+ if h := c.Get(1, 1, nil); h != nil {
+ t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
+ }
+ if h := c.Get(1, 2, nil); h != nil {
+ t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
+ }
+
+ c.EvictAll()
+ for ns := 0; ns < 3; ns++ {
+ for key := 1; key < 3; key++ {
+ if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
+ t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
}
- r.Release()
}
}
- o1.Release()
- o2.Release()
- for _, x := range []uint64{1, 2} {
- r, ok := ns1.Get(x, nil)
- if ok {
- t.Errorf("hit for key '%d'", x)
- if r.Value().(int) != int(x) {
- t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
- }
- r.Release()
+}
+
+func TestLRUCache_Delete(t *testing.T) {
+ delFuncCalled := 0
+ delFunc := func() {
+ delFuncCalled++
+ }
+
+ c := NewCache(NewLRU(2))
+ set(c, 0, 1, 1, 1, nil).Release()
+ set(c, 0, 2, 2, 1, nil).Release()
+
+ if ok := c.Delete(0, 1, delFunc); !ok {
+ t.Error("Cache.Delete on #1 return false")
+ }
+ if h := c.Get(0, 1, nil); h != nil {
+ t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
+ }
+ if ok := c.Delete(0, 1, delFunc); ok {
+ t.Error("Cache.Delete on #1 return true")
+ }
+
+ h2 := c.Get(0, 2, nil)
+ if h2 == nil {
+ t.Error("Cache.Get on #2 return nil")
+ }
+ if ok := c.Delete(0, 2, delFunc); !ok {
+ t.Error("(1) Cache.Delete on #2 return false")
+ }
+ if ok := c.Delete(0, 2, delFunc); !ok {
+ t.Error("(2) Cache.Delete on #2 return false")
+ }
+
+ set(c, 0, 3, 3, 1, nil).Release()
+ set(c, 0, 4, 4, 1, nil).Release()
+ c.Get(0, 2, nil).Release()
+
+ for key := 2; key <= 4; key++ {
+ if h := c.Get(0, uint64(key), nil); h != nil {
+ h.Release()
+ } else {
+ t.Errorf("Cache.Get on #%d return nil", key)
}
}
-}
-func BenchmarkLRUCache_SetRelease(b *testing.B) {
- capacity := b.N / 100
- if capacity <= 0 {
- capacity = 10
+ h2.Release()
+ if h := c.Get(0, 2, nil); h != nil {
+ t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
}
- c := NewLRUCache(capacity)
- ns := c.GetNamespace(0)
- b.ResetTimer()
- for i := uint64(0); i < uint64(b.N); i++ {
- set(ns, i, nil, 1, nil).Release()
+
+ if delFuncCalled != 4 {
+ t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
}
}
-func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) {
- capacity := b.N / 100
- if capacity <= 0 {
- capacity = 10
+func TestLRUCache_Close(t *testing.T) {
+ relFuncCalled := 0
+ relFunc := func() {
+ relFuncCalled++
+ }
+ delFuncCalled := 0
+ delFunc := func() {
+ delFuncCalled++
}
- c := NewLRUCache(capacity)
- ns := c.GetNamespace(0)
- b.ResetTimer()
- na := b.N / 2
- nb := b.N - na
+ c := NewCache(NewLRU(2))
+ set(c, 0, 1, 1, 1, relFunc).Release()
+ set(c, 0, 2, 2, 1, relFunc).Release()
- for i := uint64(0); i < uint64(na); i++ {
- set(ns, i, nil, 1, nil).Release()
+ h3 := set(c, 0, 3, 3, 1, relFunc)
+ if h3 == nil {
+ t.Error("Cache.Get on #3 return nil")
}
+ if ok := c.Delete(0, 3, delFunc); !ok {
+ t.Error("Cache.Delete on #3 return false")
+ }
+
+ c.Close()
- for i := uint64(0); i < uint64(nb); i++ {
- set(ns, i, nil, 1, nil).Release()
+ if relFuncCalled != 3 {
+ t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
+ }
+ if delFuncCalled != 1 {
+ t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
}
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go
deleted file mode 100644
index 1fbf81459..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package cache
-
-import (
- "sync"
- "sync/atomic"
-)
-
-type emptyCache struct {
- sync.Mutex
- table map[uint64]*emptyNS
-}
-
-// NewEmptyCache creates a new initialized empty cache.
-func NewEmptyCache() Cache {
- return &emptyCache{
- table: make(map[uint64]*emptyNS),
- }
-}
-
-func (c *emptyCache) GetNamespace(id uint64) Namespace {
- c.Lock()
- defer c.Unlock()
-
- if ns, ok := c.table[id]; ok {
- return ns
- }
-
- ns := &emptyNS{
- cache: c,
- id: id,
- table: make(map[uint64]*emptyNode),
- }
- c.table[id] = ns
- return ns
-}
-
-func (c *emptyCache) Purge(fin PurgeFin) {
- c.Lock()
- for _, ns := range c.table {
- ns.purgeNB(fin)
- }
- c.Unlock()
-}
-
-func (c *emptyCache) Zap(closed bool) {
- c.Lock()
- for _, ns := range c.table {
- ns.zapNB(closed)
- }
- c.table = make(map[uint64]*emptyNS)
- c.Unlock()
-}
-
-func (*emptyCache) SetCapacity(capacity int) {}
-
-type emptyNS struct {
- cache *emptyCache
- id uint64
- table map[uint64]*emptyNode
- state nsState
-}
-
-func (ns *emptyNS) Get(key uint64, setf SetFunc) (o Object, ok bool) {
- ns.cache.Lock()
-
- switch ns.state {
- case nsZapped:
- ns.cache.Unlock()
- if setf == nil {
- return
- }
-
- var value interface{}
- var fin func()
- ok, value, _, fin = setf()
- if ok {
- o = &fakeObject{
- value: value,
- fin: fin,
- }
- }
- return
- case nsClosed:
- ns.cache.Unlock()
- return
- }
-
- n, ok := ns.table[key]
- if ok {
- n.ref++
- } else {
- if setf == nil {
- ns.cache.Unlock()
- return
- }
-
- var value interface{}
- var fin func()
- ok, value, _, fin = setf()
- if !ok {
- ns.cache.Unlock()
- return
- }
-
- n = &emptyNode{
- ns: ns,
- key: key,
- value: value,
- setfin: fin,
- ref: 1,
- }
- ns.table[key] = n
- }
-
- ns.cache.Unlock()
- o = &emptyObject{node: n}
- return
-}
-
-func (ns *emptyNS) Delete(key uint64, fin DelFin) bool {
- ns.cache.Lock()
-
- if ns.state != nsEffective {
- ns.cache.Unlock()
- if fin != nil {
- fin(false)
- }
- return false
- }
-
- n, ok := ns.table[key]
- if !ok {
- ns.cache.Unlock()
- if fin != nil {
- fin(false)
- }
- return false
- }
- n.delfin = fin
- ns.cache.Unlock()
- return true
-}
-
-func (ns *emptyNS) purgeNB(fin PurgeFin) {
- if ns.state != nsEffective {
- return
- }
- for _, n := range ns.table {
- n.purgefin = fin
- }
-}
-
-func (ns *emptyNS) Purge(fin PurgeFin) {
- ns.cache.Lock()
- ns.purgeNB(fin)
- ns.cache.Unlock()
-}
-
-func (ns *emptyNS) zapNB(closed bool) {
- if ns.state != nsEffective {
- return
- }
- for _, n := range ns.table {
- n.execFin()
- }
- if closed {
- ns.state = nsClosed
- } else {
- ns.state = nsZapped
- }
- ns.table = nil
-}
-
-func (ns *emptyNS) Zap(closed bool) {
- ns.cache.Lock()
- ns.zapNB(closed)
- delete(ns.cache.table, ns.id)
- ns.cache.Unlock()
-}
-
-type emptyNode struct {
- ns *emptyNS
- key uint64
- value interface{}
- ref int
- setfin SetFin
- delfin DelFin
- purgefin PurgeFin
-}
-
-func (n *emptyNode) execFin() {
- if n.setfin != nil {
- n.setfin()
- n.setfin = nil
- }
- if n.purgefin != nil {
- n.purgefin(n.ns.id, n.key, n.delfin)
- n.delfin = nil
- n.purgefin = nil
- } else if n.delfin != nil {
- n.delfin(true)
- n.delfin = nil
- }
-}
-
-func (n *emptyNode) evict() {
- n.ns.cache.Lock()
- n.ref--
- if n.ref == 0 {
- if n.ns.state == nsEffective {
- // Remove elem.
- delete(n.ns.table, n.key)
- // Execute finalizer.
- n.execFin()
- }
- } else if n.ref < 0 {
- panic("leveldb/cache: emptyNode: negative node reference")
- }
- n.ns.cache.Unlock()
-}
-
-type emptyObject struct {
- node *emptyNode
- once uint32
-}
-
-func (o *emptyObject) Value() interface{} {
- if atomic.LoadUint32(&o.once) == 0 {
- return o.node.value
- }
- return nil
-}
-
-func (o *emptyObject) Release() {
- if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
- return
- }
- o.node.evict()
- o.node = nil
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go
new file mode 100644
index 000000000..d9a84cde1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go
@@ -0,0 +1,195 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cache
+
+import (
+ "sync"
+ "unsafe"
+)
+
+type lruNode struct {
+ n *Node
+ h *Handle
+ ban bool
+
+ next, prev *lruNode
+}
+
+func (n *lruNode) insert(at *lruNode) {
+ x := at.next
+ at.next = n
+ n.prev = at
+ n.next = x
+ x.prev = n
+}
+
+func (n *lruNode) remove() {
+ if n.prev != nil {
+ n.prev.next = n.next
+ n.next.prev = n.prev
+ n.prev = nil
+ n.next = nil
+ } else {
+ panic("BUG: removing removed node")
+ }
+}
+
+type lru struct {
+ mu sync.Mutex
+ capacity int
+ used int
+ recent lruNode
+}
+
+func (r *lru) reset() {
+ r.recent.next = &r.recent
+ r.recent.prev = &r.recent
+ r.used = 0
+}
+
+func (r *lru) Capacity() int {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ return r.capacity
+}
+
+func (r *lru) SetCapacity(capacity int) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ r.capacity = capacity
+ for r.used > r.capacity {
+ rn := r.recent.prev
+ if rn == nil {
+ panic("BUG: invalid LRU used or capacity counter")
+ }
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Promote(n *Node) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ if n.CacheData == nil {
+ if n.Size() <= r.capacity {
+ rn := &lruNode{n: n, h: n.GetHandle()}
+ rn.insert(&r.recent)
+ n.CacheData = unsafe.Pointer(rn)
+ r.used += n.Size()
+
+ for r.used > r.capacity {
+ rn := r.recent.prev
+ if rn == nil {
+ panic("BUG: invalid LRU used or capacity counter")
+ }
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ }
+ } else {
+ rn := (*lruNode)(n.CacheData)
+ if !rn.ban {
+ rn.remove()
+ rn.insert(&r.recent)
+ }
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Ban(n *Node) {
+ r.mu.Lock()
+ if n.CacheData == nil {
+ n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
+ } else {
+ rn := (*lruNode)(n.CacheData)
+ if !rn.ban {
+ rn.remove()
+ rn.ban = true
+ r.used -= rn.n.Size()
+ r.mu.Unlock()
+
+ rn.h.Release()
+ rn.h = nil
+ return
+ }
+ }
+ r.mu.Unlock()
+}
+
+func (r *lru) Evict(n *Node) {
+ r.mu.Lock()
+ rn := (*lruNode)(n.CacheData)
+ if rn == nil || rn.ban {
+ r.mu.Unlock()
+ return
+ }
+ n.CacheData = nil
+ r.mu.Unlock()
+
+ rn.h.Release()
+}
+
+func (r *lru) EvictNS(ns uint64) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ for e := r.recent.prev; e != &r.recent; {
+ rn := e
+ e = e.prev
+ if rn.n.NS() == ns {
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) EvictAll() {
+ r.mu.Lock()
+ back := r.recent.prev
+ for rn := back; rn != &r.recent; rn = rn.prev {
+ rn.n.CacheData = nil
+ }
+ r.reset()
+ r.mu.Unlock()
+
+ for rn := back; rn != &r.recent; rn = rn.prev {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Close() error {
+ return nil
+}
+
+// NewLRU creates a new LRU-cache.
+func NewLRU(capacity int) Cacher {
+ r := &lru{capacity: capacity}
+ r.reset()
+ return r
+}
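+
+// exampleNewLRU is a hypothetical usage sketch (illustrative only): the LRU
+// cacher is meant to be handed to NewCache, and its capacity is expressed in
+// the same units as the per-node sizes returned by Get's setFunc.
+func exampleNewLRU() *Cache {
+	return NewCache(NewLRU(16 * 1024 * 1024)) // ~16 MiB worth of cached data
+}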
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go
deleted file mode 100644
index 3c98e076b..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package cache
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// lruCache represent a LRU cache state.
-type lruCache struct {
- sync.Mutex
-
- recent lruNode
- table map[uint64]*lruNs
- capacity int
- size int
-}
-
-// NewLRUCache creates a new initialized LRU cache with the given capacity.
-func NewLRUCache(capacity int) Cache {
- c := &lruCache{
- table: make(map[uint64]*lruNs),
- capacity: capacity,
- }
- c.recent.rNext = &c.recent
- c.recent.rPrev = &c.recent
- return c
-}
-
-// SetCapacity set cache capacity.
-func (c *lruCache) SetCapacity(capacity int) {
- c.Lock()
- c.capacity = capacity
- c.evict()
- c.Unlock()
-}
-
-// GetNamespace return namespace object for given id.
-func (c *lruCache) GetNamespace(id uint64) Namespace {
- c.Lock()
- defer c.Unlock()
-
- if p, ok := c.table[id]; ok {
- return p
- }
-
- p := &lruNs{
- lru: c,
- id: id,
- table: make(map[uint64]*lruNode),
- }
- c.table[id] = p
- return p
-}
-
-// Purge purge entire cache.
-func (c *lruCache) Purge(fin PurgeFin) {
- c.Lock()
- for _, ns := range c.table {
- ns.purgeNB(fin)
- }
- c.Unlock()
-}
-
-func (c *lruCache) Zap(closed bool) {
- c.Lock()
- for _, ns := range c.table {
- ns.zapNB(closed)
- }
- c.table = make(map[uint64]*lruNs)
- c.Unlock()
-}
-
-func (c *lruCache) evict() {
- top := &c.recent
- for n := c.recent.rPrev; c.size > c.capacity && n != top; {
- n.state = nodeEvicted
- n.rRemove()
- n.evictNB()
- c.size -= n.charge
- n = c.recent.rPrev
- }
-}
-
-type lruNs struct {
- lru *lruCache
- id uint64
- table map[uint64]*lruNode
- state nsState
-}
-
-func (ns *lruNs) Get(key uint64, setf SetFunc) (o Object, ok bool) {
- lru := ns.lru
- lru.Lock()
-
- switch ns.state {
- case nsZapped:
- lru.Unlock()
- if setf == nil {
- return
- }
-
- var value interface{}
- var fin func()
- ok, value, _, fin = setf()
- if ok {
- o = &fakeObject{
- value: value,
- fin: fin,
- }
- }
- return
- case nsClosed:
- lru.Unlock()
- return
- }
-
- n, ok := ns.table[key]
- if ok {
- switch n.state {
- case nodeEvicted:
- // Insert to recent list.
- n.state = nodeEffective
- n.ref++
- lru.size += n.charge
- lru.evict()
- fallthrough
- case nodeEffective:
- // Bump to front
- n.rRemove()
- n.rInsert(&lru.recent)
- }
- n.ref++
- } else {
- if setf == nil {
- lru.Unlock()
- return
- }
-
- var value interface{}
- var charge int
- var fin func()
- ok, value, charge, fin = setf()
- if !ok {
- lru.Unlock()
- return
- }
-
- n = &lruNode{
- ns: ns,
- key: key,
- value: value,
- charge: charge,
- setfin: fin,
- ref: 2,
- }
- ns.table[key] = n
- n.rInsert(&lru.recent)
-
- lru.size += charge
- lru.evict()
- }
-
- lru.Unlock()
- o = &lruObject{node: n}
- return
-}
-
-func (ns *lruNs) Delete(key uint64, fin DelFin) bool {
- lru := ns.lru
- lru.Lock()
-
- if ns.state != nsEffective {
- lru.Unlock()
- if fin != nil {
- fin(false)
- }
- return false
- }
-
- n, ok := ns.table[key]
- if !ok {
- lru.Unlock()
- if fin != nil {
- fin(false)
- }
- return false
- }
-
- n.delfin = fin
- switch n.state {
- case nodeRemoved:
- lru.Unlock()
- return false
- case nodeEffective:
- lru.size -= n.charge
- n.rRemove()
- n.evictNB()
- }
- n.state = nodeRemoved
-
- lru.Unlock()
- return true
-}
-
-func (ns *lruNs) purgeNB(fin PurgeFin) {
- lru := ns.lru
- if ns.state != nsEffective {
- return
- }
-
- for _, n := range ns.table {
- n.purgefin = fin
- if n.state == nodeEffective {
- lru.size -= n.charge
- n.rRemove()
- n.evictNB()
- }
- n.state = nodeRemoved
- }
-}
-
-func (ns *lruNs) Purge(fin PurgeFin) {
- ns.lru.Lock()
- ns.purgeNB(fin)
- ns.lru.Unlock()
-}
-
-func (ns *lruNs) zapNB(closed bool) {
- lru := ns.lru
- if ns.state != nsEffective {
- return
- }
-
- if closed {
- ns.state = nsClosed
- } else {
- ns.state = nsZapped
- }
- for _, n := range ns.table {
- if n.state == nodeEffective {
- lru.size -= n.charge
- n.rRemove()
- }
- n.state = nodeRemoved
- n.execFin()
- }
- ns.table = nil
-}
-
-func (ns *lruNs) Zap(closed bool) {
- ns.lru.Lock()
- ns.zapNB(closed)
- delete(ns.lru.table, ns.id)
- ns.lru.Unlock()
-}
-
-type lruNode struct {
- ns *lruNs
-
- rNext, rPrev *lruNode
-
- key uint64
- value interface{}
- charge int
- ref int
- state nodeState
- setfin SetFin
- delfin DelFin
- purgefin PurgeFin
-}
-
-func (n *lruNode) rInsert(at *lruNode) {
- x := at.rNext
- at.rNext = n
- n.rPrev = at
- n.rNext = x
- x.rPrev = n
-}
-
-func (n *lruNode) rRemove() bool {
- // only remove if not already removed
- if n.rPrev == nil {
- return false
- }
-
- n.rPrev.rNext = n.rNext
- n.rNext.rPrev = n.rPrev
- n.rPrev = nil
- n.rNext = nil
-
- return true
-}
-
-func (n *lruNode) execFin() {
- if n.setfin != nil {
- n.setfin()
- n.setfin = nil
- }
- if n.purgefin != nil {
- n.purgefin(n.ns.id, n.key, n.delfin)
- n.delfin = nil
- n.purgefin = nil
- } else if n.delfin != nil {
- n.delfin(true)
- n.delfin = nil
- }
-}
-
-func (n *lruNode) evictNB() {
- n.ref--
- if n.ref == 0 {
- if n.ns.state == nsEffective {
- // remove elem
- delete(n.ns.table, n.key)
- // execute finalizer
- n.execFin()
- }
- } else if n.ref < 0 {
- panic("leveldb/cache: lruCache: negative node reference")
- }
-}
-
-func (n *lruNode) evict() {
- n.ns.lru.Lock()
- n.evictNB()
- n.ns.lru.Unlock()
-}
-
-type lruObject struct {
- node *lruNode
- once uint32
-}
-
-func (o *lruObject) Value() interface{} {
- if atomic.LoadUint32(&o.once) == 0 {
- return o.node.value
- }
- return nil
-}
-
-func (o *lruObject) Release() {
- if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
- return
- }
-
- o.node.evict()
- o.node = nil
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go
deleted file mode 100644
index 511058897..000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package leveldb
-
-const (
- kNumLevels = 7
-
- // Level-0 compaction is started when we hit this many files.
- kL0_CompactionTrigger float64 = 4
-
- // Soft limit on number of level-0 files. We slow down writes at this point.
- kL0_SlowdownWritesTrigger = 8
-
- // Maximum number of level-0 files. We stop writes at this point.
- kL0_StopWritesTrigger = 12
-
- // Maximum level to which a new compacted memdb is pushed if it
- // does not create overlap. We try to push to level 2 to avoid the
- // relatively expensive level 0=>1 compactions and to avoid some
- // expensive manifest file operations. We do not push all the way to
- // the largest level since that can generate a lot of wasted disk
- // space if the same key space is being repeatedly overwritten.
- kMaxMemCompactLevel = 2
-
- // Maximum size of a table.
- kMaxTableSize = 2 * 1048576
-
- // Maximum bytes of overlaps in grandparent (i.e., level+2) before we
- // stop building a single file in a level->level+1 compaction.
- kMaxGrandParentOverlapBytes = 10 * kMaxTableSize
-
- // Maximum number of bytes in all compacted files. We avoid expanding
- // the lower level file set of a compaction if it would make the
- // total compaction cover more than this many bytes.
- kExpCompactionMaxBytes = 25 * kMaxTableSize
-)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
index a036e0893..a351874ed 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
@@ -9,13 +9,12 @@ package leveldb
import (
"bytes"
"fmt"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
"io"
"math/rand"
"testing"
-
- "github.com/syndtr/goleveldb/leveldb/cache"
- "github.com/syndtr/goleveldb/leveldb/opt"
- "github.com/syndtr/goleveldb/leveldb/storage"
)
const ctValSize = 1000
@@ -32,8 +31,8 @@ func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
return newDbCorruptHarnessWopt(t, &opt.Options{
- BlockCache: cache.NewLRUCache(100),
- Strict: opt.StrictJournalChecksum,
+ BlockCacheCapacity: 100,
+ Strict: opt.StrictJournalChecksum,
})
}
@@ -96,21 +95,22 @@ func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) {
}
}
-func (h *dbCorruptHarness) corrupt(ft storage.FileType, offset, n int) {
+func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
p := &h.dbHarness
t := p.t
- var file storage.File
ff, _ := p.stor.GetFiles(ft)
- for _, f := range ff {
- if file == nil || f.Num() > file.Num() {
- file = f
- }
+ sff := files(ff)
+ sff.sort()
+ if fi < 0 {
+ fi = len(sff) - 1
}
- if file == nil {
- t.Fatalf("no such file with type %q", ft)
+ if fi >= len(sff) {
+ t.Fatalf("no such file with type %q with index %d", ft, fi)
}
+ file := sff[fi]
+
r, err := file.Open()
if err != nil {
t.Fatal("cannot open file: ", err)
@@ -225,8 +225,8 @@ func TestCorruptDB_Journal(t *testing.T) {
h.build(100)
h.check(100, 100)
h.closeDB()
- h.corrupt(storage.TypeJournal, 19, 1)
- h.corrupt(storage.TypeJournal, 32*1024+1000, 1)
+ h.corrupt(storage.TypeJournal, -1, 19, 1)
+ h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1)
h.openDB()
h.check(36, 36)
@@ -242,7 +242,7 @@ func TestCorruptDB_Table(t *testing.T) {
h.compactRangeAt(0, "", "")
h.compactRangeAt(1, "", "")
h.closeDB()
- h.corrupt(storage.TypeTable, 100, 1)
+ h.corrupt(storage.TypeTable, -1, 100, 1)
h.openDB()
h.check(99, 99)
@@ -256,7 +256,7 @@ func TestCorruptDB_TableIndex(t *testing.T) {
h.build(10000)
h.compactMem()
h.closeDB()
- h.corrupt(storage.TypeTable, -2000, 500)
+ h.corrupt(storage.TypeTable, -1, -2000, 500)
h.openDB()
h.check(5000, 9999)
@@ -267,9 +267,9 @@ func TestCorruptDB_TableIndex(t *testing.T) {
func TestCorruptDB_MissingManifest(t *testing.T) {
rnd := rand.New(rand.NewSource(0x0badda7a))
h := newDbCorruptHarnessWopt(t, &opt.Options{
- BlockCache: cache.NewLRUCache(100),
- Strict: opt.StrictJournalChecksum,
- WriteBuffer: 1000 * 60,
+ BlockCacheCapacity: 100,
+ Strict: opt.StrictJournalChecksum,
+ WriteBuffer: 1000 * 60,
})
h.build(1000)
@@ -355,7 +355,7 @@ func TestCorruptDB_CorruptedManifest(t *testing.T) {
h.compactMem()
h.compactRange("", "")
h.closeDB()
- h.corrupt(storage.TypeManifest, 0, 1000)
+ h.corrupt(storage.TypeManifest, -1, 0, 1000)
h.openAssert(false)
h.recover()
@@ -370,7 +370,7 @@ func TestCorruptDB_CompactionInputError(t *testing.T) {
h.build(10)
h.compactMem()
h.closeDB()
- h.corrupt(storage.TypeTable, 100, 1)
+ h.corrupt(storage.TypeTable, -1, 100, 1)
h.openDB()
h.check(9, 9)
@@ -387,7 +387,7 @@ func TestCorruptDB_UnrelatedKeys(t *testing.T) {
h.build(10)
h.compactMem()
h.closeDB()
- h.corrupt(storage.TypeTable, 100, 1)
+ h.corrupt(storage.TypeTable, -1, 100, 1)
h.openDB()
h.put(string(tkey(1000)), string(tval(1000, ctValSize)))
@@ -470,3 +470,31 @@ func TestCorruptDB_MissingTableFiles(t *testing.T) {
h.close()
}
+
+func TestCorruptDB_RecoverTable(t *testing.T) {
+ h := newDbCorruptHarnessWopt(t, &opt.Options{
+ WriteBuffer: 112 * opt.KiB,
+ CompactionTableSize: 90 * opt.KiB,
+ Filter: filter.NewBloomFilter(10),
+ })
+
+ h.build(1000)
+ h.compactMem()
+ h.compactRangeAt(0, "", "")
+ h.compactRangeAt(1, "", "")
+ seq := h.db.seq
+ h.closeDB()
+ h.corrupt(storage.TypeTable, 0, 1000, 1)
+ h.corrupt(storage.TypeTable, 3, 10000, 1)
+ // Corrupted filter shouldn't affect recovery.
+ h.corrupt(storage.TypeTable, 3, 113888, 10)
+ h.corrupt(storage.TypeTable, -1, 20000, 1)
+
+ h.recover()
+ if h.db.seq != seq {
+ t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
+ }
+ h.check(985, 985)
+
+ h.close()
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
index 8e975dc63..323353b2a 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
@@ -7,15 +7,17 @@
package leveldb
import (
- "errors"
+ "container/list"
"fmt"
"io"
"os"
"runtime"
"strings"
"sync"
+ "sync/atomic"
"time"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/journal"
"github.com/syndtr/goleveldb/leveldb/memdb"
@@ -30,41 +32,46 @@ type DB struct {
// Need 64-bit alignment.
seq uint64
+ // Session.
s *session
- // MemDB
+ // MemDB.
memMu sync.RWMutex
- mem *memdb.DB
- frozenMem *memdb.DB
+ memPool chan *memdb.DB
+ mem, frozenMem *memDB
journal *journal.Writer
journalWriter storage.Writer
journalFile storage.File
frozenJournalFile storage.File
frozenSeq uint64
- // Snapshot
+ // Snapshot.
snapsMu sync.Mutex
- snapsRoot snapshotElement
+ snapsList *list.List
- // Write
+ // Stats.
+ aliveSnaps, aliveIters int32
+
+ // Write.
writeC chan *Batch
writeMergedC chan bool
writeLockC chan struct{}
writeAckC chan error
+ writeDelay time.Duration
+ writeDelayN int
journalC chan *Batch
journalAckC chan error
- // Compaction
- tcompCmdC chan cCmd
- tcompPauseC chan chan<- struct{}
- tcompTriggerC chan struct{}
- mcompCmdC chan cCmd
- mcompTriggerC chan struct{}
- compErrC chan error
- compErrSetC chan error
- compStats [kNumLevels]cStats
-
- // Close
+ // Compaction.
+ tcompCmdC chan cCmd
+ tcompPauseC chan chan<- struct{}
+ mcompCmdC chan cCmd
+ compErrC chan error
+ compPerErrC chan error
+ compErrSetC chan error
+ compStats []cStats
+
+ // Close.
closeW sync.WaitGroup
closeC chan struct{}
closed uint32
@@ -77,7 +84,11 @@ func openDB(s *session) (*DB, error) {
db := &DB{
s: s,
// Initial sequence
- seq: s.stSeq,
+ seq: s.stSeqNum,
+ // MemDB
+ memPool: make(chan *memdb.DB, 1),
+ // Snapshot
+ snapsList: list.New(),
// Write
writeC: make(chan *Batch),
writeMergedC: make(chan bool),
@@ -86,17 +97,16 @@ func openDB(s *session) (*DB, error) {
journalC: make(chan *Batch),
journalAckC: make(chan error),
// Compaction
- tcompCmdC: make(chan cCmd),
- tcompPauseC: make(chan chan<- struct{}),
- tcompTriggerC: make(chan struct{}, 1),
- mcompCmdC: make(chan cCmd),
- mcompTriggerC: make(chan struct{}, 1),
- compErrC: make(chan error),
- compErrSetC: make(chan error),
+ tcompCmdC: make(chan cCmd),
+ tcompPauseC: make(chan chan<- struct{}),
+ mcompCmdC: make(chan cCmd),
+ compErrC: make(chan error),
+ compPerErrC: make(chan error),
+ compErrSetC: make(chan error),
+ compStats: make([]cStats, s.o.GetNumLevel()),
// Close
closeC: make(chan struct{}),
}
- db.initSnapshot()
if err := db.recoverJournal(); err != nil {
return nil, err
@@ -112,8 +122,9 @@ func openDB(s *session) (*DB, error) {
return nil, err
}
- // Don't include compaction error goroutine into wait group.
+ // Doesn't need to be included in the wait group.
go db.compactionError()
+ go db.mpoolDrain()
db.closeW.Add(3)
go db.tCompaction()
@@ -135,9 +146,10 @@ func openDB(s *session) (*DB, error) {
// detected in the DB. Corrupted DB can be recovered with Recover
// function.
//
+// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
-func Open(p storage.Storage, o *opt.Options) (db *DB, err error) {
- s, err := newSession(p, o)
+func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+ s, err := newSession(stor, o)
if err != nil {
return
}
@@ -177,6 +189,7 @@ func Open(p storage.Storage, o *opt.Options) (db *DB, err error) {
// detected in the DB. Corrupted DB can be recovered with Recover
// function.
//
+// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
func OpenFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path)
@@ -197,9 +210,10 @@ func OpenFile(path string, o *opt.Options) (db *DB, err error) {
// The DB must already exist or it will return an error.
// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
//
+// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
-func Recover(p storage.Storage, o *opt.Options) (db *DB, err error) {
- s, err := newSession(p, o)
+func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+ s, err := newSession(stor, o)
if err != nil {
return
}
@@ -225,6 +239,7 @@ func Recover(p storage.Storage, o *opt.Options) (db *DB, err error) {
// RecoverFile uses standard file-system backed storage implementation as described
// in the leveldb/storage package.
//
+// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path)
@@ -241,16 +256,28 @@ func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
}
func recoverTable(s *session, o *opt.Options) error {
- ff0, err := s.getFiles(storage.TypeTable)
+ o = dupOptions(o)
+	// Mask StrictReader, let StrictRecovery do its job.
+ o.Strict &= ^opt.StrictReader
+
+	// Get all tables and sort them by file number.
+ tableFiles_, err := s.getFiles(storage.TypeTable)
if err != nil {
return err
}
- ff1 := files(ff0)
- ff1.sort()
+ tableFiles := files(tableFiles_)
+ tableFiles.sort()
- var mSeq uint64
- var good, corrupted int
- rec := new(sessionRecord)
+ var (
+ maxSeq uint64
+ recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int
+
+		// We will drop corrupted tables.
+ strict = o.GetStrict(opt.StrictRecovery)
+
+ rec = &sessionRecord{numLevel: o.GetNumLevel()}
+ bpool = util.NewBufferPool(o.GetBlockSize() + 5)
+ )
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
tmp = s.newTemp()
writer, err := tmp.Create()
@@ -264,8 +291,9 @@ func recoverTable(s *session, o *opt.Options) error {
tmp = nil
}
}()
+
+ // Copy entries.
tw := table.NewWriter(writer, o)
- // Copy records.
for iter.Next() {
key := iter.Key()
if validIkey(key) {
@@ -296,45 +324,73 @@ func recoverTable(s *session, o *opt.Options) error {
if err != nil {
return err
}
- defer reader.Close()
+ var closed bool
+ defer func() {
+ if !closed {
+ reader.Close()
+ }
+ }()
+
// Get file size.
size, err := reader.Seek(0, 2)
if err != nil {
return err
}
- var tSeq uint64
- var tgood, tcorrupted, blockerr int
- var min, max []byte
- tr := table.NewReader(reader, size, nil, o)
+
+ var (
+ tSeq uint64
+ tgoodKey, tcorruptedKey, tcorruptedBlock int
+ imin, imax []byte
+ )
+ tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o)
+ if err != nil {
+ return err
+ }
iter := tr.NewIterator(nil, nil)
- iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
- s.logf("table@recovery found error @%d %q", file.Num(), err)
- blockerr++
- })
+ if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
+ itererr.SetErrorCallback(func(err error) {
+ if errors.IsCorrupted(err) {
+ s.logf("table@recovery block corruption @%d %q", file.Num(), err)
+ tcorruptedBlock++
+ }
+ })
+ }
+
// Scan the table.
for iter.Next() {
key := iter.Key()
- _, seq, _, ok := parseIkey(key)
- if !ok {
- tcorrupted++
+ _, seq, _, kerr := parseIkey(key)
+ if kerr != nil {
+ tcorruptedKey++
continue
}
- tgood++
+ tgoodKey++
if seq > tSeq {
tSeq = seq
}
- if min == nil {
- min = append([]byte{}, key...)
+ if imin == nil {
+ imin = append([]byte{}, key...)
}
- max = append(max[:0], key...)
+ imax = append(imax[:0], key...)
}
if err := iter.Error(); err != nil {
iter.Release()
return err
}
iter.Release()
- if tgood > 0 {
- if tcorrupted > 0 || blockerr > 0 {
+
+ goodKey += tgoodKey
+ corruptedKey += tcorruptedKey
+ corruptedBlock += tcorruptedBlock
+
+ if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
+ droppedTable++
+ s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
+ return nil
+ }
+
+ if tgoodKey > 0 {
+ if tcorruptedKey > 0 || tcorruptedBlock > 0 {
// Rebuild the table.
s.logf("table@recovery rebuilding @%d", file.Num())
iter := tr.NewIterator(nil, nil)
@@ -343,62 +399,77 @@ func recoverTable(s *session, o *opt.Options) error {
if err != nil {
return err
}
+ closed = true
reader.Close()
if err := file.Replace(tmp); err != nil {
return err
}
size = newSize
}
- if tSeq > mSeq {
- mSeq = tSeq
+ if tSeq > maxSeq {
+ maxSeq = tSeq
}
+ recoveredKey += tgoodKey
// Add table to level 0.
- rec.addTable(0, file.Num(), uint64(size), min, max)
- s.logf("table@recovery recovered @%d N·%d C·%d B·%d S·%d Q·%d", file.Num(), tgood, tcorrupted, blockerr, size, tSeq)
+ rec.addTable(0, file.Num(), uint64(size), imin, imax)
+ s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
} else {
- s.logf("table@recovery unrecoverable @%d C·%d B·%d S·%d", file.Num(), tcorrupted, blockerr, size)
+ droppedTable++
+ s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size)
}
- good += tgood
- corrupted += tcorrupted
-
return nil
}
+
// Recover all tables.
- if len(ff1) > 0 {
- s.logf("table@recovery F·%d", len(ff1))
- s.markFileNum(ff1[len(ff1)-1].Num())
- for _, file := range ff1 {
+ if len(tableFiles) > 0 {
+ s.logf("table@recovery F·%d", len(tableFiles))
+
+ // Mark file number as used.
+ s.markFileNum(tableFiles[len(tableFiles)-1].Num())
+
+ for _, file := range tableFiles {
if err := recoverTable(file); err != nil {
return err
}
}
- s.logf("table@recovery recovered F·%d N·%d C·%d Q·%d", len(ff1), good, corrupted, mSeq)
+
+ s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq)
}
+
// Set sequence number.
- rec.setSeq(mSeq + 1)
+ rec.setSeqNum(maxSeq)
+
// Create new manifest.
if err := s.create(); err != nil {
return err
}
+
// Commit.
return s.commit(rec)
}
-func (d *DB) recoverJournal() error {
- s := d.s
-
- ff0, err := s.getFiles(storage.TypeJournal)
+func (db *DB) recoverJournal() error {
+	// Get all journal files and sort them by file number.
+ journalFiles_, err := db.s.getFiles(storage.TypeJournal)
if err != nil {
return err
}
- ff1 := files(ff0)
- ff1.sort()
- ff2 := make([]storage.File, 0, len(ff1))
- for _, file := range ff1 {
- if file.Num() >= s.stJournalNum || file.Num() == s.stPrevJournalNum {
- s.markFileNum(file.Num())
- ff2 = append(ff2, file)
+ journalFiles := files(journalFiles_)
+ journalFiles.sort()
+
+ // Discard older journal.
+ prev := -1
+ for i, file := range journalFiles {
+ if file.Num() >= db.s.stJournalNum {
+ if prev >= 0 {
+ i--
+ journalFiles[i] = journalFiles[prev]
+ }
+ journalFiles = journalFiles[i:]
+ break
+ } else if file.Num() == db.s.stPrevJournalNum {
+ prev = i
}
}
@@ -406,38 +477,43 @@ func (d *DB) recoverJournal() error {
var of storage.File
var mem *memdb.DB
batch := new(Batch)
- cm := newCMem(s)
+ cm := newCMem(db.s)
buf := new(util.Buffer)
// Options.
- strict := s.o.GetStrict(opt.StrictJournal)
- checksum := s.o.GetStrict(opt.StrictJournalChecksum)
- writeBuffer := s.o.GetWriteBuffer()
+ strict := db.s.o.GetStrict(opt.StrictJournal)
+ checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
+ writeBuffer := db.s.o.GetWriteBuffer()
recoverJournal := func(file storage.File) error {
- s.logf("journal@recovery recovering @%d", file.Num())
+ db.logf("journal@recovery recovering @%d", file.Num())
reader, err := file.Open()
if err != nil {
return err
}
defer reader.Close()
+
+ // Create/reset journal reader instance.
if jr == nil {
- jr = journal.NewReader(reader, dropper{s, file}, strict, checksum)
+ jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
} else {
- jr.Reset(reader, dropper{s, file}, strict, checksum)
+ jr.Reset(reader, dropper{db.s, file}, strict, checksum)
}
+
+ // Flush memdb and remove obsolete journal file.
if of != nil {
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
return err
}
}
- if err := cm.commit(file.Num(), d.seq); err != nil {
+ if err := cm.commit(file.Num(), db.seq); err != nil {
return err
}
cm.reset()
of.Remove()
of = nil
}
- // Reset memdb.
+
+ // Replay journal to memdb.
mem.Reset()
for {
r, err := jr.Next()
@@ -445,43 +521,58 @@ func (d *DB) recoverJournal() error {
if err == io.EOF {
break
}
- return err
+ return errors.SetFile(err, file)
}
+
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
- if strict {
- return err
+ if err == io.ErrUnexpectedEOF {
+					// This is the error returned due to corruption when strict == false.
+ continue
+ } else {
+ return errors.SetFile(err, file)
}
- continue
}
- if err := batch.decode(buf.Bytes()); err != nil {
- return err
- }
- if err := batch.memReplay(mem); err != nil {
- return err
+ if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
+ if strict || !errors.IsCorrupted(err) {
+ return errors.SetFile(err, file)
+ } else {
+ db.s.logf("journal error: %v (skipped)", err)
+ // We won't apply sequence number as it might be corrupted.
+ continue
+ }
}
- d.seq = batch.seq + uint64(batch.len())
+
+ // Save sequence number.
+ db.seq = batch.seq + uint64(batch.Len())
+
+ // Flush it if large enough.
if mem.Size() >= writeBuffer {
- // Large enough, flush it.
if err := cm.flush(mem, 0); err != nil {
return err
}
- // Reset memdb.
mem.Reset()
}
}
+
of = file
return nil
}
+
// Recover all journals.
- if len(ff2) > 0 {
- s.logf("journal@recovery F·%d", len(ff2))
- mem = memdb.New(s.icmp, writeBuffer)
- for _, file := range ff2 {
+ if len(journalFiles) > 0 {
+ db.logf("journal@recovery F·%d", len(journalFiles))
+
+ // Mark file number as used.
+ db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
+
+ mem = memdb.New(db.s.icmp, writeBuffer)
+ for _, file := range journalFiles {
if err := recoverJournal(file); err != nil {
return err
}
}
+
// Flush the last journal.
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
@@ -489,72 +580,140 @@ func (d *DB) recoverJournal() error {
}
}
}
+
// Create a new journal.
- if _, err := d.newMem(0); err != nil {
+ if _, err := db.newMem(0); err != nil {
return err
}
+
// Commit.
- if err := cm.commit(d.journalFile.Num(), d.seq); err != nil {
+ if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
// Close journal.
- if d.journal != nil {
- d.journal.Close()
- d.journalWriter.Close()
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
}
return err
}
- // Remove the last journal.
+
+ // Remove the last obsolete journal file.
if of != nil {
of.Remove()
}
+
return nil
}
-func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
- s := d.s
+func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
+ ikey := newIkey(key, seq, ktSeek)
- ikey := newIKey(key, seq, tSeek)
-
- em, fm := d.getMems()
- for _, m := range [...]*memdb.DB{em, fm} {
+ em, fm := db.getMems()
+ for _, m := range [...]*memDB{em, fm} {
if m == nil {
continue
}
- mk, mv, me := m.Find(ikey)
+ defer m.decref()
+
+ mk, mv, me := m.mdb.Find(ikey)
if me == nil {
- ukey, _, t, ok := parseIkey(mk)
- if ok && s.icmp.uCompare(ukey, key) == 0 {
- if t == tDel {
+ ukey, _, kt, kerr := parseIkey(mk)
+ if kerr != nil {
+				// Shouldn't have happened.
+ panic(kerr)
+ }
+ if db.s.icmp.uCompare(ukey, key) == 0 {
+ if kt == ktDel {
return nil, ErrNotFound
}
- return mv, nil
+ return append([]byte{}, mv...), nil
}
} else if me != ErrNotFound {
return nil, me
}
}
- v := s.version()
- value, cSched, err := v.get(ikey, ro)
+ v := db.s.version()
+ value, cSched, err := v.get(ikey, ro, false)
v.release()
if cSched {
// Trigger table compaction.
- d.compTrigger(d.tcompTriggerC)
+ db.compSendTrigger(db.tcompCmdC)
+ }
+ return
+}
+
+func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
+ ikey := newIkey(key, seq, ktSeek)
+
+ em, fm := db.getMems()
+ for _, m := range [...]*memDB{em, fm} {
+ if m == nil {
+ continue
+ }
+ defer m.decref()
+
+ mk, _, me := m.mdb.Find(ikey)
+ if me == nil {
+ ukey, _, kt, kerr := parseIkey(mk)
+ if kerr != nil {
+				// Shouldn't have happened.
+ panic(kerr)
+ }
+ if db.s.icmp.uCompare(ukey, key) == 0 {
+ if kt == ktDel {
+ return false, nil
+ }
+ return true, nil
+ }
+ } else if me != ErrNotFound {
+ return false, me
+ }
+ }
+
+ v := db.s.version()
+ _, cSched, err := v.get(ikey, ro, true)
+ v.release()
+ if cSched {
+ // Trigger table compaction.
+ db.compSendTrigger(db.tcompCmdC)
+ }
+ if err == nil {
+ ret = true
+ } else if err == ErrNotFound {
+ err = nil
}
return
}
// Get gets the value for the given key. It returns ErrNotFound if the
-// DB does not contain the key.
+// DB does not contain the key.
//
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Get returns.
-func (d *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
- err = d.ok()
+// The returned slice is its own copy; it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = db.ok()
if err != nil {
return
}
- return d.get(key, d.getSeq(), ro)
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.get(key, se.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.has(key, se.seq, ro)
}
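+
+// exampleGetHas is a hypothetical usage sketch (illustrative only, assuming
+// an opened *DB): Get hands back a private copy of the value or ErrNotFound,
+// while Has only reports whether the key is present.
+func exampleGetHas(db *DB, key []byte) ([]byte, bool, error) {
+	value, err := db.Get(key, nil)
+	if err == ErrNotFound {
+		return nil, false, nil
+	} else if err != nil {
+		return nil, false, err
+	}
+	ok, err := db.Has(key, nil)
+	return value, ok, err
+}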
// NewIterator returns an iterator for the latest snapshot of the
@@ -573,14 +732,16 @@ func (d *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
// The iterator must be released after use, by calling Release method.
//
// Also read Iterator documentation of the leveldb/iterator package.
-func (d *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
- if err := d.ok(); err != nil {
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := db.ok(); err != nil {
return iterator.NewEmptyIterator(err)
}
- p := d.newSnapshot()
- defer p.Release()
- return p.NewIterator(slice, ro)
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+	// Iterator holds the 'version' lock; 'version' is immutable, so the
+	// snapshot can be released after the iterator is created.
+ return db.newIterator(se.seq, slice, ro)
}
// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
@@ -588,25 +749,35 @@ func (d *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterat
// content of snapshot are guaranteed to be consistent.
//
// The snapshot must be released after use, by calling Release method.
-func (d *DB) GetSnapshot() (*Snapshot, error) {
- if err := d.ok(); err != nil {
+func (db *DB) GetSnapshot() (*Snapshot, error) {
+ if err := db.ok(); err != nil {
return nil, err
}
- return d.newSnapshot(), nil
+ return db.newSnapshot(), nil
}
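A sketch of the snapshot flow (assuming the leveldb package is imported; the key is hypothetical): reads through the snapshot see the sequence number captured at GetSnapshot time, and Release must be called as documented above.

func readAtSnapshot(db *leveldb.DB, key []byte) ([]byte, error) {
	snap, err := db.GetSnapshot()
	if err != nil {
		return nil, err
	}
	defer snap.Release()
	return snap.Get(key, nil) // sees only writes that happened before GetSnapshot
}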
// GetProperty returns value of the given property name.
//
// Property names:
// leveldb.num-files-at-level{n}
-// Returns the number of filer at level 'n'.
+// Returns the number of files at level 'n'.
// leveldb.stats
// Returns statistics of the underlying DB.
// leveldb.sstables
// Returns sstables list for each level.
-func (d *DB) GetProperty(name string) (value string, err error) {
- err = d.ok()
+// leveldb.blockpool
+// Returns block pool stats.
+// leveldb.cachedblock
+// Returns size of cached block.
+// leveldb.openedtables
+// Returns number of opened tables.
+// leveldb.alivesnaps
+// Returns number of alive snapshots.
+// leveldb.aliveiters
+// Returns number of alive iterators.
+func (db *DB) GetProperty(name string) (value string, err error) {
+ err = db.ok()
if err != nil {
return
}
@@ -615,19 +786,18 @@ func (d *DB) GetProperty(name string) (value string, err error) {
if !strings.HasPrefix(name, prefix) {
return "", errors.New("leveldb: GetProperty: unknown property: " + name)
}
-
p := name[len(prefix):]
- s := d.s
- v := s.version()
+ v := db.s.version()
defer v.release()
+ numFilesPrefix := "num-files-at-level"
switch {
- case strings.HasPrefix(p, "num-files-at-level"):
+ case strings.HasPrefix(p, numFilesPrefix):
var level uint
var rest string
- n, _ := fmt.Scanf("%d%s", &level, &rest)
- if n != 1 || level >= kNumLevels {
+ n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
+ if n != 1 || int(level) >= db.s.o.GetNumLevel() {
err = errors.New("leveldb: GetProperty: invalid property: " + name)
} else {
value = fmt.Sprint(v.tLen(int(level)))
@@ -636,22 +806,36 @@ func (d *DB) GetProperty(name string) (value string, err error) {
value = "Compactions\n" +
" Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
"-------+------------+---------------+---------------+---------------+---------------\n"
- for level, tt := range v.tables {
- duration, read, write := d.compStats[level].get()
- if len(tt) == 0 && duration == 0 {
+ for level, tables := range v.tables {
+ duration, read, write := db.compStats[level].get()
+ if len(tables) == 0 && duration == 0 {
continue
}
value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
- level, len(tt), float64(tt.size())/1048576.0, duration.Seconds(),
+ level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
float64(read)/1048576.0, float64(write)/1048576.0)
}
case p == "sstables":
- for level, tt := range v.tables {
+ for level, tables := range v.tables {
value += fmt.Sprintf("--- level %d ---\n", level)
- for _, t := range tt {
- value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.min, t.max)
+ for _, t := range tables {
+ value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
}
}
+ case p == "blockpool":
+ value = fmt.Sprintf("%v", db.s.tops.bpool)
+ case p == "cachedblock":
+ if db.s.tops.bcache != nil {
+ value = fmt.Sprintf("%d", db.s.tops.bcache.Size())
+ } else {
+ value = "<nil>"
+ }
+ case p == "openedtables":
+ value = fmt.Sprintf("%d", db.s.tops.cache.Size())
+ case p == "alivesnaps":
+ value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
+ case p == "aliveiters":
+ value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
default:
err = errors.New("leveldb: GetProperty: unknown property: " + name)
}
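A small sketch querying a few of the property names documented above (assuming the leveldb and fmt packages are imported); unknown names return an error, as the default case shows.

func dumpProperties(db *leveldb.DB) {
	for _, name := range []string{
		"leveldb.stats",
		"leveldb.sstables",
		"leveldb.num-files-at-level0",
		"leveldb.aliveiters",
	} {
		if v, err := db.GetProperty(name); err == nil {
			fmt.Printf("%s:\n%s\n", name, v)
		} else {
			fmt.Println(name, err)
		}
	}
}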
@@ -665,23 +849,23 @@ func (d *DB) GetProperty(name string) (value string, err error) {
// data compresses by a factor of ten, the returned sizes will be one-tenth
// the size of the corresponding user data size.
// The results may not include the sizes of recently written data.
-func (d *DB) SizeOf(ranges []util.Range) (Sizes, error) {
- if err := d.ok(); err != nil {
+func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
+ if err := db.ok(); err != nil {
return nil, err
}
- v := d.s.version()
+ v := db.s.version()
defer v.release()
sizes := make(Sizes, 0, len(ranges))
for _, r := range ranges {
- min := newIKey(r.Start, kMaxSeq, tSeek)
- max := newIKey(r.Limit, kMaxSeq, tSeek)
- start, err := v.offsetOf(min)
+ imin := newIkey(r.Start, kMaxSeq, ktSeek)
+ imax := newIkey(r.Limit, kMaxSeq, ktSeek)
+ start, err := v.offsetOf(imin)
if err != nil {
return nil, err
}
- limit, err := v.offsetOf(max)
+ limit, err := v.offsetOf(imax)
if err != nil {
return nil, err
}
@@ -695,61 +879,67 @@ func (d *DB) SizeOf(ranges []util.Range) (Sizes, error) {
return sizes, nil
}
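A sketch of SizeOf (assuming the leveldb and leveldb/util packages are imported; the key bounds are made up). Per the comment above, the result approximates compressed on-disk size, so it can be far smaller than the user data it covers.

func approxSizes(db *leveldb.DB) (uint64, error) {
	sizes, err := db.SizeOf([]util.Range{
		{Start: []byte("a"), Limit: []byte("m")},
		{Start: []byte("m"), Limit: []byte("z")},
	})
	if err != nil {
		return 0, err
	}
	return sizes.Sum(), nil // total of the per-range estimates
}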
-// Close closes the DB. This will also releases any outstanding snapshot.
+// Close closes the DB. This will also release any outstanding snapshot and
+// abort any in-flight compaction.
//
// It is not safe to close a DB until all outstanding iterators are released.
// It is valid to call Close multiple times. Other methods should not be
// called after the DB has been closed.
-func (d *DB) Close() error {
- if !d.setClosed() {
+func (db *DB) Close() error {
+ if !db.setClosed() {
return ErrClosed
}
- s := d.s
start := time.Now()
- s.log("db@close closing")
+ db.log("db@close closing")
// Clear the finalizer.
- runtime.SetFinalizer(d, nil)
+ runtime.SetFinalizer(db, nil)
// Get compaction error.
var err error
select {
- case err = <-d.compErrC:
+ case err = <-db.compErrC:
default:
}
- close(d.closeC)
+ // Signal all goroutines.
+ close(db.closeC)
- // Wait for the close WaitGroup.
- d.closeW.Wait()
+ // Wait for all goroutines to exit.
+ db.closeW.Wait()
+
+ // Lock the writer and close the journal.
+ db.writeLockC <- struct{}{}
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ }
- // Close journal.
- if d.journal != nil {
- d.journal.Close()
- d.journalWriter.Close()
+ if db.writeDelayN > 0 {
+ db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
}
// Close session.
- s.close()
- s.logf("db@close done T·%v", time.Since(start))
- s.release()
+ db.s.close()
+ db.logf("db@close done T·%v", time.Since(start))
+ db.s.release()
- if d.closer != nil {
- if err1 := d.closer.Close(); err == nil {
+ if db.closer != nil {
+ if err1 := db.closer.Close(); err == nil {
err = err1
}
}
- d.s = nil
- d.mem = nil
- d.frozenMem = nil
- d.journal = nil
- d.journalWriter = nil
- d.journalFile = nil
- d.frozenJournalFile = nil
- d.snapsRoot = snapshotElement{}
- d.closer = nil
+ // Nil out the pointers.
+ db.s = nil
+ db.mem = nil
+ db.frozenMem = nil
+ db.journal = nil
+ db.journalWriter = nil
+ db.journalFile = nil
+ db.frozenJournalFile = nil
+ db.closer = nil
return err
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
index c82bd9f28..447407aba 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -7,11 +7,12 @@
package leveldb
import (
- "errors"
"sync"
"time"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
)
var (
@@ -68,13 +69,13 @@ type cMem struct {
}
func newCMem(s *session) *cMem {
- return &cMem{s: s, rec: new(sessionRecord)}
+ return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}}
}
func (c *cMem) flush(mem *memdb.DB, level int) error {
s := c.s
- // Write memdb to table
+ // Write memdb to table.
iter := mem.NewIterator(nil)
defer iter.Release()
t, n, err := s.tops.createFrom(iter)
@@ -82,51 +83,85 @@ func (c *cMem) flush(mem *memdb.DB, level int) error {
return err
}
+ // Pick level.
if level < 0 {
- level = s.version_NB().pickLevel(t.min.ukey(), t.max.ukey())
+ v := s.version()
+ level = v.pickLevel(t.imin.ukey(), t.imax.ukey())
+ v.release()
}
c.rec.addTableFile(level, t)
- s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.min, t.max)
+ s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
c.level = level
return nil
}
func (c *cMem) reset() {
- c.rec = new(sessionRecord)
+ c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()}
}
func (c *cMem) commit(journal, seq uint64) error {
c.rec.setJournalNum(journal)
- c.rec.setSeq(seq)
- // Commit changes
+ c.rec.setSeqNum(seq)
+
+ // Commit changes.
return c.s.commit(c.rec)
}
-func (d *DB) compactionError() {
- var err error
+func (db *DB) compactionError() {
+ var (
+ err error
+ wlocked bool
+ )
noerr:
+ // No error.
for {
select {
- case _, _ = <-d.closeC:
- return
- case err = <-d.compErrSetC:
- if err != nil {
+ case err = <-db.compErrSetC:
+ switch {
+ case err == nil:
+ case errors.IsCorrupted(err):
+ goto hasperr
+ default:
goto haserr
}
+ case _, _ = <-db.closeC:
+ return
}
}
haserr:
+ // Transient error.
for {
select {
- case _, _ = <-d.closeC:
- return
- case err = <-d.compErrSetC:
- if err == nil {
+ case db.compErrC <- err:
+ case err = <-db.compErrSetC:
+ switch {
+ case err == nil:
goto noerr
+ case errors.IsCorrupted(err):
+ goto hasperr
+ default:
}
- case d.compErrC <- err:
+ case _, _ = <-db.closeC:
+ return
+ }
+ }
+hasperr:
+ // Persistent error.
+ for {
+ select {
+ case db.compErrC <- err:
+ case db.compPerErrC <- err:
+ case db.writeLockC <- struct{}{}:
+ // Hold the write lock, so that writes won't pass through.
+ wlocked = true
+ case _, _ = <-db.closeC:
+ if wlocked {
+ // We should release the lock or Close will hang.
+ <-db.writeLockC
+ }
+ return
}
}
}
@@ -137,114 +172,159 @@ func (cnt *compactionTransactCounter) incr() {
*cnt++
}
-func (d *DB) compactionTransact(name string, exec func(cnt *compactionTransactCounter) error, rollback func() error) {
- s := d.s
+type compactionTransactInterface interface {
+ run(cnt *compactionTransactCounter) error
+ revert() error
+}
+
+func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
defer func() {
if x := recover(); x != nil {
- if x == errCompactionTransactExiting && rollback != nil {
- if err := rollback(); err != nil {
- s.logf("%s rollback error %q", name, err)
+ if x == errCompactionTransactExiting {
+ if err := t.revert(); err != nil {
+ db.logf("%s revert error %q", name, err)
}
}
panic(x)
}
}()
+
const (
backoffMin = 1 * time.Second
backoffMax = 8 * time.Second
backoffMul = 2 * time.Second
)
- backoff := backoffMin
- backoffT := time.NewTimer(backoff)
- lastCnt := compactionTransactCounter(0)
+ var (
+ backoff = backoffMin
+ backoffT = time.NewTimer(backoff)
+ lastCnt = compactionTransactCounter(0)
+
+ disableBackoff = db.s.o.GetDisableCompactionBackoff()
+ )
for n := 0; ; n++ {
// Check whether the DB is closed.
- if d.isClosed() {
- s.logf("%s exiting", name)
- d.compactionExitTransact()
+ if db.isClosed() {
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
} else if n > 0 {
- s.logf("%s retrying N·%d", name, n)
+ db.logf("%s retrying N·%d", name, n)
}
// Execute.
cnt := compactionTransactCounter(0)
- err := exec(&cnt)
+ err := t.run(&cnt)
+ if err != nil {
+ db.logf("%s error I·%d %q", name, cnt, err)
+ }
// Set compaction error status.
select {
- case d.compErrSetC <- err:
- case _, _ = <-d.closeC:
- s.logf("%s exiting", name)
- d.compactionExitTransact()
+ case db.compErrSetC <- err:
+ case perr := <-db.compPerErrC:
+ if err != nil {
+ db.logf("%s exiting (persistent error %q)", name, perr)
+ db.compactionExitTransact()
+ }
+ case _, _ = <-db.closeC:
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
}
if err == nil {
return
}
- s.logf("%s error I·%d %q", name, cnt, err)
-
- // Reset backoff duration if counter is advancing.
- if cnt > lastCnt {
- backoff = backoffMin
- lastCnt = cnt
+ if errors.IsCorrupted(err) {
+ db.logf("%s exiting (corruption detected)", name)
+ db.compactionExitTransact()
}
- // Backoff.
- backoffT.Reset(backoff)
- if backoff < backoffMax {
- backoff *= backoffMul
- if backoff > backoffMax {
- backoff = backoffMax
+ if !disableBackoff {
+ // Reset backoff duration if counter is advancing.
+ if cnt > lastCnt {
+ backoff = backoffMin
+ lastCnt = cnt
+ }
+
+ // Backoff.
+ backoffT.Reset(backoff)
+ if backoff < backoffMax {
+ backoff *= backoffMul
+ if backoff > backoffMax {
+ backoff = backoffMax
+ }
+ }
+ select {
+ case <-backoffT.C:
+ case _, _ = <-db.closeC:
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
}
}
- select {
- case <-backoffT.C:
- case _, _ = <-d.closeC:
- s.logf("%s exiting", name)
- d.compactionExitTransact()
- }
}
}
-func (d *DB) compactionExitTransact() {
+type compactionTransactFunc struct {
+ runFunc func(cnt *compactionTransactCounter) error
+ revertFunc func() error
+}
+
+func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
+ return t.runFunc(cnt)
+}
+
+func (t *compactionTransactFunc) revert() error {
+ if t.revertFunc != nil {
+ return t.revertFunc()
+ }
+ return nil
+}
+
+func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
+ db.compactionTransact(name, &compactionTransactFunc{run, revert})
+}
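compactionTransactFunc above uses the common Go idiom of wrapping plain closures in a small type so they satisfy an interface (here compactionTransactInterface). A standalone sketch of that idiom with made-up names:

package main

import "fmt"

type transact interface {
	run() error
	revert() error
}

// transactFunc packs two closures so callers can pass plain functions
// wherever a transact is expected; revert is optional.
type transactFunc struct {
	runFunc    func() error
	revertFunc func() error
}

func (t *transactFunc) run() error { return t.runFunc() }

func (t *transactFunc) revert() error {
	if t.revertFunc == nil {
		return nil
	}
	return t.revertFunc()
}

func main() {
	var t transact = &transactFunc{
		runFunc: func() error { return fmt.Errorf("boom") },
	}
	if err := t.run(); err != nil {
		fmt.Println("run failed, revert:", t.revert())
	}
}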
+
+func (db *DB) compactionExitTransact() {
panic(errCompactionTransactExiting)
}
-func (d *DB) memCompaction() {
- mem := d.getFrozenMem()
+func (db *DB) memCompaction() {
+ mem := db.getFrozenMem()
if mem == nil {
return
}
+ defer mem.decref()
- s := d.s
- c := newCMem(s)
+ c := newCMem(db.s)
stats := new(cStatsStaging)
- s.logf("mem@flush N·%d S·%s", mem.Len(), shortenb(mem.Size()))
+ db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
// Don't compact empty memdb.
- if mem.Len() == 0 {
- s.logf("mem@flush skipping")
+ if mem.mdb.Len() == 0 {
+ db.logf("mem@flush skipping")
// drop frozen mem
- d.dropFrozenMem()
+ db.dropFrozenMem()
return
}
// Pause table compaction.
- ch := make(chan struct{})
+ resumeC := make(chan struct{})
select {
- case d.tcompPauseC <- (chan<- struct{})(ch):
- case _, _ = <-d.closeC:
+ case db.tcompPauseC <- (chan<- struct{})(resumeC):
+ case <-db.compPerErrC:
+ close(resumeC)
+ resumeC = nil
+ case _, _ = <-db.closeC:
return
}
- d.compactionTransact("mem@flush", func(cnt *compactionTransactCounter) (err error) {
+ db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
defer stats.stopTimer()
- return c.flush(mem, -1)
+ return c.flush(mem.mdb, -1)
}, func() error {
for _, r := range c.rec.addedTables {
- s.logf("mem@flush rollback @%d", r.num)
- f := s.getTableFile(r.num)
+ db.logf("mem@flush revert @%d", r.num)
+ f := db.s.getTableFile(r.num)
if err := f.Remove(); err != nil {
return err
}
@@ -252,279 +332,327 @@ func (d *DB) memCompaction() {
return nil
})
- d.compactionTransact("mem@commit", func(cnt *compactionTransactCounter) (err error) {
+ db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
defer stats.stopTimer()
- return c.commit(d.journalFile.Num(), d.frozenSeq)
+ return c.commit(db.journalFile.Num(), db.frozenSeq)
}, nil)
- s.logf("mem@flush commited F·%d T·%v", len(c.rec.addedTables), stats.duration)
+ db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration)
for _, r := range c.rec.addedTables {
stats.write += r.size
}
- d.compStats[c.level].add(stats)
+ db.compStats[c.level].add(stats)
// Drop frozen mem.
- d.dropFrozenMem()
+ db.dropFrozenMem()
// Resume table compaction.
- select {
- case <-ch:
- case _, _ = <-d.closeC:
- return
+ if resumeC != nil {
+ select {
+ case <-resumeC:
+ close(resumeC)
+ case _, _ = <-db.closeC:
+ return
+ }
}
// Trigger table compaction.
- d.compTrigger(d.mcompTriggerC)
+ db.compSendTrigger(db.tcompCmdC)
}
-func (d *DB) tableCompaction(c *compaction, noTrivial bool) {
- s := d.s
+type tableCompactionBuilder struct {
+ db *DB
+ s *session
+ c *compaction
+ rec *sessionRecord
+ stat0, stat1 *cStatsStaging
- rec := new(sessionRecord)
- rec.addCompactionPointer(c.level, c.max)
+ snapHasLastUkey bool
+ snapLastUkey []byte
+ snapLastSeq uint64
+ snapIter int
+ snapKerrCnt int
+ snapDropCnt int
- if !noTrivial && c.trivial() {
- t := c.tables[0][0]
- s.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
- rec.deleteTable(c.level, t.file.Num())
- rec.addTableFile(c.level+1, t)
- d.compactionTransact("table@move", func(cnt *compactionTransactCounter) (err error) {
- return s.commit(rec)
- }, nil)
- return
- }
+ kerrCnt int
+ dropCnt int
- var stats [2]cStatsStaging
- for i, tt := range c.tables {
- for _, t := range tt {
- stats[i].read += t.size
- // Insert deleted tables into record
- rec.deleteTable(c.level+i, t.file.Num())
- }
- }
- sourceSize := int(stats[0].read + stats[1].read)
- minSeq := d.minSeq()
- s.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)
-
- var snapUkey []byte
- var snapHasUkey bool
- var snapSeq uint64
- var snapIter int
- var snapDropCnt int
- var dropCnt int
- d.compactionTransact("table@build", func(cnt *compactionTransactCounter) (err error) {
- ukey := append([]byte{}, snapUkey...)
- hasUkey := snapHasUkey
- lseq := snapSeq
- dropCnt = snapDropCnt
- snapSched := snapIter == 0
-
- var tw *tWriter
- finish := func() error {
- t, err := tw.finish()
- if err != nil {
- return err
+ minSeq uint64
+ strict bool
+ tableSize int
+
+ tw *tWriter
+}
+
+func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
+ // Create new table if not already.
+ if b.tw == nil {
+ // Check for pause event.
+ if b.db != nil {
+ select {
+ case ch := <-b.db.tcompPauseC:
+ b.db.pauseCompaction(ch)
+ case _, _ = <-b.db.closeC:
+ b.db.compactionExitTransact()
+ default:
}
- rec.addTableFile(c.level+1, t)
- stats[1].write += t.size
- s.logf("table@build created L%d@%d N·%d S·%s %q:%q", c.level+1, t.file.Num(), tw.tw.EntriesLen(), shortenb(int(t.size)), t.min, t.max)
- return nil
}
- defer func() {
- stats[1].stopTimer()
- if tw != nil {
- tw.drop()
- tw = nil
- }
- }()
+ // Create new table.
+ var err error
+ b.tw, err = b.s.tops.create()
+ if err != nil {
+ return err
+ }
+ }
- stats[1].startTimer()
- iter := c.newIterator()
- defer iter.Release()
- for i := 0; iter.Next(); i++ {
- // Incr transact counter.
- cnt.incr()
-
- // Skip until last state.
- if i < snapIter {
- continue
- }
+ // Write key/value into table.
+ return b.tw.append(key, value)
+}
- key := iKey(iter.Key())
+func (b *tableCompactionBuilder) needFlush() bool {
+ return b.tw.tw.BytesLen() >= b.tableSize
+}
- if c.shouldStopBefore(key) && tw != nil {
- err = finish()
- if err != nil {
- return
- }
- snapSched = true
- tw = nil
- }
+func (b *tableCompactionBuilder) flush() error {
+ t, err := b.tw.finish()
+ if err != nil {
+ return err
+ }
+ b.rec.addTableFile(b.c.level+1, t)
+ b.stat1.write += t.size
+ b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
+ b.tw = nil
+ return nil
+}
- // Scheduled for snapshot, snapshot will used to retry compaction
- // if error occured.
- if snapSched {
- snapUkey = append(snapUkey[:0], ukey...)
- snapHasUkey = hasUkey
- snapSeq = lseq
- snapIter = i
- snapDropCnt = dropCnt
- snapSched = false
- }
+func (b *tableCompactionBuilder) cleanup() {
+ if b.tw != nil {
+ b.tw.drop()
+ b.tw = nil
+ }
+}
- if seq, t, ok := key.parseNum(); !ok {
- // Don't drop error keys
- ukey = ukey[:0]
- hasUkey = false
- lseq = kMaxSeq
- } else {
- if !hasUkey || s.icmp.uCompare(key.ukey(), ukey) != 0 {
- // First occurrence of this user key
- ukey = append(ukey[:0], key.ukey()...)
- hasUkey = true
- lseq = kMaxSeq
- }
+func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
+ snapResumed := b.snapIter > 0
+ hasLastUkey := b.snapHasLastUkey // The key might have zero length, so this is necessary.
+ lastUkey := append([]byte{}, b.snapLastUkey...)
+ lastSeq := b.snapLastSeq
+ b.kerrCnt = b.snapKerrCnt
+ b.dropCnt = b.snapDropCnt
+ // Restore compaction state.
+ b.c.restore()
- drop := false
- if lseq <= minSeq {
- // Dropped because newer entry for same user key exist
- drop = true // (A)
- } else if t == tDel && seq <= minSeq && c.isBaseLevelForKey(ukey) {
- // For this user key:
- // (1) there is no data in higher levels
- // (2) data in lower levels will have larger seq numbers
- // (3) data in layers that are being compacted here and have
- // smaller seq numbers will be dropped in the next
- // few iterations of this loop (by rule (A) above).
- // Therefore this deletion marker is obsolete and can be dropped.
- drop = true
- }
+ defer b.cleanup()
- lseq = seq
- if drop {
- dropCnt++
- continue
- }
- }
+ b.stat1.startTimer()
+ defer b.stat1.stopTimer()
- // Create new table if not already
- if tw == nil {
- // Check for pause event.
- select {
- case ch := <-d.tcompPauseC:
- d.pauseCompaction(ch)
- case _, _ = <-d.closeC:
- d.compactionExitTransact()
- default:
- }
+ iter := b.c.newIterator()
+ defer iter.Release()
+ for i := 0; iter.Next(); i++ {
+ // Incr transact counter.
+ cnt.incr()
+
+ // Skip until last state.
+ if i < b.snapIter {
+ continue
+ }
- // Create new table.
- tw, err = s.tops.create()
- if err != nil {
- return
+ resumed := false
+ if snapResumed {
+ resumed = true
+ snapResumed = false
+ }
+
+ ikey := iter.Key()
+ ukey, seq, kt, kerr := parseIkey(ikey)
+
+ if kerr == nil {
+ shouldStop := !resumed && b.c.shouldStopBefore(ikey)
+
+ if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
+ // First occurrence of this user key.
+
+ // Only rotate tables if ukey doesn't hop across.
+ if b.tw != nil && (shouldStop || b.needFlush()) {
+ if err := b.flush(); err != nil {
+ return err
+ }
+
+ // Create a snapshot of the state.
+ b.c.save()
+ b.snapHasLastUkey = hasLastUkey
+ b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
+ b.snapLastSeq = lastSeq
+ b.snapIter = i
+ b.snapKerrCnt = b.kerrCnt
+ b.snapDropCnt = b.dropCnt
}
- }
- // Write key/value into table
- err = tw.add(key, iter.Value())
- if err != nil {
- return
+ hasLastUkey = true
+ lastUkey = append(lastUkey[:0], ukey...)
+ lastSeq = kMaxSeq
}
- // Finish table if it is big enough
- if tw.tw.BytesLen() >= kMaxTableSize {
- err = finish()
- if err != nil {
- return
- }
- snapSched = true
- tw = nil
+ switch {
+ case lastSeq <= b.minSeq:
+ // Dropped because a newer entry for the same user key exists.
+ fallthrough // (A)
+ case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
+ // For this user key:
+ // (1) there is no data in higher levels
+ // (2) data in lower levels will have larger seq numbers
+ // (3) data in layers that are being compacted here and have
+ // smaller seq numbers will be dropped in the next
+ // few iterations of this loop (by rule (A) above).
+ // Therefore this deletion marker is obsolete and can be dropped.
+ lastSeq = seq
+ b.dropCnt++
+ continue
+ default:
+ lastSeq = seq
+ }
+ } else {
+ if b.strict {
+ return kerr
}
+
+ // Don't drop corrupted keys.
+ hasLastUkey = false
+ lastUkey = lastUkey[:0]
+ lastSeq = kMaxSeq
+ b.kerrCnt++
}
- err = iter.Error()
- if err != nil {
- return
+ if err := b.appendKV(ikey, iter.Value()); err != nil {
+ return err
}
+ }
- // Finish last table
- if tw != nil && !tw.empty() {
- err = finish()
- if err != nil {
- return
- }
- tw = nil
+ if err := iter.Error(); err != nil {
+ return err
+ }
+
+ // Finish last table.
+ if b.tw != nil && !b.tw.empty() {
+ return b.flush()
+ }
+ return nil
+}
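The drop rules inside run above decide which internal entries survive a compaction: rule (A) drops an entry once a newer entry for the same user key is already at or below minSeq, and the second rule drops deletion markers that no live snapshot can observe. A standalone illustration of those two rules (not the library's code; all names are made up):

package main

import "fmt"

type entry struct {
	seq uint64
	del bool
}

// dropDecisions walks one user key's entries newest-first and reports which
// are dropped; minSeq is the oldest live snapshot, baseLevel mirrors
// baseLevelForKey (no data for the key in higher levels).
func dropDecisions(entries []entry, minSeq uint64, baseLevel bool) []bool {
	const maxSeq = ^uint64(0)
	lastSeq := maxSeq // first occurrence of the user key
	out := make([]bool, len(entries))
	for i, e := range entries {
		switch {
		case lastSeq <= minSeq: // rule (A)
			out[i] = true
		case e.del && e.seq <= minSeq && baseLevel:
			out[i] = true
		}
		lastSeq = e.seq
	}
	return out
}

func main() {
	// With minSeq=6: seq 7 is kept (visible to new readers), seq 5 is kept
	// (visible to the snapshot at 6), seq 3 is dropped by rule (A).
	fmt.Println(dropDecisions([]entry{{7, false}, {5, false}, {3, true}}, 6, true))
	// Output: [false false true]
}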
+
+func (b *tableCompactionBuilder) revert() error {
+ for _, at := range b.rec.addedTables {
+ b.s.logf("table@build revert @%d", at.num)
+ f := b.s.getTableFile(at.num)
+ if err := f.Remove(); err != nil {
+ return err
}
+ }
+ return nil
+}
+
+func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
+ defer c.release()
+
+ rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()}
+ rec.addCompPtr(c.level, c.imax)
+
+ if !noTrivial && c.trivial() {
+ t := c.tables[0][0]
+ db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
+ rec.delTable(c.level, t.file.Num())
+ rec.addTableFile(c.level+1, t)
+ db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
+ return db.s.commit(rec)
+ }, nil)
return
- }, func() error {
- for _, r := range rec.addedTables {
- s.logf("table@build rollback @%d", r.num)
- f := s.getTableFile(r.num)
- if err := f.Remove(); err != nil {
- return err
- }
+ }
+
+ var stats [2]cStatsStaging
+ for i, tables := range c.tables {
+ for _, t := range tables {
+ stats[i].read += t.size
+ // Insert deleted tables into record
+ rec.delTable(c.level+i, t.file.Num())
}
- return nil
- })
+ }
+ sourceSize := int(stats[0].read + stats[1].read)
+ minSeq := db.minSeq()
+ db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)
+
+ b := &tableCompactionBuilder{
+ db: db,
+ s: db.s,
+ c: c,
+ rec: rec,
+ stat1: &stats[1],
+ minSeq: minSeq,
+ strict: db.s.o.GetStrict(opt.StrictCompaction),
+ tableSize: db.s.o.GetCompactionTableSize(c.level + 1),
+ }
+ db.compactionTransact("table@build", b)
// Commit changes
- d.compactionTransact("table@commit", func(cnt *compactionTransactCounter) (err error) {
+ db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) {
stats[1].startTimer()
defer stats[1].stopTimer()
- return s.commit(rec)
+ return db.s.commit(rec)
}, nil)
- resultSize := int(int(stats[1].write))
- s.logf("table@compaction commited F%s S%s D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), dropCnt, stats[1].duration)
+ resultSize := int(stats[1].write)
+ db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)
// Save compaction stats
for i := range stats {
- d.compStats[c.level+1].add(&stats[i])
+ db.compStats[c.level+1].add(&stats[i])
}
}
-func (d *DB) tableRangeCompaction(level int, min, max []byte) {
- s := d.s
- s.logf("table@compaction range L%d %q:%q", level, min, max)
+func (db *DB) tableRangeCompaction(level int, umin, umax []byte) {
+ db.logf("table@compaction range L%d %q:%q", level, umin, umax)
if level >= 0 {
- if c := s.getCompactionRange(level, min, max); c != nil {
- d.tableCompaction(c, true)
+ if c := db.s.getCompactionRange(level, umin, umax); c != nil {
+ db.tableCompaction(c, true)
}
} else {
- v := s.version_NB()
+ v := db.s.version()
m := 1
for i, t := range v.tables[1:] {
- if t.isOverlaps(min, max, true, s.icmp) {
+ if t.overlaps(db.s.icmp, umin, umax, false) {
m = i + 1
}
}
+ v.release()
+
for level := 0; level < m; level++ {
- if c := s.getCompactionRange(level, min, max); c != nil {
- d.tableCompaction(c, true)
+ if c := db.s.getCompactionRange(level, umin, umax); c != nil {
+ db.tableCompaction(c, true)
}
}
}
}
-func (d *DB) tableAutoCompaction() {
- if c := d.s.pickCompaction(); c != nil {
- d.tableCompaction(c, false)
+func (db *DB) tableAutoCompaction() {
+ if c := db.s.pickCompaction(); c != nil {
+ db.tableCompaction(c, false)
}
}
-func (d *DB) tableNeedCompaction() bool {
- return d.s.version_NB().needCompaction()
+func (db *DB) tableNeedCompaction() bool {
+ v := db.s.version()
+ defer v.release()
+ return v.needCompaction()
}
-func (d *DB) pauseCompaction(ch chan<- struct{}) {
+func (db *DB) pauseCompaction(ch chan<- struct{}) {
select {
case ch <- struct{}{}:
- case _, _ = <-d.closeC:
- d.compactionExitTransact()
+ case _, _ = <-db.closeC:
+ db.compactionExitTransact()
}
}
@@ -537,7 +665,12 @@ type cIdle struct {
}
func (r cIdle) ack(err error) {
- r.ackC <- err
+ if r.ackC != nil {
+ defer func() {
+ recover()
+ }()
+ r.ackC <- err
+ }
}
type cRange struct {
@@ -547,56 +680,67 @@ type cRange struct {
}
func (r cRange) ack(err error) {
- defer func() {
- recover()
- }()
if r.ackC != nil {
+ defer func() {
+ recover()
+ }()
r.ackC <- err
}
}
-func (d *DB) compSendIdle(compC chan<- cCmd) error {
+// This will trigger auto compaction and/or wait for all compactions to be done.
+func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
ch := make(chan error)
defer close(ch)
// Send cmd.
select {
case compC <- cIdle{ch}:
- case err := <-d.compErrC:
- return err
- case _, _ = <-d.closeC:
+ case err = <-db.compErrC:
+ return
+ case _, _ = <-db.closeC:
return ErrClosed
}
// Wait cmd.
- return <-ch
+ select {
+ case err = <-ch:
+ case err = <-db.compErrC:
+ case _, _ = <-db.closeC:
+ return ErrClosed
+ }
+ return err
}
-func (d *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
+// This will trigger auto compaction but will not wait for it.
+func (db *DB) compSendTrigger(compC chan<- cCmd) {
+ select {
+ case compC <- cIdle{}:
+ default:
+ }
+}
+
+// Send range compaction request.
+func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
ch := make(chan error)
defer close(ch)
// Send cmd.
select {
case compC <- cRange{level, min, max, ch}:
- case err := <-d.compErrC:
+ case err := <-db.compErrC:
return err
- case _, _ = <-d.closeC:
+ case _, _ = <-db.closeC:
return ErrClosed
}
// Wait cmd.
select {
- case err = <-d.compErrC:
case err = <-ch:
+ case err = <-db.compErrC:
+ case _, _ = <-db.closeC:
+ return ErrClosed
}
return err
}
-func (d *DB) compTrigger(compTriggerC chan struct{}) {
- select {
- case compTriggerC <- struct{}{}:
- default:
- }
-}
-
-func (d *DB) mCompaction() {
+func (db *DB) mCompaction() {
var x cCmd
defer func() {
@@ -608,24 +752,27 @@ func (d *DB) mCompaction() {
if x != nil {
x.ack(ErrClosed)
}
- d.closeW.Done()
+ db.closeW.Done()
}()
for {
select {
- case _, _ = <-d.closeC:
+ case x = <-db.mcompCmdC:
+ switch x.(type) {
+ case cIdle:
+ db.memCompaction()
+ x.ack(nil)
+ x = nil
+ default:
+ panic("leveldb: unknown command")
+ }
+ case _, _ = <-db.closeC:
return
- case x = <-d.mcompCmdC:
- d.memCompaction()
- x.ack(nil)
- x = nil
- case <-d.mcompTriggerC:
- d.memCompaction()
}
}
}
-func (d *DB) tCompaction() {
+func (db *DB) tCompaction() {
var x cCmd
var ackQ []cCmd
@@ -642,19 +789,18 @@ func (d *DB) tCompaction() {
if x != nil {
x.ack(ErrClosed)
}
- d.closeW.Done()
+ db.closeW.Done()
}()
for {
- if d.tableNeedCompaction() {
+ if db.tableNeedCompaction() {
select {
- case x = <-d.tcompCmdC:
- case <-d.tcompTriggerC:
- case _, _ = <-d.closeC:
- return
- case ch := <-d.tcompPauseC:
- d.pauseCompaction(ch)
+ case x = <-db.tcompCmdC:
+ case ch := <-db.tcompPauseC:
+ db.pauseCompaction(ch)
continue
+ case _, _ = <-db.closeC:
+ return
default:
}
} else {
@@ -664,12 +810,11 @@ func (d *DB) tCompaction() {
}
ackQ = ackQ[:0]
select {
- case x = <-d.tcompCmdC:
- case <-d.tcompTriggerC:
- case ch := <-d.tcompPauseC:
- d.pauseCompaction(ch)
+ case x = <-db.tcompCmdC:
+ case ch := <-db.tcompPauseC:
+ db.pauseCompaction(ch)
continue
- case _, _ = <-d.closeC:
+ case _, _ = <-db.closeC:
return
}
}
@@ -678,11 +823,13 @@ func (d *DB) tCompaction() {
case cIdle:
ackQ = append(ackQ, x)
case cRange:
- d.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
+ db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
x.ack(nil)
+ default:
+ panic("leveldb: unknown command")
}
x = nil
}
- d.tableAutoCompaction()
+ db.tableAutoCompaction()
}
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
index 9973a8fef..011a94a35 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
@@ -8,7 +8,10 @@ package leveldb
import (
"errors"
+ "math/rand"
"runtime"
+ "sync"
+ "sync/atomic"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
@@ -19,50 +22,69 @@ var (
errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
)
-func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
- s := db.s
+type memdbReleaser struct {
+ once sync.Once
+ m *memDB
+}
+
+func (mr *memdbReleaser) Release() {
+ mr.once.Do(func() {
+ mr.m.decref()
+ })
+}
+func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
em, fm := db.getMems()
- v := s.version()
+ v := db.s.version()
ti := v.getIterators(slice, ro)
n := len(ti) + 2
i := make([]iterator.Iterator, 0, n)
- i = append(i, em.NewIterator(slice))
+ emi := em.mdb.NewIterator(slice)
+ emi.SetReleaser(&memdbReleaser{m: em})
+ i = append(i, emi)
if fm != nil {
- i = append(i, fm.NewIterator(slice))
+ fmi := fm.mdb.NewIterator(slice)
+ fmi.SetReleaser(&memdbReleaser{m: fm})
+ i = append(i, fmi)
}
i = append(i, ti...)
- strict := s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator)
- mi := iterator.NewMergedIterator(i, s.icmp, strict)
+ strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
+ mi := iterator.NewMergedIterator(i, db.s.icmp, strict)
mi.SetReleaser(&versionReleaser{v: v})
return mi
}
func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
- var slice_ *util.Range
+ var islice *util.Range
if slice != nil {
- slice_ = &util.Range{}
+ islice = &util.Range{}
if slice.Start != nil {
- slice_.Start = newIKey(slice.Start, kMaxSeq, tSeek)
+ islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek)
}
if slice.Limit != nil {
- slice_.Limit = newIKey(slice.Limit, kMaxSeq, tSeek)
+ islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek)
}
}
- rawIter := db.newRawIterator(slice_, ro)
+ rawIter := db.newRawIterator(islice, ro)
iter := &dbIter{
+ db: db,
icmp: db.s.icmp,
iter: rawIter,
seq: seq,
- strict: db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator),
+ strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
key: make([]byte, 0),
value: make([]byte, 0),
}
+ atomic.AddInt32(&db.aliveIters, 1)
runtime.SetFinalizer(iter, (*dbIter).Release)
return iter
}
+func (db *DB) iterSamplingRate() int {
+ return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
+}
+
type dir int
const (
@@ -75,16 +97,27 @@ const (
// dbIter represents iterator state over a database session.
type dbIter struct {
+ db *DB
icmp *iComparer
iter iterator.Iterator
seq uint64
strict bool
- dir dir
- key []byte
- value []byte
- err error
- releaser util.Releaser
+ smaplingGap int
+ dir dir
+ key []byte
+ value []byte
+ err error
+ releaser util.Releaser
+}
+
+func (i *dbIter) sampleSeek() {
+ ikey := i.iter.Key()
+ i.smaplingGap -= len(ikey) + len(i.iter.Value())
+ for i.smaplingGap < 0 {
+ i.smaplingGap += i.db.iterSamplingRate()
+ i.db.sampleSeek(ikey)
+ }
}
func (i *dbIter) setErr(err error) {
@@ -144,7 +177,7 @@ func (i *dbIter) Seek(key []byte) bool {
return false
}
- ikey := newIKey(key, i.seq, tSeek)
+ ikey := newIkey(key, i.seq, ktSeek)
if i.iter.Seek(ikey) {
i.dir = dirSOI
return i.next()
@@ -156,15 +189,15 @@ func (i *dbIter) Seek(key []byte) bool {
func (i *dbIter) next() bool {
for {
- ukey, seq, t, ok := parseIkey(i.iter.Key())
- if ok {
+ if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
if seq <= i.seq {
- switch t {
- case tDel:
+ switch kt {
+ case ktDel:
// Skip deleted key.
i.key = append(i.key[:0], ukey...)
i.dir = dirForward
- case tVal:
+ case ktVal:
if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
i.key = append(i.key[:0], ukey...)
i.value = append(i.value[:0], i.iter.Value()...)
@@ -174,7 +207,7 @@ func (i *dbIter) next() bool {
}
}
} else if i.strict {
- i.setErr(errInvalidIkey)
+ i.setErr(kerr)
break
}
if !i.iter.Next() {
@@ -207,20 +240,20 @@ func (i *dbIter) prev() bool {
del := true
if i.iter.Valid() {
for {
- ukey, seq, t, ok := parseIkey(i.iter.Key())
- if ok {
+ if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
if seq <= i.seq {
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
return true
}
- del = (t == tDel)
+ del = (kt == ktDel)
if !del {
i.key = append(i.key[:0], ukey...)
i.value = append(i.value[:0], i.iter.Value()...)
}
}
} else if i.strict {
- i.setErr(errInvalidIkey)
+ i.setErr(kerr)
return false
}
if !i.iter.Prev() {
@@ -249,13 +282,13 @@ func (i *dbIter) Prev() bool {
return i.Last()
case dirForward:
for i.iter.Prev() {
- ukey, _, _, ok := parseIkey(i.iter.Key())
- if ok {
+ if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
if i.icmp.uCompare(ukey, i.key) < 0 {
goto cont
}
} else if i.strict {
- i.setErr(errInvalidIkey)
+ i.setErr(kerr)
return false
}
}
@@ -289,6 +322,7 @@ func (i *dbIter) Release() {
if i.releaser != nil {
i.releaser.Release()
+ i.releaser = nil
}
i.dir = dirReleased
@@ -296,13 +330,19 @@ func (i *dbIter) Release() {
i.value = nil
i.iter.Release()
i.iter = nil
+ atomic.AddInt32(&i.db.aliveIters, -1)
+ i.db = nil
}
}
func (i *dbIter) SetReleaser(releaser util.Releaser) {
- if i.dir != dirReleased {
- i.releaser = releaser
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
}
+ i.releaser = releaser
}
func (i *dbIter) Error() error {
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
index 225b7cd5e..0372848ff 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
@@ -7,8 +7,11 @@
package leveldb
import (
+ "container/list"
+ "fmt"
"runtime"
"sync"
+ "sync/atomic"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
@@ -18,51 +21,41 @@ import (
type snapshotElement struct {
seq uint64
ref int
- // Next and previous pointers in the doubly-linked list of elements.
- next, prev *snapshotElement
-}
-
-// Initialize the snapshot.
-func (db *DB) initSnapshot() {
- db.snapsRoot.next = &db.snapsRoot
- db.snapsRoot.prev = &db.snapsRoot
+ e *list.Element
}
// Acquires a snapshot, based on latest sequence.
func (db *DB) acquireSnapshot() *snapshotElement {
db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
seq := db.getSeq()
- elem := db.snapsRoot.prev
- if elem == &db.snapsRoot || elem.seq != seq {
- at := db.snapsRoot.prev
- next := at.next
- elem = &snapshotElement{
- seq: seq,
- prev: at,
- next: next,
+
+ if e := db.snapsList.Back(); e != nil {
+ se := e.Value.(*snapshotElement)
+ if se.seq == seq {
+ se.ref++
+ return se
+ } else if seq < se.seq {
+ panic("leveldb: sequence number is not increasing")
}
- at.next = elem
- next.prev = elem
}
- elem.ref++
- db.snapsMu.Unlock()
- return elem
+ se := &snapshotElement{seq: seq, ref: 1}
+ se.e = db.snapsList.PushBack(se)
+ return se
}
// Releases given snapshot element.
-func (db *DB) releaseSnapshot(elem *snapshotElement) {
- if !db.isClosed() {
- db.snapsMu.Lock()
- elem.ref--
- if elem.ref == 0 {
- elem.prev.next = elem.next
- elem.next.prev = elem.prev
- elem.next = nil
- elem.prev = nil
- } else if elem.ref < 0 {
- panic("leveldb: Snapshot: negative element reference")
- }
- db.snapsMu.Unlock()
+func (db *DB) releaseSnapshot(se *snapshotElement) {
+ db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
+ se.ref--
+ if se.ref == 0 {
+ db.snapsList.Remove(se.e)
+ se.e = nil
+ } else if se.ref < 0 {
+ panic("leveldb: Snapshot: negative element reference")
}
}
@@ -70,10 +63,11 @@ func (db *DB) releaseSnapshot(elem *snapshotElement) {
func (db *DB) minSeq() uint64 {
db.snapsMu.Lock()
defer db.snapsMu.Unlock()
- elem := db.snapsRoot.prev
- if elem != &db.snapsRoot {
- return elem.seq
+
+ if e := db.snapsList.Front(); e != nil {
+ return e.Value.(*snapshotElement).seq
}
+
return db.getSeq()
}
@@ -81,38 +75,59 @@ func (db *DB) minSeq() uint64 {
type Snapshot struct {
db *DB
elem *snapshotElement
- mu sync.Mutex
+ mu sync.RWMutex
released bool
}
// Creates new snapshot object.
func (db *DB) newSnapshot() *Snapshot {
- p := &Snapshot{
+ snap := &Snapshot{
db: db,
elem: db.acquireSnapshot(),
}
- runtime.SetFinalizer(p, (*Snapshot).Release)
- return p
+ atomic.AddInt32(&db.aliveSnaps, 1)
+ runtime.SetFinalizer(snap, (*Snapshot).Release)
+ return snap
+}
+
+func (snap *Snapshot) String() string {
+ return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
}
// Get gets the value for the given key. It returns ErrNotFound if
-// the DB does not contain the key.
+// the DB does not contain the key.
//
// The caller should not modify the contents of the returned slice, but
// it is safe to modify the contents of the argument after Get returns.
-func (p *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
- db := p.db
- err = db.ok()
+func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = snap.db.ok()
if err != nil {
return
}
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.released {
+ snap.mu.RLock()
+ defer snap.mu.RUnlock()
+ if snap.released {
err = ErrSnapshotReleased
return
}
- return db.get(key, p.elem.seq, ro)
+ return snap.db.get(key, snap.elem.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+ err = snap.db.ok()
+ if err != nil {
+ return
+ }
+ snap.mu.RLock()
+ defer snap.mu.RUnlock()
+ if snap.released {
+ err = ErrSnapshotReleased
+ return
+ }
+ return snap.db.has(key, snap.elem.seq, ro)
}
// NewIterator returns an iterator for the snapshot of the underlying DB.
@@ -132,17 +147,18 @@ func (p *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error
// iterator would be still valid until released.
//
// Also read Iterator documentation of the leveldb/iterator package.
-func (p *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
- db := p.db
- if err := db.ok(); err != nil {
+func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := snap.db.ok(); err != nil {
return iterator.NewEmptyIterator(err)
}
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.released {
+ snap.mu.Lock()
+ defer snap.mu.Unlock()
+ if snap.released {
return iterator.NewEmptyIterator(ErrSnapshotReleased)
}
- return db.newIterator(p.elem.seq, slice, ro)
+ // Since the iterator already holds a version ref, it doesn't need to
+ // hold a snapshot ref.
+ return snap.db.newIterator(snap.elem.seq, slice, ro)
}
// Release releases the snapshot. This will not release any returned
@@ -150,16 +166,18 @@ func (p *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.
// underlying DB is closed.
//
// Other methods should not be called after the snapshot has been released.
-func (p *Snapshot) Release() {
- p.mu.Lock()
- if !p.released {
+func (snap *Snapshot) Release() {
+ snap.mu.Lock()
+ defer snap.mu.Unlock()
+
+ if !snap.released {
// Clear the finalizer.
- runtime.SetFinalizer(p, nil)
+ runtime.SetFinalizer(snap, nil)
- p.released = true
- p.db.releaseSnapshot(p.elem)
- p.db = nil
- p.elem = nil
+ snap.released = true
+ snap.db.releaseSnapshot(snap.elem)
+ atomic.AddInt32(&snap.db.aliveSnaps, -1)
+ snap.db = nil
+ snap.elem = nil
}
- p.mu.Unlock()
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
index a13706142..d4db9d6dd 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
@@ -8,106 +8,203 @@ package leveldb
import (
"sync/atomic"
+ "time"
"github.com/syndtr/goleveldb/leveldb/journal"
"github.com/syndtr/goleveldb/leveldb/memdb"
)
+type memDB struct {
+ db *DB
+ mdb *memdb.DB
+ ref int32
+}
+
+func (m *memDB) incref() {
+ atomic.AddInt32(&m.ref, 1)
+}
+
+func (m *memDB) decref() {
+ if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
+ // Only put back memdb with std capacity.
+ if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
+ m.mdb.Reset()
+ m.db.mpoolPut(m.mdb)
+ }
+ m.db = nil
+ m.mdb = nil
+ } else if ref < 0 {
+ panic("negative memdb ref")
+ }
+}
+
// Get latest sequence number.
-func (d *DB) getSeq() uint64 {
- return atomic.LoadUint64(&d.seq)
+func (db *DB) getSeq() uint64 {
+ return atomic.LoadUint64(&db.seq)
}
// Atomically adds delta to seq.
-func (d *DB) addSeq(delta uint64) {
- atomic.AddUint64(&d.seq, delta)
+func (db *DB) addSeq(delta uint64) {
+ atomic.AddUint64(&db.seq, delta)
+}
+
+func (db *DB) sampleSeek(ikey iKey) {
+ v := db.s.version()
+ if v.sampleSeek(ikey) {
+ // Trigger table compaction.
+ db.compSendTrigger(db.tcompCmdC)
+ }
+ v.release()
+}
+
+func (db *DB) mpoolPut(mem *memdb.DB) {
+ defer func() {
+ recover()
+ }()
+ select {
+ case db.memPool <- mem:
+ default:
+ }
+}
+
+func (db *DB) mpoolGet() *memdb.DB {
+ select {
+ case mem := <-db.memPool:
+ return mem
+ default:
+ return nil
+ }
+}
+
+func (db *DB) mpoolDrain() {
+ ticker := time.NewTicker(30 * time.Second)
+ for {
+ select {
+ case <-ticker.C:
+ select {
+ case <-db.memPool:
+ default:
+ }
+ case _, _ = <-db.closeC:
+ close(db.memPool)
+ return
+ }
+ }
}
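mpoolGet/mpoolPut/mpoolDrain above implement a tiny object pool on a buffered channel, using select with a default case so neither side ever blocks. A generic standalone sketch of that pattern (the pooled type and names are made up):

package main

import "bytes"

type bufPool struct{ ch chan *bytes.Buffer }

func newBufPool(size int) *bufPool { return &bufPool{ch: make(chan *bytes.Buffer, size)} }

// get returns a pooled buffer or nil, never blocking.
func (p *bufPool) get() *bytes.Buffer {
	select {
	case b := <-p.ch:
		return b
	default:
		return nil
	}
}

// put stores the buffer if there is room, otherwise drops it.
func (p *bufPool) put(b *bytes.Buffer) {
	b.Reset()
	select {
	case p.ch <- b:
	default:
	}
}

func main() {
	p := newBufPool(1)
	if p.get() != nil {
		panic("pool should start empty")
	}
	p.put(bytes.NewBufferString("x"))
	_ = p.get() // reused buffer
}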
// Create a new memdb and freeze the old one; needs external synchronization.
// newMem only called synchronously by the writer.
-func (d *DB) newMem(n int) (mem *memdb.DB, err error) {
- s := d.s
-
- num := s.allocFileNum()
- file := s.getJournalFile(num)
+func (db *DB) newMem(n int) (mem *memDB, err error) {
+ num := db.s.allocFileNum()
+ file := db.s.getJournalFile(num)
w, err := file.Create()
if err != nil {
- s.reuseFileNum(num)
+ db.s.reuseFileNum(num)
return
}
- d.memMu.Lock()
- if d.journal == nil {
- d.journal = journal.NewWriter(w)
+
+ db.memMu.Lock()
+ defer db.memMu.Unlock()
+
+ if db.frozenMem != nil {
+ panic("still has frozen mem")
+ }
+
+ if db.journal == nil {
+ db.journal = journal.NewWriter(w)
} else {
- d.journal.Reset(w)
- d.journalWriter.Close()
- d.frozenJournalFile = d.journalFile
- }
- d.journalWriter = w
- d.journalFile = file
- d.frozenMem = d.mem
- d.mem = memdb.New(s.icmp, maxInt(d.s.o.GetWriteBuffer(), n))
- mem = d.mem
- // The seq only incremented by the writer.
- d.frozenSeq = d.seq
- d.memMu.Unlock()
+ db.journal.Reset(w)
+ db.journalWriter.Close()
+ db.frozenJournalFile = db.journalFile
+ }
+ db.journalWriter = w
+ db.journalFile = file
+ db.frozenMem = db.mem
+ mdb := db.mpoolGet()
+ if mdb == nil || mdb.Capacity() < n {
+ mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
+ }
+ mem = &memDB{
+ db: db,
+ mdb: mdb,
+ ref: 2,
+ }
+ db.mem = mem
+ // The seq is only incremented by the writer. Whoever calls newMem should
+ // hold the write lock, so no additional synchronization is needed here.
+ db.frozenSeq = db.seq
return
}
// Get all memdbs.
-func (d *DB) getMems() (e *memdb.DB, f *memdb.DB) {
- d.memMu.RLock()
- defer d.memMu.RUnlock()
- return d.mem, d.frozenMem
+func (db *DB) getMems() (e, f *memDB) {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.mem == nil {
+ panic("nil effective mem")
+ }
+ db.mem.incref()
+ if db.frozenMem != nil {
+ db.frozenMem.incref()
+ }
+ return db.mem, db.frozenMem
}
// Get effective memdb.
-func (d *DB) getEffectiveMem() *memdb.DB {
- d.memMu.RLock()
- defer d.memMu.RUnlock()
- return d.mem
+func (db *DB) getEffectiveMem() *memDB {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.mem == nil {
+ panic("nil effective mem")
+ }
+ db.mem.incref()
+ return db.mem
}
// Check whether we have a frozen memdb.
-func (d *DB) hasFrozenMem() bool {
- d.memMu.RLock()
- defer d.memMu.RUnlock()
- return d.frozenMem != nil
+func (db *DB) hasFrozenMem() bool {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ return db.frozenMem != nil
}
// Get frozen memdb.
-func (d *DB) getFrozenMem() *memdb.DB {
- d.memMu.RLock()
- defer d.memMu.RUnlock()
- return d.frozenMem
+func (db *DB) getFrozenMem() *memDB {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.frozenMem != nil {
+ db.frozenMem.incref()
+ }
+ return db.frozenMem
}
// Drop frozen memdb; assume that frozen memdb isn't nil.
-func (d *DB) dropFrozenMem() {
- d.memMu.Lock()
- if err := d.frozenJournalFile.Remove(); err != nil {
- d.s.logf("journal@remove removing @%d %q", d.frozenJournalFile.Num(), err)
+func (db *DB) dropFrozenMem() {
+ db.memMu.Lock()
+ if err := db.frozenJournalFile.Remove(); err != nil {
+ db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err)
} else {
- d.s.logf("journal@remove removed @%d", d.frozenJournalFile.Num())
+ db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
}
- d.frozenJournalFile = nil
- d.frozenMem = nil
- d.memMu.Unlock()
+ db.frozenJournalFile = nil
+ db.frozenMem.decref()
+ db.frozenMem = nil
+ db.memMu.Unlock()
}
// Set closed flag; return true if not already closed.
-func (d *DB) setClosed() bool {
- return atomic.CompareAndSwapUint32(&d.closed, 0, 1)
+func (db *DB) setClosed() bool {
+ return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
}
// Check whether DB was closed.
-func (d *DB) isClosed() bool {
- return atomic.LoadUint32(&d.closed) != 0
+func (db *DB) isClosed() bool {
+ return atomic.LoadUint32(&db.closed) != 0
}
// Check read ok status.
-func (d *DB) ok() error {
- if d.isClosed() {
+func (db *DB) ok() error {
+ if db.isClosed() {
return ErrClosed
}
return nil
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
index 5de7d9723..38bfbf1ea 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
@@ -7,6 +7,10 @@
package leveldb
import (
+ "bytes"
+ "container/list"
+ crand "crypto/rand"
+ "encoding/binary"
"fmt"
"math/rand"
"os"
@@ -20,6 +24,7 @@ import (
"unsafe"
"github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
@@ -148,25 +153,29 @@ func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) {
t := h.t
db := h.db
- var res uint64
+ var (
+ maxOverlaps uint64
+ maxLevel int
+ )
v := db.s.version()
for i, tt := range v.tables[1 : len(v.tables)-1] {
level := i + 1
next := v.tables[level+1]
for _, t := range tt {
- var r tFiles
- min, max := t.min.ukey(), t.max.ukey()
- next.getOverlaps(min, max, &r, true, db.s.icmp.ucmp)
+ r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false)
sum := r.size()
- if sum > res {
- res = sum
+ if sum > maxOverlaps {
+ maxOverlaps = sum
+ maxLevel = level
}
}
}
v.release()
- if res > want {
- t.Errorf("next level overlapping bytes is more than %d, got=%d", want, res)
+ if maxOverlaps > want {
+ t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel)
+ } else {
+ t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want)
}
}
@@ -239,7 +248,7 @@ func (h *dbHarness) allEntriesFor(key, want string) {
db := h.db
s := db.s
- ikey := newIKey([]byte(key), kMaxSeq, tVal)
+ ikey := newIkey([]byte(key), kMaxSeq, ktVal)
iter := db.newRawIterator(nil, nil)
if !iter.Seek(ikey) && iter.Error() != nil {
t.Error("AllEntries: error during seek, err: ", iter.Error())
@@ -248,19 +257,18 @@ func (h *dbHarness) allEntriesFor(key, want string) {
res := "[ "
first := true
for iter.Valid() {
- rkey := iKey(iter.Key())
- if _, t, ok := rkey.parseNum(); ok {
- if s.icmp.uCompare(ikey.ukey(), rkey.ukey()) != 0 {
+ if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil {
+ if s.icmp.uCompare(ikey.ukey(), ukey) != 0 {
break
}
if !first {
res += ", "
}
first = false
- switch t {
- case tVal:
+ switch kt {
+ case ktVal:
res += string(iter.Value())
- case tDel:
+ case ktDel:
res += "DEL"
}
} else {
@@ -325,6 +333,8 @@ func (h *dbHarness) compactMem() {
t := h.t
db := h.db
+ t.Log("starting memdb compaction")
+
db.writeLockC <- struct{}{}
defer func() {
<-db.writeLockC
@@ -340,6 +350,8 @@ func (h *dbHarness) compactMem() {
if h.totalTables() == 0 {
t.Error("zero tables after mem compaction")
}
+
+ t.Log("memdb compaction done")
}
func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) {
@@ -354,6 +366,8 @@ func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool)
_max = []byte(max)
}
+ t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max)
+
if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil {
if wanterr {
t.Log("CompactRangeAt: got error (expected): ", err)
@@ -363,6 +377,8 @@ func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool)
} else if wanterr {
t.Error("CompactRangeAt: expect error")
}
+
+ t.Log("table range compaction done")
}
func (h *dbHarness) compactRangeAt(level int, min, max string) {
@@ -373,6 +389,8 @@ func (h *dbHarness) compactRange(min, max string) {
t := h.t
db := h.db
+ t.Logf("starting DB range compaction: min=%q, max=%q", min, max)
+
var r util.Range
if min != "" {
r.Start = []byte(min)
@@ -383,21 +401,25 @@ func (h *dbHarness) compactRange(min, max string) {
if err := db.CompactRange(r); err != nil {
t.Error("CompactRange: got error: ", err)
}
-}
-func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
- t := h.t
- db := h.db
+ t.Log("DB range compaction done")
+}
- s, err := db.SizeOf([]util.Range{
+func (h *dbHarness) sizeOf(start, limit string) uint64 {
+ sz, err := h.db.SizeOf([]util.Range{
{[]byte(start), []byte(limit)},
})
if err != nil {
- t.Error("SizeOf: got error: ", err)
+ h.t.Error("SizeOf: got error: ", err)
}
- if s.Sum() < low || s.Sum() > hi {
- t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
- shorten(start), shorten(limit), low, hi, s.Sum())
+ return sz.Sum()
+}
+
+func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
+ sz := h.sizeOf(start, limit)
+ if sz < low || sz > hi {
+ h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
+ shorten(start), shorten(limit), low, hi, sz)
}
}
@@ -504,13 +526,13 @@ func Test_FieldsAligned(t *testing.T) {
p1 := new(DB)
testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq))
p2 := new(session)
- testAligned(t, "session.stFileNum", unsafe.Offsetof(p2.stFileNum))
+ testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum))
testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum))
testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum))
- testAligned(t, "session.stSeq", unsafe.Offsetof(p2.stSeq))
+ testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum))
}
-func TestDb_Locking(t *testing.T) {
+func TestDB_Locking(t *testing.T) {
h := newDbHarness(t)
defer h.stor.Close()
h.openAssert(false)
@@ -518,7 +540,7 @@ func TestDb_Locking(t *testing.T) {
h.openAssert(true)
}
-func TestDb_Empty(t *testing.T) {
+func TestDB_Empty(t *testing.T) {
trun(t, func(h *dbHarness) {
h.get("foo", false)
@@ -527,7 +549,7 @@ func TestDb_Empty(t *testing.T) {
})
}
-func TestDb_ReadWrite(t *testing.T) {
+func TestDB_ReadWrite(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.getVal("foo", "v1")
@@ -542,7 +564,7 @@ func TestDb_ReadWrite(t *testing.T) {
})
}
-func TestDb_PutDeleteGet(t *testing.T) {
+func TestDB_PutDeleteGet(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.getVal("foo", "v1")
@@ -556,7 +578,7 @@ func TestDb_PutDeleteGet(t *testing.T) {
})
}
-func TestDb_EmptyBatch(t *testing.T) {
+func TestDB_EmptyBatch(t *testing.T) {
h := newDbHarness(t)
defer h.close()
@@ -568,7 +590,7 @@ func TestDb_EmptyBatch(t *testing.T) {
h.get("foo", false)
}
-func TestDb_GetFromFrozen(t *testing.T) {
+func TestDB_GetFromFrozen(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100})
defer h.close()
@@ -594,7 +616,7 @@ func TestDb_GetFromFrozen(t *testing.T) {
h.get("k2", true)
}
-func TestDb_GetFromTable(t *testing.T) {
+func TestDB_GetFromTable(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.compactMem()
@@ -602,7 +624,7 @@ func TestDb_GetFromTable(t *testing.T) {
})
}
-func TestDb_GetSnapshot(t *testing.T) {
+func TestDB_GetSnapshot(t *testing.T) {
trun(t, func(h *dbHarness) {
bar := strings.Repeat("b", 200)
h.put("foo", "v1")
@@ -636,7 +658,7 @@ func TestDb_GetSnapshot(t *testing.T) {
})
}
-func TestDb_GetLevel0Ordering(t *testing.T) {
+func TestDB_GetLevel0Ordering(t *testing.T) {
trun(t, func(h *dbHarness) {
for i := 0; i < 4; i++ {
h.put("bar", fmt.Sprintf("b%d", i))
@@ -659,7 +681,7 @@ func TestDb_GetLevel0Ordering(t *testing.T) {
})
}
-func TestDb_GetOrderedByLevels(t *testing.T) {
+func TestDB_GetOrderedByLevels(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.compactMem()
@@ -671,7 +693,7 @@ func TestDb_GetOrderedByLevels(t *testing.T) {
})
}
-func TestDb_GetPicksCorrectFile(t *testing.T) {
+func TestDB_GetPicksCorrectFile(t *testing.T) {
trun(t, func(h *dbHarness) {
// Arrange to have multiple files in a non-level-0 level.
h.put("a", "va")
@@ -695,7 +717,7 @@ func TestDb_GetPicksCorrectFile(t *testing.T) {
})
}
-func TestDb_GetEncountersEmptyLevel(t *testing.T) {
+func TestDB_GetEncountersEmptyLevel(t *testing.T) {
trun(t, func(h *dbHarness) {
// Arrange for the following to happen:
// * sstable A in level 0
@@ -750,7 +772,7 @@ func TestDb_GetEncountersEmptyLevel(t *testing.T) {
})
}
-func TestDb_IterMultiWithDelete(t *testing.T) {
+func TestDB_IterMultiWithDelete(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("a", "va")
h.put("b", "vb")
@@ -776,7 +798,7 @@ func TestDb_IterMultiWithDelete(t *testing.T) {
})
}
-func TestDb_IteratorPinsRef(t *testing.T) {
+func TestDB_IteratorPinsRef(t *testing.T) {
h := newDbHarness(t)
defer h.close()
@@ -800,7 +822,7 @@ func TestDb_IteratorPinsRef(t *testing.T) {
iter.Release()
}
-func TestDb_Recover(t *testing.T) {
+func TestDB_Recover(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.put("baz", "v5")
@@ -822,7 +844,7 @@ func TestDb_Recover(t *testing.T) {
})
}
-func TestDb_RecoverWithEmptyJournal(t *testing.T) {
+func TestDB_RecoverWithEmptyJournal(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
h.put("foo", "v2")
@@ -836,7 +858,7 @@ func TestDb_RecoverWithEmptyJournal(t *testing.T) {
})
}
-func TestDb_RecoverDuringMemtableCompaction(t *testing.T) {
+func TestDB_RecoverDuringMemtableCompaction(t *testing.T) {
truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) {
h.stor.DelaySync(storage.TypeTable)
@@ -852,7 +874,7 @@ func TestDb_RecoverDuringMemtableCompaction(t *testing.T) {
})
}
-func TestDb_MinorCompactionsHappen(t *testing.T) {
+func TestDB_MinorCompactionsHappen(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000})
defer h.close()
@@ -876,7 +898,7 @@ func TestDb_MinorCompactionsHappen(t *testing.T) {
}
}
-func TestDb_RecoverWithLargeJournal(t *testing.T) {
+func TestDB_RecoverWithLargeJournal(t *testing.T) {
h := newDbHarness(t)
defer h.close()
@@ -901,7 +923,7 @@ func TestDb_RecoverWithLargeJournal(t *testing.T) {
v.release()
}
-func TestDb_CompactionsGenerateMultipleFiles(t *testing.T) {
+func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
WriteBuffer: 10000000,
Compression: opt.NoCompression,
@@ -939,11 +961,11 @@ func TestDb_CompactionsGenerateMultipleFiles(t *testing.T) {
}
}
-func TestDb_RepeatedWritesToSameKey(t *testing.T) {
+func TestDB_RepeatedWritesToSameKey(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
defer h.close()
- maxTables := kNumLevels + kL0_StopWritesTrigger
+ maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
for i := 0; i < 5*maxTables; i++ {
@@ -955,13 +977,13 @@ func TestDb_RepeatedWritesToSameKey(t *testing.T) {
}
}
-func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
+func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
defer h.close()
h.reopenDB()
- maxTables := kNumLevels + kL0_StopWritesTrigger
+ maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
for i := 0; i < 5*maxTables; i++ {
@@ -973,11 +995,11 @@ func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
}
}
-func TestDb_SparseMerge(t *testing.T) {
+func TestDB_SparseMerge(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
defer h.close()
- h.putMulti(kNumLevels, "A", "Z")
+ h.putMulti(h.o.GetNumLevel(), "A", "Z")
// Suppose there is:
// small amount of data with prefix A
@@ -1001,6 +1023,7 @@ func TestDb_SparseMerge(t *testing.T) {
h.put("C", "vc2")
h.compactMem()
+ h.waitCompaction()
h.maxNextLevelOverlappingBytes(20 * 1048576)
h.compactRangeAt(0, "", "")
h.waitCompaction()
@@ -1010,7 +1033,7 @@ func TestDb_SparseMerge(t *testing.T) {
h.maxNextLevelOverlappingBytes(20 * 1048576)
}
-func TestDb_SizeOf(t *testing.T) {
+func TestDB_SizeOf(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
Compression: opt.NoCompression,
WriteBuffer: 10000000,
@@ -1060,7 +1083,7 @@ func TestDb_SizeOf(t *testing.T) {
}
}
-func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) {
+func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
defer h.close()
@@ -1098,7 +1121,7 @@ func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) {
}
}
-func TestDb_Snapshot(t *testing.T) {
+func TestDB_Snapshot(t *testing.T) {
trun(t, func(h *dbHarness) {
h.put("foo", "v1")
s1 := h.getSnapshot()
@@ -1127,13 +1150,51 @@ func TestDb_Snapshot(t *testing.T) {
})
}
-func TestDb_HiddenValuesAreRemoved(t *testing.T) {
+func TestDB_SnapshotList(t *testing.T) {
+ db := &DB{snapsList: list.New()}
+ e0a := db.acquireSnapshot()
+ e0b := db.acquireSnapshot()
+ db.seq = 1
+ e1 := db.acquireSnapshot()
+ db.seq = 2
+ e2 := db.acquireSnapshot()
+
+ if db.minSeq() != 0 {
+ t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+ }
+ db.releaseSnapshot(e0a)
+ if db.minSeq() != 0 {
+ t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+ }
+ db.releaseSnapshot(e2)
+ if db.minSeq() != 0 {
+ t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+ }
+ db.releaseSnapshot(e0b)
+ if db.minSeq() != 1 {
+ t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+ }
+ e2 = db.acquireSnapshot()
+ if db.minSeq() != 1 {
+ t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+ }
+ db.releaseSnapshot(e1)
+ if db.minSeq() != 2 {
+ t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+ }
+ db.releaseSnapshot(e2)
+ if db.minSeq() != 2 {
+ t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+ }
+}
+
+func TestDB_HiddenValuesAreRemoved(t *testing.T) {
trun(t, func(h *dbHarness) {
s := h.db.s
h.put("foo", "v1")
h.compactMem()
- m := kMaxMemCompactLevel
+ m := h.o.GetMaxMemCompationLevel()
v := s.version()
num := v.tLen(m)
v.release()
@@ -1170,14 +1231,14 @@ func TestDb_HiddenValuesAreRemoved(t *testing.T) {
})
}
-func TestDb_DeletionMarkers2(t *testing.T) {
+func TestDB_DeletionMarkers2(t *testing.T) {
h := newDbHarness(t)
defer h.close()
s := h.db.s
h.put("foo", "v1")
h.compactMem()
- m := kMaxMemCompactLevel
+ m := h.o.GetMaxMemCompationLevel()
v := s.version()
num := v.tLen(m)
v.release()
@@ -1211,8 +1272,8 @@ func TestDb_DeletionMarkers2(t *testing.T) {
h.allEntriesFor("foo", "[ ]")
}
-func TestDb_CompactionTableOpenError(t *testing.T) {
- h := newDbHarnessWopt(t, &opt.Options{MaxOpenFiles: 0})
+func TestDB_CompactionTableOpenError(t *testing.T) {
+ h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1})
defer h.close()
im := 10
@@ -1230,14 +1291,14 @@ func TestDb_CompactionTableOpenError(t *testing.T) {
t.Errorf("total tables is %d, want %d", n, im)
}
- h.stor.SetOpenErr(storage.TypeTable)
+ h.stor.SetEmuErr(storage.TypeTable, tsOpOpen)
go h.db.CompactRange(util.Range{})
if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil {
t.Log("compaction error: ", err)
}
h.closeDB0()
h.openDB()
- h.stor.SetOpenErr(0)
+ h.stor.SetEmuErr(0, tsOpOpen)
for i := 0; i < im; i++ {
for j := 0; j < jm; j++ {
@@ -1246,9 +1307,9 @@ func TestDb_CompactionTableOpenError(t *testing.T) {
}
}
-func TestDb_OverlapInLevel0(t *testing.T) {
+func TestDB_OverlapInLevel0(t *testing.T) {
trun(t, func(h *dbHarness) {
- if kMaxMemCompactLevel != 2 {
+ if h.o.GetMaxMemCompationLevel() != 2 {
t.Fatal("fix test to reflect the config")
}
@@ -1289,7 +1350,7 @@ func TestDb_OverlapInLevel0(t *testing.T) {
})
}
-func TestDb_L0_CompactionBug_Issue44_a(t *testing.T) {
+func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) {
h := newDbHarness(t)
defer h.close()
@@ -1309,7 +1370,7 @@ func TestDb_L0_CompactionBug_Issue44_a(t *testing.T) {
h.getKeyVal("(a->v)")
}
-func TestDb_L0_CompactionBug_Issue44_b(t *testing.T) {
+func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) {
h := newDbHarness(t)
defer h.close()
@@ -1338,7 +1399,7 @@ func TestDb_L0_CompactionBug_Issue44_b(t *testing.T) {
h.getKeyVal("(->)(c->cv)")
}
-func TestDb_SingleEntryMemCompaction(t *testing.T) {
+func TestDB_SingleEntryMemCompaction(t *testing.T) {
trun(t, func(h *dbHarness) {
for i := 0; i < 10; i++ {
h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer))
@@ -1355,7 +1416,7 @@ func TestDb_SingleEntryMemCompaction(t *testing.T) {
})
}
-func TestDb_ManifestWriteError(t *testing.T) {
+func TestDB_ManifestWriteError(t *testing.T) {
for i := 0; i < 2; i++ {
func() {
h := newDbHarness(t)
@@ -1368,23 +1429,23 @@ func TestDb_ManifestWriteError(t *testing.T) {
h.compactMem()
h.getVal("foo", "bar")
v := h.db.s.version()
- if n := v.tLen(kMaxMemCompactLevel); n != 1 {
+ if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 {
t.Errorf("invalid total tables, want=1 got=%d", n)
}
v.release()
if i == 0 {
- h.stor.SetWriteErr(storage.TypeManifest)
+ h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite)
} else {
- h.stor.SetSyncErr(storage.TypeManifest)
+ h.stor.SetEmuErr(storage.TypeManifest, tsOpSync)
}
// Merging compaction (will fail)
- h.compactRangeAtErr(kMaxMemCompactLevel, "", "", true)
+ h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true)
h.db.Close()
- h.stor.SetWriteErr(0)
- h.stor.SetSyncErr(0)
+ h.stor.SetEmuErr(0, tsOpWrite)
+ h.stor.SetEmuErr(0, tsOpSync)
// Should not lose data
h.openDB()
@@ -1405,7 +1466,7 @@ func assertErr(t *testing.T, err error, wanterr bool) {
}
}
-func TestDb_ClosedIsClosed(t *testing.T) {
+func TestDB_ClosedIsClosed(t *testing.T) {
h := newDbHarness(t)
db := h.db
@@ -1500,7 +1561,7 @@ func (p numberComparer) Compare(a, b []byte) int {
func (numberComparer) Separator(dst, a, b []byte) []byte { return nil }
func (numberComparer) Successor(dst, b []byte) []byte { return nil }
-func TestDb_CustomComparer(t *testing.T) {
+func TestDB_CustomComparer(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
Comparer: numberComparer{},
WriteBuffer: 1000,
@@ -1530,11 +1591,11 @@ func TestDb_CustomComparer(t *testing.T) {
}
}
-func TestDb_ManualCompaction(t *testing.T) {
+func TestDB_ManualCompaction(t *testing.T) {
h := newDbHarness(t)
defer h.close()
- if kMaxMemCompactLevel != 2 {
+ if h.o.GetMaxMemCompationLevel() != 2 {
t.Fatal("fix test to reflect the config")
}
@@ -1568,10 +1629,10 @@ func TestDb_ManualCompaction(t *testing.T) {
h.tablesPerLevel("0,0,1")
}
-func TestDb_BloomFilter(t *testing.T) {
+func TestDB_BloomFilter(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
- BlockCache: opt.NoCache,
- Filter: filter.NewBloomFilter(10),
+ DisableBlockCache: true,
+ Filter: filter.NewBloomFilter(10),
})
defer h.close()
@@ -1579,7 +1640,7 @@ func TestDb_BloomFilter(t *testing.T) {
return fmt.Sprintf("key%06d", i)
}
- n := 10000
+ const n = 10000
// Populate multiple layers
for i := 0; i < n; i++ {
@@ -1621,7 +1682,7 @@ func TestDb_BloomFilter(t *testing.T) {
h.stor.ReleaseSync(storage.TypeTable)
}
-func TestDb_Concurrent(t *testing.T) {
+func TestDB_Concurrent(t *testing.T) {
const n, secs, maxkey = 4, 2, 1000
runtime.GOMAXPROCS(n)
@@ -1686,7 +1747,7 @@ func TestDb_Concurrent(t *testing.T) {
runtime.GOMAXPROCS(1)
}
-func TestDb_Concurrent2(t *testing.T) {
+func TestDB_Concurrent2(t *testing.T) {
const n, n2 = 4, 4000
runtime.GOMAXPROCS(n*2 + 2)
@@ -1757,7 +1818,7 @@ func TestDb_Concurrent2(t *testing.T) {
runtime.GOMAXPROCS(1)
}
-func TestDb_CreateReopenDbOnFile(t *testing.T) {
+func TestDB_CreateReopenDbOnFile(t *testing.T) {
dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid()))
if err := os.RemoveAll(dbpath); err != nil {
t.Fatal("cannot remove old db: ", err)
@@ -1785,7 +1846,7 @@ func TestDb_CreateReopenDbOnFile(t *testing.T) {
}
}
-func TestDb_CreateReopenDbOnFile2(t *testing.T) {
+func TestDB_CreateReopenDbOnFile2(t *testing.T) {
dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid()))
if err := os.RemoveAll(dbpath); err != nil {
t.Fatal("cannot remove old db: ", err)
@@ -1806,7 +1867,7 @@ func TestDb_CreateReopenDbOnFile2(t *testing.T) {
}
}
-func TestDb_DeletionMarkersOnMemdb(t *testing.T) {
+func TestDB_DeletionMarkersOnMemdb(t *testing.T) {
h := newDbHarness(t)
defer h.close()
@@ -1817,8 +1878,8 @@ func TestDb_DeletionMarkersOnMemdb(t *testing.T) {
h.getKeyVal("")
}
-func TestDb_LeveldbIssue178(t *testing.T) {
- nKeys := (kMaxTableSize / 30) * 5
+func TestDB_LeveldbIssue178(t *testing.T) {
+ nKeys := (opt.DefaultCompactionTableSize / 30) * 5
key1 := func(i int) string {
return fmt.Sprintf("my_key_%d", i)
}
@@ -1860,7 +1921,7 @@ func TestDb_LeveldbIssue178(t *testing.T) {
h.assertNumKeys(nKeys)
}
-func TestDb_LeveldbIssue200(t *testing.T) {
+func TestDB_LeveldbIssue200(t *testing.T) {
h := newDbHarness(t)
defer h.close()
@@ -1886,3 +1947,719 @@ func TestDb_LeveldbIssue200(t *testing.T) {
iter.Next()
assertBytes(t, []byte("5"), iter.Key())
}
+
+func TestDB_GoleveldbIssue74(t *testing.T) {
+ h := newDbHarnessWopt(t, &opt.Options{
+ WriteBuffer: 1 * opt.MiB,
+ })
+ defer h.close()
+
+ const n, dur = 10000, 5 * time.Second
+
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ until := time.Now().Add(dur)
+ wg := new(sync.WaitGroup)
+ wg.Add(2)
+ var done uint32
+ go func() {
+ var i int
+ defer func() {
+ t.Logf("WRITER DONE #%d", i)
+ atomic.StoreUint32(&done, 1)
+ wg.Done()
+ }()
+
+ b := new(Batch)
+ for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+ iv := fmt.Sprintf("VAL%010d", i)
+ for k := 0; k < n; k++ {
+ key := fmt.Sprintf("KEY%06d", k)
+ b.Put([]byte(key), []byte(key+iv))
+ b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key))
+ }
+ h.write(b)
+
+ b.Reset()
+ snap := h.getSnapshot()
+ iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
+ var k int
+ for ; iter.Next(); k++ {
+ ptrKey := iter.Key()
+ key := iter.Value()
+
+ if _, err := snap.Get(ptrKey, nil); err != nil {
+ t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err)
+ }
+ if value, err := snap.Get(key, nil); err != nil {
+ t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err)
+ } else if string(value) != string(key)+iv {
+ t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value)
+ }
+
+ b.Delete(key)
+ b.Delete(ptrKey)
+ }
+ h.write(b)
+ iter.Release()
+ snap.Release()
+ if k != n {
+ t.Fatalf("#%d %d != %d", i, k, n)
+ }
+ }
+ }()
+ go func() {
+ var i int
+ defer func() {
+ t.Logf("READER DONE #%d", i)
+ atomic.StoreUint32(&done, 1)
+ wg.Done()
+ }()
+ for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+ snap := h.getSnapshot()
+ iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
+ var prevValue string
+ var k int
+ for ; iter.Next(); k++ {
+ ptrKey := iter.Key()
+ key := iter.Value()
+
+ if _, err := snap.Get(ptrKey, nil); err != nil {
+ t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err)
+ }
+
+ if value, err := snap.Get(key, nil); err != nil {
+ t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err)
+ } else if prevValue != "" && string(value) != string(key)+prevValue {
+ t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value)
+ } else {
+ prevValue = string(value[len(key):])
+ }
+ }
+ iter.Release()
+ snap.Release()
+ if k > 0 && k != n {
+ t.Fatalf("#%d %d != %d", i, k, n)
+ }
+ }
+ }()
+ wg.Wait()
+}
+
+func TestDB_GetProperties(t *testing.T) {
+ h := newDbHarness(t)
+ defer h.close()
+
+ _, err := h.db.GetProperty("leveldb.num-files-at-level")
+ if err == nil {
+ t.Error("GetProperty() failed to detect missing level")
+ }
+
+ _, err = h.db.GetProperty("leveldb.num-files-at-level0")
+ if err != nil {
+ t.Error("got unexpected error", err)
+ }
+
+ _, err = h.db.GetProperty("leveldb.num-files-at-level0x")
+ if err == nil {
+ t.Error("GetProperty() failed to detect invalid level")
+ }
+}
+
+func TestDB_GoleveldbIssue72and83(t *testing.T) {
+ h := newDbHarnessWopt(t, &opt.Options{
+ WriteBuffer: 1 * opt.MiB,
+ OpenFilesCacheCapacity: 3,
+ })
+ defer h.close()
+
+ const n, wn, dur = 10000, 100, 30 * time.Second
+
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ randomData := func(prefix byte, i int) []byte {
+ data := make([]byte, 1+4+32+64+32)
+ _, err := crand.Reader.Read(data[1 : len(data)-8])
+ if err != nil {
+ panic(err)
+ }
+ data[0] = prefix
+ binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i))
+ binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value())
+ return data
+ }
+
+ keys := make([][]byte, n)
+ for i := range keys {
+ keys[i] = randomData(1, 0)
+ }
+
+ until := time.Now().Add(dur)
+ wg := new(sync.WaitGroup)
+ wg.Add(3)
+ var done uint32
+ go func() {
+ i := 0
+ defer func() {
+ t.Logf("WRITER DONE #%d", i)
+ wg.Done()
+ }()
+
+ b := new(Batch)
+ for ; i < wn && atomic.LoadUint32(&done) == 0; i++ {
+ b.Reset()
+ for _, k1 := range keys {
+ k2 := randomData(2, i)
+ b.Put(k2, randomData(42, i))
+ b.Put(k1, k2)
+ }
+ if err := h.db.Write(b, h.wo); err != nil {
+ atomic.StoreUint32(&done, 1)
+ t.Fatalf("WRITER #%d db.Write: %v", i, err)
+ }
+ }
+ }()
+ go func() {
+ var i int
+ defer func() {
+ t.Logf("READER0 DONE #%d", i)
+ atomic.StoreUint32(&done, 1)
+ wg.Done()
+ }()
+ for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+ snap := h.getSnapshot()
+ seq := snap.elem.seq
+ if seq == 0 {
+ snap.Release()
+ continue
+ }
+ iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
+ writei := int(seq/(n*2) - 1)
+ var k int
+ for ; iter.Next(); k++ {
+ k1 := iter.Key()
+ k2 := iter.Value()
+ k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:])
+ k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value()
+ if k1checksum0 != k1checksum1 {
+ t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, k1checksum0, k1checksum0)
+ }
+ k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:])
+ k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value()
+ if k2checksum0 != k2checksum1 {
+ t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, k2checksum0, k2checksum1)
+ }
+ kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:]))
+ if writei != kwritei {
+ t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei)
+ }
+ if _, err := snap.Get(k2, nil); err != nil {
+ t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2)
+ }
+ }
+ if err := iter.Error(); err != nil {
+ t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err)
+ }
+ iter.Release()
+ snap.Release()
+ if k > 0 && k != n {
+ t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n)
+ }
+ }
+ }()
+ go func() {
+ var i int
+ defer func() {
+ t.Logf("READER1 DONE #%d", i)
+ atomic.StoreUint32(&done, 1)
+ wg.Done()
+ }()
+ for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+ iter := h.db.NewIterator(nil, nil)
+ seq := iter.(*dbIter).seq
+ if seq == 0 {
+ iter.Release()
+ continue
+ }
+ writei := int(seq/(n*2) - 1)
+ var k int
+ for ok := iter.Last(); ok; ok = iter.Prev() {
+ k++
+ }
+ if err := iter.Error(); err != nil {
+ t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err)
+ }
+ iter.Release()
+ if m := (writei+1)*n + n; k != m {
+ t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m)
+ }
+ }
+ }()
+
+ wg.Wait()
+}
+
+func TestDB_TransientError(t *testing.T) {
+ h := newDbHarnessWopt(t, &opt.Options{
+ WriteBuffer: 128 * opt.KiB,
+ OpenFilesCacheCapacity: 3,
+ DisableCompactionBackoff: true,
+ })
+ defer h.close()
+
+ const (
+ nSnap = 20
+ nKey = 10000
+ )
+
+ var (
+ snaps [nSnap]*Snapshot
+ b = &Batch{}
+ )
+ for i := range snaps {
+ vtail := fmt.Sprintf("VAL%030d", i)
+ b.Reset()
+ for k := 0; k < nKey; k++ {
+ key := fmt.Sprintf("KEY%8d", k)
+ b.Put([]byte(key), []byte(key+vtail))
+ }
+ h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt)
+ if err := h.db.Write(b, nil); err != nil {
+ t.Logf("WRITE #%d error: %v", i, err)
+ h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt, tsOpWrite)
+ for {
+ if err := h.db.Write(b, nil); err == nil {
+ break
+ } else if errors.IsCorrupted(err) {
+ t.Fatalf("WRITE #%d corrupted: %v", i, err)
+ }
+ }
+ }
+
+ snaps[i] = h.db.newSnapshot()
+ b.Reset()
+ for k := 0; k < nKey; k++ {
+ key := fmt.Sprintf("KEY%8d", k)
+ b.Delete([]byte(key))
+ }
+ h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt)
+ if err := h.db.Write(b, nil); err != nil {
+ t.Logf("WRITE #%d error: %v", i, err)
+ h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt)
+ for {
+ if err := h.db.Write(b, nil); err == nil {
+ break
+ } else if errors.IsCorrupted(err) {
+ t.Fatalf("WRITE #%d corrupted: %v", i, err)
+ }
+ }
+ }
+ }
+ h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt)
+
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ rnd := rand.New(rand.NewSource(0xecafdaed))
+ wg := &sync.WaitGroup{}
+ for i, snap := range snaps {
+ wg.Add(2)
+
+ go func(i int, snap *Snapshot, sk []int) {
+ defer wg.Done()
+
+ vtail := fmt.Sprintf("VAL%030d", i)
+ for _, k := range sk {
+ key := fmt.Sprintf("KEY%8d", k)
+ xvalue, err := snap.Get([]byte(key), nil)
+ if err != nil {
+ t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err)
+ }
+ value := key + vtail
+ if !bytes.Equal([]byte(value), xvalue) {
+ t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue)
+ }
+ }
+ }(i, snap, rnd.Perm(nKey))
+
+ go func(i int, snap *Snapshot) {
+ defer wg.Done()
+
+ vtail := fmt.Sprintf("VAL%030d", i)
+ iter := snap.NewIterator(nil, nil)
+ defer iter.Release()
+ for k := 0; k < nKey; k++ {
+ if !iter.Next() {
+ if err := iter.Error(); err != nil {
+ t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err)
+ } else {
+ t.Fatalf("READER_ITER #%d K%d eoi", i, k)
+ }
+ }
+ key := fmt.Sprintf("KEY%8d", k)
+ xkey := iter.Key()
+ if !bytes.Equal([]byte(key), xkey) {
+ t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey)
+ }
+ value := key + vtail
+ xvalue := iter.Value()
+ if !bytes.Equal([]byte(value), xvalue) {
+ t.Fatalf("READER_ITER #%d K%d invalid value: want %q, got %q", i, k, value, xvalue)
+ }
+ }
+ }(i, snap)
+ }
+
+ wg.Wait()
+}
+
+func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) {
+ h := newDbHarnessWopt(t, &opt.Options{
+ WriteBuffer: 112 * opt.KiB,
+ CompactionTableSize: 90 * opt.KiB,
+ CompactionExpandLimitFactor: 1,
+ })
+ defer h.close()
+
+ const (
+ nSnap = 190
+ nKey = 140
+ )
+
+ var (
+ snaps [nSnap]*Snapshot
+ b = &Batch{}
+ )
+ for i := range snaps {
+ vtail := fmt.Sprintf("VAL%030d", i)
+ b.Reset()
+ for k := 0; k < nKey; k++ {
+ key := fmt.Sprintf("KEY%08d", k)
+ b.Put([]byte(key), []byte(key+vtail))
+ }
+ if err := h.db.Write(b, nil); err != nil {
+ t.Fatalf("WRITE #%d error: %v", i, err)
+ }
+
+ snaps[i] = h.db.newSnapshot()
+ b.Reset()
+ for k := 0; k < nKey; k++ {
+ key := fmt.Sprintf("KEY%08d", k)
+ b.Delete([]byte(key))
+ }
+ if err := h.db.Write(b, nil); err != nil {
+ t.Fatalf("WRITE #%d error: %v", i, err)
+ }
+ }
+
+ h.compactMem()
+
+ h.waitCompaction()
+ for level, tables := range h.db.s.stVersion.tables {
+ for _, table := range tables {
+ t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
+ }
+ }
+
+ h.compactRangeAt(0, "", "")
+ h.waitCompaction()
+ for level, tables := range h.db.s.stVersion.tables {
+ for _, table := range tables {
+ t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
+ }
+ }
+ h.compactRangeAt(1, "", "")
+ h.waitCompaction()
+ for level, tables := range h.db.s.stVersion.tables {
+ for _, table := range tables {
+ t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
+ }
+ }
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ wg := &sync.WaitGroup{}
+ for i, snap := range snaps {
+ wg.Add(1)
+
+ go func(i int, snap *Snapshot) {
+ defer wg.Done()
+
+ vtail := fmt.Sprintf("VAL%030d", i)
+ for k := 0; k < nKey; k++ {
+ key := fmt.Sprintf("KEY%08d", k)
+ xvalue, err := snap.Get([]byte(key), nil)
+ if err != nil {
+ t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err)
+ }
+ value := key + vtail
+ if !bytes.Equal([]byte(value), xvalue) {
+ t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue)
+ }
+ }
+ }(i, snap)
+ }
+
+ wg.Wait()
+}
+
+func TestDB_TableCompactionBuilder(t *testing.T) {
+ stor := newTestStorage(t)
+ defer stor.Close()
+
+ const nSeq = 99
+
+ o := &opt.Options{
+ WriteBuffer: 112 * opt.KiB,
+ CompactionTableSize: 43 * opt.KiB,
+ CompactionExpandLimitFactor: 1,
+ CompactionGPOverlapsFactor: 1,
+ DisableBlockCache: true,
+ }
+ s, err := newSession(stor, o)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := s.create(); err != nil {
+ t.Fatal(err)
+ }
+ defer s.close()
+ var (
+ seq uint64
+ targetSize = 5 * o.CompactionTableSize
+ value = bytes.Repeat([]byte{'0'}, 100)
+ )
+ for i := 0; i < 2; i++ {
+ tw, err := s.tops.create()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for k := 0; tw.tw.BytesLen() < targetSize; k++ {
+ key := []byte(fmt.Sprintf("%09d", k))
+ seq += nSeq - 1
+ for x := uint64(0); x < nSeq; x++ {
+ if err := tw.append(newIkey(key, seq-x, ktVal), value); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+ tf, err := tw.finish()
+ if err != nil {
+ t.Fatal(err)
+ }
+ rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
+ rec.addTableFile(i, tf)
+ if err := s.commit(rec); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Build grandparent.
+ v := s.version()
+ c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
+ rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
+ b := &tableCompactionBuilder{
+ s: s,
+ c: c,
+ rec: rec,
+ stat1: new(cStatsStaging),
+ minSeq: 0,
+ strict: true,
+ tableSize: o.CompactionTableSize/3 + 961,
+ }
+ if err := b.run(new(compactionTransactCounter)); err != nil {
+ t.Fatal(err)
+ }
+ for _, t := range c.tables[0] {
+ rec.delTable(c.level, t.file.Num())
+ }
+ if err := s.commit(rec); err != nil {
+ t.Fatal(err)
+ }
+ c.release()
+
+ // Build level-1.
+ v = s.version()
+ c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...))
+ rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
+ b = &tableCompactionBuilder{
+ s: s,
+ c: c,
+ rec: rec,
+ stat1: new(cStatsStaging),
+ minSeq: 0,
+ strict: true,
+ tableSize: o.CompactionTableSize,
+ }
+ if err := b.run(new(compactionTransactCounter)); err != nil {
+ t.Fatal(err)
+ }
+ for _, t := range c.tables[0] {
+ rec.delTable(c.level, t.file.Num())
+ }
+ // Move grandparent to level-3
+ for _, t := range v.tables[2] {
+ rec.delTable(2, t.file.Num())
+ rec.addTableFile(3, t)
+ }
+ if err := s.commit(rec); err != nil {
+ t.Fatal(err)
+ }
+ c.release()
+
+ v = s.version()
+ for level, want := range []bool{false, true, false, true, false} {
+ got := len(v.tables[level]) > 0
+ if want != got {
+ t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got)
+ }
+ }
+ for i, f := range v.tables[1][:len(v.tables[1])-1] {
+ nf := v.tables[1][i+1]
+ if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) {
+ t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.file.Num(), nf.file.Num())
+ }
+ }
+ v.release()
+
+ // Compaction with transient error.
+ v = s.version()
+ c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
+ rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
+ b = &tableCompactionBuilder{
+ s: s,
+ c: c,
+ rec: rec,
+ stat1: new(cStatsStaging),
+ minSeq: 0,
+ strict: true,
+ tableSize: o.CompactionTableSize,
+ }
+ stor.SetEmuErrOnce(storage.TypeTable, tsOpSync)
+ stor.SetEmuRandErr(storage.TypeTable, tsOpRead, tsOpReadAt, tsOpWrite)
+ stor.SetEmuRandErrProb(0xf0)
+ for {
+ if err := b.run(new(compactionTransactCounter)); err != nil {
+ t.Logf("(expected) b.run: %v", err)
+ } else {
+ break
+ }
+ }
+ if err := s.commit(rec); err != nil {
+ t.Fatal(err)
+ }
+ c.release()
+
+ stor.SetEmuErrOnce(0, tsOpSync)
+ stor.SetEmuRandErr(0, tsOpRead, tsOpReadAt, tsOpWrite)
+
+ v = s.version()
+ if len(v.tables[1]) != len(v.tables[2]) {
+ t.Fatalf("invalid tables length, want %d, got %d", len(v.tables[1]), len(v.tables[2]))
+ }
+ for i, f0 := range v.tables[1] {
+ f1 := v.tables[2][i]
+ iter0 := s.tops.newIterator(f0, nil, nil)
+ iter1 := s.tops.newIterator(f1, nil, nil)
+ for j := 0; true; j++ {
+ next0 := iter0.Next()
+ next1 := iter1.Next()
+ if next0 != next1 {
+ t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1)
+ }
+ key0 := iter0.Key()
+ key1 := iter1.Key()
+ if !bytes.Equal(key0, key1) {
+ t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1)
+ }
+ if next0 == false {
+ break
+ }
+ }
+ iter0.Release()
+ iter1.Release()
+ }
+ v.release()
+}
+
+func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
+ const (
+ vSize = 200 * opt.KiB
+ tSize = 100 * opt.MiB
+ mIter = 100
+ n = tSize / vSize
+ )
+
+ h := newDbHarnessWopt(t, &opt.Options{
+ Compression: opt.NoCompression,
+ DisableBlockCache: true,
+ })
+ defer h.close()
+
+ key := func(x int) string {
+ return fmt.Sprintf("v%06d", x)
+ }
+
+ // Fill.
+ value := strings.Repeat("x", vSize)
+ for i := 0; i < n; i++ {
+ h.put(key(i), value)
+ }
+ h.compactMem()
+
+ // Delete all.
+ for i := 0; i < n; i++ {
+ h.delete(key(i))
+ }
+ h.compactMem()
+
+ var (
+ limit = n / limitDiv
+
+ startKey = key(0)
+ limitKey = key(limit)
+ maxKey = key(n)
+ slice = &util.Range{Limit: []byte(limitKey)}
+
+ initialSize0 = h.sizeOf(startKey, limitKey)
+ initialSize1 = h.sizeOf(limitKey, maxKey)
+ )
+
+ t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
+
+ for r := 0; true; r++ {
+ if r >= mIter {
+ t.Fatal("taking too long to compact")
+ }
+
+ // Iterates.
+ iter := h.db.NewIterator(slice, h.ro)
+ for iter.Next() {
+ }
+ if err := iter.Error(); err != nil {
+ t.Fatalf("Iter err: %v", err)
+ }
+ iter.Release()
+
+ // Wait compaction.
+ h.waitCompaction()
+
+ // Check size.
+ size0 := h.sizeOf(startKey, limitKey)
+ size1 := h.sizeOf(limitKey, maxKey)
+ t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
+ if size0 < initialSize0/10 {
+ break
+ }
+ }
+
+ if initialSize1 > 0 {
+ h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
+ }
+}
+
+func TestDB_IterTriggeredCompaction(t *testing.T) {
+ testDB_IterTriggeredCompaction(t, 1)
+}
+
+func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
+ testDB_IterTriggeredCompaction(t, 2)
+}
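
Many of the tests above rely on the public snapshot API (GetSnapshot, Snapshot.Get, Release). A short sketch of that flow, assuming an in-memory store and illustrative keys, could read:

    package main

    import (
        "fmt"
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/storage"
    )

    func main() {
        db, err := leveldb.Open(storage.NewMemStorage(), nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if err := db.Put([]byte("foo"), []byte("v1"), nil); err != nil {
            log.Fatal(err)
        }

        // A snapshot pins the current sequence number; later writes stay invisible to it.
        snap, err := db.GetSnapshot()
        if err != nil {
            log.Fatal(err)
        }
        defer snap.Release()

        if err := db.Put([]byte("foo"), []byte("v2"), nil); err != nil {
            log.Fatal(err)
        }

        old, _ := snap.Get([]byte("foo"), nil) // sees "v1"
        cur, _ := db.Get([]byte("foo"), nil)   // sees "v2"
        fmt.Printf("snapshot=%s current=%s\n", old, cur)
    }
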
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
index 2db007e27..a8a2bdf72 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -7,8 +7,7 @@
package leveldb
import (
- "errors"
-
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
@@ -32,40 +31,44 @@ func (p Sizes) Sum() (n uint64) {
return n
}
+// Logging.
+func (db *DB) log(v ...interface{}) { db.s.log(v...) }
+func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
+
// Check and clean files.
-func (d *DB) checkAndCleanFiles() error {
- s := d.s
+func (db *DB) checkAndCleanFiles() error {
+ v := db.s.version()
+ defer v.release()
- v := s.version_NB()
- tables := make(map[uint64]bool)
- for _, tt := range v.tables {
- for _, t := range tt {
- tables[t.file.Num()] = false
+ tablesMap := make(map[uint64]bool)
+ for _, tables := range v.tables {
+ for _, t := range tables {
+ tablesMap[t.file.Num()] = false
}
}
- ff, err := s.getFiles(storage.TypeAll)
+ files, err := db.s.getFiles(storage.TypeAll)
if err != nil {
return err
}
var nTables int
var rem []storage.File
- for _, f := range ff {
+ for _, f := range files {
keep := true
switch f.Type() {
case storage.TypeManifest:
- keep = f.Num() >= s.manifestFile.Num()
+ keep = f.Num() >= db.s.manifestFile.Num()
case storage.TypeJournal:
- if d.frozenJournalFile != nil {
- keep = f.Num() >= d.frozenJournalFile.Num()
+ if db.frozenJournalFile != nil {
+ keep = f.Num() >= db.frozenJournalFile.Num()
} else {
- keep = f.Num() >= d.journalFile.Num()
+ keep = f.Num() >= db.journalFile.Num()
}
case storage.TypeTable:
- _, keep = tables[f.Num()]
+ _, keep = tablesMap[f.Num()]
if keep {
- tables[f.Num()] = true
+ tablesMap[f.Num()] = true
nTables++
}
}
@@ -75,18 +78,20 @@ func (d *DB) checkAndCleanFiles() error {
}
}
- if nTables != len(tables) {
- for num, present := range tables {
+ if nTables != len(tablesMap) {
+ var missing []*storage.FileInfo
+ for num, present := range tablesMap {
if !present {
- s.logf("db@janitor table missing @%d", num)
+ missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num})
+ db.logf("db@janitor table missing @%d", num)
}
}
- return ErrCorrupted{Type: MissingFiles, Err: errors.New("leveldb: table files missing")}
+ return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing})
}
- s.logf("db@janitor F·%d G·%d", len(ff), len(rem))
+ db.logf("db@janitor F·%d G·%d", len(files), len(rem))
for _, f := range rem {
- s.logf("db@janitor removing %s-%d", f.Type(), f.Num())
+ db.logf("db@janitor removing %s-%d", f.Type(), f.Num())
if err := f.Remove(); err != nil {
return err
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
index 4660e840c..e1cf30c53 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
@@ -14,84 +14,93 @@ import (
"github.com/syndtr/goleveldb/leveldb/util"
)
-func (d *DB) writeJournal(b *Batch) error {
- w, err := d.journal.Next()
+func (db *DB) writeJournal(b *Batch) error {
+ w, err := db.journal.Next()
if err != nil {
return err
}
if _, err := w.Write(b.encode()); err != nil {
return err
}
- if err := d.journal.Flush(); err != nil {
+ if err := db.journal.Flush(); err != nil {
return err
}
if b.sync {
- return d.journalWriter.Sync()
+ return db.journalWriter.Sync()
}
return nil
}
-func (d *DB) jWriter() {
- defer d.closeW.Done()
+func (db *DB) jWriter() {
+ defer db.closeW.Done()
for {
select {
- case b := <-d.journalC:
+ case b := <-db.journalC:
if b != nil {
- d.journalAckC <- d.writeJournal(b)
+ db.journalAckC <- db.writeJournal(b)
}
- case _, _ = <-d.closeC:
+ case _, _ = <-db.closeC:
return
}
}
}
-func (d *DB) rotateMem(n int) (mem *memdb.DB, err error) {
+func (db *DB) rotateMem(n int) (mem *memDB, err error) {
// Wait for pending memdb compaction.
- err = d.compSendIdle(d.mcompCmdC)
+ err = db.compSendIdle(db.mcompCmdC)
if err != nil {
return
}
// Create new memdb and journal.
- mem, err = d.newMem(n)
+ mem, err = db.newMem(n)
if err != nil {
return
}
// Schedule memdb compaction.
- d.compTrigger(d.mcompTriggerC)
+ db.compSendTrigger(db.mcompCmdC)
return
}
-func (d *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
- s := d.s
-
+func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
delayed := false
- flush := func() bool {
- v := s.version()
+ flush := func() (retry bool) {
+ v := db.s.version()
defer v.release()
- mem = d.getEffectiveMem()
- nn = mem.Free()
+ mem = db.getEffectiveMem()
+ defer func() {
+ if retry {
+ mem.decref()
+ mem = nil
+ }
+ }()
+ nn = mem.mdb.Free()
switch {
- case v.tLen(0) >= kL0_SlowdownWritesTrigger && !delayed:
+ case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
delayed = true
time.Sleep(time.Millisecond)
case nn >= n:
return false
- case v.tLen(0) >= kL0_StopWritesTrigger:
+ case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
delayed = true
- err = d.compSendIdle(d.tcompCmdC)
+ err = db.compSendIdle(db.tcompCmdC)
if err != nil {
return false
}
default:
// Allow memdb to grow if it has no entry.
- if mem.Len() == 0 {
+ if mem.mdb.Len() == 0 {
nn = n
- return false
+ } else {
+ mem.decref()
+ mem, err = db.rotateMem(n)
+ if err == nil {
+ nn = mem.mdb.Free()
+ } else {
+ nn = 0
+ }
}
- mem, err = d.rotateMem(n)
- nn = mem.Free()
return false
}
return true
@@ -100,7 +109,12 @@ func (d *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
for flush() {
}
if delayed {
- s.logf("db@write delayed T·%v", time.Since(start))
+ db.writeDelay += time.Since(start)
+ db.writeDelayN++
+ } else if db.writeDelayN > 0 {
+ db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
+ db.writeDelay = 0
+ db.writeDelayN = 0
}
return
}
@@ -109,39 +123,45 @@ func (d *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
// sequentially.
//
// It is safe to modify the contents of the arguments after Write returns.
-func (d *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
- err = d.ok()
- if err != nil || b == nil || b.len() == 0 {
+func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
+ err = db.ok()
+ if err != nil || b == nil || b.Len() == 0 {
return
}
b.init(wo.GetSync())
// The write happen synchronously.
-retry:
select {
- case d.writeC <- b:
- if <-d.writeMergedC {
- return <-d.writeAckC
+ case db.writeC <- b:
+ if <-db.writeMergedC {
+ return <-db.writeAckC
}
- goto retry
- case d.writeLockC <- struct{}{}:
- case _, _ = <-d.closeC:
+ case db.writeLockC <- struct{}{}:
+ case err = <-db.compPerErrC:
+ return
+ case _, _ = <-db.closeC:
return ErrClosed
}
merged := 0
+ danglingMerge := false
defer func() {
- <-d.writeLockC
+ if danglingMerge {
+ db.writeMergedC <- false
+ } else {
+ <-db.writeLockC
+ }
for i := 0; i < merged; i++ {
- d.writeAckC <- err
+ db.writeAckC <- err
}
}()
- mem, memFree, err := d.flush(b.size())
+ mem, memFree, err := db.flush(b.size())
if err != nil {
return
}
+ defer mem.decref()
// Calculate maximum size of the batch.
m := 1 << 20
@@ -154,13 +174,13 @@ retry:
drain:
for b.size() < m && !b.sync {
select {
- case nb := <-d.writeC:
+ case nb := <-db.writeC:
if b.size()+nb.size() <= m {
b.append(nb)
- d.writeMergedC <- true
+ db.writeMergedC <- true
merged++
} else {
- d.writeMergedC <- false
+ danglingMerge = true
break drain
}
default:
@@ -169,44 +189,52 @@ drain:
}
// Set batch first seq number relative from last seq.
- b.seq = d.seq + 1
+ b.seq = db.seq + 1
// Write journal concurrently if it is large enough.
if b.size() >= (128 << 10) {
// Push the write batch to the journal writer
select {
- case _, _ = <-d.closeC:
+ case db.journalC <- b:
+ // Write into memdb
+ if berr := b.memReplay(mem.mdb); berr != nil {
+ panic(berr)
+ }
+ case err = <-db.compPerErrC:
+ return
+ case _, _ = <-db.closeC:
err = ErrClosed
return
- case d.journalC <- b:
- // Write into memdb
- b.memReplay(mem)
}
// Wait for journal writer
select {
- case _, _ = <-d.closeC:
- err = ErrClosed
- return
- case err = <-d.journalAckC:
+ case err = <-db.journalAckC:
if err != nil {
// Revert memdb if error detected
- b.revertMemReplay(mem)
+ if berr := b.revertMemReplay(mem.mdb); berr != nil {
+ panic(berr)
+ }
return
}
+ case _, _ = <-db.closeC:
+ err = ErrClosed
+ return
}
} else {
- err = d.writeJournal(b)
+ err = db.writeJournal(b)
if err != nil {
return
}
- b.memReplay(mem)
+ if berr := b.memReplay(mem.mdb); berr != nil {
+ panic(berr)
+ }
}
// Set last seq number.
- d.addSeq(uint64(b.len()))
+ db.addSeq(uint64(b.Len()))
if b.size() >= memFree {
- d.rotateMem(0)
+ db.rotateMem(0)
}
return
}
@@ -215,20 +243,20 @@ drain:
// for that key; a DB is not a multi-map.
//
// It is safe to modify the contents of the arguments after Put returns.
-func (d *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
+func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
b := new(Batch)
b.Put(key, value)
- return d.Write(b, wo)
+ return db.Write(b, wo)
}
// Delete deletes the value for the given key. It returns ErrNotFound if
// the DB does not contain the key.
//
// It is safe to modify the contents of the arguments after Delete returns.
-func (d *DB) Delete(key []byte, wo *opt.WriteOptions) error {
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
b := new(Batch)
b.Delete(key)
- return d.Write(b, wo)
+ return db.Write(b, wo)
}
func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
@@ -247,33 +275,37 @@ func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
// A nil Range.Start is treated as a key before all keys in the DB.
// And a nil Range.Limit is treated as a key after all keys in the DB.
// Therefore if both are nil then it will compact the entire DB.
-func (d *DB) CompactRange(r util.Range) error {
- if err := d.ok(); err != nil {
+func (db *DB) CompactRange(r util.Range) error {
+ if err := db.ok(); err != nil {
return err
}
+ // Lock writer.
select {
- case d.writeLockC <- struct{}{}:
- case _, _ = <-d.closeC:
+ case db.writeLockC <- struct{}{}:
+ case err := <-db.compPerErrC:
+ return err
+ case _, _ = <-db.closeC:
return ErrClosed
}
// Check for overlaps in memdb.
- mem := d.getEffectiveMem()
- if isMemOverlaps(d.s.icmp, mem, r.Start, r.Limit) {
+ mem := db.getEffectiveMem()
+ defer mem.decref()
+ if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
// Memdb compaction.
- if _, err := d.rotateMem(0); err != nil {
- <-d.writeLockC
+ if _, err := db.rotateMem(0); err != nil {
+ <-db.writeLockC
return err
}
- <-d.writeLockC
- if err := d.compSendIdle(d.mcompCmdC); err != nil {
+ <-db.writeLockC
+ if err := db.compSendIdle(db.mcompCmdC); err != nil {
return err
}
} else {
- <-d.writeLockC
+ <-db.writeLockC
}
// Table compaction.
- return d.compSendRange(d.tcompCmdC, -1, r.Start, r.Limit)
+ return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
}
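
The rewritten Write path above merges concurrent batches behind a single writer lock. A sketch of the public entry points it serves (Batch, Write, CompactRange), using an in-memory store and illustrative keys rather than anything from the patch, might be:

    package main

    import (
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/opt"
        "github.com/syndtr/goleveldb/leveldb/storage"
        "github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        db, err := leveldb.Open(storage.NewMemStorage(), nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // Batch several mutations and commit them atomically with a synced write.
        b := new(leveldb.Batch)
        b.Put([]byte("k1"), []byte("v1"))
        b.Put([]byte("k2"), []byte("v2"))
        b.Delete([]byte("k1"))
        if err := db.Write(b, &opt.WriteOptions{Sync: true}); err != nil {
            log.Fatal(err)
        }

        // A zero-value Range (nil Start and Limit) asks for compaction of the whole key space.
        if err := db.CompactRange(util.Range{}); err != nil {
            log.Fatal(err)
        }
    }
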
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
index ac9ea3d0c..53f13bb24 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
@@ -37,6 +37,16 @@
// err = iter.Error()
// ...
//
+// Iterate over subset of database content with a particular prefix:
+// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+// for iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
// Seek-then-Iterate:
//
// iter := db.NewIterator(nil, nil)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
index 8066bd9a4..29d0d2f27 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
@@ -7,32 +7,12 @@
package leveldb
import (
- "errors"
-
- "github.com/syndtr/goleveldb/leveldb/util"
+ "github.com/syndtr/goleveldb/leveldb/errors"
)
var (
- ErrNotFound = util.ErrNotFound
+ ErrNotFound = errors.ErrNotFound
ErrSnapshotReleased = errors.New("leveldb: snapshot released")
ErrIterReleased = errors.New("leveldb: iterator released")
ErrClosed = errors.New("leveldb: closed")
)
-
-type CorruptionType int
-
-const (
- CorruptedManifest CorruptionType = iota
- MissingFiles
-)
-
-// ErrCorrupted is the type that wraps errors that indicate corruption in
-// the database.
-type ErrCorrupted struct {
- Type CorruptionType
- Err error
-}
-
-func (e ErrCorrupted) Error() string {
- return e.Err.Error()
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go
new file mode 100644
index 000000000..84b5d6b7b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package errors provides common error types used throughout leveldb.
+package errors
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ ErrNotFound = New("leveldb: not found")
+ ErrReleased = util.ErrReleased
+ ErrHasReleaser = util.ErrHasReleaser
+)
+
+// New returns an error that formats as the given text.
+func New(text string) error {
+ return errors.New(text)
+}
+
+// ErrCorrupted is the type that wraps errors that indicate corruption in
+// the database.
+type ErrCorrupted struct {
+ File *storage.FileInfo
+ Err error
+}
+
+func (e *ErrCorrupted) Error() string {
+ if e.File != nil {
+ return fmt.Sprintf("%v [file=%v]", e.Err, e.File)
+ } else {
+ return e.Err.Error()
+ }
+}
+
+// NewErrCorrupted creates new ErrCorrupted error.
+func NewErrCorrupted(f storage.File, err error) error {
+ return &ErrCorrupted{storage.NewFileInfo(f), err}
+}
+
+// IsCorrupted returns a boolean indicating whether the error signals
+// a corruption in the database.
+func IsCorrupted(err error) bool {
+ switch err.(type) {
+ case *ErrCorrupted:
+ return true
+ }
+ return false
+}
+
+// ErrMissingFiles is the type that indicates a corruption due to missing
+// files.
+type ErrMissingFiles struct {
+ Files []*storage.FileInfo
+}
+
+func (e *ErrMissingFiles) Error() string { return "file missing" }
+
+// SetFile sets the 'file info' of the given error to the given file.
+// Currently only ErrCorrupted is supported; otherwise it does nothing.
+func SetFile(err error, f storage.File) error {
+ switch x := err.(type) {
+ case *ErrCorrupted:
+ x.File = storage.NewFileInfo(f)
+ return x
+ }
+ return err
+}
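
The new errors package lets callers distinguish corruption from other failures. A sketch of how a caller might branch on it, assuming the package path introduced above and a hypothetical missing key, could be:

    package main

    import (
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/errors"
        "github.com/syndtr/goleveldb/leveldb/storage"
    )

    func main() {
        db, err := leveldb.Open(storage.NewMemStorage(), nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if _, err := db.Get([]byte("missing"), nil); err != nil {
            switch {
            case err == leveldb.ErrNotFound:
                log.Print("key not present") // expected for an empty database
            case errors.IsCorrupted(err):
                log.Fatal("database corrupted: ", err) // not recoverable by retrying
            default:
                log.Print("transient error: ", err) // may be retried
            }
        }
    }
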
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
index d7dff04b6..b328ece4e 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
@@ -17,13 +17,14 @@ import (
var _ = testutil.Defer(func() {
Describe("Leveldb external", func() {
o := &opt.Options{
- BlockCache: opt.NoCache,
- BlockRestartInterval: 5,
- BlockSize: 50,
- Compression: opt.NoCompression,
- MaxOpenFiles: 0,
- Strict: opt.StrictAll,
- WriteBuffer: 1000,
+ DisableBlockCache: true,
+ BlockRestartInterval: 5,
+ BlockSize: 80,
+ Compression: opt.NoCompression,
+ OpenFilesCacheCapacity: -1,
+ Strict: opt.StrictAll,
+ WriteBuffer: 1000,
+ CompactionTableSize: 2000,
}
Describe("write test", func() {
@@ -36,22 +37,21 @@ var _ = testutil.Defer(func() {
testutil.DoDBTesting(&t)
db.TestClose()
done <- true
- }, 9.0)
+ }, 20.0)
})
Describe("read test", func() {
- testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB {
+ testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB {
// Building the DB.
db := newTestingDB(o, nil, nil)
kv.IterateShuffled(nil, func(i int, key, value []byte) {
err := db.TestPut(key, value)
Expect(err).NotTo(HaveOccurred())
})
- testutil.Defer("teardown", func() {
- db.TestClose()
- })
return db
+ }, func(db testutil.DB) {
+ db.(*testingDB).TestClose()
})
})
})
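
The external test above switches to the renamed option fields. A sketch of opening an on-disk database with the same style of configuration (path and tuning values are illustrative, not taken from the patch) might look like:

    package main

    import (
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/filter"
        "github.com/syndtr/goleveldb/leveldb/opt"
    )

    func main() {
        o := &opt.Options{
            DisableBlockCache:      true,        // replaces BlockCache: opt.NoCache
            OpenFilesCacheCapacity: 500,         // replaces MaxOpenFiles
            CompactionTableSize:    2 * opt.MiB, // new tunable in this update
            WriteBuffer:            4 * opt.MiB,
            Compression:            opt.NoCompression,
            Filter:                 filter.NewBloomFilter(10),
        }

        db, err := leveldb.OpenFile("/tmp/example-db", o) // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
    }
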
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
index 9b4b72741..a23ab05f7 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
@@ -40,13 +40,19 @@ type basicArrayIterator struct {
util.BasicReleaser
array BasicArray
pos int
+ err error
}
func (i *basicArrayIterator) Valid() bool {
- return i.pos >= 0 && i.pos < i.array.Len()
+ return i.pos >= 0 && i.pos < i.array.Len() && !i.Released()
}
func (i *basicArrayIterator) First() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
if i.array.Len() == 0 {
i.pos = -1
return false
@@ -56,6 +62,11 @@ func (i *basicArrayIterator) First() bool {
}
func (i *basicArrayIterator) Last() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
n := i.array.Len()
if n == 0 {
i.pos = 0
@@ -66,6 +77,11 @@ func (i *basicArrayIterator) Last() bool {
}
func (i *basicArrayIterator) Seek(key []byte) bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
n := i.array.Len()
if n == 0 {
i.pos = 0
@@ -79,6 +95,11 @@ func (i *basicArrayIterator) Seek(key []byte) bool {
}
func (i *basicArrayIterator) Next() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
i.pos++
if n := i.array.Len(); i.pos >= n {
i.pos = n
@@ -88,6 +109,11 @@ func (i *basicArrayIterator) Next() bool {
}
func (i *basicArrayIterator) Prev() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
i.pos--
if i.pos < 0 {
i.pos = -1
@@ -96,7 +122,7 @@ func (i *basicArrayIterator) Prev() bool {
return true
}
-func (i *basicArrayIterator) Error() error { return nil }
+func (i *basicArrayIterator) Error() error { return i.err }
type arrayIterator struct {
basicArrayIterator
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
index 1e99a2bf6..939adbb93 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
@@ -7,6 +7,7 @@
package iterator
import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/util"
)
@@ -22,13 +23,13 @@ type IteratorIndexer interface {
type indexedIterator struct {
util.BasicReleaser
- index IteratorIndexer
- strict bool
- strictGet bool
+ index IteratorIndexer
+ strict bool
- data Iterator
- err error
- errf func(err error)
+ data Iterator
+ err error
+ errf func(err error)
+ closed bool
}
func (i *indexedIterator) setData() {
@@ -36,11 +37,6 @@ func (i *indexedIterator) setData() {
i.data.Release()
}
i.data = i.index.Get()
- if i.strictGet {
- if err := i.data.Error(); err != nil {
- i.err = err
- }
- }
}
func (i *indexedIterator) clearData() {
@@ -50,14 +46,21 @@ func (i *indexedIterator) clearData() {
i.data = nil
}
-func (i *indexedIterator) dataErr() bool {
- if i.errf != nil {
- if err := i.data.Error(); err != nil {
+func (i *indexedIterator) indexErr() {
+ if err := i.index.Error(); err != nil {
+ if i.errf != nil {
i.errf(err)
}
+ i.err = err
}
- if i.strict {
- if err := i.data.Error(); err != nil {
+}
+
+func (i *indexedIterator) dataErr() bool {
+ if err := i.data.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ if i.strict || !errors.IsCorrupted(err) {
i.err = err
return true
}
@@ -72,9 +75,13 @@ func (i *indexedIterator) Valid() bool {
func (i *indexedIterator) First() bool {
if i.err != nil {
return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
}
if !i.index.First() {
+ i.indexErr()
i.clearData()
return false
}
@@ -85,9 +92,13 @@ func (i *indexedIterator) First() bool {
func (i *indexedIterator) Last() bool {
if i.err != nil {
return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
}
if !i.index.Last() {
+ i.indexErr()
i.clearData()
return false
}
@@ -105,9 +116,13 @@ func (i *indexedIterator) Last() bool {
func (i *indexedIterator) Seek(key []byte) bool {
if i.err != nil {
return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
}
if !i.index.Seek(key) {
+ i.indexErr()
i.clearData()
return false
}
@@ -125,6 +140,9 @@ func (i *indexedIterator) Seek(key []byte) bool {
func (i *indexedIterator) Next() bool {
if i.err != nil {
return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
}
switch {
@@ -136,6 +154,7 @@ func (i *indexedIterator) Next() bool {
fallthrough
case i.data == nil:
if !i.index.Next() {
+ i.indexErr()
return false
}
i.setData()
@@ -147,6 +166,9 @@ func (i *indexedIterator) Next() bool {
func (i *indexedIterator) Prev() bool {
if i.err != nil {
return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
}
switch {
@@ -158,6 +180,7 @@ func (i *indexedIterator) Prev() bool {
fallthrough
case i.data == nil:
if !i.index.Prev() {
+ i.indexErr()
return false
}
i.setData()
@@ -206,16 +229,14 @@ func (i *indexedIterator) SetErrorCallback(f func(err error)) {
i.errf = f
}
-// NewIndexedIterator returns an indexed iterator. An index is iterator
-// that returns another iterator, a data iterator. A data iterator is the
+// NewIndexedIterator returns an 'indexed iterator'. An index is an iterator
+// that returns another iterator, a 'data iterator'. A 'data iterator' is the
// iterator that contains actual key/value pairs.
//
-// If strict is true then error yield by data iterator will halt the indexed
-// iterator, on contrary if strict is false then the indexed iterator will
-// ignore those error and move on to the next index. If strictGet is true and
-// index.Get() yield an 'error iterator' then the indexed iterator will be halted.
-// An 'error iterator' is iterator which its Error() method always return non-nil
-// even before any 'seeks method' is called.
-func NewIndexedIterator(index IteratorIndexer, strict, strictGet bool) Iterator {
- return &indexedIterator{index: index, strict: strict, strictGet: strictGet}
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'indexed iterator'; otherwise the iterator will
+// continue to the next 'data iterator'. Corruption on the 'index iterator' will not be
+// ignored and will halt the iterator.
+func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator {
+ return &indexedIterator{index: index, strict: strict}
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
index 6a89b3830..72a797892 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
@@ -65,7 +65,7 @@ var _ = testutil.Defer(func() {
// Test the iterator.
t := testutil.IteratorTesting{
KeyValue: kv.Clone(),
- Iter: NewIndexedIterator(NewArrayIndexer(index), true, true),
+ Iter: NewIndexedIterator(NewArrayIndexer(index), true),
}
testutil.DoIteratorTesting(&t)
done <- true
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
index 1b80184e8..c2522860b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -14,6 +14,10 @@ import (
"github.com/syndtr/goleveldb/leveldb/util"
)
+var (
+ ErrIterReleased = errors.New("leveldb/iterator: iterator released")
+)
+
// IteratorSeeker is the interface that wraps the 'seeks method'.
type IteratorSeeker interface {
// First moves the iterator to the first key/value pair. If the iterator
@@ -100,28 +104,13 @@ type ErrorCallbackSetter interface {
}
type emptyIterator struct {
- releaser util.Releaser
- released bool
- err error
+ util.BasicReleaser
+ err error
}
func (i *emptyIterator) rErr() {
- if i.err == nil && i.released {
- i.err = errors.New("leveldb/iterator: iterator released")
- }
-}
-
-func (i *emptyIterator) Release() {
- if i.releaser != nil {
- i.releaser.Release()
- i.releaser = nil
- }
- i.released = true
-}
-
-func (i *emptyIterator) SetReleaser(releaser util.Releaser) {
- if !i.released {
- i.releaser = releaser
+ if i.err == nil && i.Released() {
+ i.err = ErrIterReleased
}
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
index 7ec2fc6f2..5ef8d5baf 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
@@ -3,15 +3,9 @@ package iterator_test
import (
"testing"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
"github.com/syndtr/goleveldb/leveldb/testutil"
)
func TestIterator(t *testing.T) {
- testutil.RunDefer()
-
- RegisterFailHandler(Fail)
- RunSpecs(t, "Iterator Suite")
+ testutil.RunSuite(t, "Iterator Suite")
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
index c8314c4e5..1a7e29df8 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
@@ -7,16 +7,11 @@
package iterator
import (
- "errors"
-
"github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/util"
)
-var (
- ErrIterReleased = errors.New("leveldb/iterator: iterator released")
-)
-
type dir int
const (
@@ -48,13 +43,11 @@ func assertKey(key []byte) []byte {
}
func (i *mergedIterator) iterErr(iter Iterator) bool {
- if i.errf != nil {
- if err := iter.Error(); err != nil {
+ if err := iter.Error(); err != nil {
+ if i.errf != nil {
i.errf(err)
}
- }
- if i.strict {
- if err := iter.Error(); err != nil {
+ if i.strict || !errors.IsCorrupted(err) {
i.err = err
return true
}
@@ -274,9 +267,13 @@ func (i *mergedIterator) Release() {
}
func (i *mergedIterator) SetReleaser(releaser util.Releaser) {
- if i.dir != dirReleased {
- i.releaser = releaser
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
}
+ i.releaser = releaser
}
func (i *mergedIterator) Error() error {
@@ -294,9 +291,9 @@ func (i *mergedIterator) SetErrorCallback(f func(err error)) {
// keys: if iters[i] contains a key k then iters[j] will not contain that key k.
// None of the iters may be nil.
//
-// If strict is true then error yield by any iterators will halt the merged
-// iterator, on contrary if strict is false then the merged iterator will
-// ignore those error and move on to the next iterator.
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'merged iterator'; otherwise the iterator will
+// continue to the next 'input iterator'.
func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator {
return &mergedIterator{
iters: iters,
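
From a caller's perspective the constructor is unchanged here; only the meaning of strict narrows to corruption errors. Below is a minimal sketch of the call site, using memdb.New, Put and NewIterator from the memdb package purely as a convenient source of inputs (any leveldb/iterator.Iterator would do; error returns are ignored for brevity):

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb/comparer"
		"github.com/syndtr/goleveldb/leveldb/iterator"
		"github.com/syndtr/goleveldb/leveldb/memdb"
	)

	func main() {
		a := memdb.New(comparer.DefaultComparer, 1<<10)
		b := memdb.New(comparer.DefaultComparer, 1<<10)
		a.Put([]byte("apple"), []byte("1"))
		b.Put([]byte("banana"), []byte("2"))

		// strict == true: a corruption error from any input halts the merged
		// iterator; non-corruption errors always halt it regardless of strict.
		it := iterator.NewMergedIterator(
			[]iterator.Iterator{a.NewIterator(nil), b.NewIterator(nil)},
			comparer.DefaultComparer, true)
		defer it.Release()

		for it.Next() {
			fmt.Printf("%s=%s\n", it.Key(), it.Value())
		}
		if err := it.Error(); err != nil {
			fmt.Println("merged iterator error:", err)
		}
	}

The inputs are assumed to hold strictly disjoint key sets, matching the contract stated in the NewMergedIterator comment above.
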
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
index b522c76e6..6519ec660 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
@@ -79,10 +79,10 @@ package journal
import (
"encoding/binary"
- "errors"
"fmt"
"io"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/util"
)
@@ -103,18 +103,18 @@ type flusher interface {
Flush() error
}
-// DroppedError is the error type that passed to Dropper.Drop method.
-type DroppedError struct {
+// ErrCorrupted is the error type generated by a corrupted block or chunk.
+type ErrCorrupted struct {
Size int
Reason string
}
-func (e DroppedError) Error() string {
- return fmt.Sprintf("leveldb/journal: dropped %d bytes: %s", e.Size, e.Reason)
+func (e *ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
}
// Dropper is the interface that wrap simple Drop method. The Drop
-// method will be called when the journal reader dropping a chunk.
+// method will be called when the journal reader drops a block or chunk.
type Dropper interface {
Drop(err error)
}
@@ -158,76 +158,78 @@ func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
}
}
+var errSkip = errors.New("leveldb/journal: skipped")
+
+func (r *Reader) corrupt(n int, reason string, skip bool) error {
+ if r.dropper != nil {
+ r.dropper.Drop(&ErrCorrupted{n, reason})
+ }
+ if r.strict && !skip {
+ r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason})
+ return r.err
+ }
+ return errSkip
+}
+
// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
// next block into the buffer if necessary.
-func (r *Reader) nextChunk(wantFirst, skip bool) error {
+func (r *Reader) nextChunk(first bool) error {
for {
if r.j+headerSize <= r.n {
checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
chunkType := r.buf[r.j+6]
- var err error
if checksum == 0 && length == 0 && chunkType == 0 {
// Drop entire block.
- err = DroppedError{r.n - r.j, "zero header"}
+ m := r.n - r.j
r.i = r.n
r.j = r.n
+ return r.corrupt(m, "zero header", false)
} else {
m := r.n - r.j
r.i = r.j + headerSize
r.j = r.j + headerSize + int(length)
if r.j > r.n {
// Drop entire block.
- err = DroppedError{m, "chunk length overflows block"}
r.i = r.n
r.j = r.n
+ return r.corrupt(m, "chunk length overflows block", false)
} else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
// Drop entire block.
- err = DroppedError{m, "checksum mismatch"}
r.i = r.n
r.j = r.n
+ return r.corrupt(m, "checksum mismatch", false)
}
}
- if wantFirst && err == nil && chunkType != fullChunkType && chunkType != firstChunkType {
- if skip {
- // The chunk are intentionally skipped.
- if chunkType == lastChunkType {
- skip = false
- }
- continue
- } else {
- // Drop the chunk.
- err = DroppedError{r.j - r.i + headerSize, "orphan chunk"}
- }
- }
- if err == nil {
- r.last = chunkType == fullChunkType || chunkType == lastChunkType
- } else {
- if r.dropper != nil {
- r.dropper.Drop(err)
- }
- if r.strict {
- r.err = err
- }
+ if first && chunkType != fullChunkType && chunkType != firstChunkType {
+ m := r.j - r.i
+ r.i = r.j
+ // Report the error, but skip it.
+ return r.corrupt(m+headerSize, "orphan chunk", true)
}
- return err
+ r.last = chunkType == fullChunkType || chunkType == lastChunkType
+ return nil
}
+
+ // The last block.
if r.n < blockSize && r.n > 0 {
- // This is the last block.
- if r.j != r.n {
- r.err = io.ErrUnexpectedEOF
- } else {
- r.err = io.EOF
+ if !first {
+ return r.corrupt(0, "missing chunk part", false)
}
+ r.err = io.EOF
return r.err
}
+
+ // Read block.
n, err := io.ReadFull(r.r, r.buf[:])
- if err != nil && err != io.ErrUnexpectedEOF {
- r.err = err
- return r.err
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return err
}
if n == 0 {
+ if !first {
+ return r.corrupt(0, "missing chunk part", false)
+ }
r.err = io.EOF
return r.err
}
@@ -237,29 +239,26 @@ func (r *Reader) nextChunk(wantFirst, skip bool) error {
// Next returns a reader for the next journal. It returns io.EOF if there are no
// more journals. The reader returned becomes stale after the next Next call,
-// and should no longer be used.
+// and should no longer be used. If strict is false, the reader will return an
+// io.ErrUnexpectedEOF error when it encounters a corrupted journal.
func (r *Reader) Next() (io.Reader, error) {
r.seq++
if r.err != nil {
return nil, r.err
}
- skip := !r.last
+ r.i = r.j
for {
- r.i = r.j
- if r.nextChunk(true, skip) != nil {
- // So that 'orphan chunk' drop will be reported.
- skip = false
- } else {
+ if err := r.nextChunk(true); err == nil {
break
- }
- if r.err != nil {
- return nil, r.err
+ } else if err != errSkip {
+ return nil, err
}
}
return &singleReader{r, r.seq, nil}, nil
}
-// Reset resets the journal reader, allows reuse of the journal reader.
+// Reset resets the journal reader, allowing reuse of the journal reader. Reset returns
+// the last accumulated error.
func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
r.seq++
err := r.err
@@ -296,7 +295,11 @@ func (x *singleReader) Read(p []byte) (int, error) {
if r.last {
return 0, io.EOF
}
- if x.err = r.nextChunk(false, false); x.err != nil {
+ x.err = r.nextChunk(false)
+ if x.err != nil {
+ if x.err == errSkip {
+ x.err = io.ErrUnexpectedEOF
+ }
return 0, x.err
}
}
@@ -320,7 +323,11 @@ func (x *singleReader) ReadByte() (byte, error) {
if r.last {
return 0, io.EOF
}
- if x.err = r.nextChunk(false, false); x.err != nil {
+ x.err = r.nextChunk(false)
+ if x.err != nil {
+ if x.err == errSkip {
+ x.err = io.ErrUnexpectedEOF
+ }
return 0, x.err
}
}
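
Seen from a caller, the Writer/Reader round trip is unchanged; the rework only changes how corruption surfaces. With strict == false, Next keeps skipping to the next readable record and a torn record fails its per-record reader with io.ErrUnexpectedEOF. A small round-trip sketch under those assumptions (error returns mostly ignored for brevity):

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"io/ioutil"

		"github.com/syndtr/goleveldb/leveldb/journal"
	)

	func main() {
		buf := new(bytes.Buffer)

		// Write two records.
		w := journal.NewWriter(buf)
		for _, rec := range []string{"first record", "second record"} {
			ww, _ := w.Next()
			ww.Write([]byte(rec))
		}
		w.Close()

		// Read them back: nil dropper, strict == false, checksum == true.
		r := journal.NewReader(bytes.NewReader(buf.Bytes()), nil, false, true)
		for {
			rr, err := r.Next()
			if err == io.EOF {
				break
			} else if err != nil {
				fmt.Println("journal error:", err)
				break
			}
			// A record torn by corruption would fail here with io.ErrUnexpectedEOF.
			data, _ := ioutil.ReadAll(rr)
			fmt.Printf("record: %q\n", data)
		}
	}

In strict mode the same corruption would instead come back from Next as an errors.ErrCorrupted, which is what the recovery paths elsewhere in this patch rely on.
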
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
index 5e1193ae2..0fcf22599 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
@@ -12,6 +12,7 @@ package journal
import (
"bytes"
+ "encoding/binary"
"fmt"
"io"
"io/ioutil"
@@ -326,3 +327,492 @@ func TestStaleWriter(t *testing.T) {
t.Fatalf("stale write #1: unexpected error: %v", err)
}
}
+
+func TestCorrupt_MissingLastBlock(t *testing.T) {
+ buf := new(bytes.Buffer)
+
+ w := NewWriter(buf)
+
+ // First record.
+ ww, err := w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil {
+ t.Fatalf("write #0: unexpected error: %v", err)
+ }
+
+ // Second record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+ t.Fatalf("write #1: unexpected error: %v", err)
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Cut the last block.
+ b := buf.Bytes()[:blockSize]
+ r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+ // First read.
+ rr, err := r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err := io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #0: %v", err)
+ }
+ if n != blockSize-1024 {
+ t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024)
+ }
+
+ // Second read.
+ rr, err = r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err = io.Copy(ioutil.Discard, rr)
+ if err != io.ErrUnexpectedEOF {
+ t.Fatalf("read #1: unexpected error: %v", err)
+ }
+
+ if _, err := r.Next(); err != io.EOF {
+ t.Fatalf("last next: unexpected error: %v", err)
+ }
+}
+
+func TestCorrupt_CorruptedFirstBlock(t *testing.T) {
+ buf := new(bytes.Buffer)
+
+ w := NewWriter(buf)
+
+ // First record.
+ ww, err := w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+ t.Fatalf("write #0: unexpected error: %v", err)
+ }
+
+ // Second record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+ t.Fatalf("write #1: unexpected error: %v", err)
+ }
+
+ // Third record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+ t.Fatalf("write #2: unexpected error: %v", err)
+ }
+
+ // Fourth record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+ t.Fatalf("write #3: unexpected error: %v", err)
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ b := buf.Bytes()
+ // Corrupting block #0.
+ for i := 0; i < 1024; i++ {
+ b[i] = '1'
+ }
+
+ r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+ // First read (third record).
+ rr, err := r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err := io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #0: %v", err)
+ }
+ if want := int64(blockSize-headerSize) + 1; n != want {
+ t.Fatalf("read #0: got %d bytes want %d", n, want)
+ }
+
+ // Second read (fourth record).
+ rr, err = r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err = io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #1: %v", err)
+ }
+ if want := int64(blockSize-headerSize) + 2; n != want {
+ t.Fatalf("read #1: got %d bytes want %d", n, want)
+ }
+
+ if _, err := r.Next(); err != io.EOF {
+ t.Fatalf("last next: unexpected error: %v", err)
+ }
+}
+
+func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
+ buf := new(bytes.Buffer)
+
+ w := NewWriter(buf)
+
+ // First record.
+ ww, err := w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+ t.Fatalf("write #0: unexpected error: %v", err)
+ }
+
+ // Second record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+ t.Fatalf("write #1: unexpected error: %v", err)
+ }
+
+ // Third record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+ t.Fatalf("write #2: unexpected error: %v", err)
+ }
+
+ // Fourth record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+ t.Fatalf("write #3: unexpected error: %v", err)
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ b := buf.Bytes()
+ // Corrupting block #1.
+ for i := 0; i < 1024; i++ {
+ b[blockSize+i] = '1'
+ }
+
+ r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+ // First read (first record).
+ rr, err := r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err := io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #0: %v", err)
+ }
+ if want := int64(blockSize / 2); n != want {
+ t.Fatalf("read #0: got %d bytes want %d", n, want)
+ }
+
+ // Second read (second record).
+ rr, err = r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err = io.Copy(ioutil.Discard, rr)
+ if err != io.ErrUnexpectedEOF {
+ t.Fatalf("read #1: unexpected error: %v", err)
+ }
+
+ // Third read (fourth record).
+ rr, err = r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err = io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #2: %v", err)
+ }
+ if want := int64(blockSize-headerSize) + 2; n != want {
+ t.Fatalf("read #2: got %d bytes want %d", n, want)
+ }
+
+ if _, err := r.Next(); err != io.EOF {
+ t.Fatalf("last next: unexpected error: %v", err)
+ }
+}
+
+func TestCorrupt_CorruptedLastBlock(t *testing.T) {
+ buf := new(bytes.Buffer)
+
+ w := NewWriter(buf)
+
+ // First record.
+ ww, err := w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+ t.Fatalf("write #0: unexpected error: %v", err)
+ }
+
+ // Second record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+ t.Fatalf("write #1: unexpected error: %v", err)
+ }
+
+ // Third record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+ t.Fatalf("write #2: unexpected error: %v", err)
+ }
+
+ // Fourth record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+ t.Fatalf("write #3: unexpected error: %v", err)
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ b := buf.Bytes()
+ // Corrupting block #3.
+ for i := len(b) - 1; i > len(b)-1024; i-- {
+ b[i] = '1'
+ }
+
+ r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+ // First read (first record).
+ rr, err := r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err := io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #0: %v", err)
+ }
+ if want := int64(blockSize / 2); n != want {
+ t.Fatalf("read #0: got %d bytes want %d", n, want)
+ }
+
+ // Second read (second record).
+ rr, err = r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err = io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #1: %v", err)
+ }
+ if want := int64(blockSize - headerSize); n != want {
+ t.Fatalf("read #1: got %d bytes want %d", n, want)
+ }
+
+ // Third read (third record).
+ rr, err = r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err = io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #2: %v", err)
+ }
+ if want := int64(blockSize-headerSize) + 1; n != want {
+ t.Fatalf("read #2: got %d bytes want %d", n, want)
+ }
+
+ // Fourth read (fourth record).
+ rr, err = r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err = io.Copy(ioutil.Discard, rr)
+ if err != io.ErrUnexpectedEOF {
+ t.Fatalf("read #3: unexpected error: %v", err)
+ }
+
+ if _, err := r.Next(); err != io.EOF {
+ t.Fatalf("last next: unexpected error: %v", err)
+ }
+}
+
+func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) {
+ buf := new(bytes.Buffer)
+
+ w := NewWriter(buf)
+
+ // First record.
+ ww, err := w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+ t.Fatalf("write #0: unexpected error: %v", err)
+ }
+
+ // Second record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+ t.Fatalf("write #1: unexpected error: %v", err)
+ }
+
+ // Third record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+ t.Fatalf("write #2: unexpected error: %v", err)
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ b := buf.Bytes()
+ // Corrupting record #1.
+ x := blockSize
+ binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
+
+ r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+ // First read (first record).
+ rr, err := r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err := io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #0: %v", err)
+ }
+ if want := int64(blockSize / 2); n != want {
+ t.Fatalf("read #0: got %d bytes want %d", n, want)
+ }
+
+ // Second read (second record).
+ rr, err = r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err = io.Copy(ioutil.Discard, rr)
+ if err != io.ErrUnexpectedEOF {
+ t.Fatalf("read #1: unexpected error: %v", err)
+ }
+
+ if _, err := r.Next(); err != io.EOF {
+ t.Fatalf("last next: unexpected error: %v", err)
+ }
+}
+
+func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) {
+ buf := new(bytes.Buffer)
+
+ w := NewWriter(buf)
+
+ // First record.
+ ww, err := w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+ t.Fatalf("write #0: unexpected error: %v", err)
+ }
+
+ // Second record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+ t.Fatalf("write #1: unexpected error: %v", err)
+ }
+
+ // Third record.
+ ww, err = w.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+ t.Fatalf("write #2: unexpected error: %v", err)
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ b := buf.Bytes()
+ // Corrupting record #1.
+ x := blockSize/2 + headerSize
+ binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
+
+ r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+ // First read (first record).
+ rr, err := r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err := io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #0: %v", err)
+ }
+ if want := int64(blockSize / 2); n != want {
+ t.Fatalf("read #0: got %d bytes want %d", n, want)
+ }
+
+ // Second read (third record).
+ rr, err = r.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ n, err = io.Copy(ioutil.Discard, rr)
+ if err != nil {
+ t.Fatalf("read #1: %v", err)
+ }
+ if want := int64(blockSize-headerSize) + 1; n != want {
+ t.Fatalf("read #1: got %d bytes want %d", n, want)
+ }
+
+ if _, err := r.Next(); err != io.EOF {
+ t.Fatalf("last next: unexpected error: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
index b9acf932d..572ae8150 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
@@ -9,15 +9,30 @@ package leveldb
import (
"encoding/binary"
"fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
)
-type vType int
+type ErrIkeyCorrupted struct {
+ Ikey []byte
+ Reason string
+}
+
+func (e *ErrIkeyCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
+}
+
+func newErrIkeyCorrupted(ikey []byte, reason string) error {
+ return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
+}
+
+type kType int
-func (t vType) String() string {
- switch t {
- case tDel:
+func (kt kType) String() string {
+ switch kt {
+ case ktDel:
return "d"
- case tVal:
+ case ktVal:
return "v"
}
return "x"
@@ -26,16 +41,16 @@ func (t vType) String() string {
// Value types encoded as the last component of internal keys.
// Don't modify; this value are saved to disk.
const (
- tDel vType = iota
- tVal
+ ktDel kType = iota
+ ktVal
)
-// tSeek defines the vType that should be passed when constructing an
+// ktSeek defines the kType that should be passed when constructing an
// internal key for seeking to a particular sequence number (since we
// sort sequence numbers in decreasing order and the value type is
// embedded as the low 8 bits in the sequence number in internal keys,
// we need to use the highest-numbered ValueType, not the lowest).
-const tSeek = tVal
+const ktSeek = ktVal
const (
// Maximum value possible for sequence number; the 8-bits are
@@ -43,7 +58,7 @@ const (
// 64-bit integer.
kMaxSeq uint64 = (uint64(1) << 56) - 1
// Maximum value possible for packed sequence number and type.
- kMaxNum uint64 = (kMaxSeq << 8) | uint64(tSeek)
+ kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
)
// Maximum number encoded in bytes.
@@ -55,85 +70,73 @@ func init() {
type iKey []byte
-func newIKey(ukey []byte, seq uint64, t vType) iKey {
- if seq > kMaxSeq || t > tVal {
- panic("invalid seq number or value type")
+func newIkey(ukey []byte, seq uint64, kt kType) iKey {
+ if seq > kMaxSeq {
+ panic("leveldb: invalid sequence number")
+ } else if kt > ktVal {
+ panic("leveldb: invalid type")
}
- b := make(iKey, len(ukey)+8)
- copy(b, ukey)
- binary.LittleEndian.PutUint64(b[len(ukey):], (seq<<8)|uint64(t))
- return b
+ ik := make(iKey, len(ukey)+8)
+ copy(ik, ukey)
+ binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt))
+ return ik
}
-func parseIkey(p []byte) (ukey []byte, seq uint64, t vType, ok bool) {
- if len(p) < 8 {
- return
+func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
+ if len(ik) < 8 {
+ return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length")
}
- num := binary.LittleEndian.Uint64(p[len(p)-8:])
- seq, t = uint64(num>>8), vType(num&0xff)
- if t > tVal {
- return
+ num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
+ seq, kt = uint64(num>>8), kType(num&0xff)
+ if kt > ktVal {
+ return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
}
- ukey = p[:len(p)-8]
- ok = true
+ ukey = ik[:len(ik)-8]
return
}
-func validIkey(p []byte) bool {
- _, _, _, ok := parseIkey(p)
- return ok
+func validIkey(ik []byte) bool {
+ _, _, _, err := parseIkey(ik)
+ return err == nil
}
-func (p iKey) assert() {
- if p == nil {
- panic("nil iKey")
+func (ik iKey) assert() {
+ if ik == nil {
+ panic("leveldb: nil iKey")
}
- if len(p) < 8 {
- panic(fmt.Sprintf("invalid iKey %q, len=%d", []byte(p), len(p)))
+ if len(ik) < 8 {
+ panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
}
}
-func (p iKey) ok() bool {
- if len(p) < 8 {
- return false
- }
- _, _, ok := p.parseNum()
- return ok
-}
-
-func (p iKey) ukey() []byte {
- p.assert()
- return p[:len(p)-8]
+func (ik iKey) ukey() []byte {
+ ik.assert()
+ return ik[:len(ik)-8]
}
-func (p iKey) num() uint64 {
- p.assert()
- return binary.LittleEndian.Uint64(p[len(p)-8:])
+func (ik iKey) num() uint64 {
+ ik.assert()
+ return binary.LittleEndian.Uint64(ik[len(ik)-8:])
}
-func (p iKey) parseNum() (seq uint64, t vType, ok bool) {
- if p == nil {
- panic("nil iKey")
+func (ik iKey) parseNum() (seq uint64, kt kType) {
+ num := ik.num()
+ seq, kt = uint64(num>>8), kType(num&0xff)
+ if kt > ktVal {
+ panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
}
- if len(p) < 8 {
- return
- }
- num := p.num()
- seq, t = uint64(num>>8), vType(num&0xff)
- if t > tVal {
- return 0, 0, false
- }
- ok = true
return
}
-func (p iKey) String() string {
- if len(p) == 0 {
+func (ik iKey) String() string {
+ if ik == nil {
return "<nil>"
}
- if seq, t, ok := p.parseNum(); ok {
- return fmt.Sprintf("%s,%s%d", shorten(string(p.ukey())), t, seq)
+
+ if ukey, seq, kt, err := parseIkey(ik); err == nil {
+ return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
+ } else {
+ return "<invalid>"
}
- return "<invalid>"
}
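
The internal-key layout itself is untouched by the renames: the user key is followed by eight little-endian bytes packing (seq<<8)|keyType, which is what newIkey writes and what parseIkey now validates with an error instead of a bool. A standalone sketch of that encoding, with local constants standing in for the unexported ktDel/ktVal:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	const (
		ktDel = 0 // deletion marker
		ktVal = 1 // value entry
	)

	// makeIkey appends (seq<<8)|kt as eight little-endian bytes to the user key.
	func makeIkey(ukey []byte, seq uint64, kt uint64) []byte {
		ik := make([]byte, len(ukey)+8)
		copy(ik, ukey)
		binary.LittleEndian.PutUint64(ik[len(ukey):], seq<<8|kt)
		return ik
	}

	// splitIkey reverses makeIkey and rejects malformed keys.
	func splitIkey(ik []byte) (ukey []byte, seq uint64, kt uint64, err error) {
		if len(ik) < 8 {
			return nil, 0, 0, fmt.Errorf("invalid length %d", len(ik))
		}
		num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
		seq, kt = num>>8, num&0xff
		if kt > ktVal {
			return nil, 0, 0, fmt.Errorf("invalid type %#x", kt)
		}
		return ik[:len(ik)-8], seq, kt, nil
	}

	func main() {
		ik := makeIkey([]byte("foo"), 100, ktVal)
		ukey, seq, kt, _ := splitIkey(ik)
		fmt.Printf("ukey=%q seq=%d type=%d\n", ukey, seq, kt) // ukey="foo" seq=100 type=1
	}

Because the sequence number occupies the high 56 bits and is sorted in decreasing order, the highest-numbered type (ktSeek == ktVal) is the one used when building a seek key, as the comment above notes.
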
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go
index e307cfc1d..30eadf784 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go
@@ -15,8 +15,8 @@ import (
var defaultIComparer = &iComparer{comparer.DefaultComparer}
-func ikey(key string, seq uint64, t vType) iKey {
- return newIKey([]byte(key), uint64(seq), t)
+func ikey(key string, seq uint64, kt kType) iKey {
+ return newIkey([]byte(key), uint64(seq), kt)
}
func shortSep(a, b []byte) []byte {
@@ -37,27 +37,37 @@ func shortSuccessor(b []byte) []byte {
return dst
}
-func testSingleKey(t *testing.T, key string, seq uint64, vt vType) {
- ik := ikey(key, seq, vt)
+func testSingleKey(t *testing.T, key string, seq uint64, kt kType) {
+ ik := ikey(key, seq, kt)
if !bytes.Equal(ik.ukey(), []byte(key)) {
t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
}
- if rseq, rt, ok := ik.parseNum(); ok {
+ rseq, rt := ik.parseNum()
+ if rseq != seq {
+ t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
+ }
+ if rt != kt {
+ t.Errorf("type does not equal, got %v, want %v", rt, kt)
+ }
+
+ if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil {
+ if !bytes.Equal(rukey, []byte(key)) {
+ t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
+ }
if rseq != seq {
t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
}
-
- if rt != vt {
- t.Errorf("type does not equal, got %v, want %v", rt, vt)
+ if rt != kt {
+ t.Errorf("type does not equal, got %v, want %v", rt, kt)
}
} else {
- t.Error("cannot parse seq and type")
+ t.Errorf("key error: %v", kerr)
}
}
-func TestIKey_EncodeDecode(t *testing.T) {
+func TestIkey_EncodeDecode(t *testing.T) {
keys := []string{"", "k", "hello", "longggggggggggggggggggggg"}
seqs := []uint64{
1, 2, 3,
@@ -67,8 +77,8 @@ func TestIKey_EncodeDecode(t *testing.T) {
}
for _, key := range keys {
for _, seq := range seqs {
- testSingleKey(t, key, seq, tVal)
- testSingleKey(t, "hello", 1, tDel)
+ testSingleKey(t, key, seq, ktVal)
+ testSingleKey(t, "hello", 1, ktDel)
}
}
}
@@ -79,45 +89,45 @@ func assertBytes(t *testing.T, want, got []byte) {
}
}
-func TestIKeyShortSeparator(t *testing.T) {
+func TestIkeyShortSeparator(t *testing.T) {
// When user keys are same
- assertBytes(t, ikey("foo", 100, tVal),
- shortSep(ikey("foo", 100, tVal),
- ikey("foo", 99, tVal)))
- assertBytes(t, ikey("foo", 100, tVal),
- shortSep(ikey("foo", 100, tVal),
- ikey("foo", 101, tVal)))
- assertBytes(t, ikey("foo", 100, tVal),
- shortSep(ikey("foo", 100, tVal),
- ikey("foo", 100, tVal)))
- assertBytes(t, ikey("foo", 100, tVal),
- shortSep(ikey("foo", 100, tVal),
- ikey("foo", 100, tDel)))
+ assertBytes(t, ikey("foo", 100, ktVal),
+ shortSep(ikey("foo", 100, ktVal),
+ ikey("foo", 99, ktVal)))
+ assertBytes(t, ikey("foo", 100, ktVal),
+ shortSep(ikey("foo", 100, ktVal),
+ ikey("foo", 101, ktVal)))
+ assertBytes(t, ikey("foo", 100, ktVal),
+ shortSep(ikey("foo", 100, ktVal),
+ ikey("foo", 100, ktVal)))
+ assertBytes(t, ikey("foo", 100, ktVal),
+ shortSep(ikey("foo", 100, ktVal),
+ ikey("foo", 100, ktDel)))
// When user keys are misordered
- assertBytes(t, ikey("foo", 100, tVal),
- shortSep(ikey("foo", 100, tVal),
- ikey("bar", 99, tVal)))
+ assertBytes(t, ikey("foo", 100, ktVal),
+ shortSep(ikey("foo", 100, ktVal),
+ ikey("bar", 99, ktVal)))
// When user keys are different, but correctly ordered
- assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek),
- shortSep(ikey("foo", 100, tVal),
- ikey("hello", 200, tVal)))
+ assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
+ shortSep(ikey("foo", 100, ktVal),
+ ikey("hello", 200, ktVal)))
// When start user key is prefix of limit user key
- assertBytes(t, ikey("foo", 100, tVal),
- shortSep(ikey("foo", 100, tVal),
- ikey("foobar", 200, tVal)))
+ assertBytes(t, ikey("foo", 100, ktVal),
+ shortSep(ikey("foo", 100, ktVal),
+ ikey("foobar", 200, ktVal)))
// When limit user key is prefix of start user key
- assertBytes(t, ikey("foobar", 100, tVal),
- shortSep(ikey("foobar", 100, tVal),
- ikey("foo", 200, tVal)))
+ assertBytes(t, ikey("foobar", 100, ktVal),
+ shortSep(ikey("foobar", 100, ktVal),
+ ikey("foo", 200, ktVal)))
}
-func TestIKeyShortestSuccessor(t *testing.T) {
- assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek),
- shortSuccessor(ikey("foo", 100, tVal)))
- assertBytes(t, ikey("\xff\xff", 100, tVal),
- shortSuccessor(ikey("\xff\xff", 100, tVal)))
+func TestIkeyShortestSuccessor(t *testing.T) {
+ assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
+ shortSuccessor(ikey("foo", 100, ktVal)))
+ assertBytes(t, ikey("\xff\xff", 100, ktVal),
+ shortSuccessor(ikey("\xff\xff", 100, ktVal)))
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go
index 245b1fd4d..fefa007a7 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go
@@ -3,18 +3,9 @@ package leveldb
import (
"testing"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
"github.com/syndtr/goleveldb/leveldb/testutil"
)
-func TestLeveldb(t *testing.T) {
- testutil.RunDefer()
-
- RegisterFailHandler(Fail)
- RunSpecs(t, "Leveldb Suite")
-
- RegisterTestingT(t)
- testutil.RunDefer("teardown")
+func TestLevelDB(t *testing.T) {
+ testutil.RunSuite(t, "LevelDB Suite")
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
index 7bcae992a..e5398873b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
@@ -12,12 +12,14 @@ import (
"sync"
"github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/util"
)
var (
- ErrNotFound = util.ErrNotFound
+ ErrNotFound = errors.ErrNotFound
+ ErrIterReleased = errors.New("leveldb/memdb: iterator released")
)
const tMaxHeight = 12
@@ -29,6 +31,7 @@ type dbIter struct {
node int
forward bool
key, value []byte
+ err error
}
func (i *dbIter) fill(checkStart, checkLimit bool) bool {
@@ -59,6 +62,11 @@ func (i *dbIter) Valid() bool {
}
func (i *dbIter) First() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
i.forward = true
i.p.mu.RLock()
defer i.p.mu.RUnlock()
@@ -71,9 +79,11 @@ func (i *dbIter) First() bool {
}
func (i *dbIter) Last() bool {
- if i.p == nil {
+ if i.Released() {
+ i.err = ErrIterReleased
return false
}
+
i.forward = false
i.p.mu.RLock()
defer i.p.mu.RUnlock()
@@ -86,9 +96,11 @@ func (i *dbIter) Last() bool {
}
func (i *dbIter) Seek(key []byte) bool {
- if i.p == nil {
+ if i.Released() {
+ i.err = ErrIterReleased
return false
}
+
i.forward = true
i.p.mu.RLock()
defer i.p.mu.RUnlock()
@@ -100,9 +112,11 @@ func (i *dbIter) Seek(key []byte) bool {
}
func (i *dbIter) Next() bool {
- if i.p == nil {
+ if i.Released() {
+ i.err = ErrIterReleased
return false
}
+
if i.node == 0 {
if !i.forward {
return i.First()
@@ -117,9 +131,11 @@ func (i *dbIter) Next() bool {
}
func (i *dbIter) Prev() bool {
- if i.p == nil {
+ if i.Released() {
+ i.err = ErrIterReleased
return false
}
+
if i.node == 0 {
if i.forward {
return i.Last()
@@ -141,10 +157,10 @@ func (i *dbIter) Value() []byte {
return i.value
}
-func (i *dbIter) Error() error { return nil }
+func (i *dbIter) Error() error { return i.err }
func (i *dbIter) Release() {
- if i.p != nil {
+ if !i.Released() {
i.p = nil
i.node = 0
i.key = nil
@@ -437,6 +453,8 @@ func (p *DB) Reset() {
// New creates a new initalized in-memory key/value DB. The capacity
// is the initial key/value buffer capacity. The capacity is advisory,
// not enforced.
+//
+// The returned DB instance is goroutine-safe.
func New(cmp comparer.BasicComparer, capacity int) *DB {
p := &DB{
cmp: cmp,
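
The user-visible change in memdb is that a released iterator now reports an error through Error() instead of silently returning nil. A short sketch of that surface, assuming the package's usual Put/Get/NewIterator methods (values shown are only illustrative):

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb/comparer"
		"github.com/syndtr/goleveldb/leveldb/memdb"
	)

	func main() {
		db := memdb.New(comparer.DefaultComparer, 4<<10)
		db.Put([]byte("k1"), []byte("v1"))
		db.Put([]byte("k2"), []byte("v2"))

		if v, err := db.Get([]byte("k1")); err == nil {
			fmt.Printf("k1=%s\n", v)
		}

		it := db.NewIterator(nil)
		for it.Next() {
			fmt.Printf("%s=%s\n", it.Key(), it.Value())
		}
		it.Release()

		// After Release the iterator refuses to move and reports the new error.
		if !it.Next() && it.Error() != nil {
			fmt.Println(it.Error()) // leveldb/memdb: iterator released
		}
	}

The New doc comment now also states that the returned DB is goroutine-safe, so the writes above could equally come from concurrent goroutines.
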
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go
index 788539a87..18c304b7f 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go
@@ -3,15 +3,9 @@ package memdb
import (
"testing"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
"github.com/syndtr/goleveldb/leveldb/testutil"
)
-func TestMemdb(t *testing.T) {
- testutil.RunDefer()
-
- RegisterFailHandler(Fail)
- RunSpecs(t, "Memdb Suite")
+func TestMemDB(t *testing.T) {
+ testutil.RunSuite(t, "MemDB Suite")
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go
index f96a9d1ea..5dd6dbc7b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go
@@ -129,7 +129,7 @@ var _ = testutil.Defer(func() {
}
return db
- })
+ }, nil, nil)
})
})
})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
index b940ce427..61f0eadf9 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
@@ -11,6 +11,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/cache"
"github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/filter"
+ "math"
)
const (
@@ -19,25 +20,57 @@ const (
GiB = MiB * 1024
)
-const (
- DefaultBlockCacheSize = 8 * MiB
- DefaultBlockRestartInterval = 16
- DefaultBlockSize = 4 * KiB
- DefaultCompressionType = SnappyCompression
- DefaultMaxOpenFiles = 1000
- DefaultWriteBuffer = 4 * MiB
+var (
+ DefaultBlockCacher = LRUCacher
+ DefaultBlockCacheCapacity = 8 * MiB
+ DefaultBlockRestartInterval = 16
+ DefaultBlockSize = 4 * KiB
+ DefaultCompactionExpandLimitFactor = 25
+ DefaultCompactionGPOverlapsFactor = 10
+ DefaultCompactionL0Trigger = 4
+ DefaultCompactionSourceLimitFactor = 1
+ DefaultCompactionTableSize = 2 * MiB
+ DefaultCompactionTableSizeMultiplier = 1.0
+ DefaultCompactionTotalSize = 10 * MiB
+ DefaultCompactionTotalSizeMultiplier = 10.0
+ DefaultCompressionType = SnappyCompression
+ DefaultIteratorSamplingRate = 1 * MiB
+ DefaultMaxMemCompationLevel = 2
+ DefaultNumLevel = 7
+ DefaultOpenFilesCacher = LRUCacher
+ DefaultOpenFilesCacheCapacity = 500
+ DefaultWriteBuffer = 4 * MiB
+ DefaultWriteL0PauseTrigger = 12
+ DefaultWriteL0SlowdownTrigger = 8
)
-type noCache struct{}
+// Cacher is a caching algorithm.
+type Cacher interface {
+ New(capacity int) cache.Cacher
+}
+
+type CacherFunc struct {
+ NewFunc func(capacity int) cache.Cacher
+}
+
+func (f *CacherFunc) New(capacity int) cache.Cacher {
+ if f.NewFunc != nil {
+ return f.NewFunc(capacity)
+ }
+ return nil
+}
-func (noCache) SetCapacity(capacity int) {}
-func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
-func (noCache) Purge(fin cache.PurgeFin) {}
-func (noCache) Zap(closed bool) {}
+func noCacher(int) cache.Cacher { return nil }
-var NoCache cache.Cache = noCache{}
+var (
+ // LRUCacher is the LRU-cache algorithm.
+ LRUCacher = &CacherFunc{cache.NewLRU}
-// Compression is the per-block compression algorithm to use.
+ // NoCacher is the value to disable caching algorithm.
+ NoCacher = &CacherFunc{}
+)
+
+// Compression is the 'sorted table' block compression algorithm to use.
type Compression uint
func (c Compression) String() string {
@@ -59,34 +92,47 @@ const (
nCompression
)
-// Strict is the DB strict level.
+// Strict is the DB 'strict level'.
type Strict uint
const (
// If present then a corrupted or invalid chunk or block in manifest
- // journal will cause an error istead of being dropped.
+ // journal will cause an error instead of being dropped.
+	// This will prevent a database with a corrupted manifest from being opened.
StrictManifest Strict = 1 << iota
- // If present then a corrupted or invalid chunk or block in journal
- // will cause an error istead of being dropped.
- StrictJournal
-
// If present then journal chunk checksum will be verified.
StrictJournalChecksum
- // If present then an invalid key/value pair will cause an error
- // instead of being skipped.
- StrictIterator
+ // If present then a corrupted or invalid chunk or block in journal
+ // will cause an error instead of being dropped.
+	// This will prevent a database with a corrupted journal from being opened.
+ StrictJournal
// If present then 'sorted table' block checksum will be verified.
+ // This has effect on both 'read operation' and compaction.
StrictBlockChecksum
+	// If present then a corrupted 'sorted table' will cause compaction to fail.
+ // The database will enter read-only mode.
+ StrictCompaction
+
+	// If present then a corrupted 'sorted table' will halt 'read operation'.
+ StrictReader
+
+ // If present then leveldb.Recover will drop corrupted 'sorted table'.
+ StrictRecovery
+
+	// This is only applicable to ReadOptions. If present then this ReadOptions
+	// 'strict level' will override the global one.
+ StrictOverride
+
// StrictAll enables all strict flags.
- StrictAll = StrictManifest | StrictJournal | StrictJournalChecksum | StrictIterator | StrictBlockChecksum
+ StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery
// DefaultStrict is the default strict flags. Specify any strict flags
// will override default strict flags as whole (i.e. not OR'ed).
- DefaultStrict = StrictJournalChecksum | StrictBlockChecksum
+ DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader
// NoStrict disables all strict flags. Override default strict flags.
NoStrict = ^StrictAll
@@ -101,11 +147,17 @@ type Options struct {
// The default value is nil
AltFilters []filter.Filter
- // BlockCache provides per-block caching for LevelDB. Specify NoCache to
- // disable block caching.
+ // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
+ // Specify NoCacher to disable caching algorithm.
//
- // By default LevelDB will create LRU-cache with capacity of 8MiB.
- BlockCache cache.Cache
+ // The default value is LRUCacher.
+ BlockCacher Cacher
+
+ // BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
+	// Use -1 for zero; this has the same effect as specifying NoCacher for BlockCacher.
+ //
+ // The default value is 8MiB.
+ BlockCacheCapacity int
// BlockRestartInterval is the number of keys between restart points for
// delta encoding of keys.
@@ -119,6 +171,73 @@ type Options struct {
// The default value is 4KiB.
BlockSize int
+	// CompactionExpandLimitFactor limits the compaction size after it is expanded.
+ // This will be multiplied by table size limit at compaction target level.
+ //
+ // The default value is 25.
+ CompactionExpandLimitFactor int
+
+ // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a
+ // single 'sorted table' generates.
+ // This will be multiplied by table size limit at grandparent level.
+ //
+ // The default value is 10.
+ CompactionGPOverlapsFactor int
+
+ // CompactionL0Trigger defines number of 'sorted table' at level-0 that will
+ // trigger compaction.
+ //
+ // The default value is 4.
+ CompactionL0Trigger int
+
+ // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to
+ // level-0.
+ // This will be multiplied by table size limit at compaction target level.
+ //
+ // The default value is 1.
+ CompactionSourceLimitFactor int
+
+ // CompactionTableSize limits size of 'sorted table' that compaction generates.
+ // The limits for each level will be calculated as:
+ // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
+	// The multiplier for each level can also be fine-tuned using CompactionTableSizeMultiplierPerLevel.
+ //
+ // The default value is 2MiB.
+ CompactionTableSize int
+
+ // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize.
+ //
+ // The default value is 1.
+ CompactionTableSizeMultiplier float64
+
+ // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for
+ // CompactionTableSize.
+ // Use zero to skip a level.
+ //
+ // The default value is nil.
+ CompactionTableSizeMultiplierPerLevel []float64
+
+ // CompactionTotalSize limits total size of 'sorted table' for each level.
+ // The limits for each level will be calculated as:
+ // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
+	// The multiplier for each level can also be fine-tuned using
+ // CompactionTotalSizeMultiplierPerLevel.
+ //
+ // The default value is 10MiB.
+ CompactionTotalSize int
+
+ // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize.
+ //
+ // The default value is 10.
+ CompactionTotalSizeMultiplier float64
+
+ // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for
+ // CompactionTotalSize.
+ // Use zero to skip a level.
+ //
+ // The default value is nil.
+ CompactionTotalSizeMultiplierPerLevel []float64
+
// Comparer defines a total ordering over the space of []byte keys: a 'less
// than' relationship. The same comparison algorithm must be used for reads
// and writes over the lifetime of the DB.
@@ -126,11 +245,22 @@ type Options struct {
// The default value uses the same ordering as bytes.Compare.
Comparer comparer.Comparer
- // Compression defines the per-block compression to use.
+ // Compression defines the 'sorted table' block compression to use.
//
// The default value (DefaultCompression) uses snappy compression.
Compression Compression
+	// DisableBlockCache allows disabling the use of cache.Cache functionality on
+	// 'sorted table' blocks.
+ //
+ // The default value is false.
+ DisableBlockCache bool
+
+	// DisableCompactionBackoff allows disabling compaction retry backoff.
+ //
+ // The default value is false.
+ DisableCompactionBackoff bool
+
// ErrorIfExist defines whether an error should returned if the DB already
// exist.
//
@@ -159,12 +289,37 @@ type Options struct {
// The default value is nil.
Filter filter.Filter
- // MaxOpenFiles defines maximum number of open files to kept around
- // (cached). This is not an hard limit, actual open files may exceed
- // the defined value.
+ // IteratorSamplingRate defines approximate gap (in bytes) between read
+ // sampling of an iterator. The samples will be used to determine when
+ // compaction should be triggered.
+ //
+ // The default is 1MiB.
+ IteratorSamplingRate int
+
+	// MaxMemCompationLevel defines the maximum level that a newly compacted 'memdb'
+	// will be pushed into if it doesn't create overlap. This should be less than
+	// NumLevel. Use -1 for level-0.
//
- // The default value is 1000.
- MaxOpenFiles int
+ // The default is 2.
+ MaxMemCompationLevel int
+
+	// NumLevel defines the number of database levels. The number of levels shouldn't be
+	// changed between opens, or the database will panic.
+ //
+ // The default is 7.
+ NumLevel int
+
+ // OpenFilesCacher provides cache algorithm for open files caching.
+ // Specify NoCacher to disable caching algorithm.
+ //
+ // The default value is LRUCacher.
+ OpenFilesCacher Cacher
+
+ // OpenFilesCacheCapacity defines the capacity of the open files caching.
+	// Use -1 for zero; this has the same effect as specifying NoCacher for OpenFilesCacher.
+ //
+ // The default value is 500.
+ OpenFilesCacheCapacity int
// Strict defines the DB strict level.
Strict Strict
@@ -177,6 +332,18 @@ type Options struct {
//
// The default value is 4MiB.
WriteBuffer int
+
+	// WriteL0PauseTrigger defines the number of 'sorted tables' at level-0 that will
+	// pause writes.
+ //
+ // The default value is 12.
+ WriteL0PauseTrigger int
+
+ // WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that
+ // will trigger write slowdown.
+ //
+ // The default value is 8.
+ WriteL0SlowdownTrigger int
}
func (o *Options) GetAltFilters() []filter.Filter {
@@ -186,11 +353,22 @@ func (o *Options) GetAltFilters() []filter.Filter {
return o.AltFilters
}
-func (o *Options) GetBlockCache() cache.Cache {
- if o == nil {
+func (o *Options) GetBlockCacher() Cacher {
+ if o == nil || o.BlockCacher == nil {
+ return DefaultBlockCacher
+ } else if o.BlockCacher == NoCacher {
return nil
}
- return o.BlockCache
+ return o.BlockCacher
+}
+
+func (o *Options) GetBlockCacheCapacity() int {
+ if o == nil || o.BlockCacheCapacity == 0 {
+ return DefaultBlockCacheCapacity
+ } else if o.BlockCacheCapacity < 0 {
+ return 0
+ }
+ return o.BlockCacheCapacity
}
func (o *Options) GetBlockRestartInterval() int {
@@ -207,6 +385,79 @@ func (o *Options) GetBlockSize() int {
return o.BlockSize
}
+func (o *Options) GetCompactionExpandLimit(level int) int {
+ factor := DefaultCompactionExpandLimitFactor
+ if o != nil && o.CompactionExpandLimitFactor > 0 {
+ factor = o.CompactionExpandLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionGPOverlaps(level int) int {
+ factor := DefaultCompactionGPOverlapsFactor
+ if o != nil && o.CompactionGPOverlapsFactor > 0 {
+ factor = o.CompactionGPOverlapsFactor
+ }
+ return o.GetCompactionTableSize(level+2) * factor
+}
+
+func (o *Options) GetCompactionL0Trigger() int {
+ if o == nil || o.CompactionL0Trigger == 0 {
+ return DefaultCompactionL0Trigger
+ }
+ return o.CompactionL0Trigger
+}
+
+func (o *Options) GetCompactionSourceLimit(level int) int {
+ factor := DefaultCompactionSourceLimitFactor
+ if o != nil && o.CompactionSourceLimitFactor > 0 {
+ factor = o.CompactionSourceLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionTableSize(level int) int {
+ var (
+ base = DefaultCompactionTableSize
+ mult float64
+ )
+ if o != nil {
+ if o.CompactionTableSize > 0 {
+ base = o.CompactionTableSize
+ }
+ if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
+ mult = o.CompactionTableSizeMultiplierPerLevel[level]
+ } else if o.CompactionTableSizeMultiplier > 0 {
+ mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
+ }
+ }
+ if mult == 0 {
+ mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
+ }
+ return int(float64(base) * mult)
+}
+
+func (o *Options) GetCompactionTotalSize(level int) int64 {
+ var (
+ base = DefaultCompactionTotalSize
+ mult float64
+ )
+ if o != nil {
+ if o.CompactionTotalSize > 0 {
+ base = o.CompactionTotalSize
+ }
+ if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
+ mult = o.CompactionTotalSizeMultiplierPerLevel[level]
+ } else if o.CompactionTotalSizeMultiplier > 0 {
+ mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
+ }
+ }
+ if mult == 0 {
+ mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
+ }
+ return int64(float64(base) * mult)
+}
+
func (o *Options) GetComparer() comparer.Comparer {
if o == nil || o.Comparer == nil {
return comparer.DefaultComparer
@@ -221,6 +472,13 @@ func (o *Options) GetCompression() Compression {
return o.Compression
}
+func (o *Options) GetDisableCompactionBackoff() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableCompactionBackoff
+}
+
func (o *Options) GetErrorIfExist() bool {
if o == nil {
return false
@@ -242,11 +500,52 @@ func (o *Options) GetFilter() filter.Filter {
return o.Filter
}
-func (o *Options) GetMaxOpenFiles() int {
- if o == nil || o.MaxOpenFiles <= 0 {
- return DefaultMaxOpenFiles
+func (o *Options) GetIteratorSamplingRate() int {
+ if o == nil || o.IteratorSamplingRate <= 0 {
+ return DefaultIteratorSamplingRate
+ }
+ return o.IteratorSamplingRate
+}
+
+func (o *Options) GetMaxMemCompationLevel() int {
+ level := DefaultMaxMemCompationLevel
+ if o != nil {
+ if o.MaxMemCompationLevel > 0 {
+ level = o.MaxMemCompationLevel
+ } else if o.MaxMemCompationLevel < 0 {
+ level = 0
+ }
+ }
+ if level >= o.GetNumLevel() {
+ return o.GetNumLevel() - 1
+ }
+ return level
+}
+
+func (o *Options) GetNumLevel() int {
+ if o == nil || o.NumLevel <= 0 {
+ return DefaultNumLevel
+ }
+ return o.NumLevel
+}
+
+func (o *Options) GetOpenFilesCacher() Cacher {
+ if o == nil || o.OpenFilesCacher == nil {
+ return DefaultOpenFilesCacher
}
- return o.MaxOpenFiles
+ if o.OpenFilesCacher == NoCacher {
+ return nil
+ }
+ return o.OpenFilesCacher
+}
+
+func (o *Options) GetOpenFilesCacheCapacity() int {
+ if o == nil || o.OpenFilesCacheCapacity == 0 {
+ return DefaultOpenFilesCacheCapacity
+ } else if o.OpenFilesCacheCapacity < 0 {
+ return 0
+ }
+ return o.OpenFilesCacheCapacity
}
func (o *Options) GetStrict(strict Strict) bool {
@@ -263,6 +562,20 @@ func (o *Options) GetWriteBuffer() int {
return o.WriteBuffer
}
+func (o *Options) GetWriteL0PauseTrigger() int {
+ if o == nil || o.WriteL0PauseTrigger == 0 {
+ return DefaultWriteL0PauseTrigger
+ }
+ return o.WriteL0PauseTrigger
+}
+
+func (o *Options) GetWriteL0SlowdownTrigger() int {
+ if o == nil || o.WriteL0SlowdownTrigger == 0 {
+ return DefaultWriteL0SlowdownTrigger
+ }
+ return o.WriteL0SlowdownTrigger
+}
+
// ReadOptions holds the optional parameters for 'read operation'. The
// 'read operation' includes Get, Find and NewIterator.
type ReadOptions struct {
@@ -273,8 +586,8 @@ type ReadOptions struct {
// The default value is false.
DontFillCache bool
- // Strict overrides global DB strict level. Only StrictIterator and
- // StrictBlockChecksum that does have effects here.
+	// Strict will be OR'ed with the global DB 'strict level' unless StrictOverride
+	// is present. Currently only StrictReader has an effect here.
Strict Strict
}
@@ -316,3 +629,11 @@ func (wo *WriteOptions) GetSync() bool {
}
return wo.Sync
}
+
+func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
+ if ro.GetStrict(StrictOverride) {
+ return ro.GetStrict(strict)
+ } else {
+ return o.GetStrict(strict) || ro.GetStrict(strict)
+ }
+}
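
The per-level limits described in the comments follow base * multiplier^level, with the per-level multiplier slices taking precedence where their entries are positive. A small worked example of those formulas using the defaults declared above (2 MiB table size with multiplier 1, 10 MiB total size with multiplier 10, 7 levels):

	package main

	import (
		"fmt"
		"math"
	)

	const MiB = 1 << 20

	func main() {
		const (
			tableBase = 2 * MiB  // DefaultCompactionTableSize
			tableMult = 1.0      // DefaultCompactionTableSizeMultiplier
			totalBase = 10 * MiB // DefaultCompactionTotalSize
			totalMult = 10.0     // DefaultCompactionTotalSizeMultiplier
			numLevel  = 7        // DefaultNumLevel
		)
		for level := 0; level < numLevel; level++ {
			tableLimit := int(float64(tableBase) * math.Pow(tableMult, float64(level)))
			totalLimit := int64(float64(totalBase) * math.Pow(totalMult, float64(level)))
			fmt.Printf("level %d: table limit %d, total limit %d\n", level, tableLimit, totalLimit)
		}
	}

With these defaults the table size limit stays at 2 MiB on every level, while the total-size budget grows tenfold per level (10 MiB, 100 MiB, 1 GiB, ...).
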
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go
index fc6a96965..a3d84ef60 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go
@@ -7,35 +7,86 @@
package leveldb
import (
- "github.com/syndtr/goleveldb/leveldb/cache"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
)
-func (s *session) setOptions(o *opt.Options) {
- s.o = &opt.Options{}
+func dupOptions(o *opt.Options) *opt.Options {
+ newo := &opt.Options{}
if o != nil {
- *s.o = *o
+ *newo = *o
+ }
+ if newo.Strict == 0 {
+ newo.Strict = opt.DefaultStrict
}
+ return newo
+}
+
+func (s *session) setOptions(o *opt.Options) {
+ no := dupOptions(o)
// Alternative filters.
if filters := o.GetAltFilters(); len(filters) > 0 {
- s.o.AltFilters = make([]filter.Filter, len(filters))
+ no.AltFilters = make([]filter.Filter, len(filters))
for i, filter := range filters {
- s.o.AltFilters[i] = &iFilter{filter}
+ no.AltFilters[i] = &iFilter{filter}
}
}
- // Block cache.
- switch o.GetBlockCache() {
- case nil:
- s.o.BlockCache = cache.NewLRUCache(opt.DefaultBlockCacheSize)
- case opt.NoCache:
- s.o.BlockCache = nil
- }
// Comparer.
s.icmp = &iComparer{o.GetComparer()}
- s.o.Comparer = s.icmp
+ no.Comparer = s.icmp
// Filter.
if filter := o.GetFilter(); filter != nil {
- s.o.Filter = &iFilter{filter}
+ no.Filter = &iFilter{filter}
}
+
+ s.o = &cachedOptions{Options: no}
+ s.o.cache()
+}
+
+type cachedOptions struct {
+ *opt.Options
+
+ compactionExpandLimit []int
+ compactionGPOverlaps []int
+ compactionSourceLimit []int
+ compactionTableSize []int
+ compactionTotalSize []int64
+}
+
+func (co *cachedOptions) cache() {
+ numLevel := co.Options.GetNumLevel()
+
+ co.compactionExpandLimit = make([]int, numLevel)
+ co.compactionGPOverlaps = make([]int, numLevel)
+ co.compactionSourceLimit = make([]int, numLevel)
+ co.compactionTableSize = make([]int, numLevel)
+ co.compactionTotalSize = make([]int64, numLevel)
+
+ for level := 0; level < numLevel; level++ {
+ co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level)
+ co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level)
+ co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level)
+ co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level)
+ co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level)
+ }
+}
+
+func (co *cachedOptions) GetCompactionExpandLimit(level int) int {
+ return co.compactionExpandLimit[level]
+}
+
+func (co *cachedOptions) GetCompactionGPOverlaps(level int) int {
+ return co.compactionGPOverlaps[level]
+}
+
+func (co *cachedOptions) GetCompactionSourceLimit(level int) int {
+ return co.compactionSourceLimit[level]
+}
+
+func (co *cachedOptions) GetCompactionTableSize(level int) int {
+ return co.compactionTableSize[level]
+}
+
+func (co *cachedOptions) GetCompactionTotalSize(level int) int64 {
+ return co.compactionTotalSize[level]
}
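
cachedOptions is a precomputation shim: the per-level Get* helpers above all go through math.Pow, so cache() evaluates them once per level and the overriding methods become plain slice lookups on the compaction path. The same idea in isolation, with computeLimit standing in (as an assumption) for any one of the opt helpers:

	package main

	import (
		"fmt"
		"math"
	)

	// computeLimit stands in for a per-level helper such as GetCompactionTotalSize.
	func computeLimit(level int) int64 {
		return int64(10 * float64(1<<20) * math.Pow(10, float64(level)))
	}

	// cachedLimits mirrors cachedOptions: compute once, then index.
	type cachedLimits struct {
		totalSize []int64
	}

	func newCachedLimits(numLevel int) *cachedLimits {
		c := &cachedLimits{totalSize: make([]int64, numLevel)}
		for level := 0; level < numLevel; level++ {
			c.totalSize[level] = computeLimit(level)
		}
		return c
	}

	func main() {
		c := newCachedLimits(7)
		fmt.Println(c.totalSize[3]) // constant-time lookup, no math.Pow per call
	}

This assumes the options are not mutated after setOptions, which the one-shot cache() call implicitly relies on.
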
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
index 6b2a61683..b3906f7fc 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
@@ -7,12 +7,13 @@
package leveldb
import (
- "errors"
+ "fmt"
"io"
"os"
"sync"
"sync/atomic"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/journal"
"github.com/syndtr/goleveldb/leveldb/opt"
@@ -20,18 +21,31 @@ import (
"github.com/syndtr/goleveldb/leveldb/util"
)
+type ErrManifestCorrupted struct {
+ Field string
+ Reason string
+}
+
+func (e *ErrManifestCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
+}
+
+func newErrManifestCorrupted(f storage.File, field, reason string) error {
+ return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason})
+}
+
// session represent a persistent database session.
type session struct {
// Need 64-bit alignment.
- stFileNum uint64 // current unused file number
+ stNextFileNum uint64 // current unused file number
stJournalNum uint64 // current journal file number; need external synchronization
stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb
- stSeq uint64 // last mem compacted seq; need external synchronization
+ stSeqNum uint64 // last mem compacted seq; need external synchronization
stTempFileNum uint64
stor storage.Storage
storLock util.Releaser
- o *opt.Options
+ o *cachedOptions
icmp *iComparer
tops *tOps
@@ -39,11 +53,12 @@ type session struct {
manifestWriter storage.Writer
manifestFile storage.File
- stCPtrs [kNumLevels]iKey // compact pointers; need external synchronization
- stVersion *version // current version
- vmu sync.Mutex
+ stCompPtrs []iKey // compaction pointers; need external synchronization
+ stVersion *version // current version
+ vmu sync.Mutex
}
+// Creates new initialized session instance.
func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
if stor == nil {
return nil, os.ErrInvalid
@@ -53,22 +68,20 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
return
}
s = &session{
- stor: stor,
- storLock: storLock,
+ stor: stor,
+ storLock: storLock,
+ stCompPtrs: make([]iKey, o.GetNumLevel()),
}
s.setOptions(o)
- s.tops = newTableOps(s, s.o.GetMaxOpenFiles())
- s.setVersion(&version{s: s})
- s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock D·DeletedEntry L·Level Q·SeqNum T·TimeElapsed")
+ s.tops = newTableOps(s)
+ s.setVersion(newVersion(s))
+ s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
return
}
// Close session.
func (s *session) close() {
s.tops.close()
- if bc := s.o.GetBlockCache(); bc != nil {
- bc.Purge(nil)
- }
if s.manifest != nil {
s.manifest.Close()
}
@@ -81,6 +94,7 @@ func (s *session) close() {
s.stVersion = nil
}
+// Release session lock.
func (s *session) release() {
s.storLock.Release()
}
@@ -98,26 +112,26 @@ func (s *session) recover() (err error) {
// Don't return os.ErrNotExist if the underlying storage contains
// other files that belong to LevelDB. So the DB won't get trashed.
if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 {
- err = ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest file missing")}
+ err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
}
}
}()
- file, err := s.stor.GetManifest()
+ m, err := s.stor.GetManifest()
if err != nil {
return
}
- reader, err := file.Open()
+ reader, err := m.Open()
if err != nil {
return
}
defer reader.Close()
strict := s.o.GetStrict(opt.StrictManifest)
- jr := journal.NewReader(reader, dropper{s, file}, strict, true)
+ jr := journal.NewReader(reader, dropper{s, m}, strict, true)
- staging := s.version_NB().newStaging()
- rec := &sessionRecord{}
+ staging := s.stVersion.newStaging()
+ rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
for {
var r io.Reader
r, err = jr.Next()
@@ -126,51 +140,57 @@ func (s *session) recover() (err error) {
err = nil
break
}
- return
+ return errors.SetFile(err, m)
}
err = rec.decode(r)
if err == nil {
// save compact pointers
- for _, rp := range rec.compactionPointers {
- s.stCPtrs[rp.level] = iKey(rp.key)
+ for _, r := range rec.compPtrs {
+ s.stCompPtrs[r.level] = iKey(r.ikey)
}
// commit record to version staging
staging.commit(rec)
- } else if strict {
- return ErrCorrupted{Type: CorruptedManifest, Err: err}
} else {
- s.logf("manifest error: %v (skipped)", err)
+ err = errors.SetFile(err, m)
+ if strict || !errors.IsCorrupted(err) {
+ return
+ } else {
+ s.logf("manifest error: %v (skipped)", errors.SetFile(err, m))
+ }
}
- rec.resetCompactionPointers()
+ rec.resetCompPtrs()
rec.resetAddedTables()
rec.resetDeletedTables()
}
switch {
case !rec.has(recComparer):
- return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing comparer name")}
+ return newErrManifestCorrupted(m, "comparer", "missing")
case rec.comparer != s.icmp.uName():
- return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: comparer mismatch, " + "want '" + s.icmp.uName() + "', " + "got '" + rec.comparer + "'")}
- case !rec.has(recNextNum):
- return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing next file number")}
+ return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
+ case !rec.has(recNextFileNum):
+ return newErrManifestCorrupted(m, "next-file-num", "missing")
case !rec.has(recJournalNum):
- return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing journal file number")}
- case !rec.has(recSeq):
- return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing seq number")}
+ return newErrManifestCorrupted(m, "journal-file-num", "missing")
+ case !rec.has(recSeqNum):
+ return newErrManifestCorrupted(m, "seq-num", "missing")
}
- s.manifestFile = file
+ s.manifestFile = m
s.setVersion(staging.finish())
- s.setFileNum(rec.nextNum)
+ s.setNextFileNum(rec.nextFileNum)
s.recordCommited(rec)
return nil
}
// Commit session; need external synchronization.
func (s *session) commit(r *sessionRecord) (err error) {
+ v := s.version()
+ defer v.release()
+
// spawn new version based on current version
- nv := s.version_NB().spawn(r)
+ nv := v.spawn(r)
if s.manifest == nil {
// manifest journal writer not yet created, create one
@@ -189,22 +209,22 @@ func (s *session) commit(r *sessionRecord) (err error) {
// Pick a compaction based on current state; need external synchronization.
func (s *session) pickCompaction() *compaction {
- v := s.version_NB()
+ v := s.version()
var level int
var t0 tFiles
if v.cScore >= 1 {
level = v.cLevel
- cp := s.stCPtrs[level]
- tt := v.tables[level]
- for _, t := range tt {
- if cp == nil || s.icmp.Compare(t.max, cp) > 0 {
+ cptr := s.stCompPtrs[level]
+ tables := v.tables[level]
+ for _, t := range tables {
+ if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
t0 = append(t0, t)
break
}
}
if len(t0) == 0 {
- t0 = append(t0, tt[0])
+ t0 = append(t0, tables[0])
}
} else {
if p := atomic.LoadPointer(&v.cSeek); p != nil {
@@ -212,29 +232,21 @@ func (s *session) pickCompaction() *compaction {
level = ts.level
t0 = append(t0, ts.table)
} else {
+ v.release()
return nil
}
}
- c := &compaction{s: s, version: v, level: level}
- if level == 0 {
- min, max := t0.getRange(s.icmp)
- t0 = nil
- v.tables[0].getOverlaps(min.ukey(), max.ukey(), &t0, false, s.icmp.ucmp)
- }
-
- c.tables[0] = t0
- c.expand()
- return c
+ return newCompaction(s, v, level, t0)
}
// Create compaction from given level and range; need external synchronization.
-func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
- v := s.version_NB()
+func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
+ v := s.version()
- var t0 tFiles
- v.tables[level].getOverlaps(min, max, &t0, level != 0, s.icmp.ucmp)
+ t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
if len(t0) == 0 {
+ v.release()
return nil
}
@@ -243,7 +255,7 @@ func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
// and we must not pick one file and drop another older file if the
// two files overlap.
if level > 0 {
- limit := uint64(kMaxTableSize)
+ limit := uint64(v.s.o.GetCompactionSourceLimit(level))
total := uint64(0)
for i, t := range t0 {
total += t.size
@@ -255,90 +267,124 @@ func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
}
}
- c := &compaction{s: s, version: v, level: level}
- c.tables[0] = t0
+ return newCompaction(s, v, level, t0)
+}
+
+func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
+ c := &compaction{
+ s: s,
+ v: v,
+ level: level,
+ tables: [2]tFiles{t0, nil},
+ maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
+ tPtrs: make([]int, s.o.GetNumLevel()),
+ }
c.expand()
+ c.save()
return c
}
-// compaction represent a compaction state
+// compaction represents a compaction state.
type compaction struct {
- s *session
- version *version
+ s *session
+ v *version
+
+ level int
+ tables [2]tFiles
+ maxGPOverlaps uint64
+
+ gp tFiles
+ gpi int
+ seenKey bool
+ gpOverlappedBytes uint64
+ imin, imax iKey
+ tPtrs []int
+ released bool
+
+ snapGPI int
+ snapSeenKey bool
+ snapGPOverlappedBytes uint64
+ snapTPtrs []int
+}
- level int
- tables [2]tFiles
+func (c *compaction) save() {
+ c.snapGPI = c.gpi
+ c.snapSeenKey = c.seenKey
+ c.snapGPOverlappedBytes = c.gpOverlappedBytes
+ c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
+}
- gp tFiles
- gpidx int
- seenKey bool
- overlappedBytes uint64
- min, max iKey
+func (c *compaction) restore() {
+ c.gpi = c.snapGPI
+ c.seenKey = c.snapSeenKey
+ c.gpOverlappedBytes = c.snapGPOverlappedBytes
+ c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
+}
- tPtrs [kNumLevels]int
+func (c *compaction) release() {
+ if !c.released {
+ c.released = true
+ c.v.release()
+ }
}
// Expand compacted tables; need external synchronization.
func (c *compaction) expand() {
- s := c.s
- v := c.version
-
- level := c.level
- vt0, vt1 := v.tables[level], v.tables[level+1]
+ limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
+ vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
t0, t1 := c.tables[0], c.tables[1]
- min, max := t0.getRange(s.icmp)
- vt1.getOverlaps(min.ukey(), max.ukey(), &t1, true, s.icmp.ucmp)
-
- // Get entire range covered by compaction
- amin, amax := append(t0, t1...).getRange(s.icmp)
+ imin, imax := t0.getRange(c.s.icmp)
+ // We expand t0 here just in case a ukey hops across tables.
+ t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
+ if len(t0) != len(c.tables[0]) {
+ imin, imax = t0.getRange(c.s.icmp)
+ }
+ t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
+ // Get entire range covered by compaction.
+ amin, amax := append(t0, t1...).getRange(c.s.icmp)
// See if we can grow the number of inputs in "level" without
// changing the number of "level+1" files we pick up.
if len(t1) > 0 {
- var exp0 tFiles
- vt0.getOverlaps(amin.ukey(), amax.ukey(), &exp0, level != 0, s.icmp.ucmp)
- if len(exp0) > len(t0) && t1.size()+exp0.size() < kExpCompactionMaxBytes {
- var exp1 tFiles
- xmin, xmax := exp0.getRange(s.icmp)
- vt1.getOverlaps(xmin.ukey(), xmax.ukey(), &exp1, true, s.icmp.ucmp)
+ exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
+ if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
+ xmin, xmax := exp0.getRange(c.s.icmp)
+ exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
if len(exp1) == len(t1) {
- s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
- level, level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
+ c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
+ c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
- min, max = xmin, xmax
+ imin, imax = xmin, xmax
t0, t1 = exp0, exp1
- amin, amax = append(t0, t1...).getRange(s.icmp)
+ amin, amax = append(t0, t1...).getRange(c.s.icmp)
}
}
}
// Compute the set of grandparent files that overlap this compaction
// (parent == level+1; grandparent == level+2)
- if level+2 < kNumLevels {
- v.tables[level+2].getOverlaps(amin.ukey(), amax.ukey(), &c.gp, true, s.icmp.ucmp)
+ if c.level+2 < c.s.o.GetNumLevel() {
+ c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
}
c.tables[0], c.tables[1] = t0, t1
- c.min, c.max = min, max
+ c.imin, c.imax = imin, imax
}
// Check whether compaction is trivial.
func (c *compaction) trivial() bool {
- return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= kMaxGrandParentOverlapBytes
+ return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
}
-func (c *compaction) isBaseLevelForKey(key []byte) bool {
- s := c.s
- v := c.version
-
- for level, tt := range v.tables[c.level+2:] {
- for c.tPtrs[level] < len(tt) {
- t := tt[c.tPtrs[level]]
- if s.icmp.uCompare(key, t.max.ukey()) <= 0 {
- // We've advanced far enough
- if s.icmp.uCompare(key, t.min.ukey()) >= 0 {
- // Key falls in this file's range, so definitely not base level
+func (c *compaction) baseLevelForKey(ukey []byte) bool {
+ for level, tables := range c.v.tables[c.level+2:] {
+ for c.tPtrs[level] < len(tables) {
+ t := tables[c.tPtrs[level]]
+ if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
+ // We've advanced far enough.
+ if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+ // Key falls in this file's range, so definitely not base level.
return false
}
break
@@ -349,55 +395,61 @@ func (c *compaction) isBaseLevelForKey(key []byte) bool {
return true
}
-func (c *compaction) shouldStopBefore(key iKey) bool {
- for ; c.gpidx < len(c.gp); c.gpidx++ {
- gp := c.gp[c.gpidx]
- if c.s.icmp.Compare(key, gp.max) <= 0 {
+func (c *compaction) shouldStopBefore(ikey iKey) bool {
+ for ; c.gpi < len(c.gp); c.gpi++ {
+ gp := c.gp[c.gpi]
+ if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
break
}
if c.seenKey {
- c.overlappedBytes += gp.size
+ c.gpOverlappedBytes += gp.size
}
}
c.seenKey = true
- if c.overlappedBytes > kMaxGrandParentOverlapBytes {
- // Too much overlap for current output; start new output
- c.overlappedBytes = 0
+ if c.gpOverlappedBytes > c.maxGPOverlaps {
+ // Too much overlap for current output; start new output.
+ c.gpOverlappedBytes = 0
return true
}
return false
}
+// Creates an iterator.
func (c *compaction) newIterator() iterator.Iterator {
- s := c.s
-
- level := c.level
- icap := 2
+ // Creates iterator slice.
+ icap := len(c.tables)
if c.level == 0 {
+ // Special case for level-0
icap = len(c.tables[0]) + 1
}
its := make([]iterator.Iterator, 0, icap)
+ // Options.
ro := &opt.ReadOptions{
DontFillCache: true,
+ Strict: opt.StrictOverride,
+ }
+ strict := c.s.o.GetStrict(opt.StrictCompaction)
+ if strict {
+ ro.Strict |= opt.StrictReader
}
- strict := s.o.GetStrict(opt.StrictIterator)
- for i, tt := range c.tables {
- if len(tt) == 0 {
+ for i, tables := range c.tables {
+ if len(tables) == 0 {
continue
}
- if level+i == 0 {
- for _, t := range tt {
- its = append(its, s.tops.newIterator(t, nil, ro))
+ // Level-0 tables are not sorted and may overlap each other.
+ if c.level+i == 0 {
+ for _, t := range tables {
+ its = append(its, c.s.tops.newIterator(t, nil, ro))
}
} else {
- it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, s.icmp, nil, ro), strict, true)
+ it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
its = append(its, it)
}
}
- return iterator.NewMergedIterator(its, s.icmp, true)
+ return iterator.NewMergedIterator(its, c.s.icmp, strict)
}
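
The compaction changes above replace the fixed kMaxGrandParentOverlapBytes cutoff with a per-level GetCompactionGPOverlaps limit. A minimal standalone sketch of the shouldStopBefore accounting, using hypothetical names and plain string keys instead of goleveldb's internal keys:

package main

import "fmt"

// gpFile stands in for a grandparent table: only its upper key and size matter here.
type gpFile struct {
	maxKey string
	size   uint64
}

// overlapCutoff mimics the shouldStopBefore accounting: walk the grandparent
// tables that end before the current key, sum their sizes, and signal a new
// output once the accumulated overlap exceeds the limit.
func overlapCutoff(gp []gpFile, keys []string, limit uint64) []bool {
	var (
		gpi        int
		seenKey    bool
		overlapped uint64
		stops      []bool
	)
	for _, k := range keys {
		for ; gpi < len(gp); gpi++ {
			if k <= gp[gpi].maxKey {
				break
			}
			if seenKey {
				overlapped += gp[gpi].size
			}
		}
		seenKey = true
		if overlapped > limit {
			overlapped = 0
			stops = append(stops, true)
		} else {
			stops = append(stops, false)
		}
	}
	return stops
}

func main() {
	gp := []gpFile{{"b", 40}, {"d", 40}, {"f", 40}}
	fmt.Println(overlapCutoff(gp, []string{"a", "c", "e", "g"}, 50)) // [false false true true]
}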
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
index c50fda737..1bdcc68f5 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -9,11 +9,11 @@ package leveldb
import (
"bufio"
"encoding/binary"
- "errors"
"io"
-)
+ "strings"
-var errCorruptManifest = errors.New("leveldb: corrupt manifest")
+ "github.com/syndtr/goleveldb/leveldb/errors"
+)
type byteReader interface {
io.Reader
@@ -22,32 +22,28 @@ type byteReader interface {
// These numbers are written to disk and should not be changed.
const (
- recComparer = 1
- recJournalNum = 2
- recNextNum = 3
- recSeq = 4
- recCompactionPointer = 5
- recDeletedTable = 6
- recNewTable = 7
+ recComparer = 1
+ recJournalNum = 2
+ recNextFileNum = 3
+ recSeqNum = 4
+ recCompPtr = 5
+ recDelTable = 6
+ recAddTable = 7
// 8 was used for large value refs
recPrevJournalNum = 9
)
type cpRecord struct {
level int
- key iKey
+ ikey iKey
}
-type ntRecord struct {
+type atRecord struct {
level int
num uint64
size uint64
- min iKey
- max iKey
-}
-
-func (r ntRecord) makeFile(s *session) *tFile {
- return newTFile(s.getTableFile(r.num), r.size, r.min, r.max)
+ imin iKey
+ imax iKey
}
type dtRecord struct {
@@ -56,17 +52,20 @@ type dtRecord struct {
}
type sessionRecord struct {
- hasRec int
- comparer string
- journalNum uint64
- prevJournalNum uint64
- nextNum uint64
- seq uint64
- compactionPointers []cpRecord
- addedTables []ntRecord
- deletedTables []dtRecord
- scratch [binary.MaxVarintLen64]byte
- err error
+ numLevel int
+
+ hasRec int
+ comparer string
+ journalNum uint64
+ prevJournalNum uint64
+ nextFileNum uint64
+ seqNum uint64
+ compPtrs []cpRecord
+ addedTables []atRecord
+ deletedTables []dtRecord
+
+ scratch [binary.MaxVarintLen64]byte
+ err error
}
func (p *sessionRecord) has(rec int) bool {
@@ -88,47 +87,47 @@ func (p *sessionRecord) setPrevJournalNum(num uint64) {
p.prevJournalNum = num
}
-func (p *sessionRecord) setNextNum(num uint64) {
- p.hasRec |= 1 << recNextNum
- p.nextNum = num
+func (p *sessionRecord) setNextFileNum(num uint64) {
+ p.hasRec |= 1 << recNextFileNum
+ p.nextFileNum = num
}
-func (p *sessionRecord) setSeq(seq uint64) {
- p.hasRec |= 1 << recSeq
- p.seq = seq
+func (p *sessionRecord) setSeqNum(num uint64) {
+ p.hasRec |= 1 << recSeqNum
+ p.seqNum = num
}
-func (p *sessionRecord) addCompactionPointer(level int, key iKey) {
- p.hasRec |= 1 << recCompactionPointer
- p.compactionPointers = append(p.compactionPointers, cpRecord{level, key})
+func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
+ p.hasRec |= 1 << recCompPtr
+ p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
}
-func (p *sessionRecord) resetCompactionPointers() {
- p.hasRec &= ^(1 << recCompactionPointer)
- p.compactionPointers = p.compactionPointers[:0]
+func (p *sessionRecord) resetCompPtrs() {
+ p.hasRec &= ^(1 << recCompPtr)
+ p.compPtrs = p.compPtrs[:0]
}
-func (p *sessionRecord) addTable(level int, num, size uint64, min, max iKey) {
- p.hasRec |= 1 << recNewTable
- p.addedTables = append(p.addedTables, ntRecord{level, num, size, min, max})
+func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) {
+ p.hasRec |= 1 << recAddTable
+ p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
}
func (p *sessionRecord) addTableFile(level int, t *tFile) {
- p.addTable(level, t.file.Num(), t.size, t.min, t.max)
+ p.addTable(level, t.file.Num(), t.size, t.imin, t.imax)
}
func (p *sessionRecord) resetAddedTables() {
- p.hasRec &= ^(1 << recNewTable)
+ p.hasRec &= ^(1 << recAddTable)
p.addedTables = p.addedTables[:0]
}
-func (p *sessionRecord) deleteTable(level int, num uint64) {
- p.hasRec |= 1 << recDeletedTable
+func (p *sessionRecord) delTable(level int, num uint64) {
+ p.hasRec |= 1 << recDelTable
p.deletedTables = append(p.deletedTables, dtRecord{level, num})
}
func (p *sessionRecord) resetDeletedTables() {
- p.hasRec &= ^(1 << recDeletedTable)
+ p.hasRec &= ^(1 << recDelTable)
p.deletedTables = p.deletedTables[:0]
}
@@ -161,43 +160,45 @@ func (p *sessionRecord) encode(w io.Writer) error {
p.putUvarint(w, recJournalNum)
p.putUvarint(w, p.journalNum)
}
- if p.has(recNextNum) {
- p.putUvarint(w, recNextNum)
- p.putUvarint(w, p.nextNum)
+ if p.has(recNextFileNum) {
+ p.putUvarint(w, recNextFileNum)
+ p.putUvarint(w, p.nextFileNum)
}
- if p.has(recSeq) {
- p.putUvarint(w, recSeq)
- p.putUvarint(w, p.seq)
+ if p.has(recSeqNum) {
+ p.putUvarint(w, recSeqNum)
+ p.putUvarint(w, p.seqNum)
}
- for _, cp := range p.compactionPointers {
- p.putUvarint(w, recCompactionPointer)
- p.putUvarint(w, uint64(cp.level))
- p.putBytes(w, cp.key)
+ for _, r := range p.compPtrs {
+ p.putUvarint(w, recCompPtr)
+ p.putUvarint(w, uint64(r.level))
+ p.putBytes(w, r.ikey)
}
- for _, t := range p.deletedTables {
- p.putUvarint(w, recDeletedTable)
- p.putUvarint(w, uint64(t.level))
- p.putUvarint(w, t.num)
+ for _, r := range p.deletedTables {
+ p.putUvarint(w, recDelTable)
+ p.putUvarint(w, uint64(r.level))
+ p.putUvarint(w, r.num)
}
- for _, t := range p.addedTables {
- p.putUvarint(w, recNewTable)
- p.putUvarint(w, uint64(t.level))
- p.putUvarint(w, t.num)
- p.putUvarint(w, t.size)
- p.putBytes(w, t.min)
- p.putBytes(w, t.max)
+ for _, r := range p.addedTables {
+ p.putUvarint(w, recAddTable)
+ p.putUvarint(w, uint64(r.level))
+ p.putUvarint(w, r.num)
+ p.putUvarint(w, r.size)
+ p.putBytes(w, r.imin)
+ p.putBytes(w, r.imax)
}
return p.err
}
-func (p *sessionRecord) readUvarint(r io.ByteReader) uint64 {
+func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF bool) uint64 {
if p.err != nil {
return 0
}
x, err := binary.ReadUvarint(r)
if err != nil {
- if err == io.EOF {
- p.err = errCorruptManifest
+ if err == io.ErrUnexpectedEOF || (mayEOF == false && err == io.EOF) {
+ p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
+ } else if strings.HasPrefix(err.Error(), "binary:") {
+ p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, err.Error()})
} else {
p.err = err
}
@@ -206,35 +207,39 @@ func (p *sessionRecord) readUvarint(r io.ByteReader) uint64 {
return x
}
-func (p *sessionRecord) readBytes(r byteReader) []byte {
+func (p *sessionRecord) readUvarint(field string, r io.ByteReader) uint64 {
+ return p.readUvarintMayEOF(field, r, false)
+}
+
+func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
if p.err != nil {
return nil
}
- n := p.readUvarint(r)
+ n := p.readUvarint(field, r)
if p.err != nil {
return nil
}
x := make([]byte, n)
_, p.err = io.ReadFull(r, x)
if p.err != nil {
- if p.err == io.EOF {
- p.err = errCorruptManifest
+ if p.err == io.ErrUnexpectedEOF {
+ p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
}
return nil
}
return x
}
-func (p *sessionRecord) readLevel(r io.ByteReader) int {
+func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
if p.err != nil {
return 0
}
- x := p.readUvarint(r)
+ x := p.readUvarint(field, r)
if p.err != nil {
return 0
}
- if x >= kNumLevels {
- p.err = errCorruptManifest
+ if x >= uint64(p.numLevel) {
+ p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
return 0
}
return int(x)
@@ -247,59 +252,59 @@ func (p *sessionRecord) decode(r io.Reader) error {
}
p.err = nil
for p.err == nil {
- rec, err := binary.ReadUvarint(br)
- if err != nil {
- if err == io.EOF {
- err = nil
+ rec := p.readUvarintMayEOF("field-header", br, true)
+ if p.err != nil {
+ if p.err == io.EOF {
+ return nil
}
- return err
+ return p.err
}
switch rec {
case recComparer:
- x := p.readBytes(br)
+ x := p.readBytes("comparer", br)
if p.err == nil {
p.setComparer(string(x))
}
case recJournalNum:
- x := p.readUvarint(br)
+ x := p.readUvarint("journal-num", br)
if p.err == nil {
p.setJournalNum(x)
}
case recPrevJournalNum:
- x := p.readUvarint(br)
+ x := p.readUvarint("prev-journal-num", br)
if p.err == nil {
p.setPrevJournalNum(x)
}
- case recNextNum:
- x := p.readUvarint(br)
+ case recNextFileNum:
+ x := p.readUvarint("next-file-num", br)
if p.err == nil {
- p.setNextNum(x)
+ p.setNextFileNum(x)
}
- case recSeq:
- x := p.readUvarint(br)
+ case recSeqNum:
+ x := p.readUvarint("seq-num", br)
if p.err == nil {
- p.setSeq(x)
+ p.setSeqNum(x)
}
- case recCompactionPointer:
- level := p.readLevel(br)
- key := p.readBytes(br)
+ case recCompPtr:
+ level := p.readLevel("comp-ptr.level", br)
+ ikey := p.readBytes("comp-ptr.ikey", br)
if p.err == nil {
- p.addCompactionPointer(level, iKey(key))
+ p.addCompPtr(level, iKey(ikey))
}
- case recNewTable:
- level := p.readLevel(br)
- num := p.readUvarint(br)
- size := p.readUvarint(br)
- min := p.readBytes(br)
- max := p.readBytes(br)
+ case recAddTable:
+ level := p.readLevel("add-table.level", br)
+ num := p.readUvarint("add-table.num", br)
+ size := p.readUvarint("add-table.size", br)
+ imin := p.readBytes("add-table.imin", br)
+ imax := p.readBytes("add-table.imax", br)
if p.err == nil {
- p.addTable(level, num, size, min, max)
+ p.addTable(level, num, size, imin, imax)
}
- case recDeletedTable:
- level := p.readLevel(br)
- num := p.readUvarint(br)
+ case recDelTable:
+ level := p.readLevel("del-table.level", br)
+ num := p.readUvarint("del-table.num", br)
if p.err == nil {
- p.deleteTable(level, num)
+ p.delTable(level, num)
}
}
}
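
The session record above is serialized as a flat sequence of (tag, value) fields, and decode treats a clean EOF between fields as the normal end of a record. A minimal sketch of that wire shape for the uvarint-valued fields, using only the standard library (the tag constants mirror the rec* values above; string fields, which carry a length prefix, are omitted):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	tagJournalNum = 2
	tagNextFile   = 3
	tagSeqNum     = 4
)

// putField appends a (tag, value) pair as two uvarints.
func putField(buf *bytes.Buffer, tag, val uint64) {
	var scratch [binary.MaxVarintLen64]byte
	buf.Write(scratch[:binary.PutUvarint(scratch[:], tag)])
	buf.Write(scratch[:binary.PutUvarint(scratch[:], val)])
}

func main() {
	var buf bytes.Buffer
	putField(&buf, tagJournalNum, 7)
	putField(&buf, tagNextFile, 42)
	putField(&buf, tagSeqNum, 100000)

	// Decode until EOF, mirroring sessionRecord.decode's tag switch.
	for {
		tag, err := binary.ReadUvarint(&buf)
		if err != nil {
			break // EOF between fields is the normal stop condition
		}
		val, err := binary.ReadUvarint(&buf)
		if err != nil {
			break // a short read here would be reported as a corrupted manifest
		}
		fmt.Printf("tag=%d val=%d\n", tag, val)
	}
}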
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
index 029fabfe6..c0c035ae3 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
@@ -9,6 +9,8 @@ package leveldb
import (
"bytes"
"testing"
+
+ "github.com/syndtr/goleveldb/leveldb/opt"
)
func decodeEncode(v *sessionRecord) (res bool, err error) {
@@ -17,7 +19,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
if err != nil {
return
}
- v2 := new(sessionRecord)
+ v2 := &sessionRecord{numLevel: opt.DefaultNumLevel}
err = v.decode(b)
if err != nil {
return
@@ -32,7 +34,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
func TestSessionRecord_EncodeDecode(t *testing.T) {
big := uint64(1) << 50
- v := new(sessionRecord)
+ v := &sessionRecord{numLevel: opt.DefaultNumLevel}
i := uint64(0)
test := func() {
res, err := decodeEncode(v)
@@ -47,16 +49,16 @@ func TestSessionRecord_EncodeDecode(t *testing.T) {
for ; i < 4; i++ {
test()
v.addTable(3, big+300+i, big+400+i,
- newIKey([]byte("foo"), big+500+1, tVal),
- newIKey([]byte("zoo"), big+600+1, tDel))
- v.deleteTable(4, big+700+i)
- v.addCompactionPointer(int(i), newIKey([]byte("x"), big+900+1, tVal))
+ newIkey([]byte("foo"), big+500+1, ktVal),
+ newIkey([]byte("zoo"), big+600+1, ktDel))
+ v.delTable(4, big+700+i)
+ v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal))
}
v.setComparer("foo")
v.setJournalNum(big + 100)
v.setPrevJournalNum(big + 99)
- v.setNextNum(big + 200)
- v.setSeq(big + 1000)
+ v.setNextFileNum(big + 200)
+ v.setSeqNum(big + 1000)
test()
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
index bf412b030..007c02cde 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
@@ -14,7 +14,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)
-// logging
+// Logging.
type dropper struct {
s *session
@@ -22,22 +22,17 @@ type dropper struct {
}
func (d dropper) Drop(err error) {
- if e, ok := err.(journal.DroppedError); ok {
+ if e, ok := err.(*journal.ErrCorrupted); ok {
d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
} else {
d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)
}
}
-func (s *session) log(v ...interface{}) {
- s.stor.Log(fmt.Sprint(v...))
-}
-
-func (s *session) logf(format string, v ...interface{}) {
- s.stor.Log(fmt.Sprintf(format, v...))
-}
+func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) }
+func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
-// file utils
+// File utils.
func (s *session) getJournalFile(num uint64) storage.File {
return s.stor.GetFile(num, storage.TypeJournal)
@@ -56,9 +51,14 @@ func (s *session) newTemp() storage.File {
return s.stor.GetFile(num, storage.TypeTemp)
}
-// session state
+func (s *session) tableFileFromRecord(r atRecord) *tFile {
+ return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax)
+}
+
+// Session state.
-// Get current version.
+// Get current version. This will increment the version ref; the caller must
+// call version.release (exactly once) after use.
func (s *session) version() *version {
s.vmu.Lock()
defer s.vmu.Unlock()
@@ -66,85 +66,80 @@ func (s *session) version() *version {
return s.stVersion
}
-// Get current version; no barrier.
-func (s *session) version_NB() *version {
- return s.stVersion
-}
-
// Set current version to v.
func (s *session) setVersion(v *version) {
s.vmu.Lock()
- v.ref = 1
+ v.ref = 1 // Held by the session.
if old := s.stVersion; old != nil {
- v.ref++
+ v.ref++ // Held by the old version.
old.next = v
- old.release_NB()
+ old.releaseNB()
}
s.stVersion = v
s.vmu.Unlock()
}
// Get current unused file number.
-func (s *session) fileNum() uint64 {
- return atomic.LoadUint64(&s.stFileNum)
+func (s *session) nextFileNum() uint64 {
+ return atomic.LoadUint64(&s.stNextFileNum)
}
-// Get current unused file number to num.
-func (s *session) setFileNum(num uint64) {
- atomic.StoreUint64(&s.stFileNum, num)
+// Set current unused file number to num.
+func (s *session) setNextFileNum(num uint64) {
+ atomic.StoreUint64(&s.stNextFileNum, num)
}
// Mark file number as used.
func (s *session) markFileNum(num uint64) {
- num += 1
+ nextFileNum := num + 1
for {
- old, x := s.stFileNum, num
+ old, x := s.stNextFileNum, nextFileNum
if old > x {
x = old
}
- if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) {
+ if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
break
}
}
}
// Allocate a file number.
-func (s *session) allocFileNum() (num uint64) {
- return atomic.AddUint64(&s.stFileNum, 1) - 1
+func (s *session) allocFileNum() uint64 {
+ return atomic.AddUint64(&s.stNextFileNum, 1) - 1
}
// Reuse given file number.
func (s *session) reuseFileNum(num uint64) {
for {
- old, x := s.stFileNum, num
+ old, x := s.stNextFileNum, num
if old != x+1 {
x = old
}
- if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) {
+ if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
break
}
}
}
-// manifest related utils
+// Manifest related utils.
// Fill given session record obj with current states; need external
// synchronization.
func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
- r.setNextNum(s.fileNum())
+ r.setNextFileNum(s.nextFileNum())
if snapshot {
if !r.has(recJournalNum) {
r.setJournalNum(s.stJournalNum)
}
- if !r.has(recSeq) {
- r.setSeq(s.stSeq)
+ if !r.has(recSeqNum) {
+ r.setSeqNum(s.stSeqNum)
}
- for level, ik := range s.stCPtrs {
+ for level, ik := range s.stCompPtrs {
if ik != nil {
- r.addCompactionPointer(level, ik)
+ r.addCompPtr(level, ik)
}
}
@@ -152,7 +147,7 @@ func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
}
}
-// Mark if record has been commited, this will update session state;
+// Mark if record has been committed; this will update session state;
// need external synchronization.
func (s *session) recordCommited(r *sessionRecord) {
if r.has(recJournalNum) {
@@ -163,12 +158,12 @@ func (s *session) recordCommited(r *sessionRecord) {
s.stPrevJournalNum = r.prevJournalNum
}
- if r.has(recSeq) {
- s.stSeq = r.seq
+ if r.has(recSeqNum) {
+ s.stSeqNum = r.seqNum
}
- for _, p := range r.compactionPointers {
- s.stCPtrs[p.level] = iKey(p.key)
+ for _, p := range r.compPtrs {
+ s.stCompPtrs[p.level] = iKey(p.ikey)
}
}
@@ -183,10 +178,11 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
jw := journal.NewWriter(writer)
if v == nil {
- v = s.version_NB()
+ v = s.version()
+ defer v.release()
}
if rec == nil {
- rec = new(sessionRecord)
+ rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
}
s.fillRecord(rec, true)
v.fillRecord(rec)
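
markFileNum above raises stNextFileNum with a compare-and-swap loop so that concurrent markers never move the counter backwards. The same pattern in isolation, as a rough sketch with stdlib atomics (names are illustrative, not the session fields above):

package main

import (
	"fmt"
	"sync/atomic"
)

// markUsed raises *next to num+1 if it is currently lower, never lowering it.
// Concurrent callers are safe because the store only succeeds when the value
// observed in the comparison is still current.
func markUsed(next *uint64, num uint64) {
	want := num + 1
	for {
		old := atomic.LoadUint64(next)
		x := want
		if old > x {
			x = old
		}
		if atomic.CompareAndSwapUint64(next, old, x) {
			return
		}
	}
}

func main() {
	var next uint64 = 5
	markUsed(&next, 3)                    // no-op: 3+1 <= 5
	markUsed(&next, 9)                    // raises the counter to 10
	fmt.Println(atomic.LoadUint64(&next)) // 10
}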
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
index 75439f6db..46cc9d070 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
@@ -221,7 +221,7 @@ func (fs *fileStorage) GetManifest() (f File, err error) {
fs.log(fmt.Sprintf("skipping %s: invalid file name", fn))
continue
}
- if _, e1 := strconv.ParseUint(fn[7:], 10, 0); e1 != nil {
+ if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil {
fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1))
continue
}
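
The one-character change above moves the index at which the numeric suffix of a manifest file name is parsed. Slicing by a hard-coded index is easy to get wrong; the following is a sketch of a prefix-based alternative, assuming the MANIFEST-%06d naming shown later in storage.go (illustrative only, not the patched code):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// manifestNum extracts the numeric suffix of a name such as "MANIFEST-000123".
// Trimming a named prefix avoids the kind of off-by-one fixed in the hunk above.
func manifestNum(fn string) (uint64, bool) {
	const prefix = "MANIFEST-"
	if !strings.HasPrefix(fn, prefix) {
		return 0, false
	}
	n, err := strconv.ParseUint(fn[len(prefix):], 10, 64)
	if err != nil {
		return 0, false
	}
	return n, true
}

func main() {
	fmt.Println(manifestNum("MANIFEST-000123")) // 123 true
	fmt.Println(manifestNum("000123.ldb"))      // 0 false
}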
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
new file mode 100644
index 000000000..102031bfd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build solaris
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+type unixFileLock struct {
+ f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+ if err := setFileLock(fl.f, false); err != nil {
+ return err
+ }
+ return fl.f.Close()
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+ f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return
+ }
+ err = setFileLock(f, true)
+ if err != nil {
+ f.Close()
+ return
+ }
+ fl = &unixFileLock{f: f}
+ return
+}
+
+func setFileLock(f *os.File, lock bool) error {
+ flock := syscall.Flock_t{
+ Type: syscall.F_UNLCK,
+ Start: 0,
+ Len: 0,
+ Whence: 1,
+ }
+ if lock {
+ flock.Type = syscall.F_WRLCK
+ }
+ return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock)
+}
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
index 73499afc8..d0a604b7a 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
@@ -4,7 +4,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// +build darwin freebsd linux netbsd openbsd
+// +build darwin dragonfly freebsd linux netbsd openbsd
package storage
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
index de5694888..85dd70b06 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
@@ -67,7 +67,7 @@ type Writer interface {
Syncer
}
-// File is the file.
+// File is the file. A file instance must be goroutine-safe.
type File interface {
// Open opens the file for read. Returns os.ErrNotExist error
// if the file does not exist.
@@ -94,7 +94,7 @@ type File interface {
Remove() error
}
-// Storage is the storage.
+// Storage is the storage. A storage instance must be goroutine-safe.
type Storage interface {
// Lock locks the storage. Any subsequent attempt to call Lock will fail
// until the last lock released.
@@ -125,3 +125,33 @@ type Storage interface {
// Other methods should not be called after the storage has been closed.
Close() error
}
+
+// FileInfo wraps basic file info.
+type FileInfo struct {
+ Type FileType
+ Num uint64
+}
+
+func (fi FileInfo) String() string {
+ switch fi.Type {
+ case TypeManifest:
+ return fmt.Sprintf("MANIFEST-%06d", fi.Num)
+ case TypeJournal:
+ return fmt.Sprintf("%06d.log", fi.Num)
+ case TypeTable:
+ return fmt.Sprintf("%06d.ldb", fi.Num)
+ case TypeTemp:
+ return fmt.Sprintf("%06d.tmp", fi.Num)
+ default:
+ return fmt.Sprintf("%#x-%d", fi.Type, fi.Num)
+ }
+}
+
+// NewFileInfo creates new FileInfo from the given File. It returns nil
+// if File is nil.
+func NewFileInfo(f File) *FileInfo {
+ if f == nil {
+ return nil
+ }
+ return &FileInfo{f.Type(), f.Num()}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
index 27e76d707..dc1f1fb54 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
@@ -11,6 +11,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "math/rand"
"os"
"path/filepath"
"sync"
@@ -28,11 +29,25 @@ var (
)
var (
- tsFSEnv = os.Getenv("GOLEVELDB_USEFS")
- tsKeepFS = tsFSEnv == "2"
- tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
- tsMU = &sync.Mutex{}
- tsNum = 0
+ tsFSEnv = os.Getenv("GOLEVELDB_USEFS")
+ tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR")
+ tsKeepFS = tsFSEnv == "2"
+ tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
+ tsMU = &sync.Mutex{}
+ tsNum = 0
+)
+
+type tsOp uint
+
+const (
+ tsOpOpen tsOp = iota
+ tsOpCreate
+ tsOpRead
+ tsOpReadAt
+ tsOpWrite
+ tsOpSync
+
+ tsOpNum
)
type tsLock struct {
@@ -53,6 +68,9 @@ type tsReader struct {
func (tr tsReader) Read(b []byte) (n int, err error) {
ts := tr.tf.ts
ts.countRead(tr.tf.Type())
+ if tr.tf.shouldErrLocked(tsOpRead) {
+ return 0, errors.New("leveldb.testStorage: emulated read error")
+ }
n, err = tr.Reader.Read(b)
if err != nil && err != io.EOF {
ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err)
@@ -63,6 +81,9 @@ func (tr tsReader) Read(b []byte) (n int, err error) {
func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) {
ts := tr.tf.ts
ts.countRead(tr.tf.Type())
+ if tr.tf.shouldErrLocked(tsOpReadAt) {
+ return 0, errors.New("leveldb.testStorage: emulated readAt error")
+ }
n, err = tr.Reader.ReadAt(b, off)
if err != nil && err != io.EOF {
ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err)
@@ -82,15 +103,12 @@ type tsWriter struct {
}
func (tw tsWriter) Write(b []byte) (n int, err error) {
- ts := tw.tf.ts
- ts.mu.Lock()
- defer ts.mu.Unlock()
- if ts.emuWriteErr&tw.tf.Type() != 0 {
+ if tw.tf.shouldErrLocked(tsOpWrite) {
return 0, errors.New("leveldb.testStorage: emulated write error")
}
n, err = tw.Writer.Write(b)
if err != nil {
- ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err)
+ tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err)
}
return
}
@@ -98,23 +116,23 @@ func (tw tsWriter) Write(b []byte) (n int, err error) {
func (tw tsWriter) Sync() (err error) {
ts := tw.tf.ts
ts.mu.Lock()
- defer ts.mu.Unlock()
for ts.emuDelaySync&tw.tf.Type() != 0 {
ts.cond.Wait()
}
- if ts.emuSyncErr&tw.tf.Type() != 0 {
+ ts.mu.Unlock()
+ if tw.tf.shouldErrLocked(tsOpSync) {
return errors.New("leveldb.testStorage: emulated sync error")
}
err = tw.Writer.Sync()
if err != nil {
- ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err)
+ tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err)
}
return
}
func (tw tsWriter) Close() (err error) {
err = tw.Writer.Close()
- tw.tf.close("reader", err)
+ tw.tf.close("writer", err)
return
}
@@ -127,6 +145,16 @@ func (tf tsFile) x() uint64 {
return tf.Num()<<typeShift | uint64(tf.Type())
}
+func (tf tsFile) shouldErr(op tsOp) bool {
+ return tf.ts.shouldErr(tf, op)
+}
+
+func (tf tsFile) shouldErrLocked(op tsOp) bool {
+ tf.ts.mu.Lock()
+ defer tf.ts.mu.Unlock()
+ return tf.shouldErr(op)
+}
+
func (tf tsFile) checkOpen(m string) error {
ts := tf.ts
if writer, ok := ts.opens[tf.x()]; ok {
@@ -163,7 +191,7 @@ func (tf tsFile) Open() (r storage.Reader, err error) {
if err != nil {
return
}
- if ts.emuOpenErr&tf.Type() != 0 {
+ if tf.shouldErr(tsOpOpen) {
err = errors.New("leveldb.testStorage: emulated open error")
return
}
@@ -190,7 +218,7 @@ func (tf tsFile) Create() (w storage.Writer, err error) {
if err != nil {
return
}
- if ts.emuCreateErr&tf.Type() != 0 {
+ if tf.shouldErr(tsOpCreate) {
err = errors.New("leveldb.testStorage: emulated create error")
return
}
@@ -205,6 +233,23 @@ func (tf tsFile) Create() (w storage.Writer, err error) {
return
}
+func (tf tsFile) Replace(newfile storage.File) (err error) {
+ ts := tf.ts
+ ts.mu.Lock()
+ defer ts.mu.Unlock()
+ err = tf.checkOpen("replace")
+ if err != nil {
+ return
+ }
+ err = tf.File.Replace(newfile.(tsFile).File)
+ if err != nil {
+ ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
+ } else {
+ ts.t.Logf("I: file replace, num=%d type=%v", tf.Num(), tf.Type())
+ }
+ return
+}
+
func (tf tsFile) Remove() (err error) {
ts := tf.ts
ts.mu.Lock()
@@ -231,51 +276,75 @@ type testStorage struct {
cond sync.Cond
// Open files, true=writer, false=reader
opens map[uint64]bool
- emuOpenErr storage.FileType
- emuCreateErr storage.FileType
emuDelaySync storage.FileType
- emuWriteErr storage.FileType
- emuSyncErr storage.FileType
ignoreOpenErr storage.FileType
readCnt uint64
readCntEn storage.FileType
+
+ emuErr [tsOpNum]storage.FileType
+ emuErrOnce [tsOpNum]storage.FileType
+ emuRandErr [tsOpNum]storage.FileType
+ emuRandErrProb int
+ emuErrOnceMap map[uint64]uint
+ emuRandRand *rand.Rand
+}
+
+func (ts *testStorage) shouldErr(tf tsFile, op tsOp) bool {
+ if ts.emuErr[op]&tf.Type() != 0 {
+ return true
+ } else if ts.emuRandErr[op]&tf.Type() != 0 || ts.emuErrOnce[op]&tf.Type() != 0 {
+ sop := uint(1) << op
+ eop := ts.emuErrOnceMap[tf.x()]
+ if eop&sop == 0 && (ts.emuRandRand.Int()%ts.emuRandErrProb == 0 || ts.emuErrOnce[op]&tf.Type() != 0) {
+ ts.emuErrOnceMap[tf.x()] = eop | sop
+ ts.t.Logf("I: emulated error: file=%d type=%v op=%v", tf.Num(), tf.Type(), op)
+ return true
+ }
+ }
+ return false
}
-func (ts *testStorage) SetOpenErr(t storage.FileType) {
+func (ts *testStorage) SetEmuErr(t storage.FileType, ops ...tsOp) {
ts.mu.Lock()
- ts.emuOpenErr = t
+ for _, op := range ops {
+ ts.emuErr[op] = t
+ }
ts.mu.Unlock()
}
-func (ts *testStorage) SetCreateErr(t storage.FileType) {
+func (ts *testStorage) SetEmuErrOnce(t storage.FileType, ops ...tsOp) {
ts.mu.Lock()
- ts.emuCreateErr = t
+ for _, op := range ops {
+ ts.emuErrOnce[op] = t
+ }
ts.mu.Unlock()
}
-func (ts *testStorage) DelaySync(t storage.FileType) {
+func (ts *testStorage) SetEmuRandErr(t storage.FileType, ops ...tsOp) {
ts.mu.Lock()
- ts.emuDelaySync |= t
- ts.cond.Broadcast()
+ for _, op := range ops {
+ ts.emuRandErr[op] = t
+ }
ts.mu.Unlock()
}
-func (ts *testStorage) ReleaseSync(t storage.FileType) {
+func (ts *testStorage) SetEmuRandErrProb(prob int) {
ts.mu.Lock()
- ts.emuDelaySync &= ^t
- ts.cond.Broadcast()
+ ts.emuRandErrProb = prob
ts.mu.Unlock()
}
-func (ts *testStorage) SetWriteErr(t storage.FileType) {
+func (ts *testStorage) DelaySync(t storage.FileType) {
ts.mu.Lock()
- ts.emuWriteErr = t
+ ts.emuDelaySync |= t
+ ts.cond.Broadcast()
ts.mu.Unlock()
}
-func (ts *testStorage) SetSyncErr(t storage.FileType) {
+func (ts *testStorage) ReleaseSync(t storage.FileType) {
ts.mu.Lock()
- ts.emuSyncErr = t
+ ts.emuDelaySync &= ^t
+ ts.cond.Broadcast()
ts.mu.Unlock()
}
@@ -413,7 +482,11 @@ func newTestStorage(t *testing.T) *testStorage {
num := tsNum
tsNum++
tsMU.Unlock()
- path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
+ tempdir := tsTempdir
+ if tempdir == "" {
+ tempdir = os.TempDir()
+ }
+ path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
if _, err := os.Stat(path); err != nil {
stor, err = storage.OpenFile(path)
if err != nil {
@@ -436,6 +509,10 @@ func newTestStorage(t *testing.T) *testStorage {
}
f.Close()
}
+ if t.Failed() {
+ t.Logf("testing failed, test DB preserved at %s", path)
+ return nil
+ }
if tsKeepFS {
return nil
}
@@ -449,10 +526,13 @@ func newTestStorage(t *testing.T) *testStorage {
stor = storage.NewMemStorage()
}
ts := &testStorage{
- t: t,
- Storage: stor,
- closeFn: closeFn,
- opens: make(map[uint64]bool),
+ t: t,
+ Storage: stor,
+ closeFn: closeFn,
+ opens: make(map[uint64]bool),
+ emuErrOnceMap: make(map[uint64]uint),
+ emuRandErrProb: 0x999,
+ emuRandRand: rand.New(rand.NewSource(0xfacedead)),
}
ts.cond.L = &ts.mu
return ts
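
The reworked test storage above gates emulated failures behind per-operation bitmasks and a single random probability (emuRandErrProb). A stripped-down version of the same fault-injection idea as a standalone io.Reader wrapper (names are illustrative, not the helpers above):

package main

import (
	"errors"
	"fmt"
	"io"
	"math/rand"
	"strings"
)

// flakyReader fails roughly once every prob reads, mimicking the
// emuRandErr/emuRandErrProb behaviour of the reworked test storage.
type flakyReader struct {
	r    io.Reader
	rnd  *rand.Rand
	prob int
}

func (f *flakyReader) Read(p []byte) (int, error) {
	if f.rnd.Intn(f.prob) == 0 {
		return 0, errors.New("emulated read error")
	}
	return f.r.Read(p)
}

func main() {
	fr := &flakyReader{
		r:    strings.NewReader("hello leveldb"),
		rnd:  rand.New(rand.NewSource(0xfacedead)),
		prob: 3,
	}
	buf := make([]byte, 4)
	for i := 0; i < 5; i++ {
		n, err := fr.Read(buf)
		fmt.Println(n, err)
	}
}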
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
index fdd5d2bcf..3e8df6af5 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
@@ -7,11 +7,11 @@
package leveldb
import (
+ "fmt"
"sort"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/cache"
- "github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
@@ -19,34 +19,41 @@ import (
"github.com/syndtr/goleveldb/leveldb/util"
)
-// table file
+// tFile holds basic information about a table.
type tFile struct {
- file storage.File
- seekLeft int32
- size uint64
- min, max iKey
+ file storage.File
+ seekLeft int32
+ size uint64
+ imin, imax iKey
}
-// test if key is after t
-func (t *tFile) isAfter(key []byte, ucmp comparer.BasicComparer) bool {
- return key != nil && ucmp.Compare(key, t.max.ukey()) > 0
+// Returns true if given key is after largest key of this table.
+func (t *tFile) after(icmp *iComparer, ukey []byte) bool {
+ return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0
}
-// test if key is before t
-func (t *tFile) isBefore(key []byte, ucmp comparer.BasicComparer) bool {
- return key != nil && ucmp.Compare(key, t.min.ukey()) < 0
+// Returns true if given key is before smallest key of this table.
+func (t *tFile) before(icmp *iComparer, ukey []byte) bool {
+ return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0
}
-func (t *tFile) incrSeek() int32 {
+// Returns true if the given key range overlaps with this table's key range.
+func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool {
+ return !t.after(icmp, umin) && !t.before(icmp, umax)
+}
+
+// Consumes one seek and returns the current seeks left.
+func (t *tFile) consumeSeek() int32 {
return atomic.AddInt32(&t.seekLeft, -1)
}
-func newTFile(file storage.File, size uint64, min, max iKey) *tFile {
+// Creates new tFile.
+func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile {
f := &tFile{
file: file,
size: size,
- min: min,
- max: max,
+ imin: imin,
+ imax: imax,
}
// We arrange to automatically compact this file after
@@ -70,33 +77,52 @@ func newTFile(file storage.File, size uint64, min, max iKey) *tFile {
return f
}
-// table files
+// tFiles holds multiple tFile.
type tFiles []*tFile
func (tf tFiles) Len() int { return len(tf) }
func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
+func (tf tFiles) nums() string {
+ x := "[ "
+ for i, f := range tf {
+ if i != 0 {
+ x += ", "
+ }
+ x += fmt.Sprint(f.file.Num())
+ }
+ x += " ]"
+ return x
+}
+
+// Returns true if the smallest key of table i is less than that of table j.
+// This is used for sorting by key in ascending order.
func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
a, b := tf[i], tf[j]
- n := icmp.Compare(a.min, b.min)
+ n := icmp.Compare(a.imin, b.imin)
if n == 0 {
return a.file.Num() < b.file.Num()
}
return n < 0
}
+// Returns true if the file number of table i is greater than that of table j.
+// This is used for sorting by file number in descending order.
func (tf tFiles) lessByNum(i, j int) bool {
return tf[i].file.Num() > tf[j].file.Num()
}
+// Sorts tables by key in ascending order.
func (tf tFiles) sortByKey(icmp *iComparer) {
sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
}
+// Sorts tables by file number in descending order.
func (tf tFiles) sortByNum() {
sort.Sort(&tFilesSortByNum{tFiles: tf})
}
+// Returns the total size of all tables.
func (tf tFiles) size() (sum uint64) {
for _, t := range tf {
sum += t.size
@@ -104,94 +130,107 @@ func (tf tFiles) size() (sum uint64) {
return sum
}
-func (tf tFiles) searchMin(key iKey, icmp *iComparer) int {
+// Searches for the smallest index of the table whose smallest
+// key is greater than or equal to the given key.
+func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {
return sort.Search(len(tf), func(i int) bool {
- return icmp.Compare(tf[i].min, key) >= 0
+ return icmp.Compare(tf[i].imin, ikey) >= 0
})
}
-func (tf tFiles) searchMax(key iKey, icmp *iComparer) int {
+// Searches for the smallest index of the table whose largest
+// key is greater than or equal to the given key.
+func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int {
return sort.Search(len(tf), func(i int) bool {
- return icmp.Compare(tf[i].max, key) >= 0
+ return icmp.Compare(tf[i].imax, ikey) >= 0
})
}
-func (tf tFiles) isOverlaps(min, max []byte, disjSorted bool, icmp *iComparer) bool {
- if !disjSorted {
- // Need to check against all files
+// Returns true if the given key range overlaps with one or more
+// tables' key ranges. If unsorted is true then binary search will not be used.
+func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
+ if unsorted {
+ // Check against all files.
for _, t := range tf {
- if !t.isAfter(min, icmp.ucmp) && !t.isBefore(max, icmp.ucmp) {
+ if t.overlaps(icmp, umin, umax) {
return true
}
}
return false
}
- var idx int
- if len(min) > 0 {
- // Find the earliest possible internal key for min
- idx = tf.searchMax(newIKey(min, kMaxSeq, tSeek), icmp)
+ i := 0
+ if len(umin) > 0 {
+ // Find the earliest possible internal key for min.
+ i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek))
}
-
- if idx >= len(tf) {
- // beginning of range is after all files, so no overlap
+ if i >= len(tf) {
+ // Beginning of range is after all files, so no overlap.
return false
}
- return !tf[idx].isBefore(max, icmp.ucmp)
+ return !tf[i].before(icmp, umax)
}
-func (tf tFiles) getOverlaps(min, max []byte, r *tFiles, disjSorted bool, ucmp comparer.BasicComparer) {
+// Returns tables whose key range overlaps with the given key range.
+// The range will be expanded if a ukey hops across tables.
+// If overlapped is true then the search will be restarted if umax is
+// expanded.
+// The dst content will be overwritten.
+func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
+ dst = dst[:0]
for i := 0; i < len(tf); {
t := tf[i]
- i++
- if t.isAfter(min, ucmp) || t.isBefore(max, ucmp) {
- continue
- }
-
- *r = append(*r, t)
- if !disjSorted {
- // Level-0 files may overlap each other. So check if the newly
- // added file has expanded the range. If so, restart search.
- if min != nil && ucmp.Compare(t.min.ukey(), min) < 0 {
- min = t.min.ukey()
- *r = nil
- i = 0
- } else if max != nil && ucmp.Compare(t.max.ukey(), max) > 0 {
- max = t.max.ukey()
- *r = nil
+ if t.overlaps(icmp, umin, umax) {
+ if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
+ umin = t.imin.ukey()
+ dst = dst[:0]
i = 0
+ continue
+ } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
+ umax = t.imax.ukey()
+ // Restart search if it is overlapped.
+ if overlapped {
+ dst = dst[:0]
+ i = 0
+ continue
+ }
}
+
+ dst = append(dst, t)
}
+ i++
}
- return
+ return dst
}
-func (tf tFiles) getRange(icmp *iComparer) (min, max iKey) {
+// Returns the tables' key range.
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) {
for i, t := range tf {
if i == 0 {
- min, max = t.min, t.max
+ imin, imax = t.imin, t.imax
continue
}
- if icmp.Compare(t.min, min) < 0 {
- min = t.min
+ if icmp.Compare(t.imin, imin) < 0 {
+ imin = t.imin
}
- if icmp.Compare(t.max, max) > 0 {
- max = t.max
+ if icmp.Compare(t.imax, imax) > 0 {
+ imax = t.imax
}
}
return
}
+// Creates iterator index from tables.
func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
if slice != nil {
var start, limit int
if slice.Start != nil {
- start = tf.searchMax(iKey(slice.Start), icmp)
+ start = tf.searchMax(icmp, iKey(slice.Start))
}
if slice.Limit != nil {
- limit = tf.searchMin(iKey(slice.Limit), icmp)
+ limit = tf.searchMin(icmp, iKey(slice.Limit))
} else {
limit = tf.Len()
}
@@ -206,6 +245,7 @@ func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range
})
}
+// Tables iterator index.
type tFilesArrayIndexer struct {
tFiles
tops *tOps
@@ -215,7 +255,7 @@ type tFilesArrayIndexer struct {
}
func (a *tFilesArrayIndexer) Search(key []byte) int {
- return a.searchMax(iKey(key), a.icmp)
+ return a.searchMax(a.icmp, iKey(key))
}
func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
@@ -225,6 +265,7 @@ func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
return a.tops.newIterator(a.tFiles[i], nil, a.ro)
}
+// Helper type for sortByKey.
type tFilesSortByKey struct {
tFiles
icmp *iComparer
@@ -234,6 +275,7 @@ func (x *tFilesSortByKey) Less(i, j int) bool {
return x.lessByKey(x.icmp, i, j)
}
+// Helper type for sortByNum.
type tFilesSortByNum struct {
tFiles
}
@@ -242,19 +284,15 @@ func (x *tFilesSortByNum) Less(i, j int) bool {
return x.lessByNum(i, j)
}
-// table operations
+// Table operations.
type tOps struct {
- s *session
- cache cache.Cache
- cacheNS cache.Namespace
-}
-
-func newTableOps(s *session, cacheCap int) *tOps {
- c := cache.NewLRUCache(cacheCap)
- ns := c.GetNamespace(0)
- return &tOps{s, c, ns}
+ s *session
+ cache *cache.Cache
+ bcache *cache.Cache
+ bpool *util.BufferPool
}
+// Creates an empty table and returns table writer.
func (t *tOps) create() (*tWriter, error) {
file := t.s.getTableFile(t.s.allocFileNum())
fw, err := file.Create()
@@ -265,14 +303,15 @@ func (t *tOps) create() (*tWriter, error) {
t: t,
file: file,
w: fw,
- tw: table.NewWriter(fw, t.s.o),
+ tw: table.NewWriter(fw, t.s.o.Options),
}, nil
}
+// Builds table from src iterator.
func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
w, err := t.create()
if err != nil {
- return f, n, err
+ return
}
defer func() {
@@ -282,7 +321,7 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
}()
for src.Next() {
- err = w.add(src.Key(), src.Value())
+ err = w.append(src.Key(), src.Value())
if err != nil {
return
}
@@ -297,84 +336,132 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
return
}
-func (t *tOps) lookup(f *tFile) (c cache.Object, err error) {
+// Opens table. It returns a cache handle, which should
+// be released after use.
+func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
num := f.file.Num()
- c, ok := t.cacheNS.Get(num, func() (ok bool, value interface{}, charge int, fin cache.SetFin) {
+ ch = t.cache.Get(0, num, func() (size int, value cache.Value) {
var r storage.Reader
r, err = f.file.Open()
if err != nil {
- return
+ return 0, nil
}
- o := t.s.o
-
- var cacheNS cache.Namespace
- if bc := o.GetBlockCache(); bc != nil {
- cacheNS = bc.GetNamespace(num)
+ var bcache *cache.CacheGetter
+ if t.bcache != nil {
+ bcache = &cache.CacheGetter{Cache: t.bcache, NS: num}
}
- ok = true
- value = table.NewReader(r, int64(f.size), cacheNS, o)
- charge = 1
- fin = func() {
+ var tr *table.Reader
+ tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options)
+ if err != nil {
r.Close()
+ return 0, nil
}
- return
+ return 1, tr
+
})
- if !ok && err == nil {
+ if ch == nil && err == nil {
err = ErrClosed
}
return
}
-func (t *tOps) get(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
- c, err := t.lookup(f)
+// Finds key/value pair whose key is greater than or equal to the
+// given key.
+func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
+ ch, err := t.open(f)
if err != nil {
return nil, nil, err
}
- defer c.Release()
- return c.Value().(*table.Reader).Find(key, ro)
+ defer ch.Release()
+ return ch.Value().(*table.Reader).Find(key, true, ro)
+}
+
+// Finds key that is greater than or equal to the given key.
+func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return nil, err
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).FindKey(key, true, ro)
}
+// Returns approximate offset of the given key.
func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
- c, err := t.lookup(f)
+ ch, err := t.open(f)
if err != nil {
return
}
- _offset, err := c.Value().(*table.Reader).OffsetOf(key)
- offset = uint64(_offset)
- c.Release()
- return
+ defer ch.Release()
+ offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
+ return uint64(offset_), err
}
+// Creates an iterator from the given table.
func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
- c, err := t.lookup(f)
+ ch, err := t.open(f)
if err != nil {
return iterator.NewEmptyIterator(err)
}
- iter := c.Value().(*table.Reader).NewIterator(slice, ro)
- iter.SetReleaser(c)
+ iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
+ iter.SetReleaser(ch)
return iter
}
+// Removes table from persistent storage. It waits until
+// no one uses the table.
func (t *tOps) remove(f *tFile) {
num := f.file.Num()
- t.cacheNS.Delete(num, func(exist bool) {
+ t.cache.Delete(0, num, func() {
if err := f.file.Remove(); err != nil {
t.s.logf("table@remove removing @%d %q", num, err)
} else {
t.s.logf("table@remove removed @%d", num)
}
- if bc := t.s.o.GetBlockCache(); bc != nil {
- bc.GetNamespace(num).Zap(false)
+ if t.bcache != nil {
+ t.bcache.EvictNS(num)
}
})
}
+// Closes the table ops instance. It will close all tables,
+// regardless of whether they are still in use.
func (t *tOps) close() {
- t.cache.Zap(true)
+ t.bpool.Close()
+ t.cache.Close()
+ if t.bcache != nil {
+ t.bcache.Close()
+ }
+}
+
+// Creates new initialized table ops instance.
+func newTableOps(s *session) *tOps {
+ var (
+ cacher cache.Cacher
+ bcache *cache.Cache
+ )
+ if s.o.GetOpenFilesCacheCapacity() > 0 {
+ cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
+ }
+ if !s.o.DisableBlockCache {
+ var bcacher cache.Cacher
+ if s.o.GetBlockCacheCapacity() > 0 {
+ bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
+ }
+ bcache = cache.NewCache(bcacher)
+ }
+ return &tOps{
+ s: s,
+ cache: cache.NewCache(cacher),
+ bcache: bcache,
+ bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
+ }
}
+// tWriter wraps the table writer. It keeps track of the file descriptor
+// and added key range.
type tWriter struct {
t *tOps
@@ -385,7 +472,8 @@ type tWriter struct {
first, last []byte
}
-func (w *tWriter) add(key, value []byte) error {
+// Appends a key/value pair to the table.
+func (w *tWriter) append(key, value []byte) error {
if w.first == nil {
w.first = append([]byte{}, key...)
}
@@ -393,30 +481,39 @@ func (w *tWriter) add(key, value []byte) error {
return w.tw.Append(key, value)
}
+// Returns true if the table is empty.
func (w *tWriter) empty() bool {
return w.first == nil
}
+// Closes the storage.Writer.
+func (w *tWriter) close() {
+ if w.w != nil {
+ w.w.Close()
+ w.w = nil
+ }
+}
+
+// Finalizes the table and returns the table file.
func (w *tWriter) finish() (f *tFile, err error) {
+ defer w.close()
err = w.tw.Close()
if err != nil {
return
}
err = w.w.Sync()
if err != nil {
- w.w.Close()
return
}
- w.w.Close()
- f = newTFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
+ f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
return
}
+// Drops the table.
func (w *tWriter) drop() {
- w.w.Close()
+ w.close()
w.file.Remove()
w.t.s.reuseFileNum(w.file.Num())
- w.w = nil
w.file = nil
w.tw = nil
w.first = nil
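
Editor's note: for orientation, here is a minimal, hypothetical sketch of how a flush path might drive the renamed tWriter API (append/finish/drop). It is assumed to live inside the leveldb package; the t.create() helper and the iterator source are assumptions, not part of this hunk.

// Hypothetical sketch only: drain an iterator into a new table via tWriter.
func flushToTable(t *tOps, iter iterator.Iterator) (*tFile, error) {
	w, err := t.create() // assumed tOps helper that allocates a tWriter
	if err != nil {
		return nil, err
	}
	for iter.Next() {
		if err := w.append(iter.Key(), iter.Value()); err != nil {
			w.drop() // discard the partially written table
			return nil, err
		}
	}
	if w.empty() {
		w.drop()
		return nil, nil
	}
	return w.finish() // closes the writer, syncs and returns the tFile
}
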
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
index ca598f4f5..00e6f9eea 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
@@ -19,13 +19,18 @@ import (
"github.com/syndtr/goleveldb/leveldb/util"
)
-func (b *block) TestNewIterator(slice *util.Range) iterator.Iterator {
- return b.newIterator(slice, false, nil)
+type blockTesting struct {
+ tr *Reader
+ b *block
+}
+
+func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator {
+ return t.tr.newBlockIter(t.b, nil, slice, false)
}
var _ = testutil.Defer(func() {
Describe("Block", func() {
- Build := func(kv *testutil.KeyValue, restartInterval int) *block {
+ Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting {
// Building the block.
bw := &blockWriter{
restartInterval: restartInterval,
@@ -39,11 +44,13 @@ var _ = testutil.Defer(func() {
// Opening the block.
data := bw.buf.Bytes()
restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
- return &block{
- cmp: comparer.DefaultComparer,
- data: data,
- restartsLen: restartsLen,
- restartsOffset: len(data) - (restartsLen+1)*4,
+ return &blockTesting{
+ tr: &Reader{cmp: comparer.DefaultComparer},
+ b: &block{
+ data: data,
+ restartsLen: restartsLen,
+ restartsOffset: len(data) - (restartsLen+1)*4,
+ },
}
}
@@ -59,7 +66,7 @@ var _ = testutil.Defer(func() {
// Make block.
br := Build(kv, restartInterval)
// Do testing.
- testutil.KeyValueTesting(nil, br, kv.Clone())
+ testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil)
}
Describe(Text(), Test)
@@ -102,11 +109,11 @@ var _ = testutil.Defer(func() {
for restartInterval := 1; restartInterval <= 5; restartInterval++ {
Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
// Make block.
- br := Build(kv, restartInterval)
+ bt := Build(kv, restartInterval)
Test := func(r *util.Range) func(done Done) {
return func(done Done) {
- iter := br.newIterator(r, false, nil)
+ iter := bt.TestNewIterator(r)
Expect(iter.Error()).ShouldNot(HaveOccurred())
t := testutil.IteratorTesting{
@@ -115,6 +122,7 @@ var _ = testutil.Defer(func() {
}
testutil.DoIteratorTesting(&t)
+ iter.Release()
done <- true
}
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
index 8acb9f720..6f38e84b3 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -8,27 +8,41 @@ package table
import (
"encoding/binary"
- "errors"
"fmt"
"io"
"sort"
"strings"
+ "sync"
- "code.google.com/p/snappy-go/snappy"
+ "github.com/syndtr/gosnappy/snappy"
"github.com/syndtr/goleveldb/leveldb/cache"
"github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
)
var (
- ErrNotFound = util.ErrNotFound
- ErrIterReleased = errors.New("leveldb/table: iterator released")
+ ErrNotFound = errors.ErrNotFound
+ ErrReaderReleased = errors.New("leveldb/table: reader released")
+ ErrIterReleased = errors.New("leveldb/table: iterator released")
)
+type ErrCorrupted struct {
+ Pos int64
+ Size int64
+ Kind string
+ Reason string
+}
+
+func (e *ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason)
+}
+
func max(x, y int) int {
if x > y {
return x
@@ -37,40 +51,33 @@ func max(x, y int) int {
}
type block struct {
- cmp comparer.BasicComparer
+ bpool *util.BufferPool
+ bh blockHandle
data []byte
restartsLen int
restartsOffset int
- // Whether checksum is verified and valid.
- checksum bool
}
-func (b *block) seek(rstart, rlimit int, key []byte) (index, offset int, err error) {
- n := b.restartsOffset
- data := b.data
- cmp := b.cmp
-
+func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) {
index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
- offset := int(binary.LittleEndian.Uint32(data[n+4*(rstart+i):]))
- offset += 1 // shared always zero, since this is a restart point
- v1, n1 := binary.Uvarint(data[offset:]) // key length
- _, n2 := binary.Uvarint(data[offset+n1:]) // value length
+ offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
+ offset += 1 // shared always zero, since this is a restart point
+ v1, n1 := binary.Uvarint(b.data[offset:]) // key length
+ _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
m := offset + n1 + n2
- return cmp.Compare(data[m:m+int(v1)], key) > 0
+ return cmp.Compare(b.data[m:m+int(v1)], key) > 0
}) + rstart - 1
if index < rstart {
// The smallest key is greater-than key sought.
index = rstart
}
- offset = int(binary.LittleEndian.Uint32(data[n+4*index:]))
+ offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
return
}
func (b *block) restartIndex(rstart, rlimit, offset int) int {
- n := b.restartsOffset
- data := b.data
return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
- return int(binary.LittleEndian.Uint32(data[n+4*(rstart+i):])) > offset
+ return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset
}) + rstart - 1
}
@@ -81,7 +88,7 @@ func (b *block) restartOffset(index int) int {
func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) {
if offset >= b.restartsOffset {
if offset != b.restartsOffset {
- err = errors.New("leveldb/table: Reader: BlockEntry: invalid block (block entries offset not aligned)")
+ err = &ErrCorrupted{Reason: "entries offset not aligned"}
}
return
}
@@ -91,7 +98,7 @@ func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error)
m := n0 + n1 + n2
n = m + int(v1) + int(v2)
if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset {
- err = errors.New("leveldb/table: Reader: invalid block (block entries corrupted)")
+ err = &ErrCorrupted{Reason: "entries corrupted"}
return
}
key = b.data[offset+m : offset+m+int(v1)]
@@ -100,43 +107,10 @@ func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error)
return
}
-func (b *block) newIterator(slice *util.Range, inclLimit bool, cache util.Releaser) *blockIter {
- bi := &blockIter{
- block: b,
- cache: cache,
- // Valid key should never be nil.
- key: make([]byte, 0),
- dir: dirSOI,
- riStart: 0,
- riLimit: b.restartsLen,
- offsetStart: 0,
- offsetRealStart: 0,
- offsetLimit: b.restartsOffset,
- }
- if slice != nil {
- if slice.Start != nil {
- if bi.Seek(slice.Start) {
- bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
- bi.offsetStart = b.restartOffset(bi.riStart)
- bi.offsetRealStart = bi.prevOffset
- } else {
- bi.riStart = b.restartsLen
- bi.offsetStart = b.restartsOffset
- bi.offsetRealStart = b.restartsOffset
- }
- }
- if slice.Limit != nil {
- if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
- bi.offsetLimit = bi.prevOffset
- bi.riLimit = bi.restartIndex + 1
- }
- }
- bi.reset()
- if bi.offsetStart > bi.offsetLimit {
- bi.sErr(errors.New("leveldb/table: Reader: invalid slice range"))
- }
- }
- return bi
+func (b *block) Release() {
+ b.bpool.Put(b.data)
+ b.bpool = nil
+ b.data = nil
}
type dir int
@@ -150,10 +124,12 @@ const (
)
type blockIter struct {
- block *block
- cache, releaser util.Releaser
- key, value []byte
- offset int
+ tr *Reader
+ block *block
+ blockReleaser util.Releaser
+ releaser util.Releaser
+ key, value []byte
+ offset int
// Previous offset, only filled by Next.
prevOffset int
prevNode []int
@@ -250,7 +226,7 @@ func (i *blockIter) Seek(key []byte) bool {
return false
}
- ri, offset, err := i.block.seek(i.riStart, i.riLimit, key)
+ ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key)
if err != nil {
i.sErr(err)
return false
@@ -261,7 +237,7 @@ func (i *blockIter) Seek(key []byte) bool {
i.dir = dirForward
}
for i.Next() {
- if i.block.cmp.Compare(i.key, key) >= 0 {
+ if i.tr.cmp.Compare(i.key, key) >= 0 {
return true
}
}
@@ -286,7 +262,7 @@ func (i *blockIter) Next() bool {
for i.offset < i.offsetRealStart {
key, value, nShared, n, err := i.block.entry(i.offset)
if err != nil {
- i.sErr(err)
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
return false
}
if n == 0 {
@@ -300,13 +276,13 @@ func (i *blockIter) Next() bool {
if i.offset >= i.offsetLimit {
i.dir = dirEOI
if i.offset != i.offsetLimit {
- i.sErr(errors.New("leveldb/table: Reader: Next: invalid block (block entries offset not aligned)"))
+ i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
}
return false
}
key, value, nShared, n, err := i.block.entry(i.offset)
if err != nil {
- i.sErr(err)
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
return false
}
if n == 0 {
@@ -391,7 +367,7 @@ func (i *blockIter) Prev() bool {
for {
key, value, nShared, n, err := i.block.entry(offset)
if err != nil {
- i.sErr(err)
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
return false
}
if offset >= i.offsetRealStart {
@@ -410,7 +386,7 @@ func (i *blockIter) Prev() bool {
// Stop if target offset reached.
if offset >= i.offset {
if offset != i.offset {
- i.sErr(errors.New("leveldb/table: Reader: Prev: invalid block (block entries offset not aligned)"))
+ i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
return false
}
@@ -437,25 +413,33 @@ func (i *blockIter) Value() []byte {
}
func (i *blockIter) Release() {
- i.prevNode = nil
- i.prevKeys = nil
- i.key = nil
- i.value = nil
- i.dir = dirReleased
- if i.cache != nil {
- i.cache.Release()
- i.cache = nil
- }
- if i.releaser != nil {
- i.releaser.Release()
- i.releaser = nil
+ if i.dir != dirReleased {
+ i.tr = nil
+ i.block = nil
+ i.prevNode = nil
+ i.prevKeys = nil
+ i.key = nil
+ i.value = nil
+ i.dir = dirReleased
+ if i.blockReleaser != nil {
+ i.blockReleaser.Release()
+ i.blockReleaser = nil
+ }
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
}
}
func (i *blockIter) SetReleaser(releaser util.Releaser) {
- if i.dir > dirReleased {
- i.releaser = releaser
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
}
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
}
func (i *blockIter) Valid() bool {
@@ -467,21 +451,21 @@ func (i *blockIter) Error() error {
}
type filterBlock struct {
- filter filter.Filter
+ bpool *util.BufferPool
data []byte
oOffset int
baseLg uint
filtersNum int
}
-func (b *filterBlock) contains(offset uint64, key []byte) bool {
+func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool {
i := int(offset >> b.baseLg)
if i < b.filtersNum {
o := b.data[b.oOffset+i*4:]
n := int(binary.LittleEndian.Uint32(o))
m := int(binary.LittleEndian.Uint32(o[4:]))
if n < m && m <= b.oOffset {
- return b.filter.Contains(b.data[n:m], key)
+ return filter.Contains(b.data[n:m], key)
} else if n == m {
return false
}
@@ -489,12 +473,17 @@ func (b *filterBlock) contains(offset uint64, key []byte) bool {
return true
}
+func (b *filterBlock) Release() {
+ b.bpool.Put(b.data)
+ b.bpool = nil
+ b.data = nil
+}
+
type indexIter struct {
- blockIter
- tableReader *Reader
- slice *util.Range
+ *blockIter
+ tr *Reader
+ slice *util.Range
// Options
- checksum bool
fillCache bool
}
@@ -505,95 +494,173 @@ func (i *indexIter) Get() iterator.Iterator {
}
dataBH, n := decodeBlockHandle(value)
if n == 0 {
- return iterator.NewEmptyIterator(errors.New("leveldb/table: Reader: invalid table (bad data block handle)"))
+ return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle"))
}
+
var slice *util.Range
if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
slice = i.slice
}
- return i.tableReader.getDataIter(dataBH, slice, i.checksum, i.fillCache)
+ return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache)
}
// Reader is a table reader.
type Reader struct {
+ mu sync.RWMutex
+ fi *storage.FileInfo
reader io.ReaderAt
- cache cache.Namespace
+ cache *cache.CacheGetter
err error
+ bpool *util.BufferPool
// Options
- cmp comparer.Comparer
- filter filter.Filter
- checksum bool
- strictIter bool
+ o *opt.Options
+ cmp comparer.Comparer
+ filter filter.Filter
+ verifyChecksum bool
- dataEnd int64
- indexBlock *block
- filterBlock *filterBlock
+ dataEnd int64
+ metaBH, indexBH, filterBH blockHandle
+ indexBlock *block
+ filterBlock *filterBlock
}
-func verifyChecksum(data []byte) bool {
- n := len(data) - 4
- checksum0 := binary.LittleEndian.Uint32(data[n:])
- checksum1 := util.NewCRC(data[:n]).Value()
- return checksum0 == checksum1
+func (r *Reader) blockKind(bh blockHandle) string {
+ switch bh.offset {
+ case r.metaBH.offset:
+ return "meta-block"
+ case r.indexBH.offset:
+ return "index-block"
+ case r.filterBH.offset:
+ if r.filterBH.length > 0 {
+ return "filter-block"
+ }
+ }
+ return "data-block"
}
-func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
- data := make([]byte, bh.length+blockTrailerLen)
+func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error {
+ return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
+}
+
+func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error {
+ return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason)
+}
+
+func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error {
+ if cerr, ok := err.(*ErrCorrupted); ok {
+ cerr.Pos = int64(bh.offset)
+ cerr.Size = int64(bh.length)
+ cerr.Kind = r.blockKind(bh)
+ return &errors.ErrCorrupted{File: r.fi, Err: cerr}
+ }
+ return err
+}
+
+func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) {
+ data := r.bpool.Get(int(bh.length + blockTrailerLen))
if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
return nil, err
}
- if checksum || r.checksum {
- if !verifyChecksum(data) {
- return nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)")
+
+ if verifyChecksum {
+ n := bh.length + 1
+ checksum0 := binary.LittleEndian.Uint32(data[n:])
+ checksum1 := util.NewCRC(data[:n]).Value()
+ if checksum0 != checksum1 {
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1))
}
}
+
switch data[bh.length] {
case blockTypeNoCompression:
data = data[:bh.length]
case blockTypeSnappyCompression:
- var err error
- data, err = snappy.Decode(nil, data[:bh.length])
+ decLen, err := snappy.DecodedLen(data[:bh.length])
if err != nil {
- return nil, err
+ return nil, r.newErrCorruptedBH(bh, err.Error())
+ }
+ decData := r.bpool.Get(decLen)
+ decData, err = snappy.Decode(decData, data[:bh.length])
+ r.bpool.Put(data)
+ if err != nil {
+ r.bpool.Put(decData)
+ return nil, r.newErrCorruptedBH(bh, err.Error())
}
+ data = decData
default:
- return nil, fmt.Errorf("leveldb/table: Reader: unknown block compression type: %d", data[bh.length])
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length]))
}
return data, nil
}
-func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) {
- data, err := r.readRawBlock(bh, checksum)
+func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) {
+ data, err := r.readRawBlock(bh, verifyChecksum)
if err != nil {
return nil, err
}
restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
b := &block{
- cmp: r.cmp,
+ bpool: r.bpool,
+ bh: bh,
data: data,
restartsLen: restartsLen,
restartsOffset: len(data) - (restartsLen+1)*4,
- checksum: checksum || r.checksum,
}
return b, nil
}
-func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterBlock, error) {
+func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
+ if r.cache != nil {
+ var (
+ err error
+ ch *cache.Handle
+ )
+ if fillCache {
+ ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+ var b *block
+ b, err = r.readBlock(bh, verifyChecksum)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ } else {
+ ch = r.cache.Get(bh.offset, nil)
+ }
+ if ch != nil {
+ b, ok := ch.Value().(*block)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: inconsistent block type")
+ }
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readBlock(bh, verifyChecksum)
+ return b, b, err
+}
+
+func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
data, err := r.readRawBlock(bh, true)
if err != nil {
return nil, err
}
n := len(data)
if n < 5 {
- return nil, errors.New("leveldb/table: Reader: invalid filter block (too short)")
+ return nil, r.newErrCorruptedBH(bh, "too short")
}
m := n - 5
oOffset := int(binary.LittleEndian.Uint32(data[m:]))
if oOffset > m {
- return nil, errors.New("leveldb/table: Reader: invalid filter block (invalid offset)")
+ return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset")
}
b := &filterBlock{
- filter: filter,
+ bpool: r.bpool,
data: data,
oOffset: oOffset,
baseLg: uint(data[n-1]),
@@ -602,44 +669,111 @@ func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterB
return b, nil
}
-func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
+func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
if r.cache != nil {
- // Get/set block cache.
- var err error
- cache, ok := r.cache.Get(dataBH.offset, func() (ok bool, value interface{}, charge int, fin cache.SetFin) {
- if !fillCache {
- return
+ var (
+ err error
+ ch *cache.Handle
+ )
+ if fillCache {
+ ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+ var b *filterBlock
+ b, err = r.readFilterBlock(bh)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ } else {
+ ch = r.cache.Get(bh.offset, nil)
+ }
+ if ch != nil {
+ b, ok := ch.Value().(*filterBlock)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: inconsistent block type")
}
- var dataBlock *block
- dataBlock, err = r.readBlock(dataBH, checksum)
- if err == nil {
- ok = true
- value = dataBlock
- charge = int(dataBH.length)
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readFilterBlock(bh)
+ return b, b, err
+}
+
+func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) {
+ if r.indexBlock == nil {
+ return r.readBlockCached(r.indexBH, true, fillCache)
+ }
+ return r.indexBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) {
+ if r.filterBlock == nil {
+ return r.readFilterBlockCached(r.filterBH, fillCache)
+ }
+ return r.filterBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter {
+ bi := &blockIter{
+ tr: r,
+ block: b,
+ blockReleaser: bReleaser,
+ // Valid key should never be nil.
+ key: make([]byte, 0),
+ dir: dirSOI,
+ riStart: 0,
+ riLimit: b.restartsLen,
+ offsetStart: 0,
+ offsetRealStart: 0,
+ offsetLimit: b.restartsOffset,
+ }
+ if slice != nil {
+ if slice.Start != nil {
+ if bi.Seek(slice.Start) {
+ bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
+ bi.offsetStart = b.restartOffset(bi.riStart)
+ bi.offsetRealStart = bi.prevOffset
+ } else {
+ bi.riStart = b.restartsLen
+ bi.offsetStart = b.restartsOffset
+ bi.offsetRealStart = b.restartsOffset
}
- return
- })
- if err != nil {
- return iterator.NewEmptyIterator(err)
}
- if ok {
- dataBlock := cache.Value().(*block)
- if !dataBlock.checksum && (r.checksum || checksum) {
- if !verifyChecksum(dataBlock.data) {
- return iterator.NewEmptyIterator(errors.New("leveldb/table: Reader: invalid block (checksum mismatch)"))
- }
- dataBlock.checksum = true
+ if slice.Limit != nil {
+ if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
+ bi.offsetLimit = bi.prevOffset
+ bi.riLimit = bi.restartIndex + 1
}
- iter := dataBlock.newIterator(slice, false, cache)
- return iter
+ }
+ bi.reset()
+ if bi.offsetStart > bi.offsetLimit {
+ bi.sErr(errors.New("leveldb/table: invalid slice range"))
}
}
- dataBlock, err := r.readBlock(dataBH, checksum)
+ return bi
+}
+
+func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+ b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache)
if err != nil {
return iterator.NewEmptyIterator(err)
}
- iter := dataBlock.newIterator(slice, false, nil)
- return iter
+ return r.newBlockIter(b, rel, slice, false)
+}
+
+func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ return iterator.NewEmptyIterator(r.err)
+ }
+
+ return r.getDataIter(dataBH, slice, verifyChecksum, fillCache)
}
// NewIterator creates an iterator from the table.
@@ -653,35 +787,44 @@ func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fi
// when not used.
//
// Also read Iterator documentation of the leveldb/iterator package.
-
func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
if r.err != nil {
return iterator.NewEmptyIterator(r.err)
}
+ fillCache := !ro.GetDontFillCache()
+ indexBlock, rel, err := r.getIndexBlock(fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
index := &indexIter{
- blockIter: *r.indexBlock.newIterator(slice, true, nil),
- tableReader: r,
- slice: slice,
- checksum: ro.GetStrict(opt.StrictBlockChecksum),
- fillCache: !ro.GetDontFillCache(),
+ blockIter: r.newBlockIter(indexBlock, rel, slice, true),
+ tr: r,
+ slice: slice,
+ fillCache: !ro.GetDontFillCache(),
}
- return iterator.NewIndexedIterator(index, r.strictIter || ro.GetStrict(opt.StrictIterator), false)
+ return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader))
}
-// Find finds key/value pair whose key is greater than or equal to the
-// given key. It returns ErrNotFound if the table doesn't contain
-// such pair.
-//
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Find returns.
-func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
if r.err != nil {
err = r.err
return
}
- index := r.indexBlock.newIterator(nil, true, nil)
+ indexBlock, rel, err := r.getIndexBlock(true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := r.newBlockIter(indexBlock, nil, nil, true)
defer index.Release()
if !index.Seek(key) {
err = index.Error()
@@ -692,14 +835,23 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
}
dataBH, n := decodeBlockHandle(index.Value())
if n == 0 {
- err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)")
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
return
}
- if r.filterBlock != nil && !r.filterBlock.contains(dataBH.offset, key) {
- err = ErrNotFound
- return
+ if filtered && r.filter != nil {
+ filterBlock, frel, ferr := r.getFilterBlock(true)
+ if ferr == nil {
+ if !filterBlock.contains(r.filter, dataBH.offset, key) {
+ frel.Release()
+ return nil, nil, ErrNotFound
+ }
+ frel.Release()
+ } else if !errors.IsCorrupted(ferr) {
+ err = ferr
+ return
+ }
}
- data := r.getDataIter(dataBH, nil, ro.GetStrict(opt.StrictBlockChecksum), !ro.GetDontFillCache())
+ data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
defer data.Release()
if !data.Seek(key) {
err = data.Error()
@@ -708,23 +860,64 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
}
return
}
+ // The key doesn't reference the block buffer, so no copy is needed.
rkey = data.Key()
- value = data.Value()
+ if !noValue {
+ if r.bpool == nil {
+ value = data.Value()
+ } else {
+ // The value references the block buffer; since that buffer will be
+ // recycled, it needs to be copied.
+ value = append([]byte{}, data.Value()...)
+ }
+ }
+ return
+}
+
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such pair.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such pair doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+ return r.find(key, filtered, ro, false)
+}
+
+// FindKey finds the key that is greater than or equal to the given key.
+// It returns ErrNotFound if the table doesn't contain such key.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such key doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) {
+ rkey, _, err = r.find(key, filtered, ro, true)
return
}
// Get gets the value for the given key. It returns errors.ErrNotFound
// if the table does not contain the key.
//
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Get returns.
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Get returns.
func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
if r.err != nil {
err = r.err
return
}
- rkey, value, err := r.Find(key, ro)
+ rkey, value, err := r.find(key, false, ro, false)
if err == nil && r.cmp.Compare(rkey, key) != 0 {
value = nil
err = ErrNotFound
@@ -736,17 +929,26 @@ func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
//
// It is safe to modify the contents of the argument after Get returns.
func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
if r.err != nil {
err = r.err
return
}
- index := r.indexBlock.newIterator(nil, true, nil)
+ indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := r.newBlockIter(indexBlock, nil, nil, true)
defer index.Release()
if index.Seek(key) {
dataBH, n := decodeBlockHandle(index.Value())
if n == 0 {
- err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)")
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
return
}
offset = int64(dataBH.offset)
@@ -759,90 +961,147 @@ func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
return
}
-// NewReader creates a new initialized table reader for the file.
-// The cache is optional and can be nil.
-func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, o *opt.Options) *Reader {
- r := &Reader{
- reader: f,
- cache: cache,
- cmp: o.GetComparer(),
- checksum: o.GetStrict(opt.StrictBlockChecksum),
- strictIter: o.GetStrict(opt.StrictIterator),
+// Release implements util.Releaser.
+// It also closes the file if it is an io.Closer.
+func (r *Reader) Release() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if closer, ok := r.reader.(io.Closer); ok {
+ closer.Close()
+ }
+ if r.indexBlock != nil {
+ r.indexBlock.Release()
+ r.indexBlock = nil
}
+ if r.filterBlock != nil {
+ r.filterBlock.Release()
+ r.filterBlock = nil
+ }
+ r.reader = nil
+ r.cache = nil
+ r.bpool = nil
+ r.err = ErrReaderReleased
+}
+
+// NewReader creates a new initialized table reader for the file.
+// The fi, cache and bpool are optional and can be nil.
+//
+// The returned table reader instance is goroutine-safe.
+func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
if f == nil {
- r.err = errors.New("leveldb/table: Reader: nil file")
- return r
+ return nil, errors.New("leveldb/table: nil file")
}
+
+ r := &Reader{
+ fi: fi,
+ reader: f,
+ cache: cache,
+ bpool: bpool,
+ o: o,
+ cmp: o.GetComparer(),
+ verifyChecksum: o.GetStrict(opt.StrictBlockChecksum),
+ }
+
if size < footerLen {
- r.err = errors.New("leveldb/table: Reader: invalid table (file size is too small)")
- return r
+ r.err = r.newErrCorrupted(0, size, "table", "too small")
+ return r, nil
}
+
+ footerPos := size - footerLen
var footer [footerLen]byte
- if _, err := r.reader.ReadAt(footer[:], size-footerLen); err != nil && err != io.EOF {
- r.err = fmt.Errorf("leveldb/table: Reader: invalid table (could not read footer): %v", err)
+ if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF {
+ return nil, err
}
if string(footer[footerLen-len(magic):footerLen]) != magic {
- r.err = errors.New("leveldb/table: Reader: invalid table (bad magic number)")
- return r
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number")
+ return r, nil
}
+
+ var n int
// Decode the metaindex block handle.
- metaBH, n := decodeBlockHandle(footer[:])
+ r.metaBH, n = decodeBlockHandle(footer[:])
if n == 0 {
- r.err = errors.New("leveldb/table: Reader: invalid table (bad metaindex block handle)")
- return r
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle")
+ return r, nil
}
+
// Decode the index block handle.
- indexBH, n := decodeBlockHandle(footer[n:])
+ r.indexBH, n = decodeBlockHandle(footer[n:])
if n == 0 {
- r.err = errors.New("leveldb/table: Reader: invalid table (bad index block handle)")
- return r
- }
- // Read index block.
- r.indexBlock, r.err = r.readBlock(indexBH, true)
- if r.err != nil {
- return r
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle")
+ return r, nil
}
+
// Read metaindex block.
- metaBlock, err := r.readBlock(metaBH, true)
+ metaBlock, err := r.readBlock(r.metaBH, true)
if err != nil {
- r.err = err
- return r
+ if errors.IsCorrupted(err) {
+ r.err = err
+ return r, nil
+ } else {
+ return nil, err
+ }
}
+
// Set data end.
- r.dataEnd = int64(metaBH.offset)
- metaIter := metaBlock.newIterator(nil, false, nil)
+ r.dataEnd = int64(r.metaBH.offset)
+
+ // Read metaindex.
+ metaIter := r.newBlockIter(metaBlock, nil, nil, true)
for metaIter.Next() {
key := string(metaIter.Key())
if !strings.HasPrefix(key, "filter.") {
continue
}
fn := key[7:]
- var filter filter.Filter
if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
- filter = f0
+ r.filter = f0
} else {
for _, f0 := range o.GetAltFilters() {
if f0.Name() == fn {
- filter = f0
+ r.filter = f0
break
}
}
}
- if filter != nil {
+ if r.filter != nil {
filterBH, n := decodeBlockHandle(metaIter.Value())
if n == 0 {
continue
}
+ r.filterBH = filterBH
// Update data end.
r.dataEnd = int64(filterBH.offset)
- filterBlock, err := r.readFilterBlock(filterBH, filter)
- if err != nil {
- continue
- }
- r.filterBlock = filterBlock
break
}
}
metaIter.Release()
- return r
+ metaBlock.Release()
+
+ // Cache the index and filter blocks locally, since there is no global cache.
+ if cache == nil {
+ r.indexBlock, err = r.readBlock(r.indexBH, true)
+ if err != nil {
+ if errors.IsCorrupted(err) {
+ r.err = err
+ return r, nil
+ } else {
+ return nil, err
+ }
+ }
+ if r.filter != nil {
+ r.filterBlock, err = r.readFilterBlock(r.filterBH)
+ if err != nil {
+ if !errors.IsCorrupted(err) {
+ return nil, err
+ }
+
+ // Don't use filter then.
+ r.filter = nil
+ }
+ }
+ }
+
+ return r, nil
}
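
Editor's note: a minimal round-trip sketch of the new NewReader signature, which now returns an error and takes the optional fi, cache and bpool arguments (passed as nil here). This mirrors the updated test usage further below and is not part of the diff itself.

package main

import (
	"bytes"
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/table"
)

func main() {
	o := &opt.Options{} // zero options fall back to the defaults
	var buf bytes.Buffer

	// Write a tiny table; keys must be appended in ascending order.
	tw := table.NewWriter(&buf, o)
	if err := tw.Append([]byte("k1"), []byte("v1")); err != nil {
		panic(err)
	}
	if err := tw.Append([]byte("k2"), []byte("v2")); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// Open it again; fi, cache and bpool may all be nil.
	tr, err := table.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
	if err != nil {
		panic(err)
	}
	defer tr.Release()

	value, err := tr.Get([]byte("k1"), nil)
	fmt.Println(string(value), err) // "v1 <nil>"
}
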
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
index c0ac70d9e..beacdc1f0 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
@@ -133,9 +133,9 @@ Filter block trailer:
+- 4-bytes -+
/ \
- +---------------+---------------+---------------+-------------------------+------------------+
- | offset 1 | .... | offset n | filter offset (4-bytes) | base Lg (1-byte) |
- +-------------- +---------------+---------------+-------------------------+------------------+
+ +---------------+---------------+---------------+-------------------------------+------------------+
+ | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) |
+ +-------------- +---------------+---------------+-------------------------------+------------------+
NOTE: All fixed-length integers are little-endian.
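
Editor's note: as a hedged illustration of the trailer layout above, the sketch below decodes the data-offsets offset and base Lg the same way readFilterBlock does. It assumes only encoding/binary and is not part of the package.

// Sketch: decode the filter-block trailer (4-byte data-offsets offset + 1-byte base Lg).
func parseFilterTrailer(data []byte) (oOffset int, baseLg uint, ok bool) {
	n := len(data)
	if n < 5 {
		return 0, 0, false // too short to hold the trailer
	}
	m := n - 5
	oOffset = int(binary.LittleEndian.Uint32(data[m:]))
	baseLg = uint(data[n-1])
	return oOffset, baseLg, oOffset <= m // the offsets array must end before the trailer
}
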
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
index bc9eb83cc..6465da6e3 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
@@ -3,15 +3,9 @@ package table
import (
"testing"
- . "github.com/onsi/ginkgo"
- . "github.com/onsi/gomega"
-
"github.com/syndtr/goleveldb/leveldb/testutil"
)
func TestTable(t *testing.T) {
- testutil.RunDefer()
-
- RegisterFailHandler(Fail)
- RunSpecs(t, "Table Suite")
+ testutil.RunSuite(t, "Table Suite")
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go
index d7d3b2a4b..4b59b31f5 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go
@@ -23,7 +23,7 @@ type tableWrapper struct {
}
func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) {
- return t.Reader.Find(key, nil)
+ return t.Reader.Find(key, false, nil)
}
func (t tableWrapper) TestGet(key []byte) (value []byte, err error) {
@@ -59,7 +59,8 @@ var _ = testutil.Defer(func() {
It("Should be able to approximate offset of a key correctly", func() {
Expect(err).ShouldNot(HaveOccurred())
- tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o)
+ tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
+ Expect(err).ShouldNot(HaveOccurred())
CheckOffset := func(key string, expect, threshold int) {
offset, err := tr.OffsetOf([]byte(key))
Expect(err).ShouldNot(HaveOccurred())
@@ -95,7 +96,7 @@ var _ = testutil.Defer(func() {
tw.Close()
// Opening the table.
- tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o)
+ tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
return tableWrapper{tr}
}
Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() {
@@ -104,14 +105,16 @@ var _ = testutil.Defer(func() {
if body != nil {
body(db.(tableWrapper).Reader)
}
- testutil.KeyValueTesting(nil, db, *kv)
+ testutil.KeyValueTesting(nil, *kv, db, nil, nil)
}
}
- testutil.AllKeyValueTesting(nil, Build)
+ testutil.AllKeyValueTesting(nil, Build, nil, nil)
Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) {
It("should have correct blocks number", func() {
- Expect(r.indexBlock.restartsLen).Should(Equal(9))
+ indexBlock, err := r.readBlock(r.indexBH, true)
+ Expect(err).To(BeNil())
+ Expect(indexBlock.restartsLen).Should(Equal(9))
})
}))
})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
index 4e19e93a9..274c95fad 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
@@ -12,7 +12,7 @@ import (
"fmt"
"io"
- "code.google.com/p/snappy-go/snappy"
+ "github.com/syndtr/gosnappy/snappy"
"github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/filter"
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
index 4b87b5ef6..ec3f177a1 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
@@ -12,6 +12,7 @@ import (
. "github.com/onsi/gomega"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/util"
)
@@ -34,6 +35,10 @@ type Get interface {
TestGet(key []byte) (value []byte, err error)
}
+type Has interface {
+ TestHas(key []byte) (ret bool, err error)
+}
+
type NewIterator interface {
TestNewIterator(slice *util.Range) iterator.Iterator
}
@@ -110,7 +115,7 @@ func (t *DBTesting) TestAllPresent() {
func (t *DBTesting) TestDeletedKey(key []byte) {
_, err := t.DB.TestGet(key)
- Expect(err).Should(Equal(util.ErrNotFound), "Get on deleted key %q, %s", key, t.text())
+ Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text())
}
func (t *DBTesting) TestAllDeleted() {
@@ -212,5 +217,6 @@ func DoDBTesting(t *DBTesting) {
}
DoIteratorTesting(&it)
+ iter.Release()
}
}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
new file mode 100644
index 000000000..82f3d0e81
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
@@ -0,0 +1,21 @@
+package testutil
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func RunSuite(t GinkgoTestingT, name string) {
+ RunDefer()
+
+ SynchronizedBeforeSuite(func() []byte {
+ RunDefer("setup")
+ return nil
+ }, func(data []byte) {})
+ SynchronizedAfterSuite(func() {
+ RunDefer("teardown")
+ }, func() {})
+
+ RegisterFailHandler(Fail)
+ RunSpecs(t, name)
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
index 4fc75b6f2..a0b58f0e7 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
@@ -13,16 +13,28 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/util"
)
-func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) {
+func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) {
if rnd == nil {
rnd = NewRand()
}
- if db, ok := p.(Find); ok {
- It("Should find all keys with Find", func() {
+ if p == nil {
+ BeforeEach(func() {
+ p = setup(kv)
+ })
+ if teardown != nil {
+ AfterEach(func() {
+ teardown(p)
+ })
+ }
+ }
+
+ It("Should find all keys with Find", func() {
+ if db, ok := p.(Find); ok {
ShuffledIndex(nil, kv.Len(), 1, func(i int) {
key_, key, value := kv.IndexInexact(i)
@@ -38,9 +50,11 @@ func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) {
Expect(rkey).Should(Equal(key))
Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key)
})
- })
+ }
+ })
- It("Should return error if the key is not present", func() {
+ It("Should return error if the key is not present", func() {
+ if db, ok := p.(Find); ok {
var key []byte
if kv.Len() > 0 {
key_, _ := kv.Index(kv.Len() - 1)
@@ -48,12 +62,12 @@ func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) {
}
rkey, _, err := db.TestFind(key)
Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey)
- Expect(err).Should(Equal(util.ErrNotFound))
- })
- }
+ Expect(err).Should(Equal(errors.ErrNotFound))
+ }
+ })
- if db, ok := p.(Get); ok {
- It("Should only find exact key with Get", func() {
+ It("Should only find exact key with Get", func() {
+ if db, ok := p.(Get); ok {
ShuffledIndex(nil, kv.Len(), 1, func(i int) {
key_, key, value := kv.IndexInexact(i)
@@ -66,14 +80,34 @@ func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) {
if len(key_) > 0 {
_, err = db.TestGet(key_)
Expect(err).Should(HaveOccurred(), "Error for key %q", key_)
- Expect(err).Should(Equal(util.ErrNotFound))
+ Expect(err).Should(Equal(errors.ErrNotFound))
}
})
- })
- }
+ }
+ })
+
+ It("Should only find present key with Has", func() {
+ if db, ok := p.(Has); ok {
+ ShuffledIndex(nil, kv.Len(), 1, func(i int) {
+ key_, key, _ := kv.IndexInexact(i)
+
+ // Using exact key.
+ ret, err := db.TestHas(key)
+ Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
+ Expect(ret).Should(BeTrue(), "False for key %q", key)
- if db, ok := p.(NewIterator); ok {
- TestIter := func(r *util.Range, _kv KeyValue) {
+ // Using inexact key.
+ if len(key_) > 0 {
+ ret, err = db.TestHas(key_)
+ Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_)
+ Expect(ret).ShouldNot(BeTrue(), "True for key %q", key)
+ }
+ })
+ }
+ })
+
+ TestIter := func(r *util.Range, _kv KeyValue) {
+ if db, ok := p.(NewIterator); ok {
iter := db.TestNewIterator(r)
Expect(iter.Error()).ShouldNot(HaveOccurred())
@@ -83,46 +117,62 @@ func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) {
}
DoIteratorTesting(&t)
+ iter.Release()
}
+ }
- It("Should iterates and seeks correctly", func(done Done) {
- TestIter(nil, kv.Clone())
- done <- true
- }, 3.0)
-
- RandomIndex(rnd, kv.Len(), kv.Len(), func(i int) {
- type slice struct {
- r *util.Range
- start, limit int
- }
+ It("Should iterates and seeks correctly", func(done Done) {
+ TestIter(nil, kv.Clone())
+ done <- true
+ }, 3.0)
- key_, _, _ := kv.IndexInexact(i)
- for _, x := range []slice{
- {&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
- {&util.Range{Start: nil, Limit: key_}, 0, i},
- } {
- It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) {
- TestIter(x.r, kv.Slice(x.start, x.limit))
- done <- true
- }, 3.0)
- }
- })
+ RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) {
+ type slice struct {
+ r *util.Range
+ start, limit int
+ }
- RandomRange(rnd, kv.Len(), kv.Len(), func(start, limit int) {
- It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) {
- r := kv.Range(start, limit)
- TestIter(&r, kv.Slice(start, limit))
+ key_, _, _ := kv.IndexInexact(i)
+ for _, x := range []slice{
+ {&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
+ {&util.Range{Start: nil, Limit: key_}, 0, i},
+ } {
+ It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) {
+ TestIter(x.r, kv.Slice(x.start, x.limit))
done <- true
}, 3.0)
- })
- }
+ }
+ })
+
+ RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) {
+ It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) {
+ r := kv.Range(start, limit)
+ TestIter(&r, kv.Slice(start, limit))
+ done <- true
+ }, 3.0)
+ })
}
-func AllKeyValueTesting(rnd *rand.Rand, body func(kv KeyValue) DB) {
+func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) {
Test := func(kv *KeyValue) func() {
return func() {
- db := body(*kv)
- KeyValueTesting(rnd, db, *kv)
+ var p DB
+ if setup != nil {
+ Defer("setup", func() {
+ p = setup(*kv)
+ })
+ }
+ if teardown != nil {
+ Defer("teardown", func() {
+ teardown(p)
+ })
+ }
+ if body != nil {
+ p = body(*kv)
+ }
+ KeyValueTesting(rnd, *kv, p, func(KeyValue) DB {
+ return p
+ }, nil)
}
}
@@ -133,4 +183,5 @@ func AllKeyValueTesting(rnd *rand.Rand, body func(kv KeyValue) DB) {
Describe("with big value", Test(KeyValue_BigValue()))
Describe("with special key", Test(KeyValue_SpecialKey()))
Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue()))
+ Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120)))
}
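
Editor's note: for reference, a hedged sketch of calling the reworked AllKeyValueTesting with per-spec setup/teardown instead of a body; openTestDB and closeTestDB are assumed helpers, not part of this change.

// Inside a Ginkgo spec body:
testutil.AllKeyValueTesting(nil, nil,
	func(kv testutil.KeyValue) testutil.DB {
		return openTestDB(kv) // build a fresh DB for each spec (assumed helper)
	},
	func(p testutil.DB) {
		closeTestDB(p) // release it afterwards (assumed helper)
	},
)
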
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
index 0f8d77a73..59c496d54 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
@@ -397,6 +397,7 @@ func (s *Storage) logI(format string, args ...interface{}) {
func (s *Storage) Log(str string) {
s.log(1, "Log: "+str)
+ s.Storage.Log(str)
}
func (s *Storage) Lock() (r util.Releaser, err error) {
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
index 38fe25d52..97c5294b1 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
@@ -155,3 +155,17 @@ func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) {
}
return
}
+
+func Max(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+func Min(x, y int) int {
+ if x < y {
+ return x
+ }
+ return y
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go
index c1402fda3..25bf2b29f 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go
@@ -34,6 +34,10 @@ func (t *testingDB) TestGet(key []byte) (value []byte, err error) {
return t.Get(key, t.ro)
}
+func (t *testingDB) TestHas(key []byte) (ret bool, err error) {
+ return t.Has(key, t.ro)
+}
+
func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator {
return t.NewIterator(slice, t.ro)
}
@@ -48,6 +52,7 @@ func (t *testingDB) TestClose() {
func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB {
stor := testutil.NewStorage()
db, err := Open(stor, o)
+ // FIXME: This may be called from outside It, which may cause panic.
Expect(err).NotTo(HaveOccurred())
return &testingDB{
DB: db,
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go
index a43d2e460..1a5bf71a3 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go
@@ -14,10 +14,10 @@ import (
)
func shorten(str string) string {
- if len(str) <= 4 {
+ if len(str) <= 8 {
return str
}
- return str[:1] + ".." + str[len(str)-1:]
+ return str[:3] + ".." + str[len(str)-3:]
}
var bunits = [...]string{"", "Ki", "Mi", "Gi"}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
new file mode 100644
index 000000000..2b8453d75
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
@@ -0,0 +1,238 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type buffer struct {
+ b []byte
+ miss int
+}
+
+// BufferPool is a pool of reusable byte buffers, bucketed by size class.
+type BufferPool struct {
+ pool [6]chan []byte
+ size [5]uint32
+ sizeMiss [5]uint32
+ sizeHalf [5]uint32
+ baseline [4]int
+ baseline0 int
+
+ mu sync.RWMutex
+ closed bool
+ closeC chan struct{}
+
+ get uint32
+ put uint32
+ half uint32
+ less uint32
+ equal uint32
+ greater uint32
+ miss uint32
+}
+
+func (p *BufferPool) poolNum(n int) int {
+ if n <= p.baseline0 && n > p.baseline0/2 {
+ return 0
+ }
+ for i, x := range p.baseline {
+ if n <= x {
+ return i + 1
+ }
+ }
+ return len(p.baseline) + 1
+}
+
+// Get returns a buffer with length n.
+func (p *BufferPool) Get(n int) []byte {
+ if p == nil {
+ return make([]byte, n)
+ }
+
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+
+ if p.closed {
+ return make([]byte, n)
+ }
+
+ atomic.AddUint32(&p.get, 1)
+
+ poolNum := p.poolNum(n)
+ pool := p.pool[poolNum]
+ if poolNum == 0 {
+ // Fast path.
+ select {
+ case b := <-pool:
+ switch {
+ case cap(b) > n:
+ if cap(b)-n >= n {
+ atomic.AddUint32(&p.half, 1)
+ select {
+ case pool <- b:
+ default:
+ }
+ return make([]byte, n)
+ } else {
+ atomic.AddUint32(&p.less, 1)
+ return b[:n]
+ }
+ case cap(b) == n:
+ atomic.AddUint32(&p.equal, 1)
+ return b[:n]
+ default:
+ atomic.AddUint32(&p.greater, 1)
+ }
+ default:
+ atomic.AddUint32(&p.miss, 1)
+ }
+
+ return make([]byte, n, p.baseline0)
+ } else {
+ sizePtr := &p.size[poolNum-1]
+
+ select {
+ case b := <-pool:
+ switch {
+ case cap(b) > n:
+ if cap(b)-n >= n {
+ atomic.AddUint32(&p.half, 1)
+ sizeHalfPtr := &p.sizeHalf[poolNum-1]
+ if atomic.AddUint32(sizeHalfPtr, 1) == 20 {
+ atomic.StoreUint32(sizePtr, uint32(cap(b)/2))
+ atomic.StoreUint32(sizeHalfPtr, 0)
+ } else {
+ select {
+ case pool <- b:
+ default:
+ }
+ }
+ return make([]byte, n)
+ } else {
+ atomic.AddUint32(&p.less, 1)
+ return b[:n]
+ }
+ case cap(b) == n:
+ atomic.AddUint32(&p.equal, 1)
+ return b[:n]
+ default:
+ atomic.AddUint32(&p.greater, 1)
+ if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
+ select {
+ case pool <- b:
+ default:
+ }
+ }
+ }
+ default:
+ atomic.AddUint32(&p.miss, 1)
+ }
+
+ if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
+ if size == 0 {
+ atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
+ } else {
+ sizeMissPtr := &p.sizeMiss[poolNum-1]
+ if atomic.AddUint32(sizeMissPtr, 1) == 20 {
+ atomic.StoreUint32(sizePtr, uint32(n))
+ atomic.StoreUint32(sizeMissPtr, 0)
+ }
+ }
+ return make([]byte, n)
+ } else {
+ return make([]byte, n, size)
+ }
+ }
+}
+
+// Put adds the given buffer to the pool.
+func (p *BufferPool) Put(b []byte) {
+ if p == nil {
+ return
+ }
+
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+
+ if p.closed {
+ return
+ }
+
+ atomic.AddUint32(&p.put, 1)
+
+ pool := p.pool[p.poolNum(cap(b))]
+ select {
+ case pool <- b:
+ default:
+ }
+
+}
+
+func (p *BufferPool) Close() {
+ if p == nil {
+ return
+ }
+
+ p.mu.Lock()
+ if !p.closed {
+ p.closed = true
+ p.closeC <- struct{}{}
+ }
+ p.mu.Unlock()
+}
+
+func (p *BufferPool) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+
+ return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}",
+ p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss)
+}
+
+func (p *BufferPool) drain() {
+ ticker := time.NewTicker(2 * time.Second)
+ for {
+ select {
+ case <-ticker.C:
+ for _, ch := range p.pool {
+ select {
+ case <-ch:
+ default:
+ }
+ }
+ case <-p.closeC:
+ close(p.closeC)
+ for _, ch := range p.pool {
+ close(ch)
+ }
+ return
+ }
+ }
+}
+
+// NewBufferPool creates a new initialized 'buffer pool'.
+func NewBufferPool(baseline int) *BufferPool {
+ if baseline <= 0 {
+ panic("baseline can't be <= 0")
+ }
+ p := &BufferPool{
+ baseline0: baseline,
+ baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4},
+ closeC: make(chan struct{}, 1),
+ }
+ for i, cap := range []int{2, 2, 4, 4, 2, 1} {
+ p.pool[i] = make(chan []byte, cap)
+ }
+ go p.drain()
+ return p
+}
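
Editor's note: a minimal usage sketch of the new buffer pool, assuming the baseline mirrors the tOps setup above (block size plus the 5-byte block trailer); not part of the diff.

pool := util.NewBufferPool(4096 + 5) // baseline: block size + block trailer
defer pool.Close()                   // stops the background drain goroutine

buf := pool.Get(1024) // len(buf) == 1024; the backing array is reused when possible
copy(buf, []byte("payload"))
// ... use buf ...
pool.Put(buf) // hand the buffer back for later Get calls
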
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go
new file mode 100644
index 000000000..1f7fdd41f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build go1.3
+
+package util
+
+import (
+ "sync"
+)
+
+type Pool struct {
+ sync.Pool
+}
+
+func NewPool(cap int) *Pool {
+ return &Pool{}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
new file mode 100644
index 000000000..27b8d03be
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build !go1.3
+
+package util
+
+type Pool struct {
+ pool chan interface{}
+}
+
+func (p *Pool) Get() interface{} {
+ select {
+ case x := <-p.pool:
+ return x
+ default:
+ return nil
+ }
+}
+
+func (p *Pool) Put(x interface{}) {
+ select {
+ case p.pool <- x:
+ default:
+ }
+}
+
+func NewPool(cap int) *Pool {
+ return &Pool{pool: make(chan interface{}, cap)}
+}
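
Editor's note: both Pool variants expose the same Get/Put surface, so callers stay build-tag agnostic. A hedged sketch follows; the cap argument only sizes the pre-go1.3 channel fallback, and the stored concrete type is the caller's choice.

p := util.NewPool(16)
if v := p.Get(); v != nil { // may be nil when the pool is empty
	if b, ok := v.([]byte); ok {
		_ = b // reuse the pooled buffer
	}
}
p.Put(make([]byte, 512))
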
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
index da0583123..85159583d 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
@@ -14,3 +14,19 @@ type Range struct {
// Limit of the key range, not include in the range.
Limit []byte
}
+
+// BytesPrefix returns a key range that satisfies the given prefix.
+// This is only applicable to the standard 'bytes comparer'.
+func BytesPrefix(prefix []byte) *Range {
+ var limit []byte
+ for i := len(prefix) - 1; i >= 0; i-- {
+ c := prefix[i]
+ if c < 0xff {
+ limit = make([]byte, i+1)
+ copy(limit, prefix)
+ limit[i] = c + 1
+ break
+ }
+ }
+ return &Range{prefix, limit}
+}
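
Editor's note: a hedged usage sketch of BytesPrefix with the DB iterator API, assuming a *leveldb.DB named db and the default bytes comparer; not part of the diff.

iter := db.NewIterator(util.BytesPrefix([]byte("user-")), nil)
for iter.Next() {
	// every iter.Key() here starts with "user-"
	_ = iter.Value()
}
iter.Release()
if err := iter.Error(); err != nil {
	// handle iteration error
}
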
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
index 229c7d41f..f35976865 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
@@ -12,7 +12,8 @@ import (
)
var (
- ErrNotFound = errors.New("leveldb: not found")
+ ErrReleased = errors.New("leveldb: resource already released")
+ ErrHasReleaser = errors.New("leveldb: releaser already defined")
)
// Releaser is the interface that wraps the basic Release method.
@@ -27,23 +28,46 @@ type ReleaseSetter interface {
// SetReleaser associates the given releaser to the resources. The
// releaser will be called once the corresponding resources are released.
// Calling SetReleaser with nil will clear the releaser.
+ //
+ // This will panic if a releaser is already present or the corresponding
+ // resource is already released. The releaser should be cleared first
+ // before a new one is assigned.
SetReleaser(releaser Releaser)
}
// BasicReleaser provides basic implementation of Releaser and ReleaseSetter.
type BasicReleaser struct {
releaser Releaser
+ released bool
+}
+
+// Released reports whether the Release method has already been called.
+func (r *BasicReleaser) Released() bool {
+ return r.released
}
// Release implements Releaser.Release.
func (r *BasicReleaser) Release() {
- if r.releaser != nil {
- r.releaser.Release()
- r.releaser = nil
+ if !r.released {
+ if r.releaser != nil {
+ r.releaser.Release()
+ r.releaser = nil
+ }
+ r.released = true
}
}
// SetReleaser implements ReleaseSetter.SetReleaser.
func (r *BasicReleaser) SetReleaser(releaser Releaser) {
+ if r.released {
+ panic(ErrReleased)
+ }
+ if r.releaser != nil && releaser != nil {
+ panic(ErrHasReleaser)
+ }
r.releaser = releaser
}
+
+type NoopReleaser struct{}
+
+func (NoopReleaser) Release() {}
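The reworked BasicReleaser above makes Release idempotent and turns misuse of SetReleaser into a panic rather than a silent overwrite. A minimal sketch of those semantics, reimplemented outside the package (all names here are illustrative, not the library's):

package main

import (
	"errors"
	"fmt"
)

var (
	errReleased    = errors.New("resource already released")
	errHasReleaser = errors.New("releaser already defined")
)

type releaser interface{ Release() }

type basicReleaser struct {
	releaser releaser
	released bool
}

// Release runs the attached releaser at most once.
func (r *basicReleaser) Release() {
	if !r.released {
		if r.releaser != nil {
			r.releaser.Release()
			r.releaser = nil
		}
		r.released = true
	}
}

// SetReleaser panics after release, or when a releaser is already attached.
func (r *basicReleaser) SetReleaser(rel releaser) {
	if r.released {
		panic(errReleased)
	}
	if r.releaser != nil && rel != nil {
		panic(errHasReleaser)
	}
	r.releaser = rel
}

type printRel string

func (p printRel) Release() { fmt.Println("released:", string(p)) }

func main() {
	var r basicReleaser
	r.SetReleaser(printRel("snapshot"))
	r.Release() // prints "released: snapshot"
	r.Release() // no-op: already released
	defer func() { fmt.Println("recovered:", recover()) }()
	r.SetReleaser(printRel("again")) // panics with errReleased
}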
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
index 4c54d6480..88a52f53e 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
@@ -7,7 +7,6 @@
package leveldb
import (
- "errors"
"sync/atomic"
"unsafe"
@@ -16,19 +15,6 @@ import (
"github.com/syndtr/goleveldb/leveldb/util"
)
-var levelMaxSize [kNumLevels]float64
-
-func init() {
- // Precompute max size of each level
- for level := range levelMaxSize {
- res := float64(10 * 1048576)
- for n := level; n > 1; n-- {
- res *= 10
- }
- levelMaxSize[level] = res
- }
-}
-
type tSet struct {
level int
table *tFile
@@ -37,21 +23,26 @@ type tSet struct {
type version struct {
s *session
- tables [kNumLevels]tFiles
+ tables []tFiles
// Level that should be compacted next and its compaction score.
- // Score < 1 means compaction is not strictly needed. These fields
- // are initialized by ComputeCompaction()
+ // Score < 1 means compaction is not strictly needed. These fields
+ // are initialized by computeCompaction()
cLevel int
cScore float64
cSeek unsafe.Pointer
- ref int
+ ref int
+ // Succeeding version.
next *version
}
-func (v *version) release_NB() {
+func newVersion(s *session) *version {
+ return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())}
+}
+
+func (v *version) releaseNB() {
v.ref--
if v.ref > 0 {
return
@@ -60,8 +51,6 @@ func (v *version) release_NB() {
panic("negative version ref")
}
- s := v.s
-
tables := make(map[uint64]bool)
for _, tt := range v.next.tables {
for _, t := range tt {
@@ -74,145 +63,184 @@ func (v *version) release_NB() {
for _, t := range tt {
num := t.file.Num()
if _, ok := tables[num]; !ok {
- s.tops.remove(t)
+ v.s.tops.remove(t)
}
}
}
- v.next.release_NB()
+ v.next.releaseNB()
v.next = nil
}
func (v *version) release() {
v.s.vmu.Lock()
- v.release_NB()
+ v.releaseNB()
v.s.vmu.Unlock()
}
-func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool, err error) {
- s := v.s
-
- ukey := key.ukey()
+func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
+ ukey := ikey.ukey()
- var tset *tSet
- tseek := true
-
- // We can search level-by-level since entries never hop across
- // levels. Therefore we are guaranteed that if we find data
- // in an smaller level, later levels are irrelevant.
- for level, ts := range v.tables {
- if len(ts) == 0 {
+ // Walk tables level-by-level.
+ for level, tables := range v.tables {
+ if len(tables) == 0 {
continue
}
if level == 0 {
// Level-0 files may overlap each other. Find all files that
- // overlap user_key and process them in order from newest to
- var tmp tFiles
- for _, t := range ts {
- if s.icmp.uCompare(ukey, t.min.ukey()) >= 0 &&
- s.icmp.uCompare(ukey, t.max.ukey()) <= 0 {
- tmp = append(tmp, t)
+ // overlap ukey.
+ for _, t := range tables {
+ if t.overlaps(v.s.icmp, ukey, ukey) {
+ if !f(level, t) {
+ return
+ }
}
}
-
- if len(tmp) == 0 {
- continue
- }
-
- tmp.sortByNum()
- ts = tmp
} else {
- i := ts.searchMax(key, s.icmp)
- if i >= len(ts) || s.icmp.uCompare(ukey, ts[i].min.ukey()) < 0 {
- continue
+ if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) {
+ t := tables[i]
+ if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+ if !f(level, t) {
+ return
+ }
+ }
}
+ }
- ts = ts[i : i+1]
+ if lf != nil && !lf(level) {
+ return
}
+ }
+}
- var l0found bool
- var l0seq uint64
- var l0type vType
- var l0value []byte
- for _, t := range ts {
- if tseek {
- if tset == nil {
- tset = &tSet{level, t}
- } else if tset.table.incrSeek() <= 0 {
- cstate = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
- tseek = false
- }
- }
+func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
+ ukey := ikey.ukey()
- var _rkey, rval []byte
- _rkey, rval, err = s.tops.get(t, key, ro)
- if err == ErrNotFound {
- continue
- } else if err != nil {
- return
+ var (
+ tset *tSet
+ tseek bool
+
+ // Level-0.
+ zfound bool
+ zseq uint64
+ zkt kType
+ zval []byte
+ )
+
+ err = ErrNotFound
+
+ // Since entries never hop across levels, finding a key/value
+ // in a smaller level makes later levels irrelevant.
+ v.walkOverlapping(ikey, func(level int, t *tFile) bool {
+ if !tseek {
+ if tset == nil {
+ tset = &tSet{level, t}
+ } else {
+ tseek = true
}
+ }
- rkey := iKey(_rkey)
- if seq, t, ok := rkey.parseNum(); ok {
- if s.icmp.uCompare(ukey, rkey.ukey()) == 0 {
- if level == 0 {
- if seq >= l0seq {
- l0found = true
- l0seq = seq
- l0type = t
- l0value = rval
- }
- } else {
- switch t {
- case tVal:
- value = rval
- case tDel:
- err = ErrNotFound
- default:
- panic("invalid type")
- }
- return
+ var (
+ fikey, fval []byte
+ ferr error
+ )
+ if noValue {
+ fikey, ferr = v.s.tops.findKey(t, ikey, ro)
+ } else {
+ fikey, fval, ferr = v.s.tops.find(t, ikey, ro)
+ }
+ switch ferr {
+ case nil:
+ case ErrNotFound:
+ return true
+ default:
+ err = ferr
+ return false
+ }
+
+ if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil {
+ if v.s.icmp.uCompare(ukey, fukey) == 0 {
+ if level == 0 {
+ if fseq >= zseq {
+ zfound = true
+ zseq = fseq
+ zkt = fkt
+ zval = fval
}
+ } else {
+ switch fkt {
+ case ktVal:
+ value = fval
+ err = nil
+ case ktDel:
+ default:
+ panic("leveldb: invalid iKey type")
+ }
+ return false
}
- } else {
- err = errors.New("leveldb: internal key corrupted")
- return
}
+ } else {
+ err = fkerr
+ return false
}
- if level == 0 && l0found {
- switch l0type {
- case tVal:
- value = l0value
- case tDel:
- err = ErrNotFound
+
+ return true
+ }, func(level int) bool {
+ if zfound {
+ switch zkt {
+ case ktVal:
+ value = zval
+ err = nil
+ case ktDel:
default:
- panic("invalid type")
+ panic("leveldb: invalid iKey type")
}
- return
+ return false
}
+
+ return true
+ })
+
+ if tseek && tset.table.consumeSeek() <= 0 {
+ tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
}
- err = ErrNotFound
return
}
-func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) {
- s := v.s
+func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
+ var tset *tSet
+ v.walkOverlapping(ikey, func(level int, t *tFile) bool {
+ if tset == nil {
+ tset = &tSet{level, t}
+ return true
+ } else {
+ if tset.table.consumeSeek() <= 0 {
+ tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+ }
+ return false
+ }
+ }, nil)
+
+ return
+}
+
+func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) {
// Merge all level zero files together since they may overlap
for _, t := range v.tables[0] {
- it := s.tops.newIterator(t, slice, ro)
+ it := v.s.tops.newIterator(t, slice, ro)
its = append(its, it)
}
- strict := s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator)
- for _, tt := range v.tables[1:] {
- if len(tt) == 0 {
+ strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader)
+ for _, tables := range v.tables[1:] {
+ if len(tables) == 0 {
continue
}
- it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, s.icmp, slice, ro), strict, true)
+ it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)
its = append(its, it)
}
@@ -220,7 +248,7 @@ func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []it
}
func (v *version) newStaging() *versionStaging {
- return &versionStaging{base: v}
+ return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())}
}
// Spawn a new version based on this version.
@@ -242,25 +270,25 @@ func (v *version) tLen(level int) int {
return len(v.tables[level])
}
-func (v *version) offsetOf(key iKey) (n uint64, err error) {
- for level, tt := range v.tables {
- for _, t := range tt {
- if v.s.icmp.Compare(t.max, key) <= 0 {
- // Entire file is before "key", so just add the file size
+func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
+ for level, tables := range v.tables {
+ for _, t := range tables {
+ if v.s.icmp.Compare(t.imax, ikey) <= 0 {
+ // Entire file is before "ikey", so just add the file size
n += t.size
- } else if v.s.icmp.Compare(t.min, key) > 0 {
- // Entire file is after "key", so ignore
+ } else if v.s.icmp.Compare(t.imin, ikey) > 0 {
+ // Entire file is after "ikey", so ignore
if level > 0 {
// Files other than level 0 are sorted by meta->min, so
// no further files in this level will contain data for
- // "key".
+ // "ikey".
break
}
} else {
- // "key" falls in the range for this table. Add the
- // approximate offset of "key" within the table.
+ // "ikey" falls in the range for this table. Add the
+ // approximate offset of "ikey" within the table.
var nn uint64
- nn, err = v.s.tops.offsetOf(t, key)
+ nn, err = v.s.tops.offsetOf(t, ikey)
if err != nil {
return 0, err
}
@@ -272,15 +300,16 @@ func (v *version) offsetOf(key iKey) (n uint64, err error) {
return
}
-func (v *version) pickLevel(min, max []byte) (level int) {
- if !v.tables[0].isOverlaps(min, max, false, v.s.icmp) {
- var r tFiles
- for ; level < kMaxMemCompactLevel; level++ {
- if v.tables[level+1].isOverlaps(min, max, true, v.s.icmp) {
+func (v *version) pickLevel(umin, umax []byte) (level int) {
+ if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) {
+ var overlaps tFiles
+ maxLevel := v.s.o.GetMaxMemCompationLevel()
+ for ; level < maxLevel; level++ {
+ if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) {
break
}
- v.tables[level+2].getOverlaps(min, max, &r, true, v.s.icmp.ucmp)
- if r.size() > kMaxGrandParentOverlapBytes {
+ overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false)
+ if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) {
break
}
}
@@ -294,7 +323,7 @@ func (v *version) computeCompaction() {
var bestLevel int = -1
var bestScore float64 = -1
- for level, ff := range v.tables {
+ for level, tables := range v.tables {
var score float64
if level == 0 {
// We treat level-0 specially by bounding the number of files
@@ -308,9 +337,9 @@ func (v *version) computeCompaction() {
// file size is small (perhaps because of a small write-buffer
// setting, or very high compression ratios, or lots of
// overwrites/deletions).
- score = float64(len(ff)) / kL0_CompactionTrigger
+ score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger())
} else {
- score = float64(ff.size()) / levelMaxSize[level]
+ score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level))
}
if score > bestScore {
@@ -327,66 +356,62 @@ func (v *version) needCompaction() bool {
return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil
}
+type tablesScratch struct {
+ added map[uint64]atRecord
+ deleted map[uint64]struct{}
+}
+
type versionStaging struct {
base *version
- tables [kNumLevels]struct {
- added map[uint64]ntRecord
- deleted map[uint64]struct{}
- }
+ tables []tablesScratch
}
func (p *versionStaging) commit(r *sessionRecord) {
- btt := p.base.tables
-
- // deleted tables
- for _, tr := range r.deletedTables {
- tm := &(p.tables[tr.level])
+ // Deleted tables.
+ for _, r := range r.deletedTables {
+ tm := &(p.tables[r.level])
- bt := btt[tr.level]
- if len(bt) > 0 {
+ if len(p.base.tables[r.level]) > 0 {
if tm.deleted == nil {
tm.deleted = make(map[uint64]struct{})
}
- tm.deleted[tr.num] = struct{}{}
+ tm.deleted[r.num] = struct{}{}
}
if tm.added != nil {
- delete(tm.added, tr.num)
+ delete(tm.added, r.num)
}
}
- // new tables
- for _, tr := range r.addedTables {
- tm := &(p.tables[tr.level])
+ // New tables.
+ for _, r := range r.addedTables {
+ tm := &(p.tables[r.level])
if tm.added == nil {
- tm.added = make(map[uint64]ntRecord)
+ tm.added = make(map[uint64]atRecord)
}
- tm.added[tr.num] = tr
+ tm.added[r.num] = r
if tm.deleted != nil {
- delete(tm.deleted, tr.num)
+ delete(tm.deleted, r.num)
}
}
}
func (p *versionStaging) finish() *version {
- s := p.base.s
- btt := p.base.tables
-
- // build new version
- nv := &version{s: s}
+ // Build new version.
+ nv := newVersion(p.base.s)
for level, tm := range p.tables {
- bt := btt[level]
+ btables := p.base.tables[level]
- n := len(bt) + len(tm.added) - len(tm.deleted)
+ n := len(btables) + len(tm.added) - len(tm.deleted)
if n < 0 {
n = 0
}
nt := make(tFiles, 0, n)
- // base tables
- for _, t := range bt {
+ // Base tables.
+ for _, t := range btables {
if _, ok := tm.deleted[t.file.Num()]; ok {
continue
}
@@ -396,17 +421,21 @@ func (p *versionStaging) finish() *version {
nt = append(nt, t)
}
- // new tables
- for _, tr := range tm.added {
- nt = append(nt, tr.makeFile(s))
+ // New tables.
+ for _, r := range tm.added {
+ nt = append(nt, p.base.s.tableFileFromRecord(r))
}
- // sort tables
- nt.sortByKey(s.icmp)
+ // Sort tables.
+ if level == 0 {
+ nt.sortByNum()
+ } else {
+ nt.sortByKey(p.base.s.icmp)
+ }
nv.tables[level] = nt
}
- // compute compaction score for new version
+ // Compute compaction score for new version.
nv.computeCompaction()
return nv
@@ -421,7 +450,7 @@ func (vr *versionReleaser) Release() {
v := vr.v
v.s.vmu.Lock()
if !vr.once {
- v.release_NB()
+ v.releaseNB()
vr.once = true
}
v.s.vmu.Unlock()
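computeCompaction now pulls its thresholds from the options instead of the removed levelMaxSize table: level 0 is scored by file count against the L0 trigger, deeper levels by total bytes against a per-level cap. A rough sketch of that scoring; the 10 MiB base and tenfold growth mirror the removed init() above, while the trigger of 4 files is an assumed default, not taken from this hunk:

package main

import "fmt"

const (
	l0Trigger     = 4            // assumed default: files in level 0 before compaction
	baseTotalSize = 10 * 1048576 // 10 MiB cap for level 1, as in the removed init()
)

// totalSizeCap grows the per-level byte cap tenfold per level, like the old table.
func totalSizeCap(level int) float64 {
	limit := float64(baseTotalSize)
	for n := level; n > 1; n-- {
		limit *= 10
	}
	return limit
}

// score mirrors computeCompaction: file count for level 0, total bytes elsewhere.
func score(level, files int, bytes uint64) float64 {
	if level == 0 {
		return float64(files) / float64(l0Trigger)
	}
	return float64(bytes) / totalSizeCap(level)
}

func main() {
	fmt.Printf("L0, 6 files:      %.2f\n", score(0, 6, 0))          // 1.50 -> needs compaction
	fmt.Printf("L2, 80 MiB total: %.2f\n", score(2, 0, 80*1048576)) // 0.80 -> fine for now
}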
diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go
new file mode 100644
index 000000000..552a17bfb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go
@@ -0,0 +1,292 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n == 0 {
+ return 0, 0, ErrCorrupt
+ }
+ if uint64(int(v)) != v {
+ return 0, 0, errors.New("snappy: decoded block is too large")
+ }
+ return int(v), n, nil
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if len(dst) < dLen {
+ dst = make([]byte, dLen)
+ }
+
+ var d, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint(src[s] >> 2)
+ switch {
+ case x < 60:
+ s += 1
+ case x == 60:
+ s += 2
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-1])
+ case x == 61:
+ s += 3
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-2]) | uint(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
+ }
+ length = int(x + 1)
+ if length <= 0 {
+ return nil, errors.New("snappy: unsupported literal length")
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return nil, ErrCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
+
+ case tagCopy2:
+ s += 3
+ if s > len(src) {
+ return nil, ErrCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(src[s-2]) | int(src[s-1])<<8
+
+ case tagCopy4:
+ return nil, errors.New("snappy: unsupported COPY_4 tag")
+ }
+
+ end := d + length
+ if offset > d || end > len(dst) {
+ return nil, ErrCorrupt
+ }
+ for ; d < end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != dLen {
+ return nil, ErrCorrupt
+ }
+ return dst[:d], nil
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxUncompressedChunkLen),
+ buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4]) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if !r.readFull(r.decoded[:n]) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)]) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+
+ } else {
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen]) {
+ return 0, r.err
+ }
+ }
+ }
+}
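For block-level use, Decode reuses dst when it is large enough and allocates otherwise, so a caller can size the buffer up front with DecodedLen. A small usage sketch against the vendored import path introduced by this change:

package main

import (
	"fmt"

	"github.com/syndtr/gosnappy/snappy"
)

func main() {
	src, err := snappy.Encode(nil, []byte("hello hello hello hello"))
	if err != nil {
		panic(err)
	}

	n, err := snappy.DecodedLen(src)
	if err != nil {
		panic(err)
	}
	dst := make([]byte, n) // pre-sized, so Decode will not allocate
	out, err := snappy.Decode(dst, src)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out), len(out) == n) // "hello hello hello hello" true
}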
diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
index b2371db11..dda372422 100644
--- a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
@@ -6,6 +6,7 @@ package snappy
import (
"encoding/binary"
+ "io"
)
// We limit how far copy back-references can go, the same as the C++ code.
@@ -172,3 +173,86 @@ func MaxEncodedLen(srcLen int) int {
// This last factor dominates the blowup, so the final estimate is:
return 32 + srcLen + srcLen/6
}
+
+// NewWriter returns a new Writer that compresses to w, using the framing
+// format described at
+// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+ enc []byte
+ buf [checksumSize + chunkHeaderSize]byte
+ wroteHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ w.wroteHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (n int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ if !w.wroteHeader {
+ copy(w.enc, magicChunk)
+ if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
+ w.err = err
+ return n, err
+ }
+ w.wroteHeader = true
+ }
+ for len(p) > 0 {
+ var uncompressed []byte
+ if len(p) > maxUncompressedChunkLen {
+ uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkBody, err := Encode(w.enc, uncompressed)
+ if err != nil {
+ w.err = err
+ return n, err
+ }
+ if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
+ }
+
+ chunkLen := 4 + len(chunkBody)
+ w.buf[0] = chunkType
+ w.buf[1] = uint8(chunkLen >> 0)
+ w.buf[2] = uint8(chunkLen >> 8)
+ w.buf[3] = uint8(chunkLen >> 16)
+ w.buf[4] = uint8(checksum >> 0)
+ w.buf[5] = uint8(checksum >> 8)
+ w.buf[6] = uint8(checksum >> 16)
+ w.buf[7] = uint8(checksum >> 24)
+ if _, err = w.w.Write(w.buf[:]); err != nil {
+ w.err = err
+ return n, err
+ }
+ if _, err = w.w.Write(chunkBody); err != nil {
+ w.err = err
+ return n, err
+ }
+ n += len(uncompressed)
+ }
+ return n, nil
+}
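The framed Writer emits a stream-identifier chunk and then one chunk per slice of input up to 64 KiB, falling back to an uncompressed chunk whenever compression would save less than 12.5%. A round-trip sketch through the new Writer and Reader, using the vendored import path shown above:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/syndtr/gosnappy/snappy"
)

func main() {
	payload := bytes.Repeat([]byte("leveldb "), 4096) // ~32 KiB, highly compressible

	var buf bytes.Buffer
	if _, err := snappy.NewWriter(&buf).Write(payload); err != nil {
		panic(err)
	}
	out, err := ioutil.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, payload), buf.Len() < len(payload)) // true true
}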
diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
index 2f1b790d0..043bf3d81 100644
--- a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
@@ -8,6 +8,10 @@
// The C++ snappy implementation is at http://code.google.com/p/snappy/
package snappy
+import (
+ "hash/crc32"
+)
+
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
@@ -36,3 +40,29 @@ const (
tagCopy2 = 0x02
tagCopy4 = 0x03
)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+ // https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536 bytes".
+ maxUncompressedChunkLen = 65536
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
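The checksum used by the framing format is CRC-32C (Castagnoli) with the rotate-and-offset masking applied by the crc helper above; per the framing document cited in the comment, the masking guards against data that itself contains embedded CRCs. A standalone sketch of the same computation (maskedCRC is an illustrative name):

package main

import (
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// maskedCRC is the framing checksum: CRC-32C rotated right by 15 bits plus a constant.
func maskedCRC(b []byte) uint32 {
	c := crc32.Update(0, castagnoli, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}

func main() {
	fmt.Printf("0x%08x\n", maskedCRC([]byte("sNaPpY")))
}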
diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
index 7ba839244..0623385b7 100644
--- a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
@@ -18,7 +18,10 @@ import (
"testing"
)
-var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
+var (
+ download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
+ testdata = flag.String("testdata", "testdata", "Directory containing the test data")
+)
func roundtrip(b, ebuf, dbuf []byte) error {
e, err := Encode(ebuf, b)
@@ -55,11 +58,11 @@ func TestSmallCopy(t *testing.T) {
}
func TestSmallRand(t *testing.T) {
- rand.Seed(27354294)
+ rng := rand.New(rand.NewSource(27354294))
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
- for i, _ := range b {
- b[i] = uint8(rand.Uint32())
+ for i := range b {
+ b[i] = uint8(rng.Uint32())
}
if err := roundtrip(b, nil, nil); err != nil {
t.Fatal(err)
@@ -70,7 +73,7 @@ func TestSmallRand(t *testing.T) {
func TestSmallRegular(t *testing.T) {
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
- for i, _ := range b {
+ for i := range b {
b[i] = uint8(i%10 + 'a')
}
if err := roundtrip(b, nil, nil); err != nil {
@@ -79,6 +82,120 @@ func TestSmallRegular(t *testing.T) {
}
}
+func cmp(a, b []byte) error {
+ if len(a) != len(b) {
+ return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
+ }
+ }
+ return nil
+}
+
+func TestFramingFormat(t *testing.T) {
+ // src is comprised of alternating 1e5-sized sequences of random
+ // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
+ // because it is larger than maxUncompressedChunkLen (64k).
+ src := make([]byte, 1e6)
+ rng := rand.New(rand.NewSource(1))
+ for i := 0; i < 10; i++ {
+ if i%2 == 0 {
+ for j := 0; j < 1e5; j++ {
+ src[1e5*i+j] = uint8(rng.Intn(256))
+ }
+ } else {
+ for j := 0; j < 1e5; j++ {
+ src[1e5*i+j] = uint8(i)
+ }
+ }
+ }
+
+ buf := new(bytes.Buffer)
+ if _, err := NewWriter(buf).Write(src); err != nil {
+ t.Fatalf("Write: encoding: %v", err)
+ }
+ dst, err := ioutil.ReadAll(NewReader(buf))
+ if err != nil {
+ t.Fatalf("ReadAll: decoding: %v", err)
+ }
+ if err := cmp(dst, src); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestReaderReset(t *testing.T) {
+ gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
+ buf := new(bytes.Buffer)
+ if _, err := NewWriter(buf).Write(gold); err != nil {
+ t.Fatalf("Write: %v", err)
+ }
+ encoded, invalid, partial := buf.String(), "invalid", "partial"
+ r := NewReader(nil)
+ for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
+ if s == partial {
+ r.Reset(strings.NewReader(encoded))
+ if _, err := r.Read(make([]byte, 101)); err != nil {
+ t.Errorf("#%d: %v", i, err)
+ continue
+ }
+ continue
+ }
+ r.Reset(strings.NewReader(s))
+ got, err := ioutil.ReadAll(r)
+ switch s {
+ case encoded:
+ if err != nil {
+ t.Errorf("#%d: %v", i, err)
+ continue
+ }
+ if err := cmp(got, gold); err != nil {
+ t.Errorf("#%d: %v", i, err)
+ continue
+ }
+ case invalid:
+ if err == nil {
+ t.Errorf("#%d: got nil error, want non-nil", i)
+ continue
+ }
+ }
+ }
+}
+
+func TestWriterReset(t *testing.T) {
+ gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
+ var gots, wants [][]byte
+ const n = 20
+ w, failed := NewWriter(nil), false
+ for i := 0; i <= n; i++ {
+ buf := new(bytes.Buffer)
+ w.Reset(buf)
+ want := gold[:len(gold)*i/n]
+ if _, err := w.Write(want); err != nil {
+ t.Errorf("#%d: Write: %v", i, err)
+ failed = true
+ continue
+ }
+ got, err := ioutil.ReadAll(NewReader(buf))
+ if err != nil {
+ t.Errorf("#%d: ReadAll: %v", i, err)
+ failed = true
+ continue
+ }
+ gots = append(gots, got)
+ wants = append(wants, want)
+ }
+ if failed {
+ return
+ }
+ for i := range gots {
+ if err := cmp(gots[i], wants[i]); err != nil {
+ t.Errorf("#%d: %v", i, err)
+ }
+ }
+}
+
func benchDecode(b *testing.B, src []byte) {
encoded, err := Encode(nil, src)
if err != nil {
@@ -102,7 +219,7 @@ func benchEncode(b *testing.B, src []byte) {
}
}
-func readFile(b *testing.B, filename string) []byte {
+func readFile(b testing.TB, filename string) []byte {
src, err := ioutil.ReadFile(filename)
if err != nil {
b.Fatalf("failed reading %s: %s", filename, err)
@@ -144,7 +261,7 @@ func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
// testFiles' values are copied directly from
-// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.
+// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
// The label field is unused in snappy-go.
var testFiles = []struct {
label string
@@ -152,29 +269,36 @@ var testFiles = []struct {
}{
{"html", "html"},
{"urls", "urls.10K"},
- {"jpg", "house.jpg"},
- {"pdf", "mapreduce-osdi-1.pdf"},
+ {"jpg", "fireworks.jpeg"},
+ {"jpg_200", "fireworks.jpeg"},
+ {"pdf", "paper-100k.pdf"},
{"html4", "html_x_4"},
- {"cp", "cp.html"},
- {"c", "fields.c"},
- {"lsp", "grammar.lsp"},
- {"xls", "kennedy.xls"},
{"txt1", "alice29.txt"},
{"txt2", "asyoulik.txt"},
{"txt3", "lcet10.txt"},
{"txt4", "plrabn12.txt"},
- {"bin", "ptt5"},
- {"sum", "sum"},
- {"man", "xargs.1"},
{"pb", "geo.protodata"},
{"gaviota", "kppkn.gtb"},
}
// The test data files are present at this canonical URL.
-const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/"
+const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
func downloadTestdata(basename string) (errRet error) {
- filename := filepath.Join("testdata", basename)
+ filename := filepath.Join(*testdata, basename)
+ if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
+ return nil
+ }
+
+ if !*download {
+ return fmt.Errorf("test data not found; skipping benchmark without the -download flag")
+ }
+ // Download the official snappy C++ implementation reference test data
+ // files for benchmarking.
+ if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
+ return fmt.Errorf("failed to create testdata: %s", err)
+ }
+
f, err := os.Create(filename)
if err != nil {
return fmt.Errorf("failed to create %s: %s", filename, err)
@@ -185,36 +309,27 @@ func downloadTestdata(basename string) (errRet error) {
os.Remove(filename)
}
}()
- resp, err := http.Get(baseURL + basename)
+ url := baseURL + basename
+ resp, err := http.Get(url)
if err != nil {
- return fmt.Errorf("failed to download %s: %s", baseURL+basename, err)
+ return fmt.Errorf("failed to download %s: %s", url, err)
}
defer resp.Body.Close()
+ if s := resp.StatusCode; s != http.StatusOK {
+ return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
+ }
_, err = io.Copy(f, resp.Body)
if err != nil {
- return fmt.Errorf("failed to write %s: %s", filename, err)
+ return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
}
return nil
}
func benchFile(b *testing.B, n int, decode bool) {
- filename := filepath.Join("testdata", testFiles[n].filename)
- if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
- if !*download {
- b.Fatal("test data not found; skipping benchmark without the -download flag")
- }
- // Download the official snappy C++ implementation reference test data
- // files for benchmarking.
- if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
- b.Fatalf("failed to create testdata: %s", err)
- }
- for _, tf := range testFiles {
- if err := downloadTestdata(tf.filename); err != nil {
- b.Fatalf("failed to download testdata: %s", err)
- }
- }
+ if err := downloadTestdata(testFiles[n].filename); err != nil {
+ b.Fatalf("failed to download testdata: %s", err)
}
- data := readFile(b, filename)
+ data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
if decode {
benchDecode(b, data)
} else {
@@ -235,12 +350,6 @@ func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
-func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
-func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
-func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
-func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
-func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
-func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
@@ -253,9 +362,3 @@ func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
-func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
-func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
-func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
-func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
-func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
-func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }
diff --git a/README.md b/README.md
index 64af8e5c4..6fcf29b88 100644
--- a/README.md
+++ b/README.md
@@ -52,7 +52,7 @@ Automated (dev) builds
* Ubuntu
[trusty](https://build.ethdev.com/builds/Linux%20Go%20develop%20deb%20i386-trusty/latest/) |
[utopic](https://build.ethdev.com/builds/Linux%20Go%20develop%20deb%20i386-utopic/latest/)
-* [Windows] Coming soon&trade;
+* [Windows 64-bit](https://build.ethdev.com/builds/Windows%20Go%20develop%20branch/Geth-Win64-latest.7z)
Executables
===========
@@ -94,3 +94,4 @@ Commits that are directly based on master are simply ignored.
See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide) for more details on configuring your environment, testing, and dependency management.
+TEST
diff --git a/cmd/geth/admin.go b/cmd/geth/admin.go
index e75ff047a..31f8d4400 100644
--- a/cmd/geth/admin.go
+++ b/cmd/geth/admin.go
@@ -35,7 +35,6 @@ func (js *jsre) adminBindings() {
admin.Set("import", js.importChain)
admin.Set("export", js.exportChain)
admin.Set("verbosity", js.verbosity)
- admin.Set("backtrace", js.backtrace)
admin.Set("progress", js.downloadProgress)
admin.Set("miner", struct{}{})
@@ -49,11 +48,12 @@ func (js *jsre) adminBindings() {
admin.Set("debug", struct{}{})
t, _ = admin.Get("debug")
debug := t.Object()
+ debug.Set("backtrace", js.backtrace)
debug.Set("printBlock", js.printBlock)
debug.Set("dumpBlock", js.dumpBlock)
debug.Set("getBlockRlp", js.getBlockRlp)
debug.Set("setHead", js.setHead)
- debug.Set("block", js.debugBlock)
+ debug.Set("processBlock", js.debugBlock)
}
func (js *jsre) getBlock(call otto.FunctionCall) (*types.Block, error) {
@@ -203,16 +203,26 @@ func (js *jsre) startRPC(call otto.FunctionCall) otto.Value {
fmt.Println(err)
return otto.FalseValue()
}
+
port, err := call.Argument(1).ToInteger()
if err != nil {
fmt.Println(err)
return otto.FalseValue()
}
+ corsDomain := js.corsDomain
+ if len(call.ArgumentList) > 2 {
+ corsDomain, err = call.Argument(2).ToString()
+ if err != nil {
+ fmt.Println(err)
+ return otto.FalseValue()
+ }
+ }
+
config := rpc.RpcConfig{
ListenAddress: addr,
ListenPort: uint(port),
- // CorsDomain: ctx.GlobalString(RPCCORSDomainFlag.Name),
+ CorsDomain: corsDomain,
}
xeth := xeth.New(js.ethereum, nil)
@@ -274,10 +284,6 @@ func (js *jsre) unlock(call otto.FunctionCall) otto.Value {
}
}
am := js.ethereum.AccountManager()
- // err := am.Unlock(common.FromHex(split[0]), split[1])
- // if err != nil {
- // utils.Fatalf("Unlock account failed '%v'", err)
- // }
err = am.TimedUnlock(common.FromHex(addr), passphrase, time.Duration(seconds)*time.Second)
if err != nil {
fmt.Printf("Unlock account failed '%v'\n", err)
diff --git a/cmd/geth/blocktest.go b/cmd/geth/blocktest.go
index 343a0bf28..5c80ad07e 100644
--- a/cmd/geth/blocktest.go
+++ b/cmd/geth/blocktest.go
@@ -104,7 +104,7 @@ func runOneBlockTest(ctx *cli.Context, test *tests.BlockTest) (*eth.Ethereum, er
ethereum.ResetWithGenesisBlock(test.Genesis)
// import pre accounts
- statedb, err := test.InsertPreState(ethereum.StateDb())
+ statedb, err := test.InsertPreState(ethereum)
if err != nil {
return ethereum, fmt.Errorf("InsertPreState: %v", err)
}
diff --git a/cmd/geth/js.go b/cmd/geth/js.go
index 6e5a6f1c7..a545de1d0 100644
--- a/cmd/geth/js.go
+++ b/cmd/geth/js.go
@@ -59,17 +59,19 @@ func (r dumbterm) PasswordPrompt(p string) (string, error) {
func (r dumbterm) AppendHistory(string) {}
type jsre struct {
- re *re.JSRE
- ethereum *eth.Ethereum
- xeth *xeth.XEth
- ps1 string
- atexit func()
-
+ re *re.JSRE
+ ethereum *eth.Ethereum
+ xeth *xeth.XEth
+ ps1 string
+ atexit func()
+ corsDomain string
prompter
}
-func newJSRE(ethereum *eth.Ethereum, libPath string, interactive bool) *jsre {
+func newJSRE(ethereum *eth.Ethereum, libPath string, interactive bool, corsDomain string) *jsre {
js := &jsre{ethereum: ethereum, ps1: "> "}
+ // set default cors domain used by startRpc from CLI flag
+ js.corsDomain = corsDomain
js.xeth = xeth.New(ethereum, js)
js.re = re.New(libPath)
js.apiBindings()
@@ -118,7 +120,7 @@ func (js *jsre) apiBindings() {
utils.Fatalf("Error loading ethereum.js: %v", err)
}
- _, err = js.re.Eval("var web3 = require('ethereum.js');")
+ _, err = js.re.Eval("var web3 = require('web3');")
if err != nil {
utils.Fatalf("Error requiring web3: %v", err)
}
diff --git a/cmd/geth/js_test.go b/cmd/geth/js_test.go
index 521039121..50528b80a 100644
--- a/cmd/geth/js_test.go
+++ b/cmd/geth/js_test.go
@@ -3,16 +3,16 @@ package main
import (
"fmt"
"io/ioutil"
- "path/filepath"
"os"
"path"
+ "path/filepath"
"testing"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
- "runtime"
"regexp"
+ "runtime"
"strconv"
)
@@ -36,7 +36,7 @@ func testJEthRE(t *testing.T) (*jsre, *eth.Ethereum) {
t.Fatal("%v", err)
}
assetPath := path.Join(os.Getenv("GOPATH"), "src", "github.com", "ethereum", "go-ethereum", "cmd", "mist", "assets", "ext")
- repl := newJSRE(ethereum, assetPath, false)
+ repl := newJSRE(ethereum, assetPath, false, "")
return repl, ethereum
}
@@ -70,8 +70,8 @@ func TestAccounts(t *testing.T) {
t.Errorf("address not hex: %q", addr)
}
- checkEvalJSON(t, repl, `eth.accounts`, `["` + addr + `"]`)
- checkEvalJSON(t, repl, `eth.coinbase`, `"` + addr + `"`)
+ checkEvalJSON(t, repl, `eth.accounts`, `["`+addr+`"]`)
+ checkEvalJSON(t, repl, `eth.coinbase`, `"`+addr+`"`)
}
func TestBlockChain(t *testing.T) {
@@ -97,13 +97,13 @@ func TestBlockChain(t *testing.T) {
tmpfile := filepath.Join(tmp, "export.chain")
tmpfileq := strconv.Quote(tmpfile)
- checkEvalJSON(t, repl, `admin.export(` + tmpfileq + `)`, `true`)
+ checkEvalJSON(t, repl, `admin.export(`+tmpfileq+`)`, `true`)
if _, err := os.Stat(tmpfile); err != nil {
t.Fatal(err)
}
// check import, verify that dumpBlock gives the same result.
- checkEvalJSON(t, repl, `admin.import(` + tmpfileq + `)`, `true`)
+ checkEvalJSON(t, repl, `admin.import(`+tmpfileq+`)`, `true`)
checkEvalJSON(t, repl, `admin.debug.dumpBlock()`, beforeExport)
}
@@ -129,7 +129,7 @@ func TestRPC(t *testing.T) {
}
func checkEvalJSON(t *testing.T, re *jsre, expr, want string) error {
- val, err := re.re.Run("JSON.stringify("+ expr + ")")
+ val, err := re.re.Run("JSON.stringify(" + expr + ")")
if err == nil && val.String() != want {
err = fmt.Errorf("Output mismatch for `%s`:\ngot: %s\nwant: %s", expr, val.String(), want)
}
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index e399731e7..6ffc3c4a0 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -47,7 +47,7 @@ import _ "net/http/pprof"
const (
ClientIdentifier = "Geth"
- Version = "0.9.11"
+ Version = "0.9.12"
)
var app = utils.NewApp(Version, "the go-ethereum command line interface")
@@ -97,6 +97,8 @@ The output of this command is supposed to be machine-readable.
Manage accounts lets you create new accounts, list all existing accounts,
import a private key into a new account.
+'account help' shows a list of subcommands or help for one subcommand.
+
It supports interactive mode, when you are prompted for password as well as
non-interactive mode where passwords are supplied via a given password file.
Non-interactive mode is only meant for scripted use on test networks or known
@@ -186,8 +188,8 @@ Use "ethereum dump 0" to dump the genesis block.
Usage: `Geth Console: interactive JavaScript environment`,
Description: `
The Geth console is an interactive shell for the JavaScript runtime environment
-which exposes a node admin interface as well as the DAPP JavaScript API.
-See https://github.com/ethereum/go-ethereum/wiki/Frontier-Console
+which exposes a node admin interface as well as the Ðapp JavaScript API.
+See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
`,
},
{
@@ -195,7 +197,7 @@ See https://github.com/ethereum/go-ethereum/wiki/Frontier-Console
Name: "js",
Usage: `executes the given JavaScript files in the Geth JavaScript VM`,
Description: `
-The JavaScript VM exposes a node admin interface as well as the DAPP
+The JavaScript VM exposes a node admin interface as well as the Ðapp
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
`,
},
@@ -261,14 +263,11 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
// flag.BoolVar(&DiffTool, "difftool", false, "creates output for diff'ing. Sets LogLevel=0")
// flag.StringVar(&DiffType, "diff", "all", "sets the level of diff output [vm, all]. Has no effect if difftool=false")
- // potential subcommands:
- // flag.StringVar(&SecretFile, "import", "", "imports the file given (hex or mnemonic formats)")
- // flag.StringVar(&ExportDir, "export", "", "exports the session keyring to files in the directory given")
- // flag.BoolVar(&GenAddr, "genaddr", false, "create a new priv/pub key")
}
func main() {
- fmt.Printf("\n\n█ █ █ █ █ █ ███ █ █\n█ █ █ ███ █ ███ ███ ███ ███ ███ ███ ███ ███ ███ █ ███ ███ ██ ███ ███ ███ \n█ █ █ ██ █ █ █ █ ███ ██ █ █ █ █ █ █ ██ ██ █ █ █ █ █ █ █ ██ █ \n█████ ███ ██ ███ ███ █ █ ███ ██ ███ ██ █ █ ███ █ █ ███ █ █ ██ ██ ███ █ \n\n")
+ //fmt.Printf("\n 🌞\n\n ᴡᴇʟᴄᴏᴍᴇ ᴛᴏ ᴛʜᴇ\n 𝐅 𝐑 𝐎 𝐍 𝐓 𝐈 𝐄 𝐑\n\n🌾 🌵🌾🌾 🐎 🌾 🌵 🌾\n\n")
+ fmt.Println("\n Welcome to the\n FRONTIER\n")
runtime.GOMAXPROCS(runtime.NumCPU())
defer logger.Flush()
if err := app.Run(os.Args); err != nil {
@@ -298,7 +297,7 @@ func console(ctx *cli.Context) {
}
startEth(ctx, ethereum)
- repl := newJSRE(ethereum, ctx.String(utils.JSpathFlag.Name), true)
+ repl := newJSRE(ethereum, ctx.String(utils.JSpathFlag.Name), true, ctx.GlobalString(utils.RPCCORSDomainFlag.Name))
repl.interactive()
ethereum.Stop()
@@ -313,7 +312,7 @@ func execJSFiles(ctx *cli.Context) {
}
startEth(ctx, ethereum)
- repl := newJSRE(ethereum, ctx.String(utils.JSpathFlag.Name), false)
+ repl := newJSRE(ethereum, ctx.String(utils.JSpathFlag.Name), false, ctx.GlobalString(utils.RPCCORSDomainFlag.Name))
for _, file := range ctx.Args() {
repl.exec(file)
}
@@ -356,10 +355,14 @@ func startEth(ctx *cli.Context, eth *eth.Ethereum) {
}
// Start auxiliary services if enabled.
if ctx.GlobalBool(utils.RPCEnabledFlag.Name) {
- utils.StartRPC(eth, ctx)
+ if err := utils.StartRPC(eth, ctx); err != nil {
+ utils.Fatalf("Error starting RPC: %v", err)
+ }
}
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) {
- eth.StartMining()
+ if err := eth.StartMining(); err != nil {
+ utils.Fatalf("%v", err)
+ }
}
}
@@ -369,8 +372,10 @@ func accountList(ctx *cli.Context) {
if err != nil {
utils.Fatalf("Could not list accounts: %v", err)
}
- for _, acct := range accts {
- fmt.Printf("Address: %x\n", acct)
+ name := "Primary"
+ for i, acct := range accts {
+ fmt.Printf("%s #%d: %x\n", name, i, acct)
+ name = "Account"
}
}
diff --git a/cmd/mist/assets/examples/coin.html b/cmd/mist/assets/examples/coin.html
index e78f2d73f..e6baf4579 100644
--- a/cmd/mist/assets/examples/coin.html
+++ b/cmd/mist/assets/examples/coin.html
@@ -3,7 +3,7 @@
<title>JevCoin</title>
<head>
<script type="text/javascript" src="../ext/bignumber.min.js"></script>
-<script type="text/javascript" src="../ext/ethereum.js/dist/ethereum-light.min.js"></script>
+<script type="text/javascript" src="../ext/ethereum.js/dist/web3-light.min.js"></script>
</head>
<body>
@@ -32,7 +32,6 @@
</body>
<script type="text/javascript">
- var web3 = require('ethereum.js');
var eth = web3.eth;
web3.setProvider(new web3.providers.HttpProvider('http://localhost:8545'));
diff --git a/cmd/mist/assets/ext/ethereum.js b/cmd/mist/assets/ext/ethereum.js
-Subproject c80ede50c3b60a482f1ec76038325ec52f5e73b
+Subproject 3b799d128452639463424c657956ee90a28daec
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index b8f3982e2..c013510d8 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -79,22 +79,22 @@ var (
}
ProtocolVersionFlag = cli.IntFlag{
Name: "protocolversion",
- Usage: "ETH protocol version",
+ Usage: "ETH protocol version (integer)",
Value: eth.ProtocolVersion,
}
NetworkIdFlag = cli.IntFlag{
Name: "networkid",
- Usage: "Network Id",
+ Usage: "Network Id (integer)",
Value: eth.NetworkId,
}
BlockchainVersionFlag = cli.IntFlag{
Name: "blockchainversion",
- Usage: "Blockchain version",
+ Usage: "Blockchain version (integer)",
Value: core.BlockChainVersion,
}
IdentityFlag = cli.StringFlag{
Name: "identity",
- Usage: "node name",
+ Usage: "Custom node name",
}
NatspecEnabledFlag = cli.BoolFlag{
Name: "natspec",
@@ -113,18 +113,18 @@ var (
}
EtherbaseFlag = cli.StringFlag{
Name: "etherbase",
- Usage: "public address for block mining rewards. By default the address of your primary account is used",
+ Usage: "Public address for block mining rewards. By default the address of your primary account is used",
Value: "primary",
}
UnlockedAccountFlag = cli.StringFlag{
Name: "unlock",
- Usage: "unlock the account given until this program exits (prompts for password). '--unlock primary' unlocks the primary account",
+ Usage: "Unlock the account given until this program exits (prompts for password). '--unlock primary' unlocks the primary account",
Value: "",
}
PasswordFileFlag = cli.StringFlag{
Name: "password",
- Usage: "Path to password file for (un)locking an existing account.",
+ Usage: "Path to password file to use with options and subcommands needing a password",
Value: "",
}
@@ -135,7 +135,7 @@ var (
}
LogLevelFlag = cli.IntFlag{
Name: "loglevel",
- Usage: "0-5 (silent, error, warn, info, debug, debug detail)",
+ Usage: "Logging verbosity: 0-6 (0=silent, 1=error, 2=warn, 3=info, 4=core, 5=debug, 6=debug detail)",
Value: int(logger.InfoLevel),
}
LogJSONFlag = cli.StringFlag{
@@ -149,7 +149,7 @@ var (
}
LogVModuleFlag = cli.GenericFlag{
Name: "vmodule",
- Usage: "The syntax of the argument is a comma-separated list of pattern=N, where pattern is a literal file name (minus the \".go\" suffix) or \"glob\" pattern and N is a V level.",
+ Usage: "The syntax of the argument is a comma-separated list of pattern=N, where pattern is a literal file name (minus the \".go\" suffix) or \"glob\" pattern and N is a log verbosity level.",
Value: glog.GetVModule(),
}
VMDebugFlag = cli.BoolFlag{
@@ -158,12 +158,12 @@ var (
}
BacktraceAtFlag = cli.GenericFlag{
Name: "backtrace_at",
- Usage: "When set to a file and line number holding a logging statement a stack trace will be written to the Info log",
+ Usage: "If set to a file and line number (e.g., \"block.go:271\") holding a logging statement, a stack trace will be logged",
Value: glog.GetTraceLocation(),
}
PProfEanbledFlag = cli.BoolFlag{
Name: "pprof",
- Usage: "Whether the profiling server should be enabled",
+ Usage: "Enable the profiling server on localhost",
}
PProfPortFlag = cli.IntFlag{
Name: "pprofport",
@@ -174,7 +174,7 @@ var (
// RPC settings
RPCEnabledFlag = cli.BoolFlag{
Name: "rpc",
- Usage: "Whether RPC server is enabled",
+ Usage: "Enable the JSON-RPC server",
}
RPCListenAddrFlag = cli.StringFlag{
Name: "rpcaddr",
@@ -194,7 +194,7 @@ var (
// Network Settings
MaxPeersFlag = cli.IntFlag{
Name: "maxpeers",
- Usage: "Maximum number of network peers",
+ Usage: "Maximum number of network peers (network disabled if set to 0)",
Value: 16,
}
ListenPortFlag = cli.IntFlag{
@@ -204,7 +204,7 @@ var (
}
BootnodesFlag = cli.StringFlag{
Name: "bootnodes",
- Usage: "Space-separated enode URLs for discovery bootstrap",
+ Usage: "Space-separated enode URLs for p2p discovery bootstrap",
Value: "",
}
NodeKeyFileFlag = cli.StringFlag{
@@ -217,12 +217,12 @@ var (
}
NATFlag = cli.StringFlag{
Name: "nat",
- Usage: "Port mapping mechanism (any|none|upnp|pmp|extip:<IP>)",
+ Usage: "NAT port mapping mechanism (any|none|upnp|pmp|extip:<IP>)",
Value: "any",
}
WhisperEnabledFlag = cli.BoolFlag{
Name: "shh",
- Usage: "Whether the whisper sub-protocol is enabled",
+ Usage: "Enable whisper",
}
JSpathFlag = cli.StringFlag{
Name: "jspath",
@@ -317,7 +317,7 @@ func GetChain(ctx *cli.Context) (*core.ChainManager, common.Database, common.Dat
eventMux := new(event.TypeMux)
chainManager := core.NewChainManager(blockDb, stateDb, eventMux)
pow := ethash.New(chainManager)
- txPool := core.NewTxPool(eventMux, chainManager.State)
+ txPool := core.NewTxPool(eventMux, chainManager.State, chainManager.GasLimit)
blockProcessor := core.NewBlockProcessor(stateDb, extraDb, pow, txPool, chainManager, eventMux)
chainManager.SetProcessor(blockProcessor)
@@ -330,7 +330,7 @@ func GetAccountManager(ctx *cli.Context) *accounts.Manager {
return accounts.NewManager(ks)
}
-func StartRPC(eth *eth.Ethereum, ctx *cli.Context) {
+func StartRPC(eth *eth.Ethereum, ctx *cli.Context) error {
config := rpc.RpcConfig{
ListenAddress: ctx.GlobalString(RPCListenAddrFlag.Name),
ListenPort: uint(ctx.GlobalInt(RPCPortFlag.Name)),
@@ -338,7 +338,7 @@ func StartRPC(eth *eth.Ethereum, ctx *cli.Context) {
}
xeth := xeth.New(eth, nil)
- _ = rpc.Start(xeth, config)
+ return rpc.Start(xeth, config)
}
func StartPProf(ctx *cli.Context) {
diff --git a/common/natspec/natspec_e2e_test.go b/common/natspec/natspec_e2e_test.go
index 6bdaec8a1..e54b9ee96 100644
--- a/common/natspec/natspec_e2e_test.go
+++ b/common/natspec/natspec_e2e_test.go
@@ -284,6 +284,7 @@ func (self *testFrontend) testResolver() *resolver.Resolver {
}
func TestNatspecE2E(t *testing.T) {
+ t.Skip()
tf := testInit(t)
defer tf.ethereum.Stop()
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 9b4911fba..4512a5493 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -64,7 +64,7 @@ func newBlockFromParent(addr common.Address, parent *types.Block) *types.Block {
header.Difficulty = CalcDifficulty(block.Header(), parent.Header())
header.Number = new(big.Int).Add(parent.Header().Number, common.Big1)
header.Time = parent.Header().Time + 10
- header.GasLimit = CalcGasLimit(parent, block)
+ header.GasLimit = CalcGasLimit(parent)
block.Td = parent.Td
@@ -79,7 +79,7 @@ func makeBlock(bman *BlockProcessor, parent *types.Block, i int, db common.Datab
block := newBlockFromParent(addr, parent)
state := state.New(block.Root(), db)
cbase := state.GetOrNewStateObject(addr)
- cbase.SetGasPool(CalcGasLimit(parent, block))
+ cbase.SetGasPool(CalcGasLimit(parent))
cbase.AddBalance(BlockReward)
state.Update()
block.SetRoot(state.Root())
@@ -124,7 +124,7 @@ func newChainManager(block *types.Block, eventMux *event.TypeMux, db common.Data
// block processor with fake pow
func newBlockProcessor(db common.Database, cman *ChainManager, eventMux *event.TypeMux) *BlockProcessor {
chainMan := newChainManager(nil, eventMux, db)
- txpool := NewTxPool(eventMux, chainMan.State)
+ txpool := NewTxPool(eventMux, chainMan.State, chainMan.GasLimit)
bman := NewBlockProcessor(db, db, FakePow{}, txpool, chainMan, eventMux)
return bman
}
diff --git a/core/chain_manager.go b/core/chain_manager.go
index a09b2e63b..fb2af0280 100644
--- a/core/chain_manager.go
+++ b/core/chain_manager.go
@@ -54,11 +54,7 @@ func CalculateTD(block, parent *types.Block) *big.Int {
return td
}
-func CalcGasLimit(parent, block *types.Block) *big.Int {
- if block.Number().Cmp(big.NewInt(0)) == 0 {
- return common.BigPow(10, 6)
- }
-
+func CalcGasLimit(parent *types.Block) *big.Int {
// ((1024-1) * parent.gasLimit + (gasUsed * 6 / 5)) / 1024
previous := new(big.Int).Mul(big.NewInt(1024-1), parent.GasLimit())
current := new(big.Rat).Mul(new(big.Rat).SetInt(parent.GasUsed()), big.NewRat(6, 5))
@@ -78,11 +74,12 @@ type ChainManager struct {
eventMux *event.TypeMux
genesisBlock *types.Block
// Last known total difficulty
- mu sync.RWMutex
- tsmu sync.RWMutex
- td *big.Int
- currentBlock *types.Block
- lastBlockHash common.Hash
+ mu sync.RWMutex
+ tsmu sync.RWMutex
+ td *big.Int
+ currentBlock *types.Block
+ lastBlockHash common.Hash
+ currentGasLimit *big.Int
transState *state.StateDB
txState *state.ManagedState
@@ -95,12 +92,13 @@ type ChainManager struct {
func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *ChainManager {
bc := &ChainManager{
- blockDb: blockDb,
- stateDb: stateDb,
- genesisBlock: GenesisBlock(stateDb),
- eventMux: mux,
- quit: make(chan struct{}),
- cache: NewBlockCache(blockCacheLimit),
+ blockDb: blockDb,
+ stateDb: stateDb,
+ genesisBlock: GenesisBlock(stateDb),
+ eventMux: mux,
+ quit: make(chan struct{}),
+ cache: NewBlockCache(blockCacheLimit),
+ currentGasLimit: new(big.Int),
}
bc.setLastBlock()
@@ -157,6 +155,10 @@ func (self *ChainManager) Td() *big.Int {
return self.td
}
+func (self *ChainManager) GasLimit() *big.Int {
+ return self.currentGasLimit
+}
+
func (self *ChainManager) LastBlockHash() common.Hash {
self.mu.RLock()
defer self.mu.RUnlock()
@@ -271,7 +273,7 @@ func (bc *ChainManager) NewBlock(coinbase common.Address) *types.Block {
header := block.Header()
header.Difficulty = CalcDifficulty(block.Header(), parent.Header())
header.Number = new(big.Int).Add(parent.Header().Number, common.Big1)
- header.GasLimit = CalcGasLimit(parent, block)
+ header.GasLimit = CalcGasLimit(parent)
}
@@ -652,6 +654,7 @@ out:
// We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long
// and in most cases isn't even necessary.
if i+1 == ev.canonicalCount {
+ self.currentGasLimit = CalcGasLimit(event.Block)
self.eventMux.Post(ChainHeadEvent{event.Block})
}
case ChainSplitEvent:
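CalcGasLimit now depends only on the parent block, moving the limit toward 6/5 of the parent's gas used with a 1/1024 weight per block, per the formula quoted in the comment above. A sketch of just the weighted-average step visible in this hunk; any clamping the full function applies afterwards is outside the diff and omitted, and calcGasLimit here is an illustrative stand-in:

package main

import (
	"fmt"
	"math/big"
)

// calcGasLimit applies ((1024-1)*parentGasLimit + parentGasUsed*6/5) / 1024.
func calcGasLimit(parentGasLimit, parentGasUsed *big.Int) *big.Int {
	previous := new(big.Int).Mul(big.NewInt(1024-1), parentGasLimit)
	current := new(big.Rat).Mul(new(big.Rat).SetInt(parentGasUsed), big.NewRat(6, 5))
	sum := new(big.Rat).Add(new(big.Rat).SetInt(previous), current)
	result := new(big.Rat).Quo(sum, big.NewRat(1024, 1))
	return new(big.Int).Quo(result.Num(), result.Denom())
}

func main() {
	limit := big.NewInt(3141592)
	used := big.NewInt(3141592) // parent was full: the limit drifts upward
	fmt.Println(calcGasLimit(limit, used))
}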
diff --git a/core/chain_manager_test.go b/core/chain_manager_test.go
index f16c0f0c3..c2911150a 100644
--- a/core/chain_manager_test.go
+++ b/core/chain_manager_test.go
@@ -256,7 +256,7 @@ func TestChainInsertions(t *testing.T) {
var eventMux event.TypeMux
chainMan := NewChainManager(db, db, &eventMux)
- txPool := NewTxPool(&eventMux, chainMan.State)
+ txPool := NewTxPool(&eventMux, chainMan.State, func() *big.Int { return big.NewInt(100000000) })
blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux)
chainMan.SetProcessor(blockMan)
@@ -302,7 +302,7 @@ func TestChainMultipleInsertions(t *testing.T) {
}
var eventMux event.TypeMux
chainMan := NewChainManager(db, db, &eventMux)
- txPool := NewTxPool(&eventMux, chainMan.State)
+ txPool := NewTxPool(&eventMux, chainMan.State, func() *big.Int { return big.NewInt(100000000) })
blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux)
chainMan.SetProcessor(blockMan)
done := make(chan bool, max)
diff --git a/core/filter.go b/core/filter.go
index a924709f2..c10fb7eeb 100644
--- a/core/filter.go
+++ b/core/filter.go
@@ -131,17 +131,26 @@ Logs:
logTopics := make([]common.Hash, len(self.topics))
copy(logTopics, log.Topics)
+ // If the number of filter topics is greater than the number of topics in
+ // the log, the log cannot match; skip it.
+ if len(self.topics) > len(log.Topics) {
+ continue Logs
+ }
+
for i, topics := range self.topics {
+ var match bool
for _, topic := range topics {
- var match bool
// common.Hash{} is a match all (wildcard)
if (topic == common.Hash{}) || log.Topics[i] == topic {
match = true
- }
- if !match {
- continue Logs
+ break
}
}
+
+ if !match {
+ continue Logs
+ }
+
}
ret = append(ret, log)
@@ -168,7 +177,7 @@ func (self *Filter) bloomFilter(block *types.Block) bool {
for _, sub := range self.topics {
var included bool
for _, topic := range sub {
- if types.BloomLookup(block.Bloom(), topic) {
+ if (topic == common.Hash{}) || types.BloomLookup(block.Bloom(), topic) {
included = true
break
}
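
Restated outside the Filter type, the rule implemented by the topic-matching hunk above is: the log must have at least as many topics as the filter, each filter position is an OR over its alternatives (with the zero hash acting as a wildcard), and every position must match. A small sketch of just that check; matchTopics is not part of the patch and exists only for this illustration.

// matchTopics restates the per-log check from the loop labelled Logs above.
func matchTopics(filter [][]common.Hash, logTopics []common.Hash) bool {
	// A log with fewer topics than the filter can never satisfy every position.
	if len(filter) > len(logTopics) {
		return false
	}
	for i, alternatives := range filter {
		var match bool
		for _, topic := range alternatives {
			// common.Hash{} is a match-all (wildcard).
			if (topic == common.Hash{}) || logTopics[i] == topic {
				match = true
				break
			}
		}
		if !match {
			return false
		}
	}
	return true
}
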
diff --git a/core/transaction_pool.go b/core/transaction_pool.go
index 392e17856..8543aa017 100644
--- a/core/transaction_pool.go
+++ b/core/transaction_pool.go
@@ -20,9 +20,11 @@ import (
var (
ErrInvalidSender = errors.New("Invalid sender")
ErrNonce = errors.New("Nonce too low")
+ ErrBalance = errors.New("Insufficient balance")
ErrNonExistentAccount = errors.New("Account does not exist")
- ErrInsufficientFunds = errors.New("Insufficient funds")
+ ErrInsufficientFunds = errors.New("Insufficient funds for gas * price + value")
ErrIntrinsicGas = errors.New("Intrinsic gas too low")
+ ErrGasLimit = errors.New("Exceeds block gas limit")
)
const txPoolQueueSize = 50
@@ -52,6 +54,8 @@ type TxPool struct {
quit chan bool
// The state function which will allow us to do some pre checkes
currentState stateFn
+ // The current gas limit function callback
+ gasLimit func() *big.Int
// The actual pool
txs map[common.Hash]*types.Transaction
invalidHashes *set.Set
@@ -63,7 +67,7 @@ type TxPool struct {
eventMux *event.TypeMux
}
-func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn) *TxPool {
+func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
txPool := &TxPool{
txs: make(map[common.Hash]*types.Transaction),
queue: make(map[common.Address]types.Transactions),
@@ -72,6 +76,7 @@ func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn) *TxPool {
eventMux: eventMux,
invalidHashes: set.New(),
currentState: currentStateFn,
+ gasLimit: gasLimitFn,
}
return txPool
}
@@ -116,7 +121,13 @@ func (pool *TxPool) ValidateTransaction(tx *types.Transaction) error {
return ErrNonExistentAccount
}
- if pool.currentState().GetBalance(from).Cmp(new(big.Int).Mul(tx.Price, tx.GasLimit)) < 0 {
+ if pool.gasLimit().Cmp(tx.GasLimit) < 0 {
+ return ErrGasLimit
+ }
+
+ total := new(big.Int).Mul(tx.Price, tx.GasLimit)
+ total.Add(total, tx.Value())
+ if pool.currentState().GetBalance(from).Cmp(total) < 0 {
return ErrInsufficientFunds
}
@@ -185,7 +196,7 @@ func (self *TxPool) AddTransactions(txs []*types.Transaction) {
for _, tx := range txs {
if err := self.add(tx); err != nil {
- glog.V(logger.Debug).Infoln(err)
+ glog.V(logger.Debug).Infoln("tx error:", err)
} else {
h := tx.Hash()
glog.V(logger.Debug).Infof("tx %x\n", h[:4])
@@ -288,7 +299,6 @@ func (pool *TxPool) checkQueue() {
pool.addTx(tx)
}
- //pool.queue[address] = txs[i:]
// delete the entire queue entry if it's empty. There's no need to keep it
if len(pool.queue[address]) == 0 {
delete(pool.queue, address)
@@ -300,12 +310,10 @@ func (pool *TxPool) validatePool() {
pool.mu.Lock()
defer pool.mu.Unlock()
- statedb := pool.currentState()
for hash, tx := range pool.txs {
- from, _ := tx.From()
- if nonce := statedb.GetNonce(from); nonce > tx.Nonce() {
- if glog.V(logger.Debug) {
- glog.Infof("removed tx (%x) from pool due to nonce error. state=%d tx=%d\n", hash[:4], nonce, tx.Nonce())
+ if err := pool.ValidateTransaction(tx); err != nil {
+ if glog.V(logger.Info) {
+ glog.Infof("removed tx (%x) from pool: %v\n", hash[:4], err)
}
delete(pool.txs, hash)
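
The two checks added to ValidateTransaction above can be read in isolation: a transaction is rejected if its gas limit exceeds the current block gas limit, or if the sender's balance cannot cover gasPrice * gasLimit plus the transferred value. A minimal sketch of just that logic; checkTxAffordable is not part of the patch, and the inputs are passed in directly instead of being read from the pool and state.

// checkTxAffordable restates the new ValidateTransaction checks; all values
// are supplied by the caller for illustration.
func checkTxAffordable(blockGasLimit, balance, gasPrice, gasLimit, value *big.Int) error {
	// Reject transactions that could never fit in a block.
	if blockGasLimit.Cmp(gasLimit) < 0 {
		return ErrGasLimit
	}
	// The sender must be able to pay for the gas up front plus the value sent.
	total := new(big.Int).Mul(gasPrice, gasLimit)
	total.Add(total, value)
	if balance.Cmp(total) < 0 {
		return ErrInsufficientFunds
	}
	return nil
}
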
diff --git a/core/transaction_pool_test.go b/core/transaction_pool_test.go
index 0e049139e..4d66776f0 100644
--- a/core/transaction_pool_test.go
+++ b/core/transaction_pool_test.go
@@ -23,7 +23,7 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
var m event.TypeMux
key, _ := crypto.GenerateKey()
- return NewTxPool(&m, func() *state.StateDB { return statedb }), key
+ return NewTxPool(&m, func() *state.StateDB { return statedb }, func() *big.Int { return big.NewInt(1000000) }), key
}
func TestInvalidTransactions(t *testing.T) {
@@ -43,10 +43,11 @@ func TestInvalidTransactions(t *testing.T) {
t.Error("expected", ErrInsufficientFunds)
}
- pool.currentState().AddBalance(from, big.NewInt(100*100))
+ balance := new(big.Int).Add(tx.Value(), new(big.Int).Mul(tx.Gas(), tx.GasPrice()))
+ pool.currentState().AddBalance(from, balance)
err = pool.Add(tx)
if err != ErrIntrinsicGas {
- t.Error("expected", ErrIntrinsicGas)
+ t.Error("expected", ErrIntrinsicGas, "got", err)
}
pool.currentState().SetNonce(from, 1)
diff --git a/crypto/crypto.go b/crypto/crypto.go
index 89423e0c4..3c5783014 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -179,6 +179,19 @@ func Decrypt(prv *ecdsa.PrivateKey, ct []byte) ([]byte, error) {
return key.Decrypt(rand.Reader, ct, nil, nil)
}
+// Used only by block tests.
+func ImportBlockTestKey(privKeyBytes []byte) error {
+ ks := NewKeyStorePassphrase(common.DefaultDataDir() + "/keys")
+ ecKey := ToECDSA(privKeyBytes)
+ key := &Key{
+ Id: uuid.NewRandom(),
+ Address: PubkeyToAddress(ecKey.PublicKey),
+ PrivateKey: ecKey,
+ }
+ err := ks.StoreKey(key, "")
+ return err
+}
+
// creates a Key and stores that in the given KeyStore by decrypting a presale key JSON
func ImportPreSaleKey(keyStore KeyStore2, keyJSON []byte, password string) (*Key, error) {
key, err := decryptPreSaleKey(keyJSON, password)
diff --git a/crypto/key_store_plain.go b/crypto/key_store_plain.go
index 338a4a2c3..9bbaf1c15 100644
--- a/crypto/key_store_plain.go
+++ b/crypto/key_store_plain.go
@@ -117,13 +117,12 @@ func GetKeyAddresses(keysDirPath string) (addresses [][]byte, err error) {
if err != nil {
return nil, err
}
- addresses = make([][]byte, len(fileInfos))
- for i, fileInfo := range fileInfos {
+ for _, fileInfo := range fileInfos {
address, err := hex.DecodeString(fileInfo.Name())
if err != nil {
continue
}
- addresses[i] = address
+ addresses = append(addresses, address)
}
return addresses, err
}
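
The GetKeyAddresses change above switches from a pre-sized slice to append so that directory entries whose names are not hex-encoded addresses are skipped rather than leaving zero-value entries behind. A tiny illustration with invented file names:

// With the old pre-sized slice, "not-hex" would have left a nil entry at
// index 1; with append the bad entry is simply skipped.
names := []string{"c0ffee", "not-hex", "deadbeef"}
var addresses [][]byte
for _, name := range names {
	address, err := hex.DecodeString(name)
	if err != nil {
		continue
	}
	addresses = append(addresses, address)
}
// len(addresses) == 2
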
diff --git a/eth/backend.go b/eth/backend.go
index 28640b63d..c5fa328b0 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -219,15 +219,17 @@ func New(config *Config) (*Ethereum, error) {
}
eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.EventMux())
- eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.InsertChain, eth.chainManager.Td)
+ eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.InsertChain)
eth.pow = ethash.New(eth.chainManager)
- eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State)
+ eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit)
eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux())
eth.chainManager.SetProcessor(eth.blockProcessor)
- eth.whisper = whisper.New()
- eth.shhVersionId = int(eth.whisper.Version())
eth.miner = miner.New(eth, eth.pow, config.MinerThreads)
eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.eventMux, eth.txPool, eth.chainManager, eth.downloader)
+ if config.Shh {
+ eth.whisper = whisper.New()
+ eth.shhVersionId = int(eth.whisper.Version())
+ }
netprv, err := config.nodeKey()
if err != nil {
@@ -325,10 +327,9 @@ func (s *Ethereum) StartMining() error {
err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
glog.V(logger.Error).Infoln(err)
return err
-
}
- s.miner.Start(eb)
+ go s.miner.Start(eb)
return nil
}
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index cfc494b2f..60d908758 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -39,7 +39,6 @@ var (
type hashCheckFn func(common.Hash) bool
type chainInsertFn func(types.Blocks) error
type hashIterFn func() (common.Hash, error)
-type currentTdFn func() *big.Int
type blockPack struct {
peerId string
@@ -61,7 +60,6 @@ type Downloader struct {
// Callbacks
hasBlock hashCheckFn
insertChain chainInsertFn
- currentTd currentTdFn
// Status
fetchingHashes int32
@@ -70,27 +68,20 @@ type Downloader struct {
// Channels
newPeerCh chan *peer
- syncCh chan syncPack
hashCh chan []common.Hash
blockCh chan blockPack
- quit chan struct{}
}
-func New(hasBlock hashCheckFn, insertChain chainInsertFn, currentTd currentTdFn) *Downloader {
+func New(hasBlock hashCheckFn, insertChain chainInsertFn) *Downloader {
downloader := &Downloader{
queue: newqueue(),
peers: make(peers),
hasBlock: hasBlock,
insertChain: insertChain,
- currentTd: currentTd,
newPeerCh: make(chan *peer, 1),
- syncCh: make(chan syncPack, 1),
hashCh: make(chan []common.Hash, 1),
blockCh: make(chan blockPack, 1),
- quit: make(chan struct{}),
}
- go downloader.peerHandler()
- go downloader.update()
return downloader
}
@@ -99,18 +90,17 @@ func (d *Downloader) Stats() (current int, max int) {
return d.queue.blockHashes.Size(), d.queue.fetchPool.Size() + d.queue.hashPool.Size()
}
-func (d *Downloader) RegisterPeer(id string, td *big.Int, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {
+func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {
d.mu.Lock()
defer d.mu.Unlock()
- glog.V(logger.Detail).Infoln("Register peer", id, "TD =", td)
+ glog.V(logger.Detail).Infoln("Register peer", id)
// Create a new peer and add it to the list of known peers
- peer := newPeer(id, td, hash, getHashes, getBlocks)
+ peer := newPeer(id, hash, getHashes, getBlocks)
// add peer to our peer set
d.peers[id] = peer
// broadcast new peer
- d.newPeerCh <- peer
return nil
}
@@ -125,72 +115,59 @@ func (d *Downloader) UnregisterPeer(id string) {
delete(d.peers, id)
}
-func (d *Downloader) peerHandler() {
- // itimer is used to determine when to start ignoring `minDesiredPeerCount`
- itimer := time.NewTimer(peerCountTimeout)
-out:
- for {
- select {
- case <-d.newPeerCh:
- // Meet the `minDesiredPeerCount` before we select our best peer
- if len(d.peers) < minDesiredPeerCount {
- break
- }
- itimer.Stop()
-
- d.selectPeer(d.peers.bestPeer())
- case <-itimer.C:
- // The timer will make sure that the downloader keeps an active state
- // in which it attempts to always check the network for highest td peers
- // Either select the peer or restart the timer if no peers could
- // be selected.
- if peer := d.peers.bestPeer(); peer != nil {
- d.selectPeer(d.peers.bestPeer())
- } else {
- itimer.Reset(5 * time.Second)
- }
- case <-d.quit:
- break out
- }
- }
-}
-
-func (d *Downloader) selectPeer(p *peer) {
+// Synchronise will fetch the peer with the given id and use it for synchronising
+// the chain. If the peer cannot be found, or if fetching hashes or blocks fails,
+// an error is returned. This method is synchronous.
+func (d *Downloader) Synchronise(id string, hash common.Hash) error {
// Make sure it's doing neither. Once done we can restart the
// downloading process if the TD is higher. For now just get on
// with whatever is going on. This prevents unecessary switching.
if d.isBusy() {
- return
+ return errBusy
}
- // selected peer must be better than our own
- // XXX we also check the peer's recent hash to make sure we
- // don't have it. Some peers report (i think) incorrect TD.
- if p.td.Cmp(d.currentTd()) <= 0 || d.hasBlock(p.recentHash) {
- return
+
+ // Fetch the peer using the id, or return an error if the peer couldn't be found
+ p := d.peers[id]
+ if p == nil {
+ return errUnknownPeer
}
- glog.V(logger.Detail).Infoln("New peer with highest TD =", p.td)
- d.syncCh <- syncPack{p, p.recentHash, false}
+ // Get the hash from the peer and initiate the download process.
+ err := d.getFromPeer(p, hash, false)
+ if err != nil {
+ return err
+ }
+ return d.process(p)
}
-func (d *Downloader) update() {
-out:
- for {
- select {
- case sync := <-d.syncCh:
- var peer *peer = sync.peer
- err := d.getFromPeer(peer, sync.hash, sync.ignoreInitial)
- if err != nil {
- glog.V(logger.Detail).Infoln(err)
- break
- }
+func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error {
+ d.activePeer = p.id
+
+ glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
+ // Start the fetcher. This will block the update entirely;
+ // interrupts need to be sent to the appropriate channels
+ // respectively.
+ if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
+ // handle error
+ glog.V(logger.Debug).Infoln("Error fetching hashes:", err)
+ // XXX Reset
+ return err
+ }
- d.process()
- case <-d.quit:
- break out
- }
+ // Start fetching blocks in parallel. The strategy is simple:
+ // take any available peers, reserve a chunk for each peer available,
+ // let the peer deliver the chunk and periodically check if a peer
+ // has timed out. When done downloading, process blocks.
+ if err := d.startFetchingBlocks(p); err != nil {
+ glog.V(logger.Debug).Infoln("Error downloading blocks:", err)
+ // XXX reset
+ return err
}
+
+ glog.V(logger.Detail).Infoln("Sync completed")
+
+ return nil
}
// XXX Make synchronous
@@ -403,13 +380,12 @@ func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) error
}
peer.mu.Lock()
- peer.td = td
peer.recentHash = block.Hash()
peer.mu.Unlock()
peer.promote()
glog.V(logger.Detail).Infoln("Inserting new block from:", id)
- d.queue.addBlock(id, block, td)
+ d.queue.addBlock(id, block)
// if neither go ahead to process
if d.isBusy() {
@@ -429,10 +405,10 @@ func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) error
}
}
- return d.process()
+ return d.process(peer)
}
-func (d *Downloader) process() error {
+func (d *Downloader) process(peer *peer) error {
atomic.StoreInt32(&d.processingBlocks, 1)
defer atomic.StoreInt32(&d.processingBlocks, 0)
@@ -458,18 +434,8 @@ func (d *Downloader) process() error {
// grandparents can be requested and queued.
err = d.insertChain(blocks[:max])
if err != nil && core.IsParentErr(err) {
- glog.V(logger.Debug).Infoln("Aborting process due to missing parent. Fetching hashes")
-
- // TODO change this. This shite
- for i, block := range blocks[:max] {
- if !d.hasBlock(block.ParentHash()) {
- d.syncCh <- syncPack{d.peers.bestPeer(), block.Hash(), true}
- // remove processed blocks
- blocks = blocks[i:]
+ glog.V(logger.Debug).Infoln("Aborting process due to missing parent.")
- break
- }
- }
break
} else if err != nil {
// immediatly unregister the false peer but do not disconnect
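
With the peer handler, sync channel and TD callback gone, the downloader above is now driven synchronously by its caller. Below is a hypothetical call site, sketched against the signatures visible in these hunks; recentHash, getHashes and getBlocks are placeholders for the peer's head hash and fetcher callbacks, whose definitions are omitted here.

// hasBlock and insertChain mirror hashCheckFn and chainInsertFn from the hunks
// above; recentHash, getHashes and getBlocks are placeholders.
hasBlock := func(common.Hash) bool { return false }
insertChain := func(types.Blocks) error { return nil }

d := downloader.New(hasBlock, insertChain)
if err := d.RegisterPeer("peer1", recentHash, getHashes, getBlocks); err != nil {
	glog.V(logger.Debug).Infoln("register failed:", err)
}
// Synchronise blocks until peer1's hashes and blocks have been fetched and
// handed to insertChain, or returns an error (errBusy, errUnknownPeer, ...).
if err := d.Synchronise("peer1", recentHash); err != nil {
	glog.V(logger.Debug).Infoln("sync failed:", err)
}
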
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 1d449cfba..8843ca0c7 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -49,7 +49,7 @@ type downloadTester struct {
func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester {
tester := &downloadTester{t: t, hashes: hashes, blocks: blocks, done: make(chan bool)}
- downloader := New(tester.hasBlock, tester.insertChain, func() *big.Int { return new(big.Int) })
+ downloader := New(tester.hasBlock, tester.insertChain)
tester.downloader = downloader
return tester
@@ -65,10 +65,6 @@ func (dl *downloadTester) hasBlock(hash common.Hash) bool {
func (dl *downloadTester) insertChain(blocks types.Blocks) error {
dl.insertedBlocks += len(blocks)
- if len(dl.blocks)-1 <= dl.insertedBlocks {
- dl.done <- true
- }
-
return nil
}
@@ -93,14 +89,14 @@ func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error {
func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) {
dl.pcount++
- dl.downloader.RegisterPeer(id, td, hash, dl.getHashes, dl.getBlocks(id))
+ dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id))
}
func (dl *downloadTester) badBlocksPeer(id string, td *big.Int, hash common.Hash) {
dl.pcount++
// This bad peer never returns any blocks
- dl.downloader.RegisterPeer(id, td, hash, dl.getHashes, func([]common.Hash) error {
+ dl.downloader.RegisterPeer(id, hash, dl.getHashes, func([]common.Hash) error {
return nil
})
}
@@ -112,7 +108,8 @@ func TestDownload(t *testing.T) {
minDesiredPeerCount = 4
blockTtl = 1 * time.Second
- hashes := createHashes(0, 1000)
+ targetBlocks := 1000
+ hashes := createHashes(0, targetBlocks)
blocks := createBlocksFromHashes(hashes)
tester := newTester(t, hashes, blocks)
@@ -121,21 +118,21 @@ func TestDownload(t *testing.T) {
tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
-success:
- select {
- case <-tester.done:
- break success
- case <-time.After(10 * time.Second): // XXX this could actually fail on a slow computer
- t.Error("timeout")
+ err := tester.downloader.Synchronise("peer1", hashes[0])
+ if err != nil {
+ t.Error("download error", err)
+ }
+
+ if tester.insertedBlocks != targetBlocks {
+ t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
}
}
func TestMissing(t *testing.T) {
- t.Skip()
-
glog.SetV(logger.Detail)
glog.SetToStderr(true)
+ targetBlocks := 1000
hashes := createHashes(0, 1000)
extraHashes := createHashes(1001, 1003)
blocks := createBlocksFromHashes(append(extraHashes, hashes...))
@@ -146,13 +143,12 @@ func TestMissing(t *testing.T) {
hashes = append(extraHashes, hashes[:len(hashes)-1]...)
tester.newPeer("peer2", big.NewInt(0), common.Hash{})
-success1:
- select {
- case <-tester.done:
- break success1
- case <-time.After(10 * time.Second): // XXX this could actually fail on a slow computer
- t.Error("timout")
+ err := tester.downloader.Synchronise("peer1", hashes[0])
+ if err != nil {
+ t.Error("download error", err)
}
- tester.downloader.AddBlock("peer2", blocks[hashes[len(hashes)-1]], big.NewInt(10001))
+ if tester.insertedBlocks != targetBlocks {
+ t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
+ }
}
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index bcb8ad43a..91977f592 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -2,7 +2,6 @@ package downloader
import (
"errors"
- "math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
@@ -51,16 +50,6 @@ func (p peers) getPeer(id string) *peer {
return p[id]
}
-func (p peers) bestPeer() *peer {
- var peer *peer
- for _, cp := range p {
- if peer == nil || cp.td.Cmp(peer.td) > 0 {
- peer = cp
- }
- }
- return peer
-}
-
// peer represents an active peer
type peer struct {
state int // Peer state (working, idle)
@@ -68,7 +57,6 @@ type peer struct {
mu sync.RWMutex
id string
- td *big.Int
recentHash common.Hash
ignored *set.Set
@@ -78,10 +66,9 @@ type peer struct {
}
// create a new peer
-func newPeer(id string, td *big.Int, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) *peer {
+func newPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) *peer {
return &peer{
id: id,
- td: td,
recentHash: hash,
getHashes: getHashes,
getBlocks: getBlocks,
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index adbc2a0d0..a21a44706 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -2,7 +2,6 @@ package downloader
import (
"math"
- "math/big"
"sync"
"time"
@@ -93,7 +92,7 @@ func (c *queue) has(hash common.Hash) bool {
return c.hashPool.Has(hash) || c.fetchPool.Has(hash)
}
-func (c *queue) addBlock(id string, block *types.Block, td *big.Int) {
+func (c *queue) addBlock(id string, block *types.Block) {
c.mu.Lock()
defer c.mu.Unlock()
diff --git a/eth/downloader/synchronous.go b/eth/downloader/synchronous.go
deleted file mode 100644
index 7bb49d24e..000000000
--- a/eth/downloader/synchronous.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package downloader
-
-import (
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/logger"
- "github.com/ethereum/go-ethereum/logger/glog"
-)
-
-// THIS IS PENDING AND TO DO CHANGES FOR MAKING THE DOWNLOADER SYNCHRONOUS
-
-// SynchroniseWithPeer will select the peer and use it for synchronising. If an empty string is given
-// it will use the best peer possible and synchronise if it's TD is higher than our own. If any of the
-// checks fail an error will be returned. This method is synchronous
-func (d *Downloader) SynchroniseWithPeer(id string) (types.Blocks, error) {
- // Check if we're busy
- if d.isBusy() {
- return nil, errBusy
- }
-
- // Attempt to select a peer. This can either be nothing, which returns, best peer
- // or selected peer. If no peer could be found an error will be returned
- var p *peer
- if len(id) == 0 {
- p = d.peers[id]
- if p == nil {
- return nil, errUnknownPeer
- }
- } else {
- p = d.peers.bestPeer()
- }
-
- // Make sure our td is lower than the peer's td
- if p.td.Cmp(d.currentTd()) <= 0 || d.hasBlock(p.recentHash) {
- return nil, errLowTd
- }
-
- // Get the hash from the peer and initiate the downloading progress.
- err := d.getFromPeer(p, p.recentHash, false)
- if err != nil {
- return nil, err
- }
-
- return d.queue.blocks, nil
-}
-
-// Synchronise will synchronise using the best peer.
-func (d *Downloader) Synchronise() (types.Blocks, error) {
- return d.SynchroniseWithPeer("")
-}
-
-func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error {
- d.activePeer = p.id
-
- glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
- // Start the fetcher. This will block the update entirely
- // interupts need to be send to the appropriate channels
- // respectively.
- if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
- // handle error
- glog.V(logger.Debug).Infoln("Error fetching hashes:", err)
- // XXX Reset
- return err
- }
-
- // Start fetching blocks in paralel. The strategy is simple
- // take any available peers, seserve a chunk for each peer available,
- // let the peer deliver the chunkn and periodically check if a peer
- // has timedout. When done downloading, process blocks.
- if err := d.startFetchingBlocks(p); err != nil {
- glog.V(logger.Debug).Infoln("Error downloading blocks:", err)
- // XXX reset
- return err
- }
-
- glog.V(logger.Detail).Infoln("Sync completed")
-
- return nil
-}
diff --git a/eth/handler.go b/eth/handler.go
index d466dbfee..d00d00f23 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -39,6 +39,7 @@ import (
"math"
"math/big"
"sync"
+ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
@@ -51,6 +52,11 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
+const (
+ peerCountTimeout = 12 * time.Second // Amount of time before the sync handler ignores minDesiredPeerCount
+ minDesiredPeerCount = 5 // Number of peers desired before syncing starts
+)
+
func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}
@@ -82,6 +88,9 @@ type ProtocolManager struct {
eventMux *event.TypeMux
txSub event.Subscription
minedBlockSub event.Subscription
+
+ newPeerCh chan *peer
+ quitSync chan struct{}
}
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
@@ -93,6 +102,8 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
chainman: chainman,
downloader: downloader,
peers: make(map[string]*peer),
+ newPeerCh: make(chan *peer, 1),
+ quitSync: make(chan struct{}),
}
manager.SubProtocol = p2p.Protocol{
@@ -101,16 +112,67 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
Length: ProtocolLength,
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(protocolVersion, networkId, p, rw)
- err := manager.handle(peer)
- //glog.V(logger.Detail).Infof("[%s]: %v\n", peer.id, err)
- return err
+ manager.newPeerCh <- peer
+
+ return manager.handle(peer)
},
}
return manager
}
+func (pm *ProtocolManager) syncHandler() {
+ // itimer is used to determine when to start ignoring `minDesiredPeerCount`
+ itimer := time.NewTimer(peerCountTimeout)
+out:
+ for {
+ select {
+ case <-pm.newPeerCh:
+ // Meet the `minDesiredPeerCount` before we select our best peer
+ if len(pm.peers) < minDesiredPeerCount {
+ break
+ }
+
+ // Find the best peer
+ peer := getBestPeer(pm.peers)
+ if peer == nil {
+ glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available")
+ }
+
+ itimer.Stop()
+ go pm.synchronise(peer)
+ case <-itimer.C:
+ // The timer will make sure that the downloader keeps an active state
+ // in which it attempts to always check the network for highest td peers
+ // Either select the peer or restart the timer if no peers could
+ // be selected.
+ if peer := getBestPeer(pm.peers); peer != nil {
+ go pm.synchronise(peer)
+ } else {
+ itimer.Reset(5 * time.Second)
+ }
+ case <-pm.quitSync:
+ break out
+ }
+ }
+}
+
+func (pm *ProtocolManager) synchronise(peer *peer) {
+ // Make sure the peer's TD is higher than our own. If not drop.
+ if peer.td.Cmp(pm.chainman.Td()) <= 0 {
+ return
+ }
+
+ glog.V(logger.Info).Infof("Synchronisation attempt using %s TD=%v\n", peer.id, peer.td)
+ // Get the hashes from the peer (synchronously)
+ err := pm.downloader.Synchronise(peer.id, peer.recentHash)
+ if err != nil {
+ // handle error
+ glog.V(logger.Debug).Infoln("error downloading:", err)
+ }
+}
+
func (pm *ProtocolManager) Start() {
// broadcast transactions
pm.txSub = pm.eventMux.Subscribe(core.TxPreEvent{})
@@ -119,11 +181,15 @@ func (pm *ProtocolManager) Start() {
// broadcast mined blocks
pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
go pm.minedBroadcastLoop()
+
+ // sync handler
+ go pm.syncHandler()
}
func (pm *ProtocolManager) Stop() {
pm.txSub.Unsubscribe() // quits txBroadcastLoop
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
+ close(pm.quitSync) // quits the sync handler
}
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
@@ -141,7 +207,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
pm.peers[p.id] = p
pm.pmu.Unlock()
- pm.downloader.RegisterPeer(p.id, p.td, p.currentHash, p.requestHashes, p.requestBlocks)
+ pm.downloader.RegisterPeer(p.id, p.recentHash, p.requestHashes, p.requestBlocks)
defer func() {
pm.pmu.Lock()
defer pm.pmu.Unlock()
@@ -276,7 +342,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "block validation %v: %v", msg, err)
}
hash := request.Block.Hash()
- // Add the block hash as a known hash to the peer. This will later be used to detirmine
+ // Add the block hash as a known hash to the peer. This will later be used to determine
// who should receive this.
p.blockHashes.Add(hash)
@@ -296,7 +362,6 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
if self.chainman.HasBlock(hash) {
break
}
- /* XXX unsure about this */
if self.chainman.Td().Cmp(request.TD) > 0 && new(big.Int).Add(request.Block.Number(), big.NewInt(7)).Cmp(self.chainman.CurrentBlock().Number()) < 0 {
glog.V(logger.Debug).Infof("[%s] dropped block %v due to low TD %v\n", p.id, request.Block.Number(), request.TD)
break
@@ -305,24 +370,22 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
// Attempt to insert the newly received by checking if the parent exists.
// if the parent exists we process the block and propagate to our peers
// if the parent does not exists we delegate to the downloader.
- // NOTE we can reduce chatter by dropping blocks with Td < currentTd
if self.chainman.HasBlock(request.Block.ParentHash()) {
if err := self.chainman.InsertChain(types.Blocks{request.Block}); err != nil {
// handle error
return nil
}
self.BroadcastBlock(hash, request.Block)
- //fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD)
} else {
// adding blocks is synchronous
go func() {
+ // TODO check parent error
err := self.downloader.AddBlock(p.id, request.Block, request.TD)
if err != nil {
glog.V(logger.Detail).Infoln("downloader err:", err)
return
}
self.BroadcastBlock(hash, request.Block)
- //fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD)
}()
}
default:
diff --git a/eth/peer.go b/eth/peer.go
index ec0c4b1f3..861efaaec 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -25,6 +25,16 @@ type getBlockHashesMsgData struct {
Amount uint64
}
+func getBestPeer(peers map[string]*peer) *peer {
+ var peer *peer
+ for _, cp := range peers {
+ if peer == nil || cp.td.Cmp(peer.td) > 0 {
+ peer = cp
+ }
+ }
+ return peer
+}
+
type peer struct {
*p2p.Peer
@@ -32,9 +42,9 @@ type peer struct {
protv, netid int
- currentHash common.Hash
- id string
- td *big.Int
+ recentHash common.Hash
+ id string
+ td *big.Int
genesis, ourHash common.Hash
ourTd *big.Int
@@ -43,14 +53,14 @@ type peer struct {
blockHashes *set.Set
}
-func newPeer(protv, netid int, genesis, currentHash common.Hash, td *big.Int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
+func newPeer(protv, netid int, genesis, recentHash common.Hash, td *big.Int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID()
return &peer{
Peer: p,
rw: rw,
genesis: genesis,
- ourHash: currentHash,
+ ourHash: recentHash,
ourTd: td,
protv: protv,
netid: netid,
@@ -145,7 +155,7 @@ func (p *peer) handleStatus() error {
// Set the total difficulty of the peer
p.td = status.TD
// set the best hash of the peer
- p.currentHash = status.CurrentBlock
+ p.recentHash = status.CurrentBlock
return <-errc
}
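
getBestPeer, added above, simply picks the registered peer with the highest total difficulty; the sync handler in eth/handler.go then only starts a download if that TD beats the chain's own. A quick self-contained check of the selection rule, with invented ids and TDs:

// Both the map contents and the TD values are made up for illustration.
candidates := map[string]*peer{
	"a": {id: "a", td: big.NewInt(100)},
	"b": {id: "b", td: big.NewInt(250)},
	"c": {id: "c", td: big.NewInt(50)},
}
if best := getBestPeer(candidates); best.id != "b" {
	panic("expected the peer with the highest TD")
}
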
diff --git a/jsre/ethereum_js.go b/jsre/ethereum_js.go
index 2afc51763..f9e491a09 100644
--- a/jsre/ethereum_js.go
+++ b/jsre/ethereum_js.go
@@ -1,3 +1,5 @@
package jsre
-const Ethereum_JS = `require=function t(e,n,r){function o(a,u){if(!n[a]){if(!e[a]){var s="function"==typeof require&&require;if(!u&&s)return s(a,!0);if(i)return i(a,!0);var c=new Error("Cannot find module '"+a+"'");throw c.code="MODULE_NOT_FOUND",c}var l=n[a]={exports:{}};e[a][0].call(l.exports,function(t){var n=e[a][1][t];return o(n?n:t)},l,l.exports,t,e,n,r)}return n[a].exports}for(var i="function"==typeof require&&require,a=0;a<r.length;a++)o(r[a]);return o}({1:[function(t,e){var n=t("../utils/utils"),r=t("../utils/config"),o=t("./types"),i=t("./formatters"),a=function(t){throw new Error("parser does not support type: "+t)},u=function(t){return"[]"===t.slice(-2)},s=function(t,e){return u(t)||"bytes"===t?i.formatInputInt(e.length):""},c=o.inputTypes(),l=function(t,e){var n="",r="",o="";return t.forEach(function(t,r){n+=s(t.type,e[r])}),t.forEach(function(n,i){for(var s=!1,l=0;l<c.length&&!s;l++)s=c[l].type(t[i].type,e[i]);s||a(t[i].type);var f=c[l-1].format;u(t[i].type)?o+=e[i].reduce(function(t,e){return t+f(e)},""):"bytes"===t[i].type?o+=f(e[i]):r+=f(e[i])}),n+=r+o},f=function(t){return u(t)||"bytes"===t?2*r.ETH_PADDING:0},p=o.outputTypes(),m=function(t,e){e=e.slice(2);var n=[],s=2*r.ETH_PADDING,c=t.reduce(function(t,e){return t+f(e.type)},0),l=e.slice(0,c);return e=e.slice(c),t.forEach(function(r,c){for(var f=!1,m=0;m<p.length&&!f;m++)f=p[m].type(t[c].type);f||a(t[c].type);var h=p[m-1].format;if(u(t[c].type)){var d=i.formatOutputUInt(l.slice(0,s));l=l.slice(s);for(var g=[],v=0;d>v;v++)g.push(h(e.slice(0,s))),e=e.slice(s);n.push(g)}else o.prefixedType("bytes")(t[c].type)?(l=l.slice(s),n.push(h(e.slice(0,s))),e=e.slice(s)):(n.push(h(e.slice(0,s))),e=e.slice(s))}),n},h=function(t){var e={};return t.forEach(function(t){var r=n.extractDisplayName(t.name),o=n.extractTypeName(t.name),i=function(){var e=Array.prototype.slice.call(arguments);return l(t.inputs,e)};void 0===e[r]&&(e[r]=i),e[r][o]=i}),e},d=function(t){var e={};return t.forEach(function(t){var r=n.extractDisplayName(t.name),o=n.extractTypeName(t.name),i=function(e){return m(t.outputs,e)};void 0===e[r]&&(e[r]=i),e[r][o]=i}),e};e.exports={inputParser:h,outputParser:d,formatInput:l,formatOutput:m}},{"../utils/config":5,"../utils/utils":6,"./formatters":2,"./types":3}],2:[function(t,e){var n=t("bignumber.js"),r=t("../utils/utils"),o=t("../utils/config"),i=function(t){var e=2*o.ETH_PADDING;return n.config(o.ETH_BIGNUMBER_ROUNDING_MODE),r.padLeft(r.toTwosComplement(t).round().toString(16),e)},a=function(t){return r.fromAscii(t,o.ETH_PADDING).substr(2)},u=function(t){return"000000000000000000000000000000000000000000000000000000000000000"+(t?"1":"0")},s=function(t){return i(new n(t).times(new n(2).pow(128)))},c=function(t){return"1"===new n(t.substr(0,1),16).toString(2).substr(0,1)},l=function(t){return t=t||"0",c(t)?new n(t,16).minus(new n("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",16)).minus(1):new n(t,16)},f=function(t){return t=t||"0",new n(t,16)},p=function(t){return l(t).dividedBy(new n(2).pow(128))},m=function(t){return f(t).dividedBy(new n(2).pow(128))},h=function(t){return"0x"+t},d=function(t){return"0000000000000000000000000000000000000000000000000000000000000001"===t?!0:!1},g=function(t){return 
r.toAscii(t)},v=function(t){return"0x"+t.slice(t.length-40,t.length)};e.exports={formatInputInt:i,formatInputString:a,formatInputBool:u,formatInputReal:s,formatOutputInt:l,formatOutputUInt:f,formatOutputReal:p,formatOutputUReal:m,formatOutputHash:h,formatOutputBool:d,formatOutputString:g,formatOutputAddress:v}},{"../utils/config":5,"../utils/utils":6,"bignumber.js":"bignumber.js"}],3:[function(t,e){var n=t("./formatters"),r=function(t){return function(e){return 0===e.indexOf(t)}},o=function(t){return function(e){return t===e}},i=function(){return[{type:r("uint"),format:n.formatInputInt},{type:r("int"),format:n.formatInputInt},{type:r("bytes"),format:n.formatInputString},{type:r("real"),format:n.formatInputReal},{type:r("ureal"),format:n.formatInputReal},{type:o("address"),format:n.formatInputInt},{type:o("bool"),format:n.formatInputBool}]},a=function(){return[{type:r("uint"),format:n.formatOutputUInt},{type:r("int"),format:n.formatOutputInt},{type:r("bytes"),format:n.formatOutputString},{type:r("real"),format:n.formatOutputReal},{type:r("ureal"),format:n.formatOutputUReal},{type:o("address"),format:n.formatOutputAddress},{type:o("bool"),format:n.formatOutputBool}]};e.exports={prefixedType:r,namedType:o,inputTypes:i,outputTypes:a}},{"./formatters":2}],4:[function(t,e,n){"use strict";n.XMLHttpRequest="undefined"==typeof XMLHttpRequest?{}:XMLHttpRequest},{}],5:[function(t,e){var n=t("bignumber.js"),r=["wei","Kwei","Mwei","Gwei","szabo","finney","ether","grand","Mether","Gether","Tether","Pether","Eether","Zether","Yether","Nether","Dether","Vether","Uether"];e.exports={ETH_PADDING:32,ETH_SIGNATURE_LENGTH:4,ETH_UNITS:r,ETH_BIGNUMBER_ROUNDING_MODE:{ROUNDING_MODE:n.ROUND_DOWN},ETH_POLLING_TIMEOUT:1e3,ETH_DEFAULTBLOCK:"latest"}},{"bignumber.js":"bignumber.js"}],6:[function(t,e){var n=t("bignumber.js"),r={wei:"1",kwei:"1000",ada:"1000",mwei:"1000000",babbage:"1000000",gwei:"1000000000",shannon:"1000000000",szabo:"1000000000000",finney:"1000000000000000",ether:"1000000000000000000",kether:"1000000000000000000000",grand:"1000000000000000000000",einstein:"1000000000000000000000",mether:"1000000000000000000000000",gether:"1000000000000000000000000000",tether:"1000000000000000000000000000000"},o=function(t,e,n){return new Array(e-t.length+1).join(n?n:"0")+t},i=function(t,e){for(var n=!1,r=0;r<t.length&&!n;r++)n=e(t[r]);return n?r-1:-1},a=function(t){var e="",n=0,r=t.length;for("0x"===t.substring(0,2)&&(n=2);r>n;n+=2){var o=parseInt(t.substr(n,2),16);if(0===o)break;e+=String.fromCharCode(o)}return e},u=function(t){for(var e="",n=0;n<t.length;n++){var r=t.charCodeAt(n).toString(16);e+=r.length<2?"0"+r:r}return e},s=function(t,e){e=void 0===e?0:e;for(var n=u(t);n.length<2*e;)n+="00";return"0x"+n},c=function(t){var e=t.indexOf("(");return-1!==e?t.substr(0,e):t},l=function(t){var e=t.indexOf("(");return-1!==e?t.substr(e+1,t.length-1-(e+1)).replace(" ",""):""},f=function(t){return t.filter(function(t){return"function"===t.type})},p=function(t){return t.filter(function(t){return"event"===t.type})},m=function(t){return y(t).toNumber()},h=function(t){var e=y(t),n=e.toString(16);return e.lessThan(0)?"-0x"+n.substr(1):"0x"+n},d=function(t){if(O(t))return h(+t);if(F(t))return h(t);if(N(t))return s(JSON.stringify(t));if(_(t)){if(0===t.indexOf("-0x"))return h(t);if(!isFinite(t))return s(t)}return h(t)},g=function(t){t=t?t.toLowerCase():"ether";var e=r[t];if(void 0===e)throw new Error("This unit doesn't exists, please use the one of the following units"+JSON.stringify(r,null,2));return new 
n(e,10)},v=function(t,e){var n=y(t).dividedBy(g(e));return F(t)?n:n.toString(10)},b=function(t,e){var n=y(t).times(g(e));return F(t)?n:n.toString(10)},y=function(t){return t=t||0,F(t)?t:!_(t)||0!==t.indexOf("0x")&&0!==t.indexOf("-0x")?new n(t.toString(10),10):new n(t.replace("0x",""),16)},w=function(t){var e=y(t);return e.lessThan(0)?new n("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",16).plus(e).plus(1):e},x=function(t){return/^0x[0-9a-f]{40}$/.test(t)},I=function(t){return x(t)?t:/^[0-9a-f]{40}$/.test(t)?"0x"+t:"0x"+o(d(t).substr(2),40)},F=function(t){return t instanceof n||t&&t.constructor&&"BigNumber"===t.constructor.name},_=function(t){return"string"==typeof t||t&&t.constructor&&"String"===t.constructor.name},T=function(t){return"function"==typeof t},N=function(t){return"object"==typeof t},O=function(t){return"boolean"==typeof t},B=function(t){return t instanceof Array},D=function(t){try{return!!JSON.parse(t)}catch(e){return!1}};e.exports={padLeft:o,findIndex:i,toHex:d,toDecimal:m,fromDecimal:h,toAscii:a,fromAscii:s,extractDisplayName:c,extractTypeName:l,filterFunctions:f,filterEvents:p,toWei:b,fromWei:v,toBigNumber:y,toTwosComplement:w,toAddress:I,isBigNumber:F,isAddress:x,isFunction:T,isString:_,isObject:N,isBoolean:O,isArray:B,isJson:D}},{"bignumber.js":"bignumber.js"}],7:[function(t,e){e.exports={version:"0.2.4"}},{}],8:[function(t,e){var n=t("./version.json"),r=t("./web3/net"),o=t("./web3/eth"),i=t("./web3/db"),a=t("./web3/shh"),u=t("./web3/watches"),s=t("./web3/filter"),c=t("./utils/utils"),l=t("./web3/formatters"),f=t("./web3/requestmanager"),p=t("./utils/config"),m=t("./web3/method"),h=t("./web3/property"),d=[new m({name:"sha3",call:"web3_sha3",params:1})],g=[new h({name:"version.client",getter:"web3_clientVersion"}),new h({name:"version.network",getter:"net_version",inputFormatter:c.toDecimal}),new h({name:"version.ethereum",getter:"eth_version",inputFormatter:c.toDecimal}),new h({name:"version.whisper",getter:"shh_version",inputFormatter:c.toDecimal})],v=function(t,e){e.forEach(function(e){e.attachToObject(t)})},b=function(t,e){e.forEach(function(e){e.attachToObject(t)})},y={};y.providers={},y.version={},y.version.api=n.version,y.eth={},y.eth.filter=function(t,e,n,r){return t._isEvent?t(e,n):new s(t,u.eth(),r||l.outputLogFormatter)},y.shh={},y.shh.filter=function(t){return new s(t,u.shh(),l.outputPostFormatter)},y.net={},y.db={},y.setProvider=function(t){f.getInstance().setProvider(t)},y.reset=function(){f.getInstance().reset()},y.toHex=c.toHex,y.toAscii=c.toAscii,y.fromAscii=c.fromAscii,y.toDecimal=c.toDecimal,y.fromDecimal=c.fromDecimal,y.toBigNumber=c.toBigNumber,y.toWei=c.toWei,y.fromWei=c.fromWei,y.isAddress=c.isAddress,Object.defineProperty(y.eth,"defaultBlock",{get:function(){return p.ETH_DEFAULTBLOCK},set:function(t){return p.ETH_DEFAULTBLOCK=t,p.ETH_DEFAULTBLOCK}}),v(y,d),b(y,g),v(y.net,r.methods),b(y.net,r.properties),v(y.eth,o.methods),b(y.eth,o.properties),v(y.db,i.methods),v(y.shh,a.methods),e.exports=y},{"./utils/config":5,"./utils/utils":6,"./version.json":7,"./web3/db":10,"./web3/eth":12,"./web3/filter":14,"./web3/formatters":15,"./web3/method":18,"./web3/net":19,"./web3/property":20,"./web3/requestmanager":22,"./web3/shh":23,"./web3/watches":25}],9:[function(t,e){function n(t,e){t.forEach(function(t){if(-1===t.name.indexOf("(")){var e=t.name,n=t.inputs.map(function(t){return t.type}).join();t.name=e+"("+n+")"}});var n={};return s(n),c(n,t,e),l(n,t,e),f(n,t,e),n}var 
r=t("../web3"),o=t("../solidity/abi"),i=t("../utils/utils"),a=t("./event"),u=t("./signature"),s=function(t){t.call=function(e){return t._isTransaction=!1,t._options=e,t},t.sendTransaction=function(e){return t._isTransaction=!0,t._options=e,t}},c=function(t,e,n){var a=o.inputParser(e),s=o.outputParser(e);i.filterFunctions(e).forEach(function(e){var o=i.extractDisplayName(e.name),c=i.extractTypeName(e.name),l=function(){var i=Array.prototype.slice.call(arguments),l=u.functionSignatureFromAscii(e.name),f=a[o][c].apply(null,i),p=t._options||{};p.to=n,p.data=l+f;var m=t._isTransaction===!0||t._isTransaction!==!1&&!e.constant,h=p.collapse!==!1;if(t._options={},t._isTransaction=null,m)return void r.eth.sendTransaction(p);var d=r.eth.call(p),g=s[o][c](d);return h&&(1===g.length?g=g[0]:0===g.length&&(g=null)),g};void 0===t[o]&&(t[o]=l),t[o][c]=l})},l=function(t,e,n){t.address=n,t._onWatchEventResult=function(t){var n=event.getMatchingEvent(i.filterEvents(e)),r=a.outputParser(n);return r(t)},Object.defineProperty(t,"topics",{get:function(){return i.filterEvents(e).map(function(t){return u.eventSignatureFromAscii(t.name)})}})},f=function(t,e,n){i.filterEvents(e).forEach(function(e){var o=function(){var t=Array.prototype.slice.call(arguments),o=u.eventSignatureFromAscii(e.name),i=a.inputParser(n,o,e),s=i.apply(null,t),c=function(t){var n=a.outputParser(e);return n(t)};return r.eth.filter(s,void 0,void 0,c)};o._isEvent=!0;var s=i.extractDisplayName(e.name),c=i.extractTypeName(e.name);void 0===t[s]&&(t[s]=o),t[s][c]=o})},p=function(t){return n.bind(null,t)};e.exports=p},{"../solidity/abi":1,"../utils/utils":6,"../web3":8,"./event":13,"./signature":24}],10:[function(t,e){var n=t("./method"),r=new n({name:"putString",call:"db_putString",params:3}),o=new n({name:"getString",call:"db_getString",params:2}),i=new n({name:"putHex",call:"db_putHex",params:3}),a=new n({name:"getHex",call:"db_getHex",params:2}),u=[r,o,i,a];e.exports={methods:u}},{"./method":18}],11:[function(t,e){var n=t("../utils/utils");e.exports={InvalidNumberOfParams:new Error("Invalid number of input parameters"),InvalidProvider:new Error("Providor not set or invalid"),InvalidResponse:function(t){var e="Invalid JSON RPC response";return n.isObject(t)&&t.error&&t.error.message&&(e=t.error.message),new Error(e)}}},{"../utils/utils":6}],12:[function(t,e){"use strict";var n=t("./formatters"),r=t("../utils/utils"),o=t("./method"),i=t("./property"),a=function(t){return r.isString(t[0])&&0===t[0].indexOf("0x")?"eth_getBlockByHash":"eth_getBlockByNumber"},u=function(t){return r.isString(t[0])&&0===t[0].indexOf("0x")?"eth_getTransactionByBlockHashAndIndex":"eth_getTransactionByBlockNumberAndIndex"},s=function(t){return r.isString(t[0])&&0===t[0].indexOf("0x")?"eth_getUncleByBlockHashAndIndex":"eth_getUncleByBlockNumberAndIndex"},c=function(t){return r.isString(t[0])&&0===t[0].indexOf("0x")?"eth_getBlockTransactionCountByHash":"eth_getBlockTransactionCountByNumber"},l=function(t){return r.isString(t[0])&&0===t[0].indexOf("0x")?"eth_getUncleCountByBlockHash":"eth_getUncleCountByBlockNumber"},f=new o({name:"getBalance",call:"eth_getBalance",params:2,inputFormatter:[r.toAddress,n.inputDefaultBlockNumberFormatter],outputFormatter:n.outputBigNumberFormatter}),p=new o({name:"getStorageAt",call:"eth_getStorageAt",params:3,inputFormatter:[null,r.toHex,n.inputDefaultBlockNumberFormatter]}),m=new o({name:"getCode",call:"eth_getCode",params:2,inputFormatter:[r.toAddress,n.inputDefaultBlockNumberFormatter]}),h=new 
o({name:"getBlock",call:a,params:2,inputFormatter:[r.toHex,function(t){return!!t}],outputFormatter:n.outputBlockFormatter}),d=new o({name:"getUncle",call:s,params:3,inputFormatter:[r.toHex,r.toHex,function(t){return!!t}],outputFormatter:n.outputBlockFormatter}),g=new o({name:"getCompilers",call:"eth_getCompilers",params:0}),v=new o({name:"getBlockTransactionCount",call:c,params:1,inputFormatter:[n.inputBlockNumberFormatter],outputFormatter:r.toDecimal}),b=new o({name:"getBlockUncleCount",call:l,params:1,inputFormatter:[n.inputBlockNumberFormatter],outputFormatter:r.toDecimal}),y=new o({name:"getTransaction",call:"eth_getTransactionByHash",params:1,outputFormatter:n.outputTransactionFormatter}),w=new o({name:"getTransactionFromBlock",call:u,params:2,inputFormatter:[r.toHex,r.toHex],outputFormatter:n.outputTransactionFormatter}),x=new o({name:"getTransactionCount",call:"eth_getTransactionCount",params:2,inputFormatter:[null,n.inputDefaultBlockNumberFormatter],outputFormatter:r.toDecimal}),I=new o({name:"sendTransaction",call:"eth_sendTransaction",params:1,inputFormatter:[n.inputTransactionFormatter]}),F=new o({name:"call",call:"eth_call",params:2,inputFormatter:[n.inputTransactionFormatter,n.inputDefaultBlockNumberFormatter]}),_=new o({name:"compile.solidity",call:"eth_compileSolidity",params:1}),T=new o({name:"compile.lll",call:"eth_compileLLL",params:1}),N=new o({name:"compile.serpent",call:"eth_compileSerpent",params:1}),O=new o({name:"flush",call:"eth_flush",params:0}),B=[f,p,m,h,d,g,v,b,y,w,x,F,I,_,T,N,O],D=[new i({name:"coinbase",getter:"eth_coinbase"}),new i({name:"mining",getter:"eth_mining"}),new i({name:"gasPrice",getter:"eth_gasPrice",outputFormatter:n.inputNumberFormatter}),new i({name:"accounts",getter:"eth_accounts"}),new i({name:"blockNumber",getter:"eth_blockNumber",outputFormatter:r.toDecimal})];e.exports={methods:B,properties:D}},{"../utils/utils":6,"./formatters":15,"./method":18,"./property":20}],13:[function(t,e){var n=t("../solidity/abi"),r=t("../utils/utils"),o=t("./signature"),i=function(t,e){return t.filter(function(t){return t.indexed===e})},a=function(t,e){var n=r.findIndex(t,function(t){return t.name===e});return-1===n?void console.error("indexed param with name "+e+" not found"):t[n]},u=function(t,e){return Object.keys(e).map(function(r){var o=[a(i(t.inputs,!0),r)],u=e[r];return u instanceof Array?u.map(function(t){return n.formatInput(o,[t])}):"0x"+n.formatInput(o,[u])})},s=function(t,e,n){return function(r,o){var i=o||{};return i.address=t,i.topics=[],i.topics.push(e),r&&(i.topics=i.topics.concat(u(n,r))),i}},c=function(t,e,n){var r=e.slice(),o=n.slice();return t.reduce(function(t,e){var n;return n=e.indexed?r.splice(0,1)[0]:o.splice(0,1)[0],t[e.name]=n,t},{})},l=function(t){return function(e){var o={event:r.extractDisplayName(t.name),number:e.number,hash:e.hash,args:{}};if(!e.topics)return o;e.data=e.data||"";var a=i(t.inputs,!0),u="0x"+e.topics.slice(1,e.topics.length).map(function(t){return t.slice(2)}).join(""),s=n.formatOutput(a,u),l=i(t.inputs,!1),f=n.formatOutput(l,e.data);return o.args=c(t.inputs,s,f),o}},f=function(t,e){for(var n=0;n<t.length;n++){var r=o.eventSignatureFromAscii(t[n].name);if(r===e.topics[0])return t[n]}return void 0};e.exports={inputParser:s,outputParser:l,getMatchingEvent:f}},{"../solidity/abi":1,"../utils/utils":6,"./signature":24}],14:[function(t,e){var n=t("./requestmanager"),r=t("./formatters"),o=t("../utils/utils"),i=function(t){return o.isString(t)?t:(t=t||{},t.topics=t.topics||[],t.topics=t.topics.map(function(t){return 
o.toHex(t)}),{topics:t.topics,to:t.to,address:t.address,fromBlock:r.inputBlockNumberFormatter(t.fromBlock),toBlock:r.inputBlockNumberFormatter(t.toBlock)})},a=function(t,e,n){var r={};e.forEach(function(t){t.attachToObject(r)}),this.options=i(t),this.implementation=r,this.callbacks=[],this.formatter=n,this.filterId=this.implementation.newFilter(this.options)};a.prototype.watch=function(t){this.callbacks.push(t);var e=this,r=function(t,n){return t?e.callbacks.forEach(function(e){e(t)}):void n.forEach(function(t){t=e.formatter?e.formatter(t):t,e.callbacks.forEach(function(e){e(null,t)})})};n.getInstance().startPolling({method:this.implementation.poll.call,params:[this.filterId]},this.filterId,r,this.stopWatching.bind(this))},a.prototype.stopWatching=function(){n.getInstance().stopPolling(this.filterId),this.implementation.uninstallFilter(this.filterId),this.callbacks=[]},a.prototype.get=function(){var t=this.implementation.getLogs(this.filterId),e=this;return t.map(function(t){return e.formatter?e.formatter(t):t})},e.exports=a},{"../utils/utils":6,"./formatters":15,"./requestmanager":22}],15:[function(t,e){var n=t("../utils/utils"),r=t("../utils/config"),o=function(t){return n.toBigNumber(t)},i=function(t){return"latest"===t||"pending"===t||"earliest"===t},a=function(t){return void 0===t?r.ETH_DEFAULTBLOCK:u(t)},u=function(t){return void 0===t?void 0:i(t)?t:n.toHex(t)},s=function(t){return t.code&&(t.data=t.code,delete t.code),["gasPrice","gas","value"].filter(function(e){return void 0!==t[e]}).forEach(function(e){t[e]=n.fromDecimal(t[e])}),t},c=function(t){return t.blockNumber=n.toDecimal(t.blockNumber),t.transactionIndex=n.toDecimal(t.transactionIndex),t.gas=n.toDecimal(t.gas),t.gasPrice=n.toBigNumber(t.gasPrice),t.value=n.toBigNumber(t.value),t},l=function(t){return t.gasLimit=n.toDecimal(t.gasLimit),t.gasUsed=n.toDecimal(t.gasUsed),t.size=n.toDecimal(t.size),t.timestamp=n.toDecimal(t.timestamp),t.number=n.toDecimal(t.number),t.minGasPrice=n.toBigNumber(t.minGasPrice),t.difficulty=n.toBigNumber(t.difficulty),t.totalDifficulty=n.toBigNumber(t.totalDifficulty),n.isArray(t.transactions)&&t.transactions.forEach(function(t){return n.isString(t)?void 0:c(t)}),t},f=function(t){return null===t?null:(t.blockNumber=n.toDecimal(t.blockNumber),t.transactionIndex=n.toDecimal(t.transactionIndex),t.logIndex=n.toDecimal(t.logIndex),t)},p=function(t){return t.payload=n.toHex(t.payload),t.ttl=n.fromDecimal(t.ttl),t.priority=n.fromDecimal(t.priority),n.isArray(t.topics)||(t.topics=[t.topics]),t.topics=t.topics.map(function(t){return n.fromAscii(t)}),t},m=function(t){return t.expiry=n.toDecimal(t.expiry),t.sent=n.toDecimal(t.sent),t.ttl=n.toDecimal(t.ttl),t.workProved=n.toDecimal(t.workProved),t.payloadRaw=t.payload,t.payload=n.toAscii(t.payload),n.isJson(t.payload)&&(t.payload=JSON.parse(t.payload)),t.topics=t.topics.map(function(t){return n.toAscii(t)}),t};e.exports={inputDefaultBlockNumberFormatter:a,inputBlockNumberFormatter:u,inputTransactionFormatter:s,inputPostFormatter:p,outputBigNumberFormatter:o,outputTransactionFormatter:c,outputBlockFormatter:l,outputLogFormatter:f,outputPostFormatter:m}},{"../utils/config":5,"../utils/utils":6}],16:[function(t,e){"use strict";var n=t("xmlhttprequest").XMLHttpRequest,r=function(t){this.host=t||"http://localhost:8080"};r.prototype.send=function(t){var e=new n;return e.open("POST",this.host,!1),e.send(JSON.stringify(t)),JSON.parse(e.responseText)},r.prototype.sendAsync=function(t,e){var r=new 
n;r.onreadystatechange=function(){4===r.readyState&&e(null,JSON.parse(r.responseText))},r.open("POST",this.host,!0),r.send(JSON.stringify(t))},e.exports=r},{xmlhttprequest:4}],17:[function(t,e){var n=function(){return arguments.callee._singletonInstance?arguments.callee._singletonInstance:(arguments.callee._singletonInstance=this,void(this.messageId=1))};n.getInstance=function(){var t=new n;return t},n.prototype.toPayload=function(t,e){return t||console.error("jsonrpc method should be specified!"),{jsonrpc:"2.0",method:t,params:e||[],id:this.messageId++}},n.prototype.isValidResponse=function(t){return!!t&&!t.error&&"2.0"===t.jsonrpc&&"number"==typeof t.id&&void 0!==t.result},n.prototype.toBatchPayload=function(t){var e=this;return t.map(function(t){return e.toPayload(t.method,t.params)})},e.exports=n},{}],18:[function(t,e){var n=t("./requestmanager"),r=t("../utils/utils"),o=t("./errors"),i=function(t){this.name=t.name,this.call=t.call,this.params=t.params||0,this.inputFormatter=t.inputFormatter,this.outputFormatter=t.outputFormatter};i.prototype.getCall=function(t){return r.isFunction(this.call)?this.call(t):this.call},i.prototype.extractCallback=function(t){return r.isFunction(t[t.length-1])?t.pop():null},i.prototype.validateArgs=function(t){if(t.length!==this.params)throw o.InvalidNumberOfParams},i.prototype.formatInput=function(t){return this.inputFormatter?this.inputFormatter.map(function(e,n){return e?e(t[n]):t[n]}):t},i.prototype.formatOutput=function(t){return this.outputFormatter&&null!==t?this.outputFormatter(t):t},i.prototype.attachToObject=function(t){var e=this.send.bind(this);e.call=this.call;var n=this.name.split(".");n.length>1?(t[n[0]]=t[n[0]]||{},t[n[0]][n[1]]=e):t[n[0]]=e},i.prototype.toPayload=function(t){var e=this.getCall(t),n=this.extractCallback(t),r=this.formatInput(t);return this.validateArgs(r),{method:e,params:r,callback:n}},i.prototype.send=function(){var t=this.toPayload(Array.prototype.slice.call(arguments));if(t.callback){var e=this;return n.getInstance().sendAsync(t,function(n,r){t.callback(null,e.formatOutput(r))})}return this.formatOutput(n.getInstance().send(t))},e.exports=i},{"../utils/utils":6,"./errors":11,"./requestmanager":22}],19:[function(t,e){var n=t("../utils/utils"),r=t("./property"),o=[],i=[new r({name:"listening",getter:"net_listening"}),new r({name:"peerCount",getter:"net_peerCount",outputFormatter:n.toDecimal})];e.exports={methods:o,properties:i}},{"../utils/utils":6,"./property":20}],20:[function(t,e){var n=t("./requestmanager"),r=function(t){this.name=t.name,this.getter=t.getter,this.setter=t.setter,this.outputFormatter=t.outputFormatter,this.inputFormatter=t.inputFormatter};r.prototype.formatInput=function(t){return this.inputFormatter?this.inputFormatter(t):t},r.prototype.formatOutput=function(t){return this.outputFormatter&&null!==t?this.outputFormatter(t):t},r.prototype.attachToObject=function(t){var e={get:this.get.bind(this),set:this.set.bind(this)},n=this.name.split(".");n.length>1?(t[n[0]]=t[n[0]]||{},Object.defineProperty(t[n[0]],n[1],e)):Object.defineProperty(t,n[0],e)},r.prototype.get=function(){return this.formatOutput(n.getInstance().send({method:this.getter}))},r.prototype.set=function(t){return n.getInstance().send({method:this.setter,params:[this.formatInput(t)]})},e.exports=r},{"./requestmanager":22}],21:[function(t,e){var n=function(){};n.prototype.send=function(t){var e=navigator.qt.callMethod(JSON.stringify(t));return JSON.parse(e)},e.exports=n},{}],22:[function(t,e){var 
n=t("./jsonrpc"),r=t("../utils/utils"),o=t("../utils/config"),i=t("./errors"),a=function(t){return arguments.callee._singletonInstance?arguments.callee._singletonInstance:(arguments.callee._singletonInstance=this,this.provider=t,this.polls=[],this.timeout=null,void this.poll())};a.getInstance=function(){var t=new a;return t},a.prototype.send=function(t){if(!this.provider)return console.error(i.InvalidProvider),null;var e=n.getInstance().toPayload(t.method,t.params),r=this.provider.send(e);if(!n.getInstance().isValidResponse(r))throw i.InvalidResponse(r);return r.result},a.prototype.sendAsync=function(t,e){if(!this.provider)return e(i.InvalidProvider);var r=n.getInstance().toPayload(t.method,t.params);this.provider.sendAsync(r,function(t,r){return t?e(t):n.getInstance().isValidResponse(r)?void e(null,r.result):e(i.InvalidResponse(r))})},a.prototype.setProvider=function(t){this.provider=t},a.prototype.startPolling=function(t,e,n,r){this.polls.push({data:t,id:e,callback:n,uninstall:r})},a.prototype.stopPolling=function(t){for(var e=this.polls.length;e--;){var n=this.polls[e];n.id===t&&this.polls.splice(e,1)}},a.prototype.reset=function(){this.polls.forEach(function(t){t.uninstall(t.id)}),this.polls=[],this.timeout&&(clearTimeout(this.timeout),this.timeout=null),this.poll()},a.prototype.poll=function(){if(this.timeout=setTimeout(this.poll.bind(this),o.ETH_POLLING_TIMEOUT),this.polls.length){if(!this.provider)return void console.error(i.InvalidProvider);var t=n.getInstance().toBatchPayload(this.polls.map(function(t){return t.data})),e=this;this.provider.sendAsync(t,function(t,o){if(!t){if(!r.isArray(o))throw i.InvalidResponse(o);o.map(function(t,n){return t.callback=e.polls[n].callback,t}).filter(function(t){var e=n.getInstance().isValidResponse(t);return e||t.callback(i.InvalidResponse(t)),e}).filter(function(t){return r.isArray(t.result)&&t.result.length>0}).forEach(function(t){t.callback(null,t.result)})}})}},e.exports=a},{"../utils/config":5,"../utils/utils":6,"./errors":11,"./jsonrpc":17}],23:[function(t,e){var n=t("./method"),r=t("./formatters"),o=new n({name:"post",call:"shh_post",params:1,inputFormatter:r.inputPostFormatter}),i=new n({name:"newIdentity",call:"shh_newIdentity",params:0}),a=new n({name:"hasIdentity",call:"shh_hasIdentity",params:1}),u=new n({name:"newGroup",call:"shh_newGroup",params:0}),s=new n({name:"addToGroup",call:"shh_addToGroup",params:0}),c=[o,i,a,u,s];e.exports={methods:c}},{"./formatters":15,"./method":18}],24:[function(t,e){var n=t("../web3"),r=t("../utils/config"),o=function(t){return n.sha3(n.fromAscii(t)).slice(0,2+2*r.ETH_SIGNATURE_LENGTH)},i=function(t){return n.sha3(n.fromAscii(t))};e.exports={functionSignatureFromAscii:o,eventSignatureFromAscii:i}},{"../utils/config":5,"../web3":8}],25:[function(t,e){var n=t("./method"),r=function(){var t=function(t){return"string"==typeof t[0]?"eth_newBlockFilter":"eth_newFilter"},e=new n({name:"newFilter",call:t,params:1}),r=new n({name:"uninstallFilter",call:"eth_uninstallFilter",params:1}),o=new n({name:"getLogs",call:"eth_getFilterLogs",params:1}),i=new n({name:"poll",call:"eth_getFilterChanges",params:1});return[e,r,o,i]},o=function(){var t=new n({name:"newFilter",call:"shh_newFilter",params:1}),e=new n({name:"uninstallFilter",call:"shh_uninstallFilter",params:1}),r=new n({name:"getLogs",call:"shh_getMessages",params:1}),o=new n({name:"poll",call:"shh_getFilterChanges",params:1});return[t,e,r,o]};e.exports={eth:r,shh:o}},{"./method":18}],26:[function(){},{}],"bignumber.js":[function(t,e){"use 
strict";e.exports=BigNumber},{}],"ethereum.js":[function(t,e){var n=t("./lib/web3");n.providers.HttpProvider=t("./lib/web3/httpprovider"),n.providers.QtSyncProvider=t("./lib/web3/qtsync"),n.eth.contract=t("./lib/web3/contract"),n.abi=t("./lib/solidity/abi"),e.exports=n},{"./lib/solidity/abi":1,"./lib/web3":8,"./lib/web3/contract":9,"./lib/web3/httpprovider":16,"./lib/web3/qtsync":21}]},{},["ethereum.js"]);`
+const Ethereum_JS = `
+require=function t(e,r,n){function o(a,s){if(!r[a]){if(!e[a]){var u="function"==typeof require&&require;if(!s&&u)return u(a,!0);if(i)return i(a,!0);var c=new Error("Cannot find module '"+a+"'");throw c.code="MODULE_NOT_FOUND",c}var l=r[a]={exports:{}};e[a][0].call(l.exports,function(t){var r=e[a][1][t];return o(r?r:t)},l,l.exports,t,e,r,n)}return r[a].exports}for(var i="function"==typeof require&&require,a=0;a<n.length;a++)o(n[a]);return o}({1:[function(t,e,r){var n=t("../utils/utils"),o=t("./coder"),i=t("./utils"),a=function(t,e){var r=t.map(function(t){return t.type});return o.encodeParams(r,e)},s=function(t,e){var r=t.map(function(t){return t.type});return o.decodeParams(r,e)},u=function(t){var e={};return t.forEach(function(t){var r=n.extractDisplayName(t.name),o=n.extractTypeName(t.name),i=function(){var e=Array.prototype.slice.call(arguments);return a(t.inputs,e)};void 0===e[r]&&(e[r]=i),e[r][o]=i}),e},c=function(t){var e={};return t.forEach(function(t){var r=n.extractDisplayName(t.name),o=n.extractTypeName(t.name),i=function(e){return s(t.outputs,e)};void 0===e[r]&&(e[r]=i),e[r][o]=i}),e},l=function(t,e){var r=i.getConstructor(t,e.length);return r?a(r.inputs,e):(e.length>0&&console.warn("didn't found matching constructor, using default one"),"")};e.exports={inputParser:u,outputParser:c,formatInput:a,formatOutput:s,formatConstructorParams:l}},{"../utils/utils":8,"./coder":2,"./utils":5}],2:[function(t,e,r){var n=t("bignumber.js"),o=t("../utils/utils"),i=t("./formatters"),a=t("./param"),s=function(t){return"[]"===t.slice(-2)},u=function(t){this._name=t.name,this._match=t.match,this._mode=t.mode,this._inputFormatter=t.inputFormatter,this._outputFormatter=t.outputFormatter};u.prototype.isType=function(t){return"strict"===this._match?this._name===t||0===t.indexOf(this._name)&&"[]"===t.slice(this._name.length):"prefix"===this._match?0===t.indexOf(this._name):void 0},u.prototype.formatInput=function(t,e){if(o.isArray(t)&&e){var r=this;return t.map(function(t){return r._inputFormatter(t)}).reduce(function(t,e){return t.appendArrayElement(e),t},new a("",i.formatInputInt(t.length).value))}return this._inputFormatter(t)},u.prototype.formatOutput=function(t,e){if(e){for(var r=[],o=new n(t.prefix,16),i=0;64*o>i;i+=64)r.push(this._outputFormatter(new a(t.suffix.slice(i,i+64))));return r}return this._outputFormatter(t)},u.prototype.isVariadicType=function(t){return s(t)||"bytes"===this._mode},u.prototype.shiftParam=function(t,e){if("bytes"===this._mode)return e.shiftBytes();if(s(t)){var r=new n(e.prefix.slice(0,64),16);return e.shiftArray(r)}return e.shiftValue()};var c=function(t){this._types=t};c.prototype._requireType=function(t){var e=this._types.filter(function(e){return e.isType(t)})[0];if(!e)throw Error("invalid solidity type!: "+t);return e},c.prototype._bytesToParam=function(t,e){var r=this,n=t.reduce(function(t,e){return r._requireType(e).isVariadicType(e)?t+1:t},0),o=t.length-n,i=e.slice(0,64*n);e=e.slice(64*n);var s=e.slice(0,64*o),u=e.slice(64*o);return new a(s,i,u)},c.prototype._formatInput=function(t,e){return this._requireType(t).formatInput(e,s(t))},c.prototype.encodeParam=function(t,e){return this._formatInput(t,e).encode()},c.prototype.encodeParams=function(t,e){var r=this;return t.map(function(t,n){return r._formatInput(t,e[n])}).reduce(function(t,e){return t.append(e),t},new a).encode()},c.prototype._formatOutput=function(t,e){return this._requireType(t).formatOutput(e,s(t))},c.prototype.decodeParam=function(t,e){return 
this._formatOutput(t,this._bytesToParam([t],e))},c.prototype.decodeParams=function(t,e){var r=this,n=this._bytesToParam(t,e);return t.map(function(t){var e=r._requireType(t),o=e.shiftParam(t,n);return e.formatOutput(o,s(t))})};var l=new c([new u({name:"address",match:"strict",mode:"value",inputFormatter:i.formatInputInt,outputFormatter:i.formatOutputAddress}),new u({name:"bool",match:"strict",mode:"value",inputFormatter:i.formatInputBool,outputFormatter:i.formatOutputBool}),new u({name:"int",match:"prefix",mode:"value",inputFormatter:i.formatInputInt,outputFormatter:i.formatOutputInt}),new u({name:"uint",match:"prefix",mode:"value",inputFormatter:i.formatInputInt,outputFormatter:i.formatOutputUInt}),new u({name:"bytes",match:"strict",mode:"bytes",inputFormatter:i.formatInputDynamicBytes,outputFormatter:i.formatOutputDynamicBytes}),new u({name:"bytes",match:"prefix",mode:"value",inputFormatter:i.formatInputBytes,outputFormatter:i.formatOutputBytes}),new u({name:"real",match:"prefix",mode:"value",inputFormatter:i.formatInputReal,outputFormatter:i.formatOutputReal}),new u({name:"ureal",match:"prefix",mode:"value",inputFormatter:i.formatInputReal,outputFormatter:i.formatOutputUReal})]);e.exports=l},{"../utils/utils":8,"./formatters":3,"./param":4,"bignumber.js":"bignumber.js"}],3:[function(t,e,r){var n=t("bignumber.js"),o=t("../utils/utils"),i=t("../utils/config"),a=t("./param"),s=function(t){var e=2*i.ETH_PADDING;n.config(i.ETH_BIGNUMBER_ROUNDING_MODE);var r=o.padLeft(o.toTwosComplement(t).round().toString(16),e);return new a(r)},u=function(t){var e=o.fromAscii(t,i.ETH_PADDING).substr(2);return new a(e)},c=function(t){var e=o.fromAscii(t,i.ETH_PADDING).substr(2);return new a("",s(t.length).value,e)},l=function(t){var e="000000000000000000000000000000000000000000000000000000000000000"+(t?"1":"0");return new a(e)},p=function(t){return s(new n(t).times(new n(2).pow(128)))},f=function(t){return"1"===new n(t.substr(0,1),16).toString(2).substr(0,1)},m=function(t){var e=t.value||"0";return f(e)?new n(e,16).minus(new n("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",16)).minus(1):new n(e,16)},h=function(t){var e=t.value||"0";return new n(e,16)},d=function(t){return m(t).dividedBy(new n(2).pow(128))},y=function(t){return h(t).dividedBy(new n(2).pow(128))},g=function(t){return"0000000000000000000000000000000000000000000000000000000000000001"===t.value?!0:!1},v=function(t){return o.toAscii(t.value)},b=function(t){return o.toAscii(t.suffix)},w=function(t){var e=t.value;return"0x"+e.slice(e.length-40,e.length)};e.exports={formatInputInt:s,formatInputBytes:u,formatInputDynamicBytes:c,formatInputBool:l,formatInputReal:p,formatOutputInt:m,formatOutputUInt:h,formatOutputReal:d,formatOutputUReal:y,formatOutputBool:g,formatOutputBytes:v,formatOutputDynamicBytes:b,formatOutputAddress:w}},{"../utils/config":7,"../utils/utils":8,"./param":4,"bignumber.js":"bignumber.js"}],4:[function(t,e,r){var n=function(t,e,r){this.prefix=e||"",this.value=t||"",this.suffix=r||""};n.prototype.append=function(t){this.prefix+=t.prefix,this.value+=t.value,this.suffix+=t.suffix},n.prototype.appendArrayElement=function(t){this.suffix+=t.value,this.prefix+=t.prefix},n.prototype.encode=function(){return this.prefix+this.value+this.suffix},n.prototype.shiftValue=function(){var t=this.value.slice(0,64);return this.value=this.value.slice(64),new n(t)},n.prototype.shiftBytes=function(){return this.shiftArray(1)},n.prototype.shiftArray=function(t){var e=this.prefix.slice(0,64);this.prefix=this.value.slice(64);var 
r=this.suffix.slice(0,64*t);return this.suffix=this.suffix.slice(64*t),new n("",e,r)},e.exports=n},{}],5:[function(t,e,r){var n=function(t,e){return t.filter(function(t){return"constructor"===t.type&&t.inputs.length===e})[0]};e.exports={getConstructor:n}},{}],6:[function(t,e,r){"use strict";r.XMLHttpRequest="undefined"==typeof XMLHttpRequest?{}:XMLHttpRequest},{}],7:[function(t,e,r){var n=t("bignumber.js"),o=["wei","Kwei","Mwei","Gwei","szabo","finney","ether","grand","Mether","Gether","Tether","Pether","Eether","Zether","Yether","Nether","Dether","Vether","Uether"];e.exports={ETH_PADDING:32,ETH_SIGNATURE_LENGTH:4,ETH_UNITS:o,ETH_BIGNUMBER_ROUNDING_MODE:{ROUNDING_MODE:n.ROUND_DOWN},ETH_POLLING_TIMEOUT:1e3,defaultBlock:"latest",defaultAccount:void 0}},{"bignumber.js":"bignumber.js"}],8:[function(t,e,r){var n=t("bignumber.js"),o={wei:"1",kwei:"1000",ada:"1000",mwei:"1000000",babbage:"1000000",gwei:"1000000000",shannon:"1000000000",szabo:"1000000000000",finney:"1000000000000000",ether:"1000000000000000000",kether:"1000000000000000000000",grand:"1000000000000000000000",einstein:"1000000000000000000000",mether:"1000000000000000000000000",gether:"1000000000000000000000000000",tether:"1000000000000000000000000000000"},i=function(t,e,r){return new Array(e-t.length+1).join(r?r:"0")+t},a=function(t){var e="",r=0,n=t.length;for("0x"===t.substring(0,2)&&(r=2);n>r;r+=2){var o=parseInt(t.substr(r,2),16);if(0===o)break;e+=String.fromCharCode(o)}return e},s=function(t){for(var e="",r=0;r<t.length;r++){var n=t.charCodeAt(r).toString(16);e+=n.length<2?"0"+n:n}return e},u=function(t,e){e=void 0===e?0:e;for(var r=s(t);r.length<2*e;)r+="00";return"0x"+r},c=function(t){if(-1!==t.name.indexOf("("))return t.name;var e=t.inputs.map(function(t){return t.type}).join();return t.name+"("+e+")"},l=function(t){var e=t.indexOf("(");return-1!==e?t.substr(0,e):t},p=function(t){var e=t.indexOf("(");return-1!==e?t.substr(e+1,t.length-1-(e+1)).replace(" ",""):""},f=function(t){return v(t).toNumber()},m=function(t){var e=v(t),r=e.toString(16);return e.lessThan(0)?"-0x"+r.substr(1):"0x"+r},h=function(t){if(T(t))return m(+t);if(F(t))return m(t);if(B(t))return u(JSON.stringify(t));if(I(t)){if(0===t.indexOf("-0x"))return m(t);if(!isFinite(t))return u(t)}return m(t)},d=function(t){t=t?t.toLowerCase():"ether";var e=o[t];if(void 0===e)throw new Error("This unit doesn't exists, please use the one of the following units"+JSON.stringify(o,null,2));return new n(e,10)},y=function(t,e){var r=v(t).dividedBy(d(e));return F(t)?r:r.toString(10)},g=function(t,e){var r=v(t).times(d(e));return F(t)?r:r.toString(10)},v=function(t){return t=t||0,F(t)?t:!I(t)||0!==t.indexOf("0x")&&0!==t.indexOf("-0x")?new n(t.toString(10),10):new n(t.replace("0x",""),16)},b=function(t){var e=v(t);return e.lessThan(0)?new n("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",16).plus(e).plus(1):e},w=function(t){return/^0x[0-9a-f]{40}$/.test(t)},x=function(t){return/^(0x)?[0-9a-f]{40}$/.test(t)},_=function(t){return w(t)?t:/^[0-9a-f]{40}$/.test(t)?"0x"+t:"0x"+i(h(t).substr(2),40)},F=function(t){return t instanceof n||t&&t.constructor&&"BigNumber"===t.constructor.name},I=function(t){return"string"==typeof t||t&&t.constructor&&"String"===t.constructor.name},N=function(t){return"function"==typeof t},B=function(t){return"object"==typeof t},T=function(t){return"boolean"==typeof t},O=function(t){return t instanceof 
Array},P=function(t){try{return!!JSON.parse(t)}catch(e){return!1}};e.exports={padLeft:i,toHex:h,toDecimal:f,fromDecimal:m,toAscii:a,fromAscii:u,transformToFullName:c,extractDisplayName:l,extractTypeName:p,toWei:g,fromWei:y,toBigNumber:v,toTwosComplement:b,toAddress:_,isBigNumber:F,isStrictAddress:w,isAddress:x,isFunction:N,isString:I,isObject:B,isBoolean:T,isArray:O,isJson:P}},{"bignumber.js":"bignumber.js"}],9:[function(t,e,r){e.exports={version:"0.3.3"}},{}],10:[function(t,e,r){var n=t("./version.json"),o=t("./web3/net"),i=t("./web3/eth"),a=t("./web3/db"),s=t("./web3/shh"),u=t("./web3/watches"),c=t("./web3/filter"),l=t("./utils/utils"),p=t("./web3/formatters"),f=t("./web3/requestmanager"),m=t("./utils/config"),h=t("./web3/method"),d=t("./web3/property"),y=[new h({name:"sha3",call:"web3_sha3",params:1})],g=[new d({name:"version.client",getter:"web3_clientVersion"}),new d({name:"version.network",getter:"net_version",inputFormatter:l.toDecimal}),new d({name:"version.ethereum",getter:"eth_protocolVersion",inputFormatter:l.toDecimal}),new d({name:"version.whisper",getter:"shh_version",inputFormatter:l.toDecimal})],v=function(t,e){e.forEach(function(e){e.attachToObject(t)})},b=function(t,e){e.forEach(function(e){e.attachToObject(t)})},w={};w.providers={},w.version={},w.version.api=n.version,w.eth={},w.eth.filter=function(t,e,r,n){return t._isEvent?t(e,r):new c(t,u.eth(),n||p.outputLogFormatter)},w.shh={},w.shh.filter=function(t){return new c(t,u.shh(),p.outputPostFormatter)},w.net={},w.db={},w.setProvider=function(t){f.getInstance().setProvider(t)},w.reset=function(){f.getInstance().reset(),m.defaultBlock="latest",m.defaultAccount=void 0},w.toHex=l.toHex,w.toAscii=l.toAscii,w.fromAscii=l.fromAscii,w.toDecimal=l.toDecimal,w.fromDecimal=l.fromDecimal,w.toBigNumber=l.toBigNumber,w.toWei=l.toWei,w.fromWei=l.fromWei,w.isAddress=l.isAddress,Object.defineProperty(w.eth,"defaultBlock",{get:function(){return m.defaultBlock},set:function(t){return m.defaultBlock=t,t}}),Object.defineProperty(w.eth,"defaultAccount",{get:function(){return m.defaultAccount},set:function(t){return m.defaultAccount=t,t}}),v(w,y),b(w,g),v(w.net,o.methods),b(w.net,o.properties),v(w.eth,i.methods),b(w.eth,i.properties),v(w.db,a.methods),v(w.shh,s.methods),e.exports=w},{"./utils/config":7,"./utils/utils":8,"./version.json":9,"./web3/db":12,"./web3/eth":14,"./web3/filter":16,"./web3/formatters":17,"./web3/method":21,"./web3/net":22,"./web3/property":23,"./web3/requestmanager":25,"./web3/shh":26,"./web3/watches":27}],11:[function(t,e,r){var n=t("../web3"),o=t("../solidity/abi"),i=t("../utils/utils"),a=t("./event"),s=t("./function"),u=function(t,e){e.filter(function(t){return"function"===t.type}).map(function(e){return new s(e,t.address)}).forEach(function(e){e.attachToContract(t)})},c=function(t,e){e.filter(function(t){return"event"===t.type}).map(function(e){return new a(e,t.address)}).forEach(function(e){e.attachToContract(t)})},l=function(t){return p.bind(null,t)},p=function(t,e){if(this.address="",i.isAddress(e))this.address=e;else{e=e||{};var r=Array.prototype.slice.call(arguments,2),a=o.formatConstructorParams(t,r);e.data+=a,this.address=n.eth.sendTransaction(e)}u(this,t),c(this,t)};p.prototype.call=function(){return console.error("contract.call is deprecated"),this},p.prototype.sendTransaction=function(){return console.error("contract.sendTransact is deprecated"),this},e.exports=l},{"../solidity/abi":1,"../utils/utils":8,"../web3":10,"./event":15,"./function":18}],12:[function(t,e,r){var n=t("./method"),o=new 
n({name:"putString",call:"db_putString",params:3}),i=new n({name:"getString",call:"db_getString",params:2}),a=new n({name:"putHex",call:"db_putHex",params:3}),s=new n({name:"getHex",call:"db_getHex",params:2}),u=[o,i,a,s];e.exports={methods:u}},{"./method":21}],13:[function(t,e,r){e.exports={InvalidNumberOfParams:function(){return new Error("Invalid number of input parameters")},InvalidConnection:function(t){return new Error("CONNECTION ERROR: Couldn't connect to node "+t+", is it running?")},InvalidProvider:function(){return new Error("Providor not set or invalid")},InvalidResponse:function(t){var e=t&&t.error&&t.error.message?t.error.message:"Invalid JSON RPC response";return new Error(e)}}},{}],14:[function(t,e,r){"use strict";var n=t("./formatters"),o=t("../utils/utils"),i=t("./method"),a=t("./property"),s=function(t){return o.isString(t[0])&&0===t[0].indexOf("0x")?"eth_getBlockByHash":"eth_getBlockByNumber"},u=function(t){return o.isString(t[0])&&0===t[0].indexOf("0x")?"eth_getTransactionByBlockHashAndIndex":"eth_getTransactionByBlockNumberAndIndex"},c=function(t){return o.isString(t[0])&&0===t[0].indexOf("0x")?"eth_getUncleByBlockHashAndIndex":"eth_getUncleByBlockNumberAndIndex"},l=function(t){return o.isString(t[0])&&0===t[0].indexOf("0x")?"eth_getBlockTransactionCountByHash":"eth_getBlockTransactionCountByNumber"},p=function(t){return o.isString(t[0])&&0===t[0].indexOf("0x")?"eth_getUncleCountByBlockHash":"eth_getUncleCountByBlockNumber"},f=new i({name:"getBalance",call:"eth_getBalance",params:2,inputFormatter:[o.toAddress,n.inputDefaultBlockNumberFormatter],outputFormatter:n.outputBigNumberFormatter}),m=new i({name:"getStorageAt",call:"eth_getStorageAt",params:3,inputFormatter:[null,o.toHex,n.inputDefaultBlockNumberFormatter]}),h=new i({name:"getCode",call:"eth_getCode",params:2,inputFormatter:[o.toAddress,n.inputDefaultBlockNumberFormatter]}),d=new i({name:"getBlock",call:s,params:2,inputFormatter:[n.inputBlockNumberFormatter,function(t){return!!t}],outputFormatter:n.outputBlockFormatter}),y=new i({name:"getUncle",call:c,params:2,inputFormatter:[n.inputBlockNumberFormatter,o.toHex],outputFormatter:n.outputBlockFormatter}),g=new i({name:"getCompilers",call:"eth_getCompilers",params:0}),v=new i({name:"getBlockTransactionCount",call:l,params:1,inputFormatter:[n.inputBlockNumberFormatter],outputFormatter:o.toDecimal}),b=new i({name:"getBlockUncleCount",call:p,params:1,inputFormatter:[n.inputBlockNumberFormatter],outputFormatter:o.toDecimal}),w=new i({name:"getTransaction",call:"eth_getTransactionByHash",params:1,outputFormatter:n.outputTransactionFormatter}),x=new i({name:"getTransactionFromBlock",call:u,params:2,inputFormatter:[n.inputBlockNumberFormatter,o.toHex],outputFormatter:n.outputTransactionFormatter}),_=new i({name:"getTransactionCount",call:"eth_getTransactionCount",params:2,inputFormatter:[null,n.inputDefaultBlockNumberFormatter],outputFormatter:o.toDecimal}),F=new i({name:"sendTransaction",call:"eth_sendTransaction",params:1,inputFormatter:[n.inputTransactionFormatter]}),I=new i({name:"call",call:"eth_call",params:2,inputFormatter:[n.inputTransactionFormatter,n.inputDefaultBlockNumberFormatter]}),N=new i({name:"compile.solidity",call:"eth_compileSolidity",params:1}),B=new i({name:"compile.lll",call:"eth_compileLLL",params:1}),T=new i({name:"compile.serpent",call:"eth_compileSerpent",params:1}),O=[f,m,h,d,y,g,v,b,w,x,_,I,F,N,B,T],P=[new a({name:"coinbase",getter:"eth_coinbase"}),new a({name:"mining",getter:"eth_mining"}),new 
a({name:"gasPrice",getter:"eth_gasPrice",outputFormatter:n.outputBigNumberFormatter}),new a({name:"accounts",getter:"eth_accounts"}),new a({name:"blockNumber",getter:"eth_blockNumber",outputFormatter:o.toDecimal})];e.exports={methods:O,properties:P}},{"../utils/utils":8,"./formatters":17,"./method":21,"./property":23}],15:[function(t,e,r){var n=t("../utils/utils"),o=t("../solidity/coder"),i=t("../web3"),a=t("./formatters"),s=function(t,e){this._params=t.inputs,this._name=n.transformToFullName(t),this._address=e,this._anonymous=t.anonymous};s.prototype.types=function(t){return this._params.filter(function(e){return e.indexed===t}).map(function(t){return t.type})},s.prototype.displayName=function(){return n.extractDisplayName(this._name)},s.prototype.typeName=function(){return n.extractTypeName(this._name)},s.prototype.signature=function(){return i.sha3(i.fromAscii(this._name)).slice(2)},s.prototype.encode=function(t,e){t=t||{},e=e||{};var r={};["fromBlock","toBlock"].filter(function(t){return void 0!==e[t]}).forEach(function(t){r[t]=n.toHex(e[t])}),r.topics=[],this._anonymous||(r.address=this._address,r.topics.push("0x"+this.signature()));var i=this._params.filter(function(t){return t.indexed===!0}).map(function(e){var r=t[e.name];return void 0===r||null===r?null:n.isArray(r)?r.map(function(t){return"0x"+o.encodeParam(e.type,t)}):"0x"+o.encodeParam(e.type,r)});return r.topics=r.topics.concat(i),r},s.prototype.decode=function(t){t.data=t.data||"",t.topics=t.topics||[];var e=this._anonymous?t.topics:t.topics.slice(1),r=e.map(function(t){return t.slice(2)}).join(""),n=o.decodeParams(this.types(!0),r),i=t.data.slice(2),s=o.decodeParams(this.types(!1),i),u=a.outputLogFormatter(t);return u.event=this.displayName(),u.address=t.address,u.args=this._params.reduce(function(t,e){return t[e.name]=e.indexed?n.shift():s.shift(),t},{}),delete u.data,delete u.topics,u},s.prototype.execute=function(t,e){var r=this.encode(t,e),n=this.decode.bind(this);return i.eth.filter(r,void 0,void 0,n)},s.prototype.attachToContract=function(t){var e=this.execute.bind(this),r=this.displayName();t[r]||(t[r]=e),t[r][this.typeName()]=this.execute.bind(this,t)},e.exports=s},{"../solidity/coder":2,"../utils/utils":8,"../web3":10,"./formatters":17}],16:[function(t,e,r){var n=t("./requestmanager"),o=t("./formatters"),i=t("../utils/utils"),a=function(t){return null===t||"undefined"==typeof t?null:(t=String(t),0===t.indexOf("0x")?t:i.fromAscii(t))},s=function(t){return i.isString(t)?t:(t=t||{},t.topics=t.topics||[],t.topics=t.topics.map(function(t){return i.isArray(t)?t.map(a):a(t)}),{topics:t.topics,to:t.to,address:t.address,fromBlock:o.inputBlockNumberFormatter(t.fromBlock),toBlock:o.inputBlockNumberFormatter(t.toBlock)})},u=function(t,e,r){var n={};e.forEach(function(t){t.attachToObject(n)}),this.options=s(t),this.implementation=n,this.callbacks=[],this.formatter=r,this.filterId=this.implementation.newFilter(this.options)};u.prototype.watch=function(t){this.callbacks.push(t);var e=this,r=function(t,r){return t?e.callbacks.forEach(function(e){e(t)}):void 
r.forEach(function(t){t=e.formatter?e.formatter(t):t,e.callbacks.forEach(function(e){e(null,t)})})};i.isString(this.options)||this.get(function(e,r){e&&t(e),r.forEach(function(e){t(null,e)})}),n.getInstance().startPolling({method:this.implementation.poll.call,params:[this.filterId]},this.filterId,r,this.stopWatching.bind(this))},u.prototype.stopWatching=function(){n.getInstance().stopPolling(this.filterId),this.implementation.uninstallFilter(this.filterId),this.callbacks=[]},u.prototype.get=function(t){var e=this;if(!i.isFunction(t)){var r=this.implementation.getLogs(this.filterId);return r.map(function(t){return e.formatter?e.formatter(t):t})}this.implementation.getLogs(this.filterId,function(r,n){r?t(r):t(null,n.map(function(t){return e.formatter?e.formatter(t):t}))})},e.exports=u},{"../utils/utils":8,"./formatters":17,"./requestmanager":25}],17:[function(t,e,r){var n=t("../utils/utils"),o=t("../utils/config"),i=function(t){return n.toBigNumber(t)},a=function(t){return"latest"===t||"pending"===t||"earliest"===t},s=function(t){return void 0===t?o.defaultBlock:u(t)},u=function(t){return void 0===t?void 0:a(t)?t:n.toHex(t)},c=function(t){return t.from=t.from||o.defaultAccount,t.code&&(t.data=t.code,delete t.code),["gasPrice","gas","value"].filter(function(e){return void 0!==t[e]}).forEach(function(e){t[e]=n.fromDecimal(t[e])}),t},l=function(t){return t.blockNumber=n.toDecimal(t.blockNumber),t.transactionIndex=n.toDecimal(t.transactionIndex),t.nonce=n.toDecimal(t.nonce),t.gas=n.toDecimal(t.gas),t.gasPrice=n.toBigNumber(t.gasPrice),t.value=n.toBigNumber(t.value),t},p=function(t){return t.gasLimit=n.toDecimal(t.gasLimit),t.gasUsed=n.toDecimal(t.gasUsed),t.size=n.toDecimal(t.size),t.timestamp=n.toDecimal(t.timestamp),t.number=n.toDecimal(t.number),t.difficulty=n.toBigNumber(t.difficulty),t.totalDifficulty=n.toBigNumber(t.totalDifficulty),n.isArray(t.transactions)&&t.transactions.forEach(function(t){return n.isString(t)?void 0:l(t)}),t},f=function(t){return null===t?null:(t.blockNumber=n.toDecimal(t.blockNumber),t.transactionIndex=n.toDecimal(t.transactionIndex),t.logIndex=n.toDecimal(t.logIndex),t)},m=function(t){return t.payload=n.toHex(t.payload),t.ttl=n.fromDecimal(t.ttl),t.workToProve=n.fromDecimal(t.workToProve),t.priority=n.fromDecimal(t.priority),n.isArray(t.topics)||(t.topics=t.topics?[t.topics]:[]),t.topics=t.topics.map(function(t){return n.fromAscii(t)}),t},h=function(t){return t.expiry=n.toDecimal(t.expiry),t.sent=n.toDecimal(t.sent),t.ttl=n.toDecimal(t.ttl),t.workProved=n.toDecimal(t.workProved),t.payloadRaw=t.payload,t.payload=n.toAscii(t.payload),n.isJson(t.payload)&&(t.payload=JSON.parse(t.payload)),t.topics||(t.topics=[]),t.topics=t.topics.map(function(t){return n.toAscii(t)}),t};e.exports={inputDefaultBlockNumberFormatter:s,inputBlockNumberFormatter:u,inputTransactionFormatter:c,inputPostFormatter:m,outputBigNumberFormatter:i,outputTransactionFormatter:l,outputBlockFormatter:p,outputLogFormatter:f,outputPostFormatter:h}},{"../utils/config":7,"../utils/utils":8}],18:[function(t,e,r){var n=t("../web3"),o=t("../solidity/coder"),i=t("../utils/utils"),a=function(t,e){this._inputTypes=t.inputs.map(function(t){return t.type}),this._outputTypes=t.outputs.map(function(t){return t.type}),this._constant=t.constant,this._name=i.transformToFullName(t),this._address=e};a.prototype.toPayload=function(){var t=Array.prototype.slice.call(arguments),e={};return 
t.length>this._inputTypes.length&&i.isObject(t[t.length-1])&&(e=t.pop()),e.to=this._address,e.data="0x"+this.signature()+o.encodeParams(this._inputTypes,t),e},a.prototype.signature=function(){return n.sha3(n.fromAscii(this._name)).slice(2,10)},a.prototype.call=function(){var t=this.toPayload.apply(this,Array.prototype.slice.call(arguments)),e=n.eth.call(t);e=e.length>=2?e.slice(2):e;var r=o.decodeParams(this._outputTypes,e);return 1===r.length?r[0]:r},a.prototype.sendTransaction=function(){var t=this.toPayload.apply(this,Array.prototype.slice.call(arguments));n.eth.sendTransaction(t)},a.prototype.displayName=function(){return i.extractDisplayName(this._name)},a.prototype.typeName=function(){return i.extractTypeName(this._name)},a.prototype.execute=function(){var t=!this._constant;return t?this.sendTransaction.apply(this,Array.prototype.slice.call(arguments)):this.call.apply(this,Array.prototype.slice.call(arguments))},a.prototype.attachToContract=function(t){var e=this.execute.bind(this);e.call=this.call.bind(this),e.sendTransaction=this.sendTransaction.bind(this);var r=this.displayName();t[r]||(t[r]=e),t[r][this.typeName()]=e},e.exports=a},{"../solidity/coder":2,"../utils/utils":8,"../web3":10}],19:[function(t,e,r){"use strict";var n=t("xmlhttprequest").XMLHttpRequest,o=t("./errors"),i=function(t){this.host=t||"http://localhost:8545"};i.prototype.send=function(t){var e=new n;e.open("POST",this.host,!1);try{e.send(JSON.stringify(t))}catch(r){throw o.InvalidConnection(this.host)}return JSON.parse(e.responseText)},i.prototype.sendAsync=function(t,e){var r=new n;r.onreadystatechange=function(){4===r.readyState&&e(null,JSON.parse(r.responseText))},r.open("POST",this.host,!0);try{r.send(JSON.stringify(t))}catch(i){e(o.InvalidConnection(this.host))}},e.exports=i},{"./errors":13,xmlhttprequest:6}],20:[function(t,e,r){var n=function(){return arguments.callee._singletonInstance?arguments.callee._singletonInstance:(arguments.callee._singletonInstance=this,void(this.messageId=1))};n.getInstance=function(){var t=new n;return t},n.prototype.toPayload=function(t,e){return t||console.error("jsonrpc method should be specified!"),{jsonrpc:"2.0",method:t,params:e||[],id:this.messageId++}},n.prototype.isValidResponse=function(t){return!!t&&!t.error&&"2.0"===t.jsonrpc&&"number"==typeof t.id&&void 0!==t.result},n.prototype.toBatchPayload=function(t){var e=this;return t.map(function(t){return e.toPayload(t.method,t.params)})},e.exports=n},{}],21:[function(t,e,r){var n=t("./requestmanager"),o=t("../utils/utils"),i=t("./errors"),a=function(t){this.name=t.name,this.call=t.call,this.params=t.params||0,this.inputFormatter=t.inputFormatter,this.outputFormatter=t.outputFormatter};a.prototype.getCall=function(t){return o.isFunction(this.call)?this.call(t):this.call},a.prototype.extractCallback=function(t){return o.isFunction(t[t.length-1])?t.pop():null},a.prototype.validateArgs=function(t){if(t.length!==this.params)throw i.InvalidNumberOfParams()},a.prototype.formatInput=function(t){return this.inputFormatter?this.inputFormatter.map(function(e,r){return e?e(t[r]):t[r]}):t},a.prototype.formatOutput=function(t){return this.outputFormatter&&null!==t?this.outputFormatter(t):t},a.prototype.attachToObject=function(t){var e=this.send.bind(this);e.call=this.call;var r=this.name.split(".");r.length>1?(t[r[0]]=t[r[0]]||{},t[r[0]][r[1]]=e):t[r[0]]=e},a.prototype.toPayload=function(t){var e=this.getCall(t),r=this.extractCallback(t),n=this.formatInput(t);return 
this.validateArgs(n),{method:e,params:n,callback:r}},a.prototype.send=function(){var t=this.toPayload(Array.prototype.slice.call(arguments));if(t.callback){var e=this;return n.getInstance().sendAsync(t,function(r,n){t.callback(null,e.formatOutput(n))})}return this.formatOutput(n.getInstance().send(t))},e.exports=a},{"../utils/utils":8,"./errors":13,"./requestmanager":25}],22:[function(t,e,r){var n=t("../utils/utils"),o=t("./property"),i=[],a=[new o({name:"listening",getter:"net_listening"}),new o({name:"peerCount",getter:"net_peerCount",outputFormatter:n.toDecimal})];e.exports={methods:i,properties:a}},{"../utils/utils":8,"./property":23}],23:[function(t,e,r){var n=t("./requestmanager"),o=function(t){this.name=t.name,this.getter=t.getter,this.setter=t.setter,this.outputFormatter=t.outputFormatter,this.inputFormatter=t.inputFormatter};o.prototype.formatInput=function(t){return this.inputFormatter?this.inputFormatter(t):t},o.prototype.formatOutput=function(t){return this.outputFormatter&&null!==t?this.outputFormatter(t):t},o.prototype.attachToObject=function(t){var e={get:this.get.bind(this),set:this.set.bind(this)},r=this.name.split(".");r.length>1?(t[r[0]]=t[r[0]]||{},Object.defineProperty(t[r[0]],r[1],e)):Object.defineProperty(t,r[0],e)},o.prototype.get=function(){return this.formatOutput(n.getInstance().send({method:this.getter}))},o.prototype.set=function(t){return n.getInstance().send({method:this.setter,params:[this.formatInput(t)]})},e.exports=o},{"./requestmanager":25}],24:[function(t,e,r){var n=function(){};n.prototype.send=function(t){var e=navigator.qt.callMethod(JSON.stringify(t));return JSON.parse(e)},e.exports=n},{}],25:[function(t,e,r){var n=t("./jsonrpc"),o=t("../utils/utils"),i=t("../utils/config"),a=t("./errors"),s=function(t){return arguments.callee._singletonInstance?arguments.callee._singletonInstance:(arguments.callee._singletonInstance=this,this.provider=t,this.polls=[],this.timeout=null,void this.poll())};s.getInstance=function(){var t=new s;return t},s.prototype.send=function(t){if(!this.provider)return console.error(a.InvalidProvider()),null;var e=n.getInstance().toPayload(t.method,t.params),r=this.provider.send(e);if(!n.getInstance().isValidResponse(r))throw a.InvalidResponse(r);return r.result},s.prototype.sendAsync=function(t,e){if(!this.provider)return e(a.InvalidProvider());var r=n.getInstance().toPayload(t.method,t.params);this.provider.sendAsync(r,function(t,r){return t?e(t):n.getInstance().isValidResponse(r)?void e(null,r.result):e(a.InvalidResponse(r))})},s.prototype.setProvider=function(t){this.provider=t},s.prototype.startPolling=function(t,e,r,n){this.polls.push({data:t,id:e,callback:r,uninstall:n})},s.prototype.stopPolling=function(t){for(var e=this.polls.length;e--;){var r=this.polls[e];r.id===t&&this.polls.splice(e,1)}},s.prototype.reset=function(){this.polls.forEach(function(t){t.uninstall(t.id)}),this.polls=[],this.timeout&&(clearTimeout(this.timeout),this.timeout=null),this.poll()},s.prototype.poll=function(){if(this.timeout=setTimeout(this.poll.bind(this),i.ETH_POLLING_TIMEOUT),this.polls.length){if(!this.provider)return void console.error(a.InvalidProvider());var t=n.getInstance().toBatchPayload(this.polls.map(function(t){return t.data})),e=this;this.provider.sendAsync(t,function(t,r){if(!t){if(!o.isArray(r))throw a.InvalidResponse(r);r.map(function(t,r){return t.callback=e.polls[r].callback,t}).filter(function(t){var e=n.getInstance().isValidResponse(t);return e||t.callback(a.InvalidResponse(t)),e}).filter(function(t){return 
o.isArray(t.result)&&t.result.length>0}).forEach(function(t){t.callback(null,t.result)})}})}},e.exports=s},{"../utils/config":7,"../utils/utils":8,"./errors":13,"./jsonrpc":20}],26:[function(t,e,r){var n=t("./method"),o=t("./formatters"),i=new n({name:"post",call:"shh_post",params:1,inputFormatter:[o.inputPostFormatter]}),a=new n({name:"newIdentity",call:"shh_newIdentity",params:0}),s=new n({name:"hasIdentity",call:"shh_hasIdentity",params:1}),u=new n({name:"newGroup",call:"shh_newGroup",params:0}),c=new n({name:"addToGroup",call:"shh_addToGroup",params:0}),l=[i,a,s,u,c];e.exports={methods:l}},{"./formatters":17,"./method":21}],27:[function(t,e,r){var n=t("./method"),o=function(){var t=function(t){return"string"==typeof t[0]?"eth_newBlockFilter":"eth_newFilter"},e=new n({name:"newFilter",call:t,params:1}),r=new n({name:"uninstallFilter",call:"eth_uninstallFilter",params:1}),o=new n({name:"getLogs",call:"eth_getFilterLogs",params:1}),i=new n({name:"poll",call:"eth_getFilterChanges",params:1
+});return[e,r,o,i]},i=function(){var t=new n({name:"newFilter",call:"shh_newFilter",params:1}),e=new n({name:"uninstallFilter",call:"shh_uninstallFilter",params:1}),r=new n({name:"getLogs",call:"shh_getMessages",params:1}),o=new n({name:"poll",call:"shh_getFilterChanges",params:1});return[t,e,r,o]};e.exports={eth:o,shh:i}},{"./method":21}],28:[function(t,e,r){},{}],"bignumber.js":[function(t,e,r){"use strict";e.exports=BigNumber},{}],web3:[function(t,e,r){var n=t("./lib/web3");n.providers.HttpProvider=t("./lib/web3/httpprovider"),n.providers.QtSyncProvider=t("./lib/web3/qtsync"),n.eth.contract=t("./lib/web3/contract"),n.abi=t("./lib/solidity/abi"),"undefined"!=typeof window&&"undefined"==typeof window.web3&&(window.web3=n),e.exports=n},{"./lib/solidity/abi":1,"./lib/web3":10,"./lib/web3/contract":11,"./lib/web3/httpprovider":19,"./lib/web3/qtsync":24}]},{},["web3"]);`
diff --git a/miner/worker.go b/miner/worker.go
index 19ede3c93..a38b8a5d4 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -236,7 +236,7 @@ func (self *worker) makeCurrent() {
}
parent := self.chain.GetBlock(self.current.block.ParentHash())
- self.current.coinbase.SetGasPool(core.CalcGasLimit(parent, self.current.block))
+ self.current.coinbase.SetGasPool(core.CalcGasLimit(parent))
}
func (self *worker) commitNewWork() {
diff --git a/rpc/api.go b/rpc/api.go
index 085d8cf27..6d3a20bfa 100644
--- a/rpc/api.go
+++ b/rpc/api.go
@@ -63,8 +63,8 @@ func (api *EthereumApi) GetRequestReply(req *RpcRequest, reply *interface{}) err
case "eth_mining":
*reply = api.xeth().IsMining()
case "eth_gasPrice":
- v := xeth.DefaultGas()
- *reply = newHexData(v.Bytes())
+ v := xeth.DefaultGasPrice()
+ *reply = newHexNum(v.Bytes())
case "eth_accounts":
*reply = api.xeth().Accounts()
case "eth_blockNumber":
@@ -406,65 +406,69 @@ func (api *EthereumApi) GetRequestReply(req *RpcRequest, reply *interface{}) err
res, _ := api.xeth().DbGet([]byte(args.Database + args.Key))
*reply = newHexData(res)
+
case "shh_version":
+ // Retrieves the currently running whisper protocol version
*reply = api.xeth().WhisperVersion()
+
case "shh_post":
+ // Injects a new message into the whisper network
args := new(WhisperMessageArgs)
if err := json.Unmarshal(req.Params, &args); err != nil {
return err
}
-
err := api.xeth().Whisper().Post(args.Payload, args.To, args.From, args.Topics, args.Priority, args.Ttl)
if err != nil {
return err
}
-
*reply = true
+
case "shh_newIdentity":
+ // Creates a new whisper identity to use for sending/receiving messages
*reply = api.xeth().Whisper().NewIdentity()
- // case "shh_removeIdentity":
- // args := new(WhisperIdentityArgs)
- // if err := json.Unmarshal(req.Params, &args); err != nil {
- // return err
- // }
- // *reply = api.xeth().Whisper().RemoveIdentity(args.Identity)
+
case "shh_hasIdentity":
+ // Checks if an identity is owned or not
args := new(WhisperIdentityArgs)
if err := json.Unmarshal(req.Params, &args); err != nil {
return err
}
*reply = api.xeth().Whisper().HasIdentity(args.Identity)
- case "shh_newGroup", "shh_addToGroup":
- return NewNotImplementedError(req.Method)
+
case "shh_newFilter":
+ // Create a new filter to watch and match messages with
args := new(WhisperFilterArgs)
if err := json.Unmarshal(req.Params, &args); err != nil {
return err
}
- opts := new(xeth.Options)
- // opts.From = args.From
- opts.To = args.To
- opts.Topics = args.Topics
- id := api.xeth().NewWhisperFilter(opts)
+ id := api.xeth().NewWhisperFilter(args.To, args.From, args.Topics)
*reply = newHexNum(big.NewInt(int64(id)).Bytes())
+
case "shh_uninstallFilter":
+ // Remove an existing filter watching messages
args := new(FilterIdArgs)
if err := json.Unmarshal(req.Params, &args); err != nil {
return err
}
*reply = api.xeth().UninstallWhisperFilter(args.Id)
+
case "shh_getFilterChanges":
+ // Retrieve all the new messages that arrived since the last request
args := new(FilterIdArgs)
if err := json.Unmarshal(req.Params, &args); err != nil {
return err
}
- *reply = api.xeth().MessagesChanged(args.Id)
+ *reply = api.xeth().WhisperMessagesChanged(args.Id)
+
case "shh_getMessages":
+ // Retrieve all the cached messages matching a specific, existing filter
args := new(FilterIdArgs)
if err := json.Unmarshal(req.Params, &args); err != nil {
return err
}
- *reply = api.xeth().Whisper().Messages(args.Id)
+ *reply = api.xeth().WhisperMessages(args.Id)
+ case "eth_hashrate":
+ *reply = newHexNum(api.xeth().HashRate())
// case "eth_register":
// // Placeholder for actual type
@@ -489,6 +493,6 @@ func (api *EthereumApi) GetRequestReply(req *RpcRequest, reply *interface{}) err
return NewNotImplementedError(req.Method)
}
- rpclogger.DebugDetailf("Reply: %T %s", reply, reply)
+ glog.V(logger.Detail).Infof("Reply: %T %s\n", reply, reply)
return nil
}
diff --git a/rpc/args.go b/rpc/args.go
index 7694a3d3f..4bd48e6d6 100644
--- a/rpc/args.go
+++ b/rpc/args.go
@@ -1010,25 +1010,27 @@ func (args *WhisperIdentityArgs) UnmarshalJSON(b []byte) (err error) {
}
type WhisperFilterArgs struct {
- To string `json:"to"`
+ To string
From string
- Topics []string
+ Topics [][]string
}
+// UnmarshalJSON implements the json.Unmarshaler interface, invoked to convert a
+// JSON message blob into a WhisperFilterArgs structure.
func (args *WhisperFilterArgs) UnmarshalJSON(b []byte) (err error) {
+ // Unmarshal the JSON message and sanity check
var obj []struct {
- To interface{}
- Topics []interface{}
+ To interface{} `json:"to"`
+ From interface{} `json:"from"`
+ Topics interface{} `json:"topics"`
}
-
- if err = json.Unmarshal(b, &obj); err != nil {
+ if err := json.Unmarshal(b, &obj); err != nil {
return NewDecodeParamError(err.Error())
}
-
if len(obj) < 1 {
return NewInsufficientParamsError(len(obj), 1)
}
-
+ // Retrieve the simple data contents of the filter arguments
if obj[0].To == nil {
args.To = ""
} else {
@@ -1038,17 +1040,52 @@ func (args *WhisperFilterArgs) UnmarshalJSON(b []byte) (err error) {
}
args.To = argstr
}
-
- t := make([]string, len(obj[0].Topics))
- for i, j := range obj[0].Topics {
- argstr, ok := j.(string)
+ if obj[0].From == nil {
+ args.From = ""
+ } else {
+ argstr, ok := obj[0].From.(string)
if !ok {
- return NewInvalidTypeError("topics["+string(i)+"]", "is not a string")
+ return NewInvalidTypeError("from", "is not a string")
}
- t[i] = argstr
+ args.From = argstr
+ }
+ // Construct the nested topic array
+ if obj[0].Topics != nil {
+ // Make sure we have an actual topic array
+ list, ok := obj[0].Topics.([]interface{})
+ if !ok {
+ return NewInvalidTypeError("topics", "is not an array")
+ }
+ // Iterate over each topic and handle nil, string or array
+ topics := make([][]string, len(list))
+ for idx, field := range list {
+ switch value := field.(type) {
+ case nil:
+ topics[idx] = []string{}
+
+ case string:
+ topics[idx] = []string{value}
+
+ case []interface{}:
+ topics[idx] = make([]string, len(value))
+ for i, nested := range value {
+ switch value := nested.(type) {
+ case nil:
+ topics[idx][i] = ""
+
+ case string:
+ topics[idx][i] = value
+
+ default:
+ return NewInvalidTypeError(fmt.Sprintf("topic[%d][%d]", idx, i), "is not a string")
+ }
+ }
+ default:
+ return NewInvalidTypeError(fmt.Sprintf("topic[%d]", idx), "not a string or array")
+ }
+ }
+ args.Topics = topics
}
- args.Topics = t
-
return nil
}
diff --git a/rpc/args_test.go b/rpc/args_test.go
index 2f011bfd9..f5949b7a2 100644
--- a/rpc/args_test.go
+++ b/rpc/args_test.go
@@ -1943,7 +1943,7 @@ func TestWhisperFilterArgs(t *testing.T) {
input := `[{"topics": ["0x68656c6c6f20776f726c64"], "to": "0x34ag445g3455b34"}]`
expected := new(WhisperFilterArgs)
expected.To = "0x34ag445g3455b34"
- expected.Topics = []string{"0x68656c6c6f20776f726c64"}
+ expected.Topics = [][]string{[]string{"0x68656c6c6f20776f726c64"}}
args := new(WhisperFilterArgs)
if err := json.Unmarshal([]byte(input), &args); err != nil {
diff --git a/rpc/http.go b/rpc/http.go
index f9c646908..4760601d8 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -13,7 +13,6 @@ import (
"github.com/rs/cors"
)
-var rpclogger = logger.NewLogger("RPC")
var rpclistener *stoppableTCPListener
const (
@@ -31,7 +30,7 @@ func Start(pipe *xeth.XEth, config RpcConfig) error {
l, err := newStoppableTCPListener(fmt.Sprintf("%s:%d", config.ListenAddress, config.ListenPort))
if err != nil {
- rpclogger.Errorf("Can't listen on %s:%d: %v", config.ListenAddress, config.ListenPort, err)
+ glog.V(logger.Error).Infof("Can't listen on %s:%d: %v", config.ListenAddress, config.ListenPort, err)
return err
}
rpclistener = l
@@ -136,7 +135,7 @@ func send(writer io.Writer, v interface{}) (n int, err error) {
var payload []byte
payload, err = json.MarshalIndent(v, "", "\t")
if err != nil {
- rpclogger.Fatalln("Error marshalling JSON", err)
+ glog.V(logger.Error).Infoln("Error marshalling JSON", err)
return 0, err
}
glog.V(logger.Detail).Infof("Sending payload: %s", payload)
diff --git a/tests/block_test.go b/tests/block_test.go
index 9343a3de9..a46751f00 100644
--- a/tests/block_test.go
+++ b/tests/block_test.go
@@ -87,7 +87,7 @@ func runBlockTest(name string, test *BlockTest, t *testing.T) {
ethereum.ResetWithGenesisBlock(test.Genesis)
// import pre accounts
- statedb, err := test.InsertPreState(ethereum.StateDb())
+ statedb, err := test.InsertPreState(ethereum)
if err != nil {
t.Fatalf("InsertPreState: %v", err)
}
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index f34c5d200..06f082ca3 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -10,11 +10,14 @@ import (
"runtime"
"strconv"
"strings"
+ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -41,10 +44,11 @@ type btBlock struct {
}
type btAccount struct {
- Balance string
- Code string
- Nonce string
- Storage map[string]string
+ Balance string
+ Code string
+ Nonce string
+ Storage map[string]string
+ PrivateKey string
}
type btHeader struct {
@@ -97,15 +101,24 @@ func LoadBlockTests(file string) (map[string]*BlockTest, error) {
// InsertPreState populates the given database with the genesis
// accounts defined by the test.
-func (t *BlockTest) InsertPreState(db common.Database) (*state.StateDB, error) {
+func (t *BlockTest) InsertPreState(ethereum *eth.Ethereum) (*state.StateDB, error) {
+ db := ethereum.StateDb()
statedb := state.New(common.Hash{}, db)
for addrString, acct := range t.preAccounts {
- // XXX: is is worth it checking for errors here?
- //addr, _ := hex.DecodeString(addrString)
+ addr, _ := hex.DecodeString(addrString)
code, _ := hex.DecodeString(strings.TrimPrefix(acct.Code, "0x"))
balance, _ := new(big.Int).SetString(acct.Balance, 0)
nonce, _ := strconv.ParseUint(acct.Nonce, 16, 64)
+ if acct.PrivateKey != "" {
+ privkey, err := hex.DecodeString(strings.TrimPrefix(acct.PrivateKey, "0x"))
+ err = crypto.ImportBlockTestKey(privkey)
+ err = ethereum.AccountManager().TimedUnlock(addr, "", 999999*time.Second)
+ if err != nil {
+ return nil, err
+ }
+ }
+
obj := statedb.CreateAccount(common.HexToAddress(addrString))
obj.SetCode(code)
obj.SetBalance(balance)
diff --git a/tests/files/BlockTests/bcGasPricerTest.json b/tests/files/BlockTests/bcGasPricerTest.json
new file mode 100644
index 000000000..bbaafa821
--- /dev/null
+++ b/tests/files/BlockTests/bcGasPricerTest.json
@@ -0,0 +1,1115 @@
+{
+ "highGasUsage" : {
+ "blocks" : [
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020000",
+ "extraData" : "0x",
+ "gasLimit" : "0x01dee698",
+ "gasUsed" : "0x53a0",
+ "hash" : "d946eb32109b0c982d8ca0be61526fc092e38a835817a5cace5b91a2402044da",
+ "mixHash" : "0f4c223564c93caa39f8ae0e4b3a4ab45be533f72effff4de1b9ff064c2414f3",
+ "nonce" : "ec7c71a453af5f95",
+ "number" : "0x01",
+ "parentHash" : "5624076d15018935ba2c0e94550c27d944acc5c9cd91a0fac44d1bf2e8d8c1af",
+ "receiptTrie" : "08ffbde000912f7a562428e6750194b5862548d98ee02739e8d8671290d49abe",
+ "stateRoot" : "92eb4bd8d175ec6ed972069e4581fa27238e286bbc0d3e1ad454d5622dfe9721",
+ "timestamp" : "0x5538a738",
+ "transactionsTrie" : "d4f8cfc29bf8c5cbcdeaf767ea5dc8ac4db675a1b037e2295e4242a91763da56",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa05624076d15018935ba2c0e94550c27d944acc5c9cd91a0fac44d1bf2e8d8c1afa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a092eb4bd8d175ec6ed972069e4581fa27238e286bbc0d3e1ad454d5622dfe9721a0d4f8cfc29bf8c5cbcdeaf767ea5dc8ac4db675a1b037e2295e4242a91763da56a008ffbde000912f7a562428e6750194b5862548d98ee02739e8d8671290d49abeb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018401dee6988253a0845538a73880a00f4c223564c93caa39f8ae0e4b3a4ab45be533f72effff4de1b9ff064c2414f388ec7c71a453af5f95f86ef86c808609184e72a000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ca01747862a9d2100c3ae39cc79cf47d7c5f201c7062bd06d3d88cbd98736932cd1a07af35a4ff64e9e3d12b9d2ef211a9d2c51a0b557faad8efd211617326123aca9c0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x09184e72a000",
+ "nonce" : "0x00",
+ "r" : "0x1747862a9d2100c3ae39cc79cf47d7c5f201c7062bd06d3d88cbd98736932cd1",
+ "s" : "0x7af35a4ff64e9e3d12b9d2ef211a9d2c51a0b557faad8efd211617326123aca9",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1c",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020040",
+ "extraData" : "0x",
+ "gasLimit" : "0x01de6ef7",
+ "gasUsed" : "0x53a0",
+ "hash" : "1b29d6d87544d341673fc2934f171dfd32dbef9998c97dbe89b0444e0f92edd9",
+ "mixHash" : "31bcd6380911ea6f10a6ceea474077c8188df56d056957dcb7343b29943d8510",
+ "nonce" : "f60f49a10aecad31",
+ "number" : "0x02",
+ "parentHash" : "d946eb32109b0c982d8ca0be61526fc092e38a835817a5cace5b91a2402044da",
+ "receiptTrie" : "67c656b9a0921d60806d3afd6efd806fb9c926bfc20ed994a3b649d7474c39a7",
+ "stateRoot" : "96a7c35dd2c30d7474f6a42c578be5c123ccd819d0f96a4bfc44470769310776",
+ "timestamp" : "0x5538a73a",
+ "transactionsTrie" : "17ec93c98646f62936113a43546328c05bc23853fcb3f4c3a805011ffeb9e792",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa0d946eb32109b0c982d8ca0be61526fc092e38a835817a5cace5b91a2402044daa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a096a7c35dd2c30d7474f6a42c578be5c123ccd819d0f96a4bfc44470769310776a017ec93c98646f62936113a43546328c05bc23853fcb3f4c3a805011ffeb9e792a067c656b9a0921d60806d3afd6efd806fb9c926bfc20ed994a3b649d7474c39a7b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020040028401de6ef78253a0845538a73a80a031bcd6380911ea6f10a6ceea474077c8188df56d056957dcb7343b29943d851088f60f49a10aecad31f86ef86c01860ae9f7bcc000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ba04357b780e5906edd91c7c7af1d299b6413252cca6e0e21fa3c9fd84c128da997a0b3bfd830a2fde8ae8a77f1df8d776200001be61c2e374cdc10b2e9276f735c17c0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x0ae9f7bcc000",
+ "nonce" : "0x01",
+ "r" : "0x4357b780e5906edd91c7c7af1d299b6413252cca6e0e21fa3c9fd84c128da997",
+ "s" : "0xb3bfd830a2fde8ae8a77f1df8d776200001be61c2e374cdc10b2e9276f735c17",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1b",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020080",
+ "extraData" : "0x",
+ "gasLimit" : "0x01ddf774",
+ "gasUsed" : "0x53a0",
+ "hash" : "95ce545c26a01a0c5ea5e66617a7d854612d69f5d2d950c38b5f12593503e3b2",
+ "mixHash" : "2e9a95e21ff82c56a927202983e7b8004781bccdfb50e5120aec0653feb2ce16",
+ "nonce" : "4e6c598fd024cf06",
+ "number" : "0x03",
+ "parentHash" : "1b29d6d87544d341673fc2934f171dfd32dbef9998c97dbe89b0444e0f92edd9",
+ "receiptTrie" : "55d8b7426bea61d92d3be00068b1c0a1c3179b577e25e4b7ca527c54183c8131",
+ "stateRoot" : "6260950734efec3344c57c4c57146b5a1b07e12cc43e2fd1f620fa52dc00c69b",
+ "timestamp" : "0x5538a73c",
+ "transactionsTrie" : "f4679a78bacf15f824ad0d2d04b812f00ccefb8f536b6e8a0e19e3c4f6ef5aba",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa01b29d6d87544d341673fc2934f171dfd32dbef9998c97dbe89b0444e0f92edd9a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a06260950734efec3344c57c4c57146b5a1b07e12cc43e2fd1f620fa52dc00c69ba0f4679a78bacf15f824ad0d2d04b812f00ccefb8f536b6e8a0e19e3c4f6ef5abaa055d8b7426bea61d92d3be00068b1c0a1c3179b577e25e4b7ca527c54183c8131b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020080038401ddf7748253a0845538a73c80a02e9a95e21ff82c56a927202983e7b8004781bccdfb50e5120aec0653feb2ce16884e6c598fd024cf06f86ef86c02860cbba106e000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ca049246609fee26f9dcaf3501f11fc2854797a993e365b459107795910d7cb5ddaa0a254df5b25792fd0b6555f583f088cec0da320979b284f03076ffce812e877c4c0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x0cbba106e000",
+ "nonce" : "0x02",
+ "r" : "0x49246609fee26f9dcaf3501f11fc2854797a993e365b459107795910d7cb5dda",
+ "s" : "0xa254df5b25792fd0b6555f583f088cec0da320979b284f03076ffce812e877c4",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1c",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x0200c0",
+ "extraData" : "0x",
+ "gasLimit" : "0x01dd800f",
+ "gasUsed" : "0x53a0",
+ "hash" : "2c926ec837d45e27734c095dc6be14b6544d659e6f9bc8800ab5f15fab2c777d",
+ "mixHash" : "083d7f9f9581d64662f041a4be954f6ffcb6ab9b8785ca863c19a70dd5d22ceb",
+ "nonce" : "d7b92505c2e3d447",
+ "number" : "0x04",
+ "parentHash" : "95ce545c26a01a0c5ea5e66617a7d854612d69f5d2d950c38b5f12593503e3b2",
+ "receiptTrie" : "26f751f5ad9e99145c76b865d4c9649fd0239499ad78d92b826026ec65e50b19",
+ "stateRoot" : "7435221f38518e8e97c94e5b8eeb03c6ee619fbb9dca6c5f84a96b36af2410dd",
+ "timestamp" : "0x5538a743",
+ "transactionsTrie" : "9210b6f15e7a78df04c58b6e7b9f791c201687a3f77bc750fabd65860aeb6676",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa095ce545c26a01a0c5ea5e66617a7d854612d69f5d2d950c38b5f12593503e3b2a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07435221f38518e8e97c94e5b8eeb03c6ee619fbb9dca6c5f84a96b36af2410dda09210b6f15e7a78df04c58b6e7b9f791c201687a3f77bc750fabd65860aeb6676a026f751f5ad9e99145c76b865d4c9649fd0239499ad78d92b826026ec65e50b19b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200c0048401dd800f8253a0845538a74380a0083d7f9f9581d64662f041a4be954f6ffcb6ab9b8785ca863c19a70dd5d22ceb88d7b92505c2e3d447f86ef86c03860e8d4a510000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ca007bf7e8c01d92ff2f391bfaf5e407ed45dbbf3315fab042424ea4e0f6b53bd44a0aea837b32ab80ded89fae212cb4f09fed0c4bb6a52579b66dedb728571cfc2f5c0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x0e8d4a510000",
+ "nonce" : "0x03",
+ "r" : "0x07bf7e8c01d92ff2f391bfaf5e407ed45dbbf3315fab042424ea4e0f6b53bd44",
+ "s" : "0xaea837b32ab80ded89fae212cb4f09fed0c4bb6a52579b66dedb728571cfc2f5",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1c",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020100",
+ "extraData" : "0x",
+ "gasLimit" : "0x01dd08c8",
+ "gasUsed" : "0x53a0",
+ "hash" : "b06119272efaa98ee41fb3f0d595e6230e8fcd4ab7a02e638d1befe2f88401f2",
+ "mixHash" : "5a48287926ab638dbc6ea90e35e6416151b76ece2beb9da3dccd565e61a5d70b",
+ "nonce" : "d40871f1407ea839",
+ "number" : "0x05",
+ "parentHash" : "2c926ec837d45e27734c095dc6be14b6544d659e6f9bc8800ab5f15fab2c777d",
+ "receiptTrie" : "aca5e1e81c37dd97f9c056a07db0984e02f4f64c67d5ad21ea6726ceb8372623",
+ "stateRoot" : "3c5c948f8536b3ddad9917dbd784836337e822f6a6b3723dc9bb4eb8d0b8ac92",
+ "timestamp" : "0x5538a744",
+ "transactionsTrie" : "8b7827ee9231cd22f6c98672017fe167c24728ddc39eb8a385ffc9199ffba1a3",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa02c926ec837d45e27734c095dc6be14b6544d659e6f9bc8800ab5f15fab2c777da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a03c5c948f8536b3ddad9917dbd784836337e822f6a6b3723dc9bb4eb8d0b8ac92a08b7827ee9231cd22f6c98672017fe167c24728ddc39eb8a385ffc9199ffba1a3a0aca5e1e81c37dd97f9c056a07db0984e02f4f64c67d5ad21ea6726ceb8372623b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020100058401dd08c88253a0845538a74480a05a48287926ab638dbc6ea90e35e6416151b76ece2beb9da3dccd565e61a5d70b88d40871f1407ea839f86ef86c0486105ef39b2000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ca0cebfbf16d35076e3b647c9bf018309c5542c1ac548e073594717b71e78d2a98fa0f3275f71fa6421627fd3a9415a415547961e7e71a8af094059f704a8b4f7c967c0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x105ef39b2000",
+ "nonce" : "0x04",
+ "r" : "0xcebfbf16d35076e3b647c9bf018309c5542c1ac548e073594717b71e78d2a98f",
+ "s" : "0xf3275f71fa6421627fd3a9415a415547961e7e71a8af094059f704a8b4f7c967",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1c",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020140",
+ "extraData" : "0x",
+ "gasLimit" : "0x01dc919e",
+ "gasUsed" : "0x53a0",
+ "hash" : "2255881f5868fb62580045d3c20fa9e8b21296ff297e506ab9869245144bcd77",
+ "mixHash" : "3c18e70ddef903b5e8448de0d83e6c5c54ee8643a7dd625fd4df008dc43b21cb",
+ "nonce" : "d1208e160f582724",
+ "number" : "0x06",
+ "parentHash" : "b06119272efaa98ee41fb3f0d595e6230e8fcd4ab7a02e638d1befe2f88401f2",
+ "receiptTrie" : "856e042a5a275cf31d42c99656b2dcff6f15a79ea9f371ce9f22fbfeaecc9c0f",
+ "stateRoot" : "2486ac33701d668e7957eb04dbf23f461aa28b92240e77a70a15d7b6b37ce965",
+ "timestamp" : "0x5538a746",
+ "transactionsTrie" : "f472612afdf3692c9eeda50cd3d6851a2733adf36112a28cb99e2b4cfa99d92d",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa0b06119272efaa98ee41fb3f0d595e6230e8fcd4ab7a02e638d1befe2f88401f2a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a02486ac33701d668e7957eb04dbf23f461aa28b92240e77a70a15d7b6b37ce965a0f472612afdf3692c9eeda50cd3d6851a2733adf36112a28cb99e2b4cfa99d92da0856e042a5a275cf31d42c99656b2dcff6f15a79ea9f371ce9f22fbfeaecc9c0fb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020140068401dc919e8253a0845538a74680a03c18e70ddef903b5e8448de0d83e6c5c54ee8643a7dd625fd4df008dc43b21cb88d1208e160f582724f86ef86c058612309ce54000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ca02624358680a51d81ef75a53102beb09636b4105cd033e804cff0d76327b27f40a0ca8f392adcf3a06be3c5945d37e55ef7e85fde4ca2458be88d4fe1801c648663c0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x12309ce54000",
+ "nonce" : "0x05",
+ "r" : "0x2624358680a51d81ef75a53102beb09636b4105cd033e804cff0d76327b27f40",
+ "s" : "0xca8f392adcf3a06be3c5945d37e55ef7e85fde4ca2458be88d4fe1801c648663",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1c",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020180",
+ "extraData" : "0x",
+ "gasLimit" : "0x01dc1a92",
+ "gasUsed" : "0x53a0",
+ "hash" : "df725c32579ec3827634486c4018290eee98005b8483c1baf5387dcbce0226bf",
+ "mixHash" : "26125be6678cbedb74f795b51e17cccd915f13acb3c0b037dac69e061567be13",
+ "nonce" : "a6b4a11e4aad4487",
+ "number" : "0x07",
+ "parentHash" : "2255881f5868fb62580045d3c20fa9e8b21296ff297e506ab9869245144bcd77",
+ "receiptTrie" : "443f1d43455549b75230d9da53fae8caf8f98195e9970ebc9096474b5abf40bd",
+ "stateRoot" : "d0b20f7e8884653113f1c178ee755153ba9d1158672f3eec33b4a71b451d69f0",
+ "timestamp" : "0x5538a748",
+ "transactionsTrie" : "f3ebe6ae4646c903735449022a7c3b8498518571832cf987014a4edc5f1d5401",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa02255881f5868fb62580045d3c20fa9e8b21296ff297e506ab9869245144bcd77a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0d0b20f7e8884653113f1c178ee755153ba9d1158672f3eec33b4a71b451d69f0a0f3ebe6ae4646c903735449022a7c3b8498518571832cf987014a4edc5f1d5401a0443f1d43455549b75230d9da53fae8caf8f98195e9970ebc9096474b5abf40bdb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020180078401dc1a928253a0845538a74880a026125be6678cbedb74f795b51e17cccd915f13acb3c0b037dac69e061567be1388a6b4a11e4aad4487f86ef86c06861402462f6000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ca06e1a48adc711558308c2f16487756ba13441bce5253d7a7ab047c473ca2c25c2a02af800f9913e91834efd7ce695203be5508acf518da104caa2b14f83a4337af9c0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x1402462f6000",
+ "nonce" : "0x06",
+ "r" : "0x6e1a48adc711558308c2f16487756ba13441bce5253d7a7ab047c473ca2c25c2",
+ "s" : "0x2af800f9913e91834efd7ce695203be5508acf518da104caa2b14f83a4337af9",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1c",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x0201c0",
+ "extraData" : "0x",
+ "gasLimit" : "0x01dba3a4",
+ "gasUsed" : "0x53a0",
+ "hash" : "20365a5413783692c1a26c554892bd535a66bf1fcc7eb33abfbbb60a0caf7a7a",
+ "mixHash" : "a4ea3db290e9d9e83de893b41307c29f98ffd2fcfe9b0b9e83e10f1845b93d6e",
+ "nonce" : "94e114e7f19e2a8a",
+ "number" : "0x08",
+ "parentHash" : "df725c32579ec3827634486c4018290eee98005b8483c1baf5387dcbce0226bf",
+ "receiptTrie" : "5a4f5251b73a022176cb231c2ceaf2bfb3276df92d0e43d422187a0a1ed88d6e",
+ "stateRoot" : "1896e62ef9a6391f3454a2dd87774ee1714241e076de7e532ac8f0e7897168e2",
+ "timestamp" : "0x5538a74a",
+ "transactionsTrie" : "e5938f6f4a0f4093ecf7fe8198a79b03fc49b498c446c7a4cb7a3f07d8ba987f",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa0df725c32579ec3827634486c4018290eee98005b8483c1baf5387dcbce0226bfa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a01896e62ef9a6391f3454a2dd87774ee1714241e076de7e532ac8f0e7897168e2a0e5938f6f4a0f4093ecf7fe8198a79b03fc49b498c446c7a4cb7a3f07d8ba987fa05a4f5251b73a022176cb231c2ceaf2bfb3276df92d0e43d422187a0a1ed88d6eb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830201c0088401dba3a48253a0845538a74a80a0a4ea3db290e9d9e83de893b41307c29f98ffd2fcfe9b0b9e83e10f1845b93d6e8894e114e7f19e2a8af86ef86c078615d3ef798000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ca0e0e3ed05f9b3331b71891fd11cc343809d724ee245d6702fc949452fe472e3a0a076af76c7bf4e6734cbc6bf5ed3cd3c42a88fe4f559bdde6c9818db9380deee60c0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x15d3ef798000",
+ "nonce" : "0x07",
+ "r" : "0xe0e3ed05f9b3331b71891fd11cc343809d724ee245d6702fc949452fe472e3a0",
+ "s" : "0x76af76c7bf4e6734cbc6bf5ed3cd3c42a88fe4f559bdde6c9818db9380deee60",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1c",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020200",
+ "extraData" : "0x",
+ "gasLimit" : "0x01db2cd4",
+ "gasUsed" : "0x53a0",
+ "hash" : "b8c31ad209a8624a82602118ed6e47055e53c53d595a6a03b7514b974e720256",
+ "mixHash" : "b84f7cd210a3d545280e7f0f2bdcbc1dafa8ac3bde7c4adce3e95280d20395ee",
+ "nonce" : "d809aba0e4cf489c",
+ "number" : "0x09",
+ "parentHash" : "20365a5413783692c1a26c554892bd535a66bf1fcc7eb33abfbbb60a0caf7a7a",
+ "receiptTrie" : "07d008287c1837442e9d2f47512ed99a06d06150a3382af55715a6f52308aa5d",
+ "stateRoot" : "7c2f16eee704574ad35e48564e95da67d1b5c7f4c0c1ae7f30a3876dec8da945",
+ "timestamp" : "0x5538a74f",
+ "transactionsTrie" : "c5fd0f34b75eabfc459b2a187bab5858c52b50442caf97a60fb79d922a9e7387",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa020365a5413783692c1a26c554892bd535a66bf1fcc7eb33abfbbb60a0caf7a7aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07c2f16eee704574ad35e48564e95da67d1b5c7f4c0c1ae7f30a3876dec8da945a0c5fd0f34b75eabfc459b2a187bab5858c52b50442caf97a60fb79d922a9e7387a007d008287c1837442e9d2f47512ed99a06d06150a3382af55715a6f52308aa5db901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020200098401db2cd48253a0845538a74f80a0b84f7cd210a3d545280e7f0f2bdcbc1dafa8ac3bde7c4adce3e95280d20395ee88d809aba0e4cf489cf86ef86c088617a598c3a000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ca0269a937686c36e5ee49f8f5004a67222675bd6a2402fa1e8ea1cfed864c8324ba0a9257937f6d553cb5ba879d7be7dd157e02f4d2bfbf68b18995f4eb550a8bc75c0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x17a598c3a000",
+ "nonce" : "0x08",
+ "r" : "0x269a937686c36e5ee49f8f5004a67222675bd6a2402fa1e8ea1cfed864c8324b",
+ "s" : "0xa9257937f6d553cb5ba879d7be7dd157e02f4d2bfbf68b18995f4eb550a8bc75",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1c",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020240",
+ "extraData" : "0x",
+ "gasLimit" : "0x01dab621",
+ "gasUsed" : "0x53a0",
+ "hash" : "2e7c26bdf04d8f0915849cbaf00690e3c95bcfac2cae83dd46ca33b18d9e062a",
+ "mixHash" : "18a527d5b26bef58d1ea8674e1ed472d0724caf7e03009e80cb9b2040db4f250",
+ "nonce" : "428f2155fa715212",
+ "number" : "0x0a",
+ "parentHash" : "b8c31ad209a8624a82602118ed6e47055e53c53d595a6a03b7514b974e720256",
+ "receiptTrie" : "cad92e1582f57725638bb0ea0bc584af5248d4381e1312fbd72a3b07f51756fd",
+ "stateRoot" : "3319cd884d889029a50f134caa28adbe8b700c1259f0a42226e466da8ab4832b",
+ "timestamp" : "0x5538a752",
+ "transactionsTrie" : "cfc2916b33aaaa6a7830094c0263aef4fe4a1454cf87b5100f2de19c41c13131",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa0b8c31ad209a8624a82602118ed6e47055e53c53d595a6a03b7514b974e720256a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a03319cd884d889029a50f134caa28adbe8b700c1259f0a42226e466da8ab4832ba0cfc2916b33aaaa6a7830094c0263aef4fe4a1454cf87b5100f2de19c41c13131a0cad92e1582f57725638bb0ea0bc584af5248d4381e1312fbd72a3b07f51756fdb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202400a8401dab6218253a0845538a75280a018a527d5b26bef58d1ea8674e1ed472d0724caf7e03009e80cb9b2040db4f25088428f2155fa715212f86ef86c09861977420dc000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ba005c2b115902c0c82ccbbaf2d141c67ce6dc9bc0d9cbdcfe97128613bb01cff71a02068387a52f95bc64939881fd3b5f578a5ca3f00883c7688840ad24c6fa13a2ec0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x1977420dc000",
+ "nonce" : "0x09",
+ "r" : "0x05c2b115902c0c82ccbbaf2d141c67ce6dc9bc0d9cbdcfe97128613bb01cff71",
+ "s" : "0x2068387a52f95bc64939881fd3b5f578a5ca3f00883c7688840ad24c6fa13a2e",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1b",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020280",
+ "extraData" : "0x",
+ "gasLimit" : "0x01da3f8c",
+ "gasUsed" : "0x53a0",
+ "hash" : "baecbd6174d5424b9cc52e5134b6be038c907e816e66b508f00b4d3f45973313",
+ "mixHash" : "3f8410374bc0259d8467c33d64f1ea8ed42e2e8c8f5c71f8c03c758d0cafcc02",
+ "nonce" : "02de431f33ebe736",
+ "number" : "0x0b",
+ "parentHash" : "2e7c26bdf04d8f0915849cbaf00690e3c95bcfac2cae83dd46ca33b18d9e062a",
+ "receiptTrie" : "421847b09c6ad8a62011ebc76b354731da29a1d1eaf6265b10b9cdbb3076f9f6",
+ "stateRoot" : "cb70b1cf4ee57e12acb4bb1391f614db040a3965df05468ad257e9a5433bd118",
+ "timestamp" : "0x5538a754",
+ "transactionsTrie" : "d2e3a335491fdc488316d6495717dd1ae0352382597dd456f57ddf236ee43863",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf9026ef901faa02e7c26bdf04d8f0915849cbaf00690e3c95bcfac2cae83dd46ca33b18d9e062aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0cb70b1cf4ee57e12acb4bb1391f614db040a3965df05468ad257e9a5433bd118a0d2e3a335491fdc488316d6495717dd1ae0352382597dd456f57ddf236ee43863a0421847b09c6ad8a62011ebc76b354731da29a1d1eaf6265b10b9cdbb3076f9f6b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202800b8401da3f8c8253a0845538a75480a03f8410374bc0259d8467c33d64f1ea8ed42e2e8c8f5c71f8c03c758d0cafcc028802de431f33ebe736f86ef86c0a861b48eb57e000830cf85094095e7baea6a6c7c4c2dfeb977efac326af552d870a86ffffffffffff1ba0a5ba32bc26106331be807c37908128f8bc3845aa68ea5f34790b052619eb05b7a0e86ff4928c3d40dcb2261965c851a87d772fccd82ed51528a3f46c1b772df37bc0",
+ "transactions" : [
+ {
+ "data" : "0xffffffffffff",
+ "gasLimit" : "0x0cf850",
+ "gasPrice" : "0x1b48eb57e000",
+ "nonce" : "0x0a",
+ "r" : "0xa5ba32bc26106331be807c37908128f8bc3845aa68ea5f34790b052619eb05b7",
+ "s" : "0xe86ff4928c3d40dcb2261965c851a87d772fccd82ed51528a3f46c1b772df37b",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "v" : "0x1b",
+ "value" : "0x0a"
+ }
+ ],
+ "uncleHeaders" : [
+ ]
+ }
+ ],
+ "genesisBlockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020000",
+ "extraData" : "0x42",
+ "gasLimit" : "0x01df5e70",
+ "gasUsed" : "0x00",
+ "hash" : "5624076d15018935ba2c0e94550c27d944acc5c9cd91a0fac44d1bf2e8d8c1af",
+ "mixHash" : "4c7b8b6f1f6583310fb562de164dc923378f3f247c3a6363c41e8cc2ba2c8d20",
+ "nonce" : "7368690a112011b6",
+ "number" : "0x00",
+ "parentHash" : "0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "71f7c8fb1ecac2ee69cd5aa02564d358fc641845977fa4e30c65be195167bb45",
+ "timestamp" : "0x54c98c81",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "genesisRLP" : "0xf901fdf901f8a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a071f7c8fb1ecac2ee69cd5aa02564d358fc641845977fa4e30c65be195167bb45a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808401df5e70808454c98c8142a04c7b8b6f1f6583310fb562de164dc923378f3f247c3a6363c41e8cc2ba2c8d20887368690a112011b6c0c0",
+ "postState" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x6e",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "8888f1f195afa192cfee860698584c030f4c9db1" : {
+ "balance" : "0x01265834588b4a0000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x1d14a0215cf8145fe6a7ff92",
+ "code" : "0x",
+ "nonce" : "0x0b",
+ "storage" : {
+ }
+ },
+ "aaaf5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x02540be400",
+ "code" : "0x60003551",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "pre" : {
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x1d14a0219e54822428000000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "aaaf5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x02540be400",
+ "code" : "0x60003551",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ }
+ },
+ "notxs" : {
+ "blocks" : [
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020000",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "47f0a9d868b01909c0c2d25af3dd60703c19cf5dc271483d6b0e0a9e6640c4a6",
+ "mixHash" : "a8506386aa6e3712ef3d4781397cba81250d7ef975033d955417fbe35f4bea48",
+ "nonce" : "75f3ff607745085a",
+ "number" : "0x01",
+ "parentHash" : "809e8a50a7e0fc3cd185d412b796cb339c741776c3bed55aee42a24787f8d235",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "130c47b5b9bb100c3ad8d4923b7fb05eb736959817ba0e3bd3a8a6f1a5294622",
+ "timestamp" : "0x5538a75e",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0809e8a50a7e0fc3cd185d412b796cb339c741776c3bed55aee42a24787f8d235a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0130c47b5b9bb100c3ad8d4923b7fb05eb736959817ba0e3bd3a8a6f1a5294622a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd880845538a75e80a0a8506386aa6e3712ef3d4781397cba81250d7ef975033d955417fbe35f4bea488875f3ff607745085ac0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020040",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "6dfa76680a522625675d5747906e1ea230c0aebfaba11a1e22030709b648d89b",
+ "mixHash" : "2f416c8f18432865bde341066c838561d36c7bc3ae729dd39b1dc072bc1545a7",
+ "nonce" : "68b116f1bae82db7",
+ "number" : "0x02",
+ "parentHash" : "47f0a9d868b01909c0c2d25af3dd60703c19cf5dc271483d6b0e0a9e6640c4a6",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "b9985a0b5c09bb476161bcd55aa5fddf7601e4791b19b9b192b99bd74384edeb",
+ "timestamp" : "0x5538a760",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a047f0a9d868b01909c0c2d25af3dd60703c19cf5dc271483d6b0e0a9e6640c4a6a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0b9985a0b5c09bb476161bcd55aa5fddf7601e4791b19b9b192b99bd74384edeba056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302004002832fefd880845538a76080a02f416c8f18432865bde341066c838561d36c7bc3ae729dd39b1dc072bc1545a78868b116f1bae82db7c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020080",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "73914762ddc26c131d382895da467aa2cd383f80f8b9b69264e4d5b582c35d64",
+ "mixHash" : "ac38df1a1a33b671076d1b2dc2623e0513c5efbafce0688514c1cccf55e1ae4e",
+ "nonce" : "895683ed98c195b3",
+ "number" : "0x03",
+ "parentHash" : "6dfa76680a522625675d5747906e1ea230c0aebfaba11a1e22030709b648d89b",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "cee58bb47d7cf3384bca134f9a7a5bdc7a04109857b787f2bde15367b7c32670",
+ "timestamp" : "0x5538a763",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a06dfa76680a522625675d5747906e1ea230c0aebfaba11a1e22030709b648d89ba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0cee58bb47d7cf3384bca134f9a7a5bdc7a04109857b787f2bde15367b7c32670a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefd880845538a76380a0ac38df1a1a33b671076d1b2dc2623e0513c5efbafce0688514c1cccf55e1ae4e88895683ed98c195b3c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x0200c0",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "31ef42e5ddd536c19cb1859364b55bcb59ee3f9265d8d4b47be037aa60947fc4",
+ "mixHash" : "197c2a2d11cd3eeaf461b7b779a6537326bac64ed7619bdce2c7ae51af40b3c1",
+ "nonce" : "36df57e1007a5b55",
+ "number" : "0x04",
+ "parentHash" : "73914762ddc26c131d382895da467aa2cd383f80f8b9b69264e4d5b582c35d64",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "645a16d67c4815332138035b0bea20efe4a4c87d8c99115879139d60de145a87",
+ "timestamp" : "0x5538a764",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a073914762ddc26c131d382895da467aa2cd383f80f8b9b69264e4d5b582c35d64a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0645a16d67c4815332138035b0bea20efe4a4c87d8c99115879139d60de145a87a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200c004832fefd880845538a76480a0197c2a2d11cd3eeaf461b7b779a6537326bac64ed7619bdce2c7ae51af40b3c18836df57e1007a5b55c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020100",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "dca7822a4eae8625922acb9e2eb02a3e4ce5643eca9402b7e1824736c4c03fd8",
+ "mixHash" : "bf2e3ec9b0791f8dfc4a0d62fa64b8c276734f48e4fe2fc4f3e29a807dcb864e",
+ "nonce" : "825a137021265359",
+ "number" : "0x05",
+ "parentHash" : "31ef42e5ddd536c19cb1859364b55bcb59ee3f9265d8d4b47be037aa60947fc4",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "7b7a6770afe4e80b3e7a4cac3cfd36bb530144a05b83ecbe1e6ac10950b6bd2c",
+ "timestamp" : "0x5538a76a",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a031ef42e5ddd536c19cb1859364b55bcb59ee3f9265d8d4b47be037aa60947fc4a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07b7a6770afe4e80b3e7a4cac3cfd36bb530144a05b83ecbe1e6ac10950b6bd2ca056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302010005832fefd880845538a76a80a0bf2e3ec9b0791f8dfc4a0d62fa64b8c276734f48e4fe2fc4f3e29a807dcb864e88825a137021265359c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020140",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "439b94861d88270d9687a895cd316500f4cdac79ee150eaeaee3dec88b5abc71",
+ "mixHash" : "9a96834acbbb7885fc542b15a078ca43a5faf1ddab0f44e19cf0dd7afd7a6bad",
+ "nonce" : "57f0a770bc1ea5c1",
+ "number" : "0x06",
+ "parentHash" : "dca7822a4eae8625922acb9e2eb02a3e4ce5643eca9402b7e1824736c4c03fd8",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "e2c524c66ec1292244cda79b022c197f1ff6900c1fa661a87745be81d48e75b2",
+ "timestamp" : "0x5538a76c",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0dca7822a4eae8625922acb9e2eb02a3e4ce5643eca9402b7e1824736c4c03fd8a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0e2c524c66ec1292244cda79b022c197f1ff6900c1fa661a87745be81d48e75b2a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302014006832fefd880845538a76c80a09a96834acbbb7885fc542b15a078ca43a5faf1ddab0f44e19cf0dd7afd7a6bad8857f0a770bc1ea5c1c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020180",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "67db645661956f5fc790a168c28c2da3703f569b123d9dc6f8d58927c67ddbbf",
+ "mixHash" : "f6cc086925349161bf060c66e1424fd5b9db854c6b5a8e6fc05fe53db30f5627",
+ "nonce" : "bb41180332dd2950",
+ "number" : "0x07",
+ "parentHash" : "439b94861d88270d9687a895cd316500f4cdac79ee150eaeaee3dec88b5abc71",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "06ff5e5441c599e3089e358179fd5e62c3f942e502c7e1aade26a6623467fd3f",
+ "timestamp" : "0x5538a76e",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0439b94861d88270d9687a895cd316500f4cdac79ee150eaeaee3dec88b5abc71a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a006ff5e5441c599e3089e358179fd5e62c3f942e502c7e1aade26a6623467fd3fa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302018007832fefd880845538a76e80a0f6cc086925349161bf060c66e1424fd5b9db854c6b5a8e6fc05fe53db30f562788bb41180332dd2950c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x0201c0",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "f3a1767385f423afc6ff458a02c140c5b0be5d05a34adea240c63048232f2c1a",
+ "mixHash" : "7f1aef45e7ce0d0498f56a2b2951e7588d654d8a916084dc027a438690667e35",
+ "nonce" : "32dd98068f6d8997",
+ "number" : "0x08",
+ "parentHash" : "67db645661956f5fc790a168c28c2da3703f569b123d9dc6f8d58927c67ddbbf",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "219f3e12a3e49c0f6aec2379231377ca45fad1badc4553d18c5febe5bc57b21e",
+ "timestamp" : "0x5538a770",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a067db645661956f5fc790a168c28c2da3703f569b123d9dc6f8d58927c67ddbbfa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0219f3e12a3e49c0f6aec2379231377ca45fad1badc4553d18c5febe5bc57b21ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830201c008832fefd880845538a77080a07f1aef45e7ce0d0498f56a2b2951e7588d654d8a916084dc027a438690667e358832dd98068f6d8997c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020200",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "1a6ea3a2e402119d190c16c0aae8fefd2babbfb65a6e6aafc6312c7d9facef64",
+ "mixHash" : "209b0a3086ac40f86db9cca7f3ccd844a32cdf6c86d3ec3e218ee004300cdf62",
+ "nonce" : "aae4dac4bc5bbe21",
+ "number" : "0x09",
+ "parentHash" : "f3a1767385f423afc6ff458a02c140c5b0be5d05a34adea240c63048232f2c1a",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "97ada6c76d560c6774d62acd1be339a0c84c65aa85167c4ea120819ebb20e267",
+ "timestamp" : "0x5538a775",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0f3a1767385f423afc6ff458a02c140c5b0be5d05a34adea240c63048232f2c1aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a097ada6c76d560c6774d62acd1be339a0c84c65aa85167c4ea120819ebb20e267a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302020009832fefd880845538a77580a0209b0a3086ac40f86db9cca7f3ccd844a32cdf6c86d3ec3e218ee004300cdf6288aae4dac4bc5bbe21c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x0201c0",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "0b11ad642f3ce723c0edd0a2f36efffa9e803caebcaca65c42eee1751e951972",
+ "mixHash" : "38adaf514f9f402280fc4cc64769133513e47d8ca734d22a87300c4b5f415a62",
+ "nonce" : "d633e198edd0de68",
+ "number" : "0x0a",
+ "parentHash" : "1a6ea3a2e402119d190c16c0aae8fefd2babbfb65a6e6aafc6312c7d9facef64",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "c662553363b8624b50cd8f83a2a28ec38b6cf0c029db61cb21e06d7df87fb256",
+ "timestamp" : "0x5538a77f",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a01a6ea3a2e402119d190c16c0aae8fefd2babbfb65a6e6aafc6312c7d9facef64a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0c662553363b8624b50cd8f83a2a28ec38b6cf0c029db61cb21e06d7df87fb256a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830201c00a832fefd880845538a77f80a038adaf514f9f402280fc4cc64769133513e47d8ca734d22a87300c4b5f415a6288d633e198edd0de68c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020180",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "4767e681edb3841ab50c8e93e392d7419770f8d9a2a433519aa9bd60e0882a02",
+ "mixHash" : "58de3f756f792acf9aa934f8fb0e200f69e1ad451c3bf57c0ff8c9d8abc2194b",
+ "nonce" : "9103a8edaf61663e",
+ "number" : "0x0b",
+ "parentHash" : "0b11ad642f3ce723c0edd0a2f36efffa9e803caebcaca65c42eee1751e951972",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "abf442c91bd7a4c33475c25b1122d703380b584d896270b871094d38a0d0aef6",
+ "timestamp" : "0x5538a78a",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a00b11ad642f3ce723c0edd0a2f36efffa9e803caebcaca65c42eee1751e951972a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0abf442c91bd7a4c33475c25b1122d703380b584d896270b871094d38a0d0aef6a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830201800b832fefd880845538a78a80a058de3f756f792acf9aa934f8fb0e200f69e1ad451c3bf57c0ff8c9d8abc2194b889103a8edaf61663ec0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x0201c0",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "7fb093722f172dea14ba6b80683a9cf1116a7958a4f2abea73cb3e9622d4f38b",
+ "mixHash" : "6b9dfb5de1f650235fe12708d42e1589c5953ce5b449098df20ececbc55a8f44",
+ "nonce" : "f4f4f658dd9aea35",
+ "number" : "0x0c",
+ "parentHash" : "4767e681edb3841ab50c8e93e392d7419770f8d9a2a433519aa9bd60e0882a02",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "1207d611eb2e41cb0b88450b2147dda91faae935bd3d5161aabae3f53b161dfb",
+ "timestamp" : "0x5538a78b",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a04767e681edb3841ab50c8e93e392d7419770f8d9a2a433519aa9bd60e0882a02a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a01207d611eb2e41cb0b88450b2147dda91faae935bd3d5161aabae3f53b161dfba056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830201c00c832fefd880845538a78b80a06b9dfb5de1f650235fe12708d42e1589c5953ce5b449098df20ececbc55a8f4488f4f4f658dd9aea35c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020200",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "1d46b465a614410a4f7f2b04b3762ae3be33b990546af0432bef5f6585cec0fd",
+ "mixHash" : "756bff490992f38e9f712e01f6ad5120dddba9f35b744cec2b891560cf932433",
+ "nonce" : "f74118b26e25b5f6",
+ "number" : "0x0d",
+ "parentHash" : "7fb093722f172dea14ba6b80683a9cf1116a7958a4f2abea73cb3e9622d4f38b",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "fe5e595a5535975ba51c6b05155e7dce4faa3192db5332a525d016c8984b8df4",
+ "timestamp" : "0x5538a78d",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a07fb093722f172dea14ba6b80683a9cf1116a7958a4f2abea73cb3e9622d4f38ba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0fe5e595a5535975ba51c6b05155e7dce4faa3192db5332a525d016c8984b8df4a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202000d832fefd880845538a78d80a0756bff490992f38e9f712e01f6ad5120dddba9f35b744cec2b891560cf93243388f74118b26e25b5f6c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020240",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "7ea34281d582e1ea8b08d59ac6350129dade7f11cdf6202a6cb66aa4869620f1",
+ "mixHash" : "6f004ab13b2dd36f53bc550dfe8b3a9aac98364923219cba936aade103484895",
+ "nonce" : "d358efdffc300893",
+ "number" : "0x0e",
+ "parentHash" : "1d46b465a614410a4f7f2b04b3762ae3be33b990546af0432bef5f6585cec0fd",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "32ace66cc90d56560941dff5c26586ce226810bab31c3185eb6f50c073f9ba41",
+ "timestamp" : "0x5538a792",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a01d46b465a614410a4f7f2b04b3762ae3be33b990546af0432bef5f6585cec0fda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a032ace66cc90d56560941dff5c26586ce226810bab31c3185eb6f50c073f9ba41a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202400e832fefd880845538a79280a06f004ab13b2dd36f53bc550dfe8b3a9aac98364923219cba936aade10348489588d358efdffc300893c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020280",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "640499c45a2be65f1c6a0a678b19c48b9bce9f47c30fd812079df7fbe75c3257",
+ "mixHash" : "c805e7e428abd2a55322177c58c57c4fb2334efe74114fafdcfce5a2e9e0948f",
+ "nonce" : "90cf7bf2090c62c5",
+ "number" : "0x0f",
+ "parentHash" : "7ea34281d582e1ea8b08d59ac6350129dade7f11cdf6202a6cb66aa4869620f1",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "d547f0f911ea72363dddd32217912dde108b0f7e92d806043bc51b5c7174a1c6",
+ "timestamp" : "0x5538a794",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a07ea34281d582e1ea8b08d59ac6350129dade7f11cdf6202a6cb66aa4869620f1a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0d547f0f911ea72363dddd32217912dde108b0f7e92d806043bc51b5c7174a1c6a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202800f832fefd880845538a79480a0c805e7e428abd2a55322177c58c57c4fb2334efe74114fafdcfce5a2e9e0948f8890cf7bf2090c62c5c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x0202c0",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "615d6cc2ab3f03d8fbd8a50d40c4ae118cc64feb2317646570bc9cdaf258f0bd",
+ "mixHash" : "79ad74c2702a1b73656cbe29a8f4ac99391c331b35cf3412ca936978928a493d",
+ "nonce" : "df718cf2e3b5ba26",
+ "number" : "0x10",
+ "parentHash" : "640499c45a2be65f1c6a0a678b19c48b9bce9f47c30fd812079df7fbe75c3257",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "c81230a13d15be7f4a00eb49ffe8aa52d1efe04fc2758e8c7f1798590cc9ffe0",
+ "timestamp" : "0x5538a796",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0640499c45a2be65f1c6a0a678b19c48b9bce9f47c30fd812079df7fbe75c3257a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0c81230a13d15be7f4a00eb49ffe8aa52d1efe04fc2758e8c7f1798590cc9ffe0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202c010832fefd880845538a79680a079ad74c2702a1b73656cbe29a8f4ac99391c331b35cf3412ca936978928a493d88df718cf2e3b5ba26c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020300",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "f94ecf9e4e9e851bad6df3f905620c14c9210ce7ad9167d480125f6d85018d69",
+ "mixHash" : "db4e0abd687098a3096769c917bcd9dfb60e308049ca023c0c1c7c3c450df344",
+ "nonce" : "f35750da43ad1cf3",
+ "number" : "0x11",
+ "parentHash" : "615d6cc2ab3f03d8fbd8a50d40c4ae118cc64feb2317646570bc9cdaf258f0bd",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "7fa67b91600c2040088efb008c1b423276d0d3e953314820993a16c6875a1dc4",
+ "timestamp" : "0x5538a799",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0615d6cc2ab3f03d8fbd8a50d40c4ae118cc64feb2317646570bc9cdaf258f0bda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07fa67b91600c2040088efb008c1b423276d0d3e953314820993a16c6875a1dc4a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302030011832fefd880845538a79980a0db4e0abd687098a3096769c917bcd9dfb60e308049ca023c0c1c7c3c450df34488f35750da43ad1cf3c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020340",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "fcbaa816dd598ac6706d3661b933a0044e9b382c70da8cf3e770f32fb61ea49b",
+ "mixHash" : "0621dabe234b1ccf25b24f79eebd21bb033d8495f2451479d1459cc6a695afde",
+ "nonce" : "c0a61dd16b636bb1",
+ "number" : "0x12",
+ "parentHash" : "f94ecf9e4e9e851bad6df3f905620c14c9210ce7ad9167d480125f6d85018d69",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "0ccb85e9350cf9f121734d4a68570b9b46325710b0b82a7dd3d06fb195ec8291",
+ "timestamp" : "0x5538a7a0",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0f94ecf9e4e9e851bad6df3f905620c14c9210ce7ad9167d480125f6d85018d69a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a00ccb85e9350cf9f121734d4a68570b9b46325710b0b82a7dd3d06fb195ec8291a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302034012832fefd880845538a7a080a00621dabe234b1ccf25b24f79eebd21bb033d8495f2451479d1459cc6a695afde88c0a61dd16b636bb1c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020380",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "2a69085c1ccc65213aa304b90d9c5a48d4eebacdac075e50036d82061a6f4beb",
+ "mixHash" : "673a53a720c3db8368729dbb783954f2d606060a017a4c140397a99db38a780d",
+ "nonce" : "4609d1ec58f14ad3",
+ "number" : "0x13",
+ "parentHash" : "fcbaa816dd598ac6706d3661b933a0044e9b382c70da8cf3e770f32fb61ea49b",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "f482bbbce789bf4bd43e41ec65a7bcb8ad851a0775e69a47aba3c9a05f9d999e",
+ "timestamp" : "0x5538a7a3",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0fcbaa816dd598ac6706d3661b933a0044e9b382c70da8cf3e770f32fb61ea49ba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0f482bbbce789bf4bd43e41ec65a7bcb8ad851a0775e69a47aba3c9a05f9d999ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302038013832fefd880845538a7a380a0673a53a720c3db8368729dbb783954f2d606060a017a4c140397a99db38a780d884609d1ec58f14ad3c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x0203c0",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "fedd6170b15fa600a745ddab1efb1bf8296ee6ee2c27617f2a6ce8175b5e0a2e",
+ "mixHash" : "5db57f0c33e3731c35925f45b78ef729c5544704e7b8fec2671fcfdf23be0dab",
+ "nonce" : "0103bc59565a7f6f",
+ "number" : "0x14",
+ "parentHash" : "2a69085c1ccc65213aa304b90d9c5a48d4eebacdac075e50036d82061a6f4beb",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "d7073aa8551fa3daa4aa5472bb3e43f5bb5e573b201e808036ad7e495f55c42e",
+ "timestamp" : "0x5538a7a6",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a02a69085c1ccc65213aa304b90d9c5a48d4eebacdac075e50036d82061a6f4beba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0d7073aa8551fa3daa4aa5472bb3e43f5bb5e573b201e808036ad7e495f55c42ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830203c014832fefd880845538a7a680a05db57f0c33e3731c35925f45b78ef729c5544704e7b8fec2671fcfdf23be0dab880103bc59565a7f6fc0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020400",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "e1df0804176b100d8d2d7b79bf4d8c5eace23dc3c58ae51a9baf3bc910ecbab0",
+ "mixHash" : "4f8a74b89229befa02b29a095a4bd9876bfa00882ed8a303c2afbc6f70946f78",
+ "nonce" : "8d14fb9671dbba8c",
+ "number" : "0x15",
+ "parentHash" : "fedd6170b15fa600a745ddab1efb1bf8296ee6ee2c27617f2a6ce8175b5e0a2e",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "699fcb8a94bcd563c0d37c378604beb18d3f159852d22812eecc9dde6aa317a8",
+ "timestamp" : "0x5538a7a7",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0fedd6170b15fa600a745ddab1efb1bf8296ee6ee2c27617f2a6ce8175b5e0a2ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0699fcb8a94bcd563c0d37c378604beb18d3f159852d22812eecc9dde6aa317a8a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302040015832fefd880845538a7a780a04f8a74b89229befa02b29a095a4bd9876bfa00882ed8a303c2afbc6f70946f78888d14fb9671dbba8cc0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020440",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "0141df916e5490489a63d35823e871ffc7bf772019806469358d3a2ad37adead",
+ "mixHash" : "67cb4dfd151f50b7f76fdcc0ab78df07856ec2d983fafd6fd8dba1fc363a08d4",
+ "nonce" : "5d4aaae4b194b449",
+ "number" : "0x16",
+ "parentHash" : "e1df0804176b100d8d2d7b79bf4d8c5eace23dc3c58ae51a9baf3bc910ecbab0",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "078d4f7edbb3ac3bad24c2fbe011dcdedd500e871b32793abbe238e3992102b3",
+ "timestamp" : "0x5538a7ab",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0e1df0804176b100d8d2d7b79bf4d8c5eace23dc3c58ae51a9baf3bc910ecbab0a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0078d4f7edbb3ac3bad24c2fbe011dcdedd500e871b32793abbe238e3992102b3a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302044016832fefd880845538a7ab80a067cb4dfd151f50b7f76fdcc0ab78df07856ec2d983fafd6fd8dba1fc363a08d4885d4aaae4b194b449c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020480",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "e8f4cecbed7e150cd7e064a23dee008d11e207c8a55b9b7bda772f1ebf38bbd3",
+ "mixHash" : "d5b014685da1ed66a9c8938241d307d0c384c2e3647dc78d4a0eeaa2a9cd6a1d",
+ "nonce" : "4bc22925e990602f",
+ "number" : "0x17",
+ "parentHash" : "0141df916e5490489a63d35823e871ffc7bf772019806469358d3a2ad37adead",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "6e8655126c1e554d957388f1ae012cc57b779013e3320e735ea0fa581ffbfe48",
+ "timestamp" : "0x5538a7af",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a00141df916e5490489a63d35823e871ffc7bf772019806469358d3a2ad37adeada01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a06e8655126c1e554d957388f1ae012cc57b779013e3320e735ea0fa581ffbfe48a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302048017832fefd880845538a7af80a0d5b014685da1ed66a9c8938241d307d0c384c2e3647dc78d4a0eeaa2a9cd6a1d884bc22925e990602fc0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ },
+ {
+ "blockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x0204c0",
+ "extraData" : "0x",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "0634fd0c6daf55ca566985632ce27334438443d0bed9e62d659154c8a22d0d63",
+ "mixHash" : "a871df6c85cf1faedda57c3fbea207647392e0fef4e8cad8dff58896d4f26d01",
+ "nonce" : "fcbd9ba2bfd3b6a9",
+ "number" : "0x18",
+ "parentHash" : "e8f4cecbed7e150cd7e064a23dee008d11e207c8a55b9b7bda772f1ebf38bbd3",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "e5c505f392f44a967844bb14688871f78262039473fac0c8cf8e8664d5bbdbab",
+ "timestamp" : "0x5538a7b2",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "rlp" : "0xf901fcf901f7a0e8f4cecbed7e150cd7e064a23dee008d11e207c8a55b9b7bda772f1ebf38bbd3a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0e5c505f392f44a967844bb14688871f78262039473fac0c8cf8e8664d5bbdbaba056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830204c018832fefd880845538a7b280a0a871df6c85cf1faedda57c3fbea207647392e0fef4e8cad8dff58896d4f26d0188fcbd9ba2bfd3b6a9c0c0",
+ "transactions" : [
+ ],
+ "uncleHeaders" : [
+ ]
+ }
+ ],
+ "genesisBlockHeader" : {
+ "bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
+ "difficulty" : "0x020000",
+ "extraData" : "0x42",
+ "gasLimit" : "0x2fefd8",
+ "gasUsed" : "0x00",
+ "hash" : "809e8a50a7e0fc3cd185d412b796cb339c741776c3bed55aee42a24787f8d235",
+ "mixHash" : "b1e1c5aa40a3ee30993a71f1883df5d8f067dced04d5977d7e46e8636c75ae25",
+ "nonce" : "7ae211e1f3bbb60b",
+ "number" : "0x00",
+ "parentHash" : "0000000000000000000000000000000000000000000000000000000000000000",
+ "receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "stateRoot" : "cafd881ab193703b83816c49ff6c2bf6ba6f464a1be560c42106128c8dbc35e7",
+ "timestamp" : "0x54c98c81",
+ "transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
+ },
+ "genesisRLP" : "0xf901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0cafd881ab193703b83816c49ff6c2bf6ba6f464a1be560c42106128c8dbc35e7a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8808454c98c8142a0b1e1c5aa40a3ee30993a71f1883df5d8f067dced04d5977d7e46e8636c75ae25887ae211e1f3bbb60bc0c0",
+ "postState" : {
+ "8888f1f195afa192cfee860698584c030f4c9db1" : {
+ "balance" : "0x01f399b1438a100000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x02540be400",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "pre" : {
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x02540be400",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/files/BlockTests/bcRPC_API_Test.json b/tests/files/BlockTests/bcRPC_API_Test.json
index 0f57f8be5..93bba9da2 100644
--- a/tests/files/BlockTests/bcRPC_API_Test.json
+++ b/tests/files/BlockTests/bcRPC_API_Test.json
@@ -9,28 +9,28 @@
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x078674",
- "hash" : "378b2e7e4c1a4104efbe8aeb66bc93c6691448b4414de942c8d9a8ed5073ea45",
- "mixHash" : "e12805eed5abc4600d45d6ded1fe45489b105af4125d4d5c2ef0dbd1ceae93a0",
- "nonce" : "6718aa2c7ff82e09",
+ "hash" : "d6b7b6914c86c0df80247bbf01fd1bf50ae7bbe1c7127bd2a0f368a0198b00c1",
+ "mixHash" : "8386f50ae9e8204affa9531d2fca8748161706ea8d60804677c7f4112269f60f",
+ "nonce" : "518442c32ae7be87",
"number" : "0x01",
- "parentHash" : "54f4cd6188cf2fa089d60c64fe5498d16c35f56ef063a5c0d1d640d51e8a5d1d",
+ "parentHash" : "1fdd5d3e8797e20cb596be04f2378f74a8ca756e85336efa97e6db5bdc2aaa01",
"receiptTrie" : "a2bd925fcbb8b1ec39612553b17c9265ab198f5af25cc564655114bf5a28c75d",
"stateRoot" : "10f907738cb593a1838c7eb1b3a67b50b296862208937e59a438172396e7d125",
- "timestamp" : "0x5534c545",
- "transactionsTrie" : "01629ba27d78ecaa9bd9784afdf6287f791cc17f6825f976bee21c2b5c7505de",
+ "timestamp" : "0x553a1aeb",
+ "transactionsTrie" : "74ef6f97d356a232430d7643aa7ce6b39ba1d452a2e680e0f2405147d0479a65",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90967f901faa054f4cd6188cf2fa089d60c64fe5498d16c35f56ef063a5c0d1d640d51e8a5d1da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a010f907738cb593a1838c7eb1b3a67b50b296862208937e59a438172396e7d125a001629ba27d78ecaa9bd9784afdf6287f791cc17f6825f976bee21c2b5c7505dea0a2bd925fcbb8b1ec39612553b17c9265ab198f5af25cc564655114bf5a28c75db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd883078674845534c54580a0e12805eed5abc4600d45d6ded1fe45489b105af4125d4d5c2ef0dbd1ceae93a0886718aa2c7ff82e09f90766f907638001832fefd8800ab907155b5b610705806100106000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063102accc11461012c57806312a7b9141461013a5780631774e6461461014c5780631e26fd331461015d5780631f9030371461016e578063343a875d1461018057806338cc4831146101955780634e7ad367146101bd57806357cb2fc4146101cb57806365538c73146101e057806368895979146101ee57806376bc21d9146102005780639a19a9531461020e5780639dc2c8f51461021f578063a53b1c1e1461022d578063a67808571461023e578063b61c05031461024c578063c2b12a731461025a578063d2282dc51461026b578063e30081a01461027c578063e8beef5b1461028d578063f38b06001461029b578063f5b53e17146102a9578063fd408767146102bb57005b6101346104d6565b60006000f35b61014261039b565b8060005260206000f35b610157600435610326565b60006000f35b6101686004356102c9565b60006000f35b610176610442565b8060005260206000f35b6101886103d3565b8060ff1660005260206000f35b61019d610413565b8073ffffffffffffffffffffffffffffffffffffffff1660005260206000f35b6101c56104c5565b60006000f35b6101d36103b7565b8060000b60005260206000f35b6101e8610454565b60006000f35b6101f6610401565b8060005260206000f35b61020861051f565b60006000f35b6102196004356102e5565b60006000f35b610227610693565b60006000f35b610238600435610342565b60006000f35b610246610484565b60006000f35b610254610493565b60006000f35b61026560043561038d565b60006000f35b610276600435610350565b60006000f35b61028760043561035e565b60006000f35b6102956105b4565b60006000f35b6102a3610547565b60006000f35b6102b16103ef565b8060005260206000f35b6102c3610600565b60006000f35b80600060006101000a81548160ff021916908302179055505b50565b80600060016101000a81548160ff02191690837f01000000000000000000000000000000000000000000000000000000000000009081020402179055505b50565b80600060026101000a81548160ff021916908302179055505b50565b806001600050819055505b50565b806002600050819055505b50565b80600360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b50565b806004600050819055505b50565b6000600060009054906101000a900460ff1690506103b4565b90565b6000600060019054906101000a900460000b90506103d0565b90565b6000600060029054906101000a900460ff1690506103ec565b90565b600060016000505490506103fe565b90565b60006002600050549050610410565b90565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905061043f565b90565b60006004600050549050610451565b90565b7f65c9ac8011e286e89d02a269890f41d67ca2cc597b2c76c7c69321ff492be5806000602a81526020016000a15b565b6000602a81526020016000a05b565b60017f81933b308056e7e85668661dcd102b1f22795b4431f9cf4625794f381c271c6b6000602a81526020016000a25b565b60016000
602a81526020016000a15b565b3373ffffffffffffffffffffffffffffffffffffffff1660017f0e216b62efbb97e751a2ce09f607048751720397ecfb9eef1e48a6644948985b6000602a81526020016000a35b565b3373ffffffffffffffffffffffffffffffffffffffff1660016000602a81526020016000a25b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660017f317b31292193c2a4f561cc40a95ea0d97a2733f14af6d6d59522473e1f3ae65f6000602a81526020016000a45b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660016000602a81526020016000a35b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660017fd5f0a30e4be0c6be577a71eceb7464245a796a7e6a55c0d971837b250de05f4e60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe98152602001602a81526020016000a45b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff16600160007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe98152602001602a81526020016000a35b561ca0468a5ff5fe3de32328b0ce0a943ccdfa4463b8b3e82f593793bb2aeca14cc304a0eec480d7c7a9254598653a7caede5f62082fae9b76e7e61a50b91bbd70ae9b3dc0",
+ "rlp" : "0xf90967f901faa01fdd5d3e8797e20cb596be04f2378f74a8ca756e85336efa97e6db5bdc2aaa01a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a010f907738cb593a1838c7eb1b3a67b50b296862208937e59a438172396e7d125a074ef6f97d356a232430d7643aa7ce6b39ba1d452a2e680e0f2405147d0479a65a0a2bd925fcbb8b1ec39612553b17c9265ab198f5af25cc564655114bf5a28c75db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001832fefd88307867484553a1aeb80a08386f50ae9e8204affa9531d2fca8748161706ea8d60804677c7f4112269f60f88518442c32ae7be87f90766f907638001832fefd8800ab907155b5b610705806100106000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063102accc11461012c57806312a7b9141461013a5780631774e6461461014c5780631e26fd331461015d5780631f9030371461016e578063343a875d1461018057806338cc4831146101955780634e7ad367146101bd57806357cb2fc4146101cb57806365538c73146101e057806368895979146101ee57806376bc21d9146102005780639a19a9531461020e5780639dc2c8f51461021f578063a53b1c1e1461022d578063a67808571461023e578063b61c05031461024c578063c2b12a731461025a578063d2282dc51461026b578063e30081a01461027c578063e8beef5b1461028d578063f38b06001461029b578063f5b53e17146102a9578063fd408767146102bb57005b6101346104d6565b60006000f35b61014261039b565b8060005260206000f35b610157600435610326565b60006000f35b6101686004356102c9565b60006000f35b610176610442565b8060005260206000f35b6101886103d3565b8060ff1660005260206000f35b61019d610413565b8073ffffffffffffffffffffffffffffffffffffffff1660005260206000f35b6101c56104c5565b60006000f35b6101d36103b7565b8060000b60005260206000f35b6101e8610454565b60006000f35b6101f6610401565b8060005260206000f35b61020861051f565b60006000f35b6102196004356102e5565b60006000f35b610227610693565b60006000f35b610238600435610342565b60006000f35b610246610484565b60006000f35b610254610493565b60006000f35b61026560043561038d565b60006000f35b610276600435610350565b60006000f35b61028760043561035e565b60006000f35b6102956105b4565b60006000f35b6102a3610547565b60006000f35b6102b16103ef565b8060005260206000f35b6102c3610600565b60006000f35b80600060006101000a81548160ff021916908302179055505b50565b80600060016101000a81548160ff02191690837f01000000000000000000000000000000000000000000000000000000000000009081020402179055505b50565b80600060026101000a81548160ff021916908302179055505b50565b806001600050819055505b50565b806002600050819055505b50565b80600360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b50565b806004600050819055505b50565b6000600060009054906101000a900460ff1690506103b4565b90565b6000600060019054906101000a900460000b90506103d0565b90565b6000600060029054906101000a900460ff1690506103ec565b90565b600060016000505490506103fe565b90565b60006002600050549050610410565b90565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905061043f565b90565b60006004600050549050610451565b90565b7f65c9ac8011e286e89d02a269890f41d67ca2cc597b2c76c7c69321ff492be5806000602a81526020016000a15b565b6000602a81526020016000a05b565b60017f81933b308056e7e85668661dcd102b1f22795b4431f9cf4625794f381c271c6b6000602a81526020016000a25b565b60016000
602a81526020016000a15b565b3373ffffffffffffffffffffffffffffffffffffffff1660017f0e216b62efbb97e751a2ce09f607048751720397ecfb9eef1e48a6644948985b6000602a81526020016000a35b565b3373ffffffffffffffffffffffffffffffffffffffff1660016000602a81526020016000a25b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660017f317b31292193c2a4f561cc40a95ea0d97a2733f14af6d6d59522473e1f3ae65f6000602a81526020016000a45b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660016000602a81526020016000a35b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660017fd5f0a30e4be0c6be577a71eceb7464245a796a7e6a55c0d971837b250de05f4e60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe98152602001602a81526020016000a45b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff16600160007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe98152602001602a81526020016000a35b561ba0d19321283f84c1b5848a45f523748e65c858b788af97b8fb12b2f5bb16475ef9a005c68ca1c32712f3fa7b470eb5302fc49e179a45ab052f364f72a0b7b2a59f31c0",
"transactions" : [
{
"data" : "0x5b5b610705806100106000396000f3006000357c010000000000000000000000000000000000000000000000000000000090048063102accc11461012c57806312a7b9141461013a5780631774e6461461014c5780631e26fd331461015d5780631f9030371461016e578063343a875d1461018057806338cc4831146101955780634e7ad367146101bd57806357cb2fc4146101cb57806365538c73146101e057806368895979146101ee57806376bc21d9146102005780639a19a9531461020e5780639dc2c8f51461021f578063a53b1c1e1461022d578063a67808571461023e578063b61c05031461024c578063c2b12a731461025a578063d2282dc51461026b578063e30081a01461027c578063e8beef5b1461028d578063f38b06001461029b578063f5b53e17146102a9578063fd408767146102bb57005b6101346104d6565b60006000f35b61014261039b565b8060005260206000f35b610157600435610326565b60006000f35b6101686004356102c9565b60006000f35b610176610442565b8060005260206000f35b6101886103d3565b8060ff1660005260206000f35b61019d610413565b8073ffffffffffffffffffffffffffffffffffffffff1660005260206000f35b6101c56104c5565b60006000f35b6101d36103b7565b8060000b60005260206000f35b6101e8610454565b60006000f35b6101f6610401565b8060005260206000f35b61020861051f565b60006000f35b6102196004356102e5565b60006000f35b610227610693565b60006000f35b610238600435610342565b60006000f35b610246610484565b60006000f35b610254610493565b60006000f35b61026560043561038d565b60006000f35b610276600435610350565b60006000f35b61028760043561035e565b60006000f35b6102956105b4565b60006000f35b6102a3610547565b60006000f35b6102b16103ef565b8060005260206000f35b6102c3610600565b60006000f35b80600060006101000a81548160ff021916908302179055505b50565b80600060016101000a81548160ff02191690837f01000000000000000000000000000000000000000000000000000000000000009081020402179055505b50565b80600060026101000a81548160ff021916908302179055505b50565b806001600050819055505b50565b806002600050819055505b50565b80600360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b50565b806004600050819055505b50565b6000600060009054906101000a900460ff1690506103b4565b90565b6000600060019054906101000a900460000b90506103d0565b90565b6000600060029054906101000a900460ff1690506103ec565b90565b600060016000505490506103fe565b90565b60006002600050549050610410565b90565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905061043f565b90565b60006004600050549050610451565b90565b7f65c9ac8011e286e89d02a269890f41d67ca2cc597b2c76c7c69321ff492be5806000602a81526020016000a15b565b6000602a81526020016000a05b565b60017f81933b308056e7e85668661dcd102b1f22795b4431f9cf4625794f381c271c6b6000602a81526020016000a25b565b60016000602a81526020016000a15b565b3373ffffffffffffffffffffffffffffffffffffffff1660017f0e216b62efbb97e751a2ce09f607048751720397ecfb9eef1e48a6644948985b6000602a81526020016000a35b565b3373ffffffffffffffffffffffffffffffffffffffff1660016000602a81526020016000a25b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660017f317b31292193c2a4f561cc40a95ea0d97a2733f14af6d6d59522473e1f3ae65f6000602a81526020016000a45b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660016000602a81526020016000a35b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660017fd5f0a30e4be0c6be577a71eceb7464245a796a7e6a55c0d971837b250de05f4e60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe98152602001602a81526020016000a45b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff16600160007ffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffe98152602001602a81526020016000a35b56",
"gasLimit" : "0x2fefd8",
"gasPrice" : "0x01",
"nonce" : "0x00",
- "r" : "0x468a5ff5fe3de32328b0ce0a943ccdfa4463b8b3e82f593793bb2aeca14cc304",
- "s" : "0xeec480d7c7a9254598653a7caede5f62082fae9b76e7e61a50b91bbd70ae9b3d",
+ "r" : "0xd19321283f84c1b5848a45f523748e65c858b788af97b8fb12b2f5bb16475ef9",
+ "s" : "0x05c68ca1c32712f3fa7b470eb5302fc49e179a45ab052f364f72a0b7b2a59f31",
"to" : "",
- "v" : "0x1c",
+ "v" : "0x1b",
"value" : "0x0a"
}
],
@@ -45,28 +45,28 @@
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x53f0",
- "hash" : "878a132166f53adb7c993ded4cfb687977397d63d873fcdbeb06c18cac907a5c",
- "mixHash" : "f0265d206a5b41da8ddc893c2006d78d3546ded997a9c6d44f47f97088f87c95",
- "nonce" : "3a3ed53dd53c6e75",
+ "hash" : "6a5437704faaf9147a8a1cb00e538db144e35d9742a0d454663556b4aeb247de",
+ "mixHash" : "c79fef76b89c30030921ca479a68abc9deddfa630a691a2d0ede573666013600",
+ "nonce" : "3ab8d39aed81f766",
"number" : "0x02",
- "parentHash" : "378b2e7e4c1a4104efbe8aeb66bc93c6691448b4414de942c8d9a8ed5073ea45",
+ "parentHash" : "d6b7b6914c86c0df80247bbf01fd1bf50ae7bbe1c7127bd2a0f368a0198b00c1",
"receiptTrie" : "9e268dc33eafaf36e9c943ad6107534adfa928a3a4eac728d3b2aab747b57d42",
"stateRoot" : "6ac36e54d9c8d94075d00b7a59cfbf95a3a17ac301390bfbf83170cbeff7fa15",
- "timestamp" : "0x5534c546",
- "transactionsTrie" : "85bbede05b69e4e95570bee6ca9b5fc23e71298be5140769f07f17df5ccb0168",
+ "timestamp" : "0x553a1aec",
+ "transactionsTrie" : "ea4edc18358e63c6372a53426bf5ebc75858d4b39aa324d3d0368f1308d60223",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a0378b2e7e4c1a4104efbe8aeb66bc93c6691448b4414de942c8d9a8ed5073ea45a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a06ac36e54d9c8d94075d00b7a59cfbf95a3a17ac301390bfbf83170cbeff7fa15a085bbede05b69e4e95570bee6ca9b5fc23e71298be5140769f07f17df5ccb0168a09e268dc33eafaf36e9c943ad6107534adfa928a3a4eac728d3b2aab747b57d42b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302004002832fefd88253f0845534c54680a0f0265d206a5b41da8ddc893c2006d78d3546ded997a9c6d44f47f97088f87c95883a3ed53dd53c6e75f866f86401018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8412a7b9141ca0946ecdfe13096a2e09788f978c64b9de5bd0b037cc16d40ab90e83f6df3e6bbfa09127ed90c4ab509c7836ccf28592bead2a789b6ab0725241feeac3b61fa57cb6c0",
+ "rlp" : "0xf90265f901f9a0d6b7b6914c86c0df80247bbf01fd1bf50ae7bbe1c7127bd2a0f368a0198b00c1a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a06ac36e54d9c8d94075d00b7a59cfbf95a3a17ac301390bfbf83170cbeff7fa15a0ea4edc18358e63c6372a53426bf5ebc75858d4b39aa324d3d0368f1308d60223a09e268dc33eafaf36e9c943ad6107534adfa928a3a4eac728d3b2aab747b57d42b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302004002832fefd88253f084553a1aec80a0c79fef76b89c30030921ca479a68abc9deddfa630a691a2d0ede573666013600883ab8d39aed81f766f866f86401018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8412a7b9141ba06601f3b40f25bf1f12984d9c02d2719f52b3ca0718018e60051ccb8d248f030ca0535c264ffc21e2685eafe6f95f843f0b6a179b9bdab41fad1c941aec448246cfc0",
"transactions" : [
{
"data" : "0x12a7b914",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x01",
- "r" : "0x946ecdfe13096a2e09788f978c64b9de5bd0b037cc16d40ab90e83f6df3e6bbf",
- "s" : "0x9127ed90c4ab509c7836ccf28592bead2a789b6ab0725241feeac3b61fa57cb6",
+ "r" : "0x6601f3b40f25bf1f12984d9c02d2719f52b3ca0718018e60051ccb8d248f030c",
+ "s" : "0x535c264ffc21e2685eafe6f95f843f0b6a179b9bdab41fad1c941aec448246cf",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1c",
+ "v" : "0x1b",
"value" : "0x0a"
}
],
@@ -81,26 +81,26 @@
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x549e",
- "hash" : "9fa75a297a5c19556f3d5d1e332dc51de8473b7ab3530f593d933262981f6bb9",
- "mixHash" : "b2a7cbf1cd43534cfb260bda0d6dd83c82c17f322ab833176d92be55ee67c823",
- "nonce" : "219dde311a01e7a6",
+ "hash" : "58900ee69afd523a5e887da398a54201b8fb79cb4af9a34056d1038c7d2bef58",
+ "mixHash" : "d525d4dca8117b958e33c12fdc052b8a14a106c263255116062a01062a78647c",
+ "nonce" : "8952faeae052875a",
"number" : "0x03",
- "parentHash" : "878a132166f53adb7c993ded4cfb687977397d63d873fcdbeb06c18cac907a5c",
+ "parentHash" : "6a5437704faaf9147a8a1cb00e538db144e35d9742a0d454663556b4aeb247de",
"receiptTrie" : "38593ec385f1e040205a8586fd8095390c5ebf75699bdf6ed73ca719d90eeeb0",
"stateRoot" : "f1133199d44695dfa8fd1bcfe424d82854b5cebef75bddd7e40ea94cda515bcb",
- "timestamp" : "0x5534c547",
- "transactionsTrie" : "ec96dd16131edd27093bd7fae7ae3822454c5d053c92f30b54705ddf699febb2",
+ "timestamp" : "0x553a1aed",
+ "transactionsTrie" : "52da03f666314357f747784f8d370da9bfa34820e4c5965fe33954bb7bc778b4",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a0878a132166f53adb7c993ded4cfb687977397d63d873fcdbeb06c18cac907a5ca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0f1133199d44695dfa8fd1bcfe424d82854b5cebef75bddd7e40ea94cda515bcba0ec96dd16131edd27093bd7fae7ae3822454c5d053c92f30b54705ddf699febb2a038593ec385f1e040205a8586fd8095390c5ebf75699bdf6ed73ca719d90eeeb0b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefd882549e845534c54780a0b2a7cbf1cd43534cfb260bda0d6dd83c82c17f322ab833176d92be55ee67c82388219dde311a01e7a6f866f86402018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8457cb2fc41ba04d2d467743753d55d9fdfb0f482957afff8bd256627e7cfe591e9dd1d94e19fea01965f59354b8c6c0ae670c901a737fe2c50f323075137afc27e68a6fd6121fa3c0",
+ "rlp" : "0xf90265f901f9a06a5437704faaf9147a8a1cb00e538db144e35d9742a0d454663556b4aeb247dea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0f1133199d44695dfa8fd1bcfe424d82854b5cebef75bddd7e40ea94cda515bcba052da03f666314357f747784f8d370da9bfa34820e4c5965fe33954bb7bc778b4a038593ec385f1e040205a8586fd8095390c5ebf75699bdf6ed73ca719d90eeeb0b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefd882549e84553a1aed80a0d525d4dca8117b958e33c12fdc052b8a14a106c263255116062a01062a78647c888952faeae052875af866f86402018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8457cb2fc41ba0ea113b40bbbefcd412531bc1cbe681e11a4a0d901f686d1ede803939567307d9a04e7206f3616d77b145c11351cfc752dd6bf8dc7090210a32f81c6ac49a4ccfc1c0",
"transactions" : [
{
"data" : "0x57cb2fc4",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x02",
- "r" : "0x4d2d467743753d55d9fdfb0f482957afff8bd256627e7cfe591e9dd1d94e19fe",
- "s" : "0x1965f59354b8c6c0ae670c901a737fe2c50f323075137afc27e68a6fd6121fa3",
+ "r" : "0xea113b40bbbefcd412531bc1cbe681e11a4a0d901f686d1ede803939567307d9",
+ "s" : "0x4e7206f3616d77b145c11351cfc752dd6bf8dc7090210a32f81c6ac49a4ccfc1",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -117,28 +117,28 @@
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5458",
- "hash" : "6af7d2ae9baaa1d90050819ac54935b49effbb05eb363efc49a1821bbc8aa41c",
- "mixHash" : "e4439912703768a07111db6482391b59852779ab48802cdb02b14db581577a91",
- "nonce" : "39332bc6292f36be",
+ "hash" : "acd70eb08047049f3252a4c9cf26a61740ee66446472f090b22b20b53810c86f",
+ "mixHash" : "4e05d740b04b219ea97337e69d377591608856324f8a3b9130ea3a8c59c61e81",
+ "nonce" : "9ffd316795dcc0f7",
"number" : "0x04",
- "parentHash" : "9fa75a297a5c19556f3d5d1e332dc51de8473b7ab3530f593d933262981f6bb9",
+ "parentHash" : "58900ee69afd523a5e887da398a54201b8fb79cb4af9a34056d1038c7d2bef58",
"receiptTrie" : "7c7284ae5dd5e0a3f0fc2fd49639dadc04f914a75bf5992522f5b3721e070bae",
"stateRoot" : "13487ffef45cee322268189692d3a97a15e897021ac7b7e789acc888abaeefc6",
- "timestamp" : "0x5534c549",
- "transactionsTrie" : "8ba818b8e414aa439f1e492168c1042a620e3a3d62eb0a62bd9c1df3fd371087",
- "uncleHash" : "8a68905074dcad2eeb4663cc02aa401db1cd259cbd6061fd73b9ddc0439d5062"
+ "timestamp" : "0x553a1af0",
+ "transactionsTrie" : "426267a667a788adc78eb103cb3e35959ef9f588ef6c0ecf073a3ca06fd3a691",
+ "uncleHash" : "c1a60650263b2cdda530211ad5f36059a635be3994dd6d4f2bd106be5764a3f5"
},
- "rlp" : "0xf9065bf901f9a09fa75a297a5c19556f3d5d1e332dc51de8473b7ab3530f593d933262981f6bb9a08a68905074dcad2eeb4663cc02aa401db1cd259cbd6061fd73b9ddc0439d5062948888f1f195afa192cfee860698584c030f4c9db1a013487ffef45cee322268189692d3a97a15e897021ac7b7e789acc888abaeefc6a08ba818b8e414aa439f1e492168c1042a620e3a3d62eb0a62bd9c1df3fd371087a07c7284ae5dd5e0a3f0fc2fd49639dadc04f914a75bf5992522f5b3721e070baeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200c004832fefd8825458845534c54980a0e4439912703768a07111db6482391b59852779ab48802cdb02b14db581577a918839332bc6292f36bef866f86403018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84343a875d1ca0b2bffa60356dc3247f132e82ab091d7479db488087158f22eae788d093de610ba0e80ab961e0b02e1ab8ee1973ceae616814a9fc29d332a57006527b079adee92bf903f4f901f7a0378b2e7e4c1a4104efbe8aeb66bc93c6691448b4414de942c8d9a8ed5073ea45a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794a94f5374fce5edbc8e2a8697c15331677e6ebf0ba010f907738cb593a1838c7eb1b3a67b50b296862208937e59a438172396e7d125a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302004002832fefd880845534c54a80a0621c495f13c1c8fdc146073f2333b197508a45c607aea89e20daf48b256b9a7c88613cc82512bb4114f901f7a0378b2e7e4c1a4104efbe8aeb66bc93c6691448b4414de942c8d9a8ed5073ea45a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794bcde5374fce5edbc8e2a8697c15331677e6ebf0ba010f907738cb593a1838c7eb1b3a67b50b296862208937e59a438172396e7d125a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302004002832fefd880845534c54a80a01362646bf80a7a4fce86562dc7ec476f0f870a1d68a927f06a826684a957d384881a5132c3680c15b4",
+ "rlp" : "0xf9065bf901f9a058900ee69afd523a5e887da398a54201b8fb79cb4af9a34056d1038c7d2bef58a0c1a60650263b2cdda530211ad5f36059a635be3994dd6d4f2bd106be5764a3f5948888f1f195afa192cfee860698584c030f4c9db1a013487ffef45cee322268189692d3a97a15e897021ac7b7e789acc888abaeefc6a0426267a667a788adc78eb103cb3e35959ef9f588ef6c0ecf073a3ca06fd3a691a07c7284ae5dd5e0a3f0fc2fd49639dadc04f914a75bf5992522f5b3721e070baeb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200c004832fefd882545884553a1af080a04e05d740b04b219ea97337e69d377591608856324f8a3b9130ea3a8c59c61e81889ffd316795dcc0f7f866f86403018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84343a875d1ba0f973683225d7bb32f7a74684276f8e023f7f5f7083b5b6550fed500bdd33bb1aa08603b4d5033ed50f2b42ec40f64376948ddfeb6c60424968e03e618b8afbd871f903f4f901f7a0d6b7b6914c86c0df80247bbf01fd1bf50ae7bbe1c7127bd2a0f368a0198b00c1a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794a94f5374fce5edbc8e2a8697c15331677e6ebf0ba010f907738cb593a1838c7eb1b3a67b50b296862208937e59a438172396e7d125a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302004002832fefd88084553a1af180a075fbc03b95158629f8ace5fe111a727f88b46396e4e6fa4143a4b02e1570acad8899f9901e4ff1a2faf901f7a0d6b7b6914c86c0df80247bbf01fd1bf50ae7bbe1c7127bd2a0f368a0198b00c1a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794bcde5374fce5edbc8e2a8697c15331677e6ebf0ba010f907738cb593a1838c7eb1b3a67b50b296862208937e59a438172396e7d125a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302004002832fefd88084553a1af180a0e2aa04d2f51a8518939987dcbd946752cf0c17c08c54d6cd9d4458b834a4c9518804d885858a7a7730",
"transactions" : [
{
"data" : "0x343a875d",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x03",
- "r" : "0xb2bffa60356dc3247f132e82ab091d7479db488087158f22eae788d093de610b",
- "s" : "0xe80ab961e0b02e1ab8ee1973ceae616814a9fc29d332a57006527b079adee92b",
+ "r" : "0xf973683225d7bb32f7a74684276f8e023f7f5f7083b5b6550fed500bdd33bb1a",
+ "s" : "0x8603b4d5033ed50f2b42ec40f64376948ddfeb6c60424968e03e618b8afbd871",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1c",
+ "v" : "0x1b",
"value" : "0x0a"
}
],
@@ -150,14 +150,14 @@
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x00",
- "hash" : "3d0667466ef9d31194d780f4fbf2ffa71381a090dae910e939073f735960006c",
- "mixHash" : "621c495f13c1c8fdc146073f2333b197508a45c607aea89e20daf48b256b9a7c",
- "nonce" : "613cc82512bb4114",
+ "hash" : "0ec8631b88df1820f4948f8c152fd60f5a88d7d7c7c72eb387ca97aea2b64ee0",
+ "mixHash" : "75fbc03b95158629f8ace5fe111a727f88b46396e4e6fa4143a4b02e1570acad",
+ "nonce" : "99f9901e4ff1a2fa",
"number" : "0x02",
- "parentHash" : "378b2e7e4c1a4104efbe8aeb66bc93c6691448b4414de942c8d9a8ed5073ea45",
+ "parentHash" : "d6b7b6914c86c0df80247bbf01fd1bf50ae7bbe1c7127bd2a0f368a0198b00c1",
"receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"stateRoot" : "10f907738cb593a1838c7eb1b3a67b50b296862208937e59a438172396e7d125",
- "timestamp" : "0x5534c54a",
+ "timestamp" : "0x553a1af1",
"transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
@@ -168,14 +168,14 @@
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x00",
- "hash" : "822356f54c65bbb761f084cec261ef90b1a3f6405b7aa60742e2db0952cab225",
- "mixHash" : "1362646bf80a7a4fce86562dc7ec476f0f870a1d68a927f06a826684a957d384",
- "nonce" : "1a5132c3680c15b4",
+ "hash" : "453152c7e16a384c7c4fb320552ddd37cc7b0220a5b07b0c5cb90ae342aefef5",
+ "mixHash" : "e2aa04d2f51a8518939987dcbd946752cf0c17c08c54d6cd9d4458b834a4c951",
+ "nonce" : "04d885858a7a7730",
"number" : "0x02",
- "parentHash" : "378b2e7e4c1a4104efbe8aeb66bc93c6691448b4414de942c8d9a8ed5073ea45",
+ "parentHash" : "d6b7b6914c86c0df80247bbf01fd1bf50ae7bbe1c7127bd2a0f368a0198b00c1",
"receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"stateRoot" : "10f907738cb593a1838c7eb1b3a67b50b296862208937e59a438172396e7d125",
- "timestamp" : "0x5534c54a",
+ "timestamp" : "0x553a1af1",
"transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
}
@@ -189,28 +189,28 @@
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x559f",
- "hash" : "5ddb15aca031f12ef5a0bffa2639b1e28efd3c10e78efebe238894e4693134e2",
- "mixHash" : "6874dea799fb1bb284af63aac5bdc85e147ef99df10116dcb319eb61b12c7142",
- "nonce" : "a1fd764b1366721e",
+ "hash" : "ec888645674536c75986ac50f6eda033a0c75227dbf04df3e8434410000ff220",
+ "mixHash" : "3f4d30c4863331d869bd8a52285558a08a716edc3d0d7b167f29d5bf29c137d5",
+ "nonce" : "798a036543994172",
"number" : "0x05",
- "parentHash" : "6af7d2ae9baaa1d90050819ac54935b49effbb05eb363efc49a1821bbc8aa41c",
+ "parentHash" : "acd70eb08047049f3252a4c9cf26a61740ee66446472f090b22b20b53810c86f",
"receiptTrie" : "440148dd71cbfbe3b40056aaf6abcbcde2e5d7df031418d47e1b4bb538885429",
"stateRoot" : "05b695e78b90773709e3dfcd69676b6905797c8a5e5e1d478bf3934cc688be1f",
- "timestamp" : "0x5534c54c",
- "transactionsTrie" : "fa8c7ac5e9701332809e3a52c1fe986ddb53306572e779c8c65d6e5e4be680d6",
+ "timestamp" : "0x553a1af4",
+ "transactionsTrie" : "5049186610b85a8453357ed589ce4f4c8315829b5d99671274617139ef11537f",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a06af7d2ae9baaa1d90050819ac54935b49effbb05eb363efc49a1821bbc8aa41ca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a005b695e78b90773709e3dfcd69676b6905797c8a5e5e1d478bf3934cc688be1fa0fa8c7ac5e9701332809e3a52c1fe986ddb53306572e779c8c65d6e5e4be680d6a0440148dd71cbfbe3b40056aaf6abcbcde2e5d7df031418d47e1b4bb538885429b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302010005832fefd882559f845534c54c80a06874dea799fb1bb284af63aac5bdc85e147ef99df10116dcb319eb61b12c714288a1fd764b1366721ef866f86404018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84f5b53e171ca0263b15530dc17570f59cdd04d0b603c13c5e4a9ba721beaed7692cdbe4afa0eea0a824920502548beab7d62656f73df455e0d14e315f7ef2e06f6d0fc3112fffdcc0",
+ "rlp" : "0xf90265f901f9a0acd70eb08047049f3252a4c9cf26a61740ee66446472f090b22b20b53810c86fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a005b695e78b90773709e3dfcd69676b6905797c8a5e5e1d478bf3934cc688be1fa05049186610b85a8453357ed589ce4f4c8315829b5d99671274617139ef11537fa0440148dd71cbfbe3b40056aaf6abcbcde2e5d7df031418d47e1b4bb538885429b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302010005832fefd882559f84553a1af480a03f4d30c4863331d869bd8a52285558a08a716edc3d0d7b167f29d5bf29c137d588798a036543994172f866f86404018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84f5b53e171ba01bf1d5ed703098694bc7447196f53d12ecda10d4dfafbe86ce900be0ba17bec6a045a05178dca8351e9f97339863c9cd3637156a0ea96b2875911fc4d8f5273a3ac0",
"transactions" : [
{
"data" : "0xf5b53e17",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x04",
- "r" : "0x263b15530dc17570f59cdd04d0b603c13c5e4a9ba721beaed7692cdbe4afa0ee",
- "s" : "0xa824920502548beab7d62656f73df455e0d14e315f7ef2e06f6d0fc3112fffdc",
+ "r" : "0x1bf1d5ed703098694bc7447196f53d12ecda10d4dfafbe86ce900be0ba17bec6",
+ "s" : "0x45a05178dca8351e9f97339863c9cd3637156a0ea96b2875911fc4d8f5273a3a",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1c",
+ "v" : "0x1b",
"value" : "0x0a"
}
],
@@ -225,26 +225,26 @@
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5497",
- "hash" : "e5f3b5f5f66fdd2138f3ec8430d065d6bbe9c3f46318b7124f1f9640987fab3c",
- "mixHash" : "9ee065306cb6280c61c8dc3c003637816fee6b2e8a891e46bda4c4cda30012a9",
- "nonce" : "abbb14154dbc17b2",
+ "hash" : "d26e7b050165785fe7606c3eaf32c8a715e167474357c63634aeb35fbb536ec3",
+ "mixHash" : "eec17cbd1c89f15a2de1a710081ceded226e72fcd89dffa2375854cb40921d2c",
+ "nonce" : "939b3636c6867162",
"number" : "0x06",
- "parentHash" : "5ddb15aca031f12ef5a0bffa2639b1e28efd3c10e78efebe238894e4693134e2",
+ "parentHash" : "ec888645674536c75986ac50f6eda033a0c75227dbf04df3e8434410000ff220",
"receiptTrie" : "63dc489e1be33e3b4203c02a9ec3e2562bbd9c2c334777eff1f25332b31169e2",
"stateRoot" : "13c3fbe6a1368f7d800e6f5f0529d9dd6339b4782f757b7c33370e96f46abe67",
- "timestamp" : "0x5534c54d",
- "transactionsTrie" : "d7d9efe934aa5a93caaed12ec94fc6c4bf85b6c18cd2502a6e92de0c52ccc90e",
+ "timestamp" : "0x553a1af8",
+ "transactionsTrie" : "fb0669d44d55ef586d622f8172045af6164d1c5e4c5b7e580d962057eec7d0df",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a05ddb15aca031f12ef5a0bffa2639b1e28efd3c10e78efebe238894e4693134e2a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a013c3fbe6a1368f7d800e6f5f0529d9dd6339b4782f757b7c33370e96f46abe67a0d7d9efe934aa5a93caaed12ec94fc6c4bf85b6c18cd2502a6e92de0c52ccc90ea063dc489e1be33e3b4203c02a9ec3e2562bbd9c2c334777eff1f25332b31169e2b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302014006832fefd8825497845534c54d80a09ee065306cb6280c61c8dc3c003637816fee6b2e8a891e46bda4c4cda30012a988abbb14154dbc17b2f866f86405018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84688959791ba06c5155ccd8258621322115d14f697757344fa2136878bfe0e459e92926d54213a064ce66c7b50c829257ef43f8364fd1f7f5479f79993c6af1ee177e66f014bdcac0",
+ "rlp" : "0xf90265f901f9a0ec888645674536c75986ac50f6eda033a0c75227dbf04df3e8434410000ff220a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a013c3fbe6a1368f7d800e6f5f0529d9dd6339b4782f757b7c33370e96f46abe67a0fb0669d44d55ef586d622f8172045af6164d1c5e4c5b7e580d962057eec7d0dfa063dc489e1be33e3b4203c02a9ec3e2562bbd9c2c334777eff1f25332b31169e2b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302014006832fefd882549784553a1af880a0eec17cbd1c89f15a2de1a710081ceded226e72fcd89dffa2375854cb40921d2c88939b3636c6867162f866f86405018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84688959791ba0b02b76ab0ed25ee4caba34ea8d1f0676e58d1d0fb7655ea28f54f001e2ffe1d5a0fb77cb0e1e4de4fce1596e084ab22eac19f501205691eb9ebc2ded7e8b4a48f7c0",
"transactions" : [
{
"data" : "0x68895979",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x05",
- "r" : "0x6c5155ccd8258621322115d14f697757344fa2136878bfe0e459e92926d54213",
- "s" : "0x64ce66c7b50c829257ef43f8364fd1f7f5479f79993c6af1ee177e66f014bdca",
+ "r" : "0xb02b76ab0ed25ee4caba34ea8d1f0676e58d1d0fb7655ea28f54f001e2ffe1d5",
+ "s" : "0xfb77cb0e1e4de4fce1596e084ab22eac19f501205691eb9ebc2ded7e8b4a48f7",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -257,30 +257,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020180",
+ "difficulty" : "0x020100",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5464",
- "hash" : "6e380068ad5c2d06c6b117faf6d1750ee064609e5922a7823c84fe3e1628605f",
- "mixHash" : "48e0a26595c2edcd54ce6d5a2416af3ed366f552d96da0762e48cf17373345d5",
- "nonce" : "83c3b3182156abdb",
+ "hash" : "f3db36d163a5233895fd1a1915100034ceac9c0680d894798527a7543ab26423",
+ "mixHash" : "c12e213a45641ccb6f3a8ff75ad6c10e9b53ca9bf6670214ae7479bad9b49a06",
+ "nonce" : "b3326a293811016b",
"number" : "0x07",
- "parentHash" : "e5f3b5f5f66fdd2138f3ec8430d065d6bbe9c3f46318b7124f1f9640987fab3c",
+ "parentHash" : "d26e7b050165785fe7606c3eaf32c8a715e167474357c63634aeb35fbb536ec3",
"receiptTrie" : "5cf916582a8730d4a0f01fb64a059d2f23f379c950eacee4a8bdb5882b5ce0ea",
"stateRoot" : "9615bf81ba46645a835cb4f9fa3e95a31b80f4bb3e1c4b91e48e23e27d226ff0",
- "timestamp" : "0x5534c54f",
- "transactionsTrie" : "0e4083a2e00022ff8488e96105d4587409a3cb09e9d1c78c124cd31805d73f17",
+ "timestamp" : "0x553a1b01",
+ "transactionsTrie" : "8e3cb106ae0505f0e93f7f94fae832ce7c07e2c091c1da085de796977738df66",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a0e5f3b5f5f66fdd2138f3ec8430d065d6bbe9c3f46318b7124f1f9640987fab3ca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a09615bf81ba46645a835cb4f9fa3e95a31b80f4bb3e1c4b91e48e23e27d226ff0a00e4083a2e00022ff8488e96105d4587409a3cb09e9d1c78c124cd31805d73f17a05cf916582a8730d4a0f01fb64a059d2f23f379c950eacee4a8bdb5882b5ce0eab90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302018007832fefd8825464845534c54f80a048e0a26595c2edcd54ce6d5a2416af3ed366f552d96da0762e48cf17373345d58883c3b3182156abdbf866f86406018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8438cc48311ba06bc06b5ce5ada51955aa0f2c4d4666117154d26d26c4a146435b0a916cc860cfa09a9309df857bd9436a3d77b57d99a72f72115efc76bdbd7240b9e51ae90e6f78c0",
+ "rlp" : "0xf90265f901f9a0d26e7b050165785fe7606c3eaf32c8a715e167474357c63634aeb35fbb536ec3a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a09615bf81ba46645a835cb4f9fa3e95a31b80f4bb3e1c4b91e48e23e27d226ff0a08e3cb106ae0505f0e93f7f94fae832ce7c07e2c091c1da085de796977738df66a05cf916582a8730d4a0f01fb64a059d2f23f379c950eacee4a8bdb5882b5ce0eab90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302010007832fefd882546484553a1b0180a0c12e213a45641ccb6f3a8ff75ad6c10e9b53ca9bf6670214ae7479bad9b49a0688b3326a293811016bf866f86406018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8438cc48311ba0a33d30cd6a09b7714be8e82e066c2a55b3b6b044043135ad08775fb044a440b5a0e905f0c0492a4c6d4e3d53f3123abef6013cfd7adbff8bf4f04cfb0915f35f31c0",
"transactions" : [
{
"data" : "0x38cc4831",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x06",
- "r" : "0x6bc06b5ce5ada51955aa0f2c4d4666117154d26d26c4a146435b0a916cc860cf",
- "s" : "0x9a9309df857bd9436a3d77b57d99a72f72115efc76bdbd7240b9e51ae90e6f78",
+ "r" : "0xa33d30cd6a09b7714be8e82e066c2a55b3b6b044043135ad08775fb044a440b5",
+ "s" : "0xe905f0c0492a4c6d4e3d53f3123abef6013cfd7adbff8bf4f04cfb0915f35f31",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -293,32 +293,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x0201c0",
+ "difficulty" : "0x020140",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5413",
- "hash" : "3a5768ce44ea7900df696ad2818ad2227fa8ed9befc36091d52393a2318a7d18",
- "mixHash" : "bfd8b440503c2bcf14d10aa235f6c2582f4230c8a85249d3e1eb98d863240d51",
- "nonce" : "d5de6ca7d9cde3f0",
+ "hash" : "4d696d70c311d3681e9b2bccd61c076b997537675ce3923408bce108e9b2a1ac",
+ "mixHash" : "373eb43cec62310dd967a700701957f7b660c3e84ba336ba089f54bf3f2cec50",
+ "nonce" : "4e45ac3e48b5b4ee",
"number" : "0x08",
- "parentHash" : "6e380068ad5c2d06c6b117faf6d1750ee064609e5922a7823c84fe3e1628605f",
+ "parentHash" : "f3db36d163a5233895fd1a1915100034ceac9c0680d894798527a7543ab26423",
"receiptTrie" : "589022e821066b90e00f216ad9572220703ea73b1270df17eaa64cc97402db01",
"stateRoot" : "3a7c5b1cb9831060094640aa24519a16dc191418b0f42c5a2eb3d4bf83712153",
- "timestamp" : "0x5534c550",
- "transactionsTrie" : "418c6e484268e2e45a23b0e1e6f6b4584d76641c3d99f90255d84e698be23b75",
+ "timestamp" : "0x553a1b07",
+ "transactionsTrie" : "0352d1a5a10e25f75d8aeb51b3a7597cb191f8065db7e480d32795089e0d72de",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a06e380068ad5c2d06c6b117faf6d1750ee064609e5922a7823c84fe3e1628605fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a03a7c5b1cb9831060094640aa24519a16dc191418b0f42c5a2eb3d4bf83712153a0418c6e484268e2e45a23b0e1e6f6b4584d76641c3d99f90255d84e698be23b75a0589022e821066b90e00f216ad9572220703ea73b1270df17eaa64cc97402db01b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830201c008832fefd8825413845534c55080a0bfd8b440503c2bcf14d10aa235f6c2582f4230c8a85249d3e1eb98d863240d5188d5de6ca7d9cde3f0f866f86407018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a841f9030371ca01231bb40a65da857019d64b05bb31648a4eabc2f76076286e3a8af4d88c433e9a0270264ab1e388eaba9f74b2de86a943effe75f4949e2495fad9598778020fa4ec0",
+ "rlp" : "0xf90265f901f9a0f3db36d163a5233895fd1a1915100034ceac9c0680d894798527a7543ab26423a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a03a7c5b1cb9831060094640aa24519a16dc191418b0f42c5a2eb3d4bf83712153a00352d1a5a10e25f75d8aeb51b3a7597cb191f8065db7e480d32795089e0d72dea0589022e821066b90e00f216ad9572220703ea73b1270df17eaa64cc97402db01b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302014008832fefd882541384553a1b0780a0373eb43cec62310dd967a700701957f7b660c3e84ba336ba089f54bf3f2cec50884e45ac3e48b5b4eef866f86407018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a841f9030371ba09fe2b36a46252693e4858f7fd1a0d8d5ac6b297d66839ed8680a5ae45eac3657a0fc2bedbc65f017070ceca48e433cb6dbacd4c89fd70afc520f49b17fdfd3a123c0",
"transactions" : [
{
"data" : "0x1f903037",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x07",
- "r" : "0x1231bb40a65da857019d64b05bb31648a4eabc2f76076286e3a8af4d88c433e9",
- "s" : "0x270264ab1e388eaba9f74b2de86a943effe75f4949e2495fad9598778020fa4e",
+ "r" : "0x9fe2b36a46252693e4858f7fd1a0d8d5ac6b297d66839ed8680a5ae45eac3657",
+ "s" : "0xfc2bedbc65f017070ceca48e433cb6dbacd4c89fd70afc520f49b17fdfd3a123",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1c",
+ "v" : "0x1b",
"value" : "0x0a"
}
],
@@ -329,30 +329,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020200",
+ "difficulty" : "0x020180",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0xa2f8",
- "hash" : "39d78fff4f31a218864526ff0724ebeaead14a4facd1fe0386d6e3d1dbe5b343",
- "mixHash" : "15f39f65d9c76606a48e384d788c23c9bd5bdf266d36002c30f3a3158619abce",
- "nonce" : "7c6e3c032b9c18ea",
+ "hash" : "0fb21bc2d9a55766b54358e6418137af0c271c624b447ee66fa42f97da5c322a",
+ "mixHash" : "9372bd379d3d6d4bdd58074ac10fe5698af0fdc531af3590d54fc51969988174",
+ "nonce" : "a015d385bdbe5d06",
"number" : "0x09",
- "parentHash" : "3a5768ce44ea7900df696ad2818ad2227fa8ed9befc36091d52393a2318a7d18",
+ "parentHash" : "4d696d70c311d3681e9b2bccd61c076b997537675ce3923408bce108e9b2a1ac",
"receiptTrie" : "64e0a1fc7cc366296edbbadadab9d71472d307a386a6f3f1a54721f1ae671845",
"stateRoot" : "7f7b5f97eeedacb4e7640401f78c30a0c6f1c95e764537e07fac8a7acc78a69b",
- "timestamp" : "0x5534c551",
- "transactionsTrie" : "ebf7b44ad6adf45ee136e2e28aa0c5fe7294e0008d8ca1c8c48d5a3087e9344b",
+ "timestamp" : "0x553a1b0d",
+ "transactionsTrie" : "7c1a1bb126243730c64eb119d67c399bb1ebde77e8987aee61929b4aa0af47fe",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90285f901f9a03a5768ce44ea7900df696ad2818ad2227fa8ed9befc36091d52393a2318a7d18a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07f7b5f97eeedacb4e7640401f78c30a0c6f1c95e764537e07fac8a7acc78a69ba0ebf7b44ad6adf45ee136e2e28aa0c5fe7294e0008d8ca1c8c48d5a3087e9344ba064e0a1fc7cc366296edbbadadab9d71472d307a386a6f3f1a54721f1ae671845b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302020009832fefd882a2f8845534c55180a015f39f65d9c76606a48e384d788c23c9bd5bdf266d36002c30f3a3158619abce887c6e3c032b9c18eaf886f88408018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa41e26fd3300000000000000000000000000000000000000000000000000000000000000011ca033d60db1feaed1ae0b9b9de08bd27274d10af5b5590ba12fa22251e120ff9abca098608a8fe25473e7161d4dd4af0e2eaf1df24b525d1cf647fb5662a473721dc3c0",
+ "rlp" : "0xf90285f901f9a04d696d70c311d3681e9b2bccd61c076b997537675ce3923408bce108e9b2a1aca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07f7b5f97eeedacb4e7640401f78c30a0c6f1c95e764537e07fac8a7acc78a69ba07c1a1bb126243730c64eb119d67c399bb1ebde77e8987aee61929b4aa0af47fea064e0a1fc7cc366296edbbadadab9d71472d307a386a6f3f1a54721f1ae671845b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302018009832fefd882a2f884553a1b0d80a09372bd379d3d6d4bdd58074ac10fe5698af0fdc531af3590d54fc5196998817488a015d385bdbe5d06f886f88408018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa41e26fd3300000000000000000000000000000000000000000000000000000000000000011ca0114290129b80b3e6a7e8134389bf15498066df4f28909d81f8b9689d6e8cf115a082da0ae029b68410cae53899a62ac6245aabec88cee2068bea017abc56a8279ec0",
"transactions" : [
{
"data" : "0x1e26fd330000000000000000000000000000000000000000000000000000000000000001",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x08",
- "r" : "0x33d60db1feaed1ae0b9b9de08bd27274d10af5b5590ba12fa22251e120ff9abc",
- "s" : "0x98608a8fe25473e7161d4dd4af0e2eaf1df24b525d1cf647fb5662a473721dc3",
+ "r" : "0x114290129b80b3e6a7e8134389bf15498066df4f28909d81f8b9689d6e8cf115",
+ "s" : "0x82da0ae029b68410cae53899a62ac6245aabec88cee2068bea017abc56a8279e",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1c",
"value" : "0x0a"
@@ -365,30 +365,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020240",
+ "difficulty" : "0x0201c0",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x6860",
- "hash" : "4393786b2b4d2cba2bac8f8c0d38ba2a00c270d685f0b61c643c386981668cee",
- "mixHash" : "d9a88dfb0e146b10077a3ce710c54f34e43bb5666bab34205fd135cf4ace739c",
- "nonce" : "6dac8958d9a61b30",
+ "hash" : "245310bb4b10cb54e594627e089006a6d9f86c4bd406730e72099930e0743fb0",
+ "mixHash" : "42a758e91ef25c2563a016f2a3482cc2db7f221172c437066bea07a6be317ade",
+ "nonce" : "8ca4fd52eda18ce3",
"number" : "0x0a",
- "parentHash" : "39d78fff4f31a218864526ff0724ebeaead14a4facd1fe0386d6e3d1dbe5b343",
+ "parentHash" : "0fb21bc2d9a55766b54358e6418137af0c271c624b447ee66fa42f97da5c322a",
"receiptTrie" : "73ae5eeeea54ab221be9e3a3a0a3f3d9a67b0643073205beceb21f680a0db8d8",
"stateRoot" : "2595e32018af7ddb1ea6cb157a859795af0efe922381b3e8601c6a657631892f",
- "timestamp" : "0x5534c553",
- "transactionsTrie" : "bca0d415b637bfd35b38b66ba20132c2b183749ba3b5da853cea8bc97e2ef830",
+ "timestamp" : "0x553a1b10",
+ "transactionsTrie" : "579fe2652d6c91df8218db89694c9659c5759f6d80a941a8e006c0fcacf2d745",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90285f901f9a039d78fff4f31a218864526ff0724ebeaead14a4facd1fe0386d6e3d1dbe5b343a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a02595e32018af7ddb1ea6cb157a859795af0efe922381b3e8601c6a657631892fa0bca0d415b637bfd35b38b66ba20132c2b183749ba3b5da853cea8bc97e2ef830a073ae5eeeea54ab221be9e3a3a0a3f3d9a67b0643073205beceb21f680a0db8d8b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202400a832fefd8826860845534c55380a0d9a88dfb0e146b10077a3ce710c54f34e43bb5666bab34205fd135cf4ace739c886dac8958d9a61b30f886f88409018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa41e26fd3300000000000000000000000000000000000000000000000000000000000000011ca0c9f124216720c8114536f3c21b9a929d61aeea75caf1342e8d36bc267a09e56fa04c5c0e4871704371916c328c8fe7e991d12cb79c8021b107a275ef8e8fb55f93c0",
+ "rlp" : "0xf90285f901f9a00fb21bc2d9a55766b54358e6418137af0c271c624b447ee66fa42f97da5c322aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a02595e32018af7ddb1ea6cb157a859795af0efe922381b3e8601c6a657631892fa0579fe2652d6c91df8218db89694c9659c5759f6d80a941a8e006c0fcacf2d745a073ae5eeeea54ab221be9e3a3a0a3f3d9a67b0643073205beceb21f680a0db8d8b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830201c00a832fefd882686084553a1b1080a042a758e91ef25c2563a016f2a3482cc2db7f221172c437066bea07a6be317ade888ca4fd52eda18ce3f886f88409018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa41e26fd3300000000000000000000000000000000000000000000000000000000000000011ca0ffa51d5821f5f15901f1c3a3f35fa5c79e8dc7011dd04923d27009e0b028d4f7a04f45bee7a3ea7737a7abee7f19e74a8ada7338ad570eef96d06ff37e6470ec03c0",
"transactions" : [
{
"data" : "0x1e26fd330000000000000000000000000000000000000000000000000000000000000001",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x09",
- "r" : "0xc9f124216720c8114536f3c21b9a929d61aeea75caf1342e8d36bc267a09e56f",
- "s" : "0x4c5c0e4871704371916c328c8fe7e991d12cb79c8021b107a275ef8e8fb55f93",
+ "r" : "0xffa51d5821f5f15901f1c3a3f35fa5c79e8dc7011dd04923d27009e0b028d4f7",
+ "s" : "0x4f45bee7a3ea7737a7abee7f19e74a8ada7338ad570eef96d06ff37e6470ec03",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1c",
"value" : "0x0a"
@@ -401,32 +401,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020280",
+ "difficulty" : "0x020180",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x7103",
- "hash" : "acbc21529a83269f4f190d73e093bbdafe94f32f843a2ffa0e72de182f58e048",
- "mixHash" : "1f6148bf9a7a211140fa0cf781e5ddaa369b3070c0cd0e5f1af81b0065d803a3",
- "nonce" : "eb641238be657fd8",
+ "hash" : "3c5169a6974478a1a8e6e8de5f942a67837d91abfd28c9a2231e5f7113fe6e22",
+ "mixHash" : "8aaa2ba249f0f6ac6b7b4883e6401f253a742e91e19c94494ecc2594827957f1",
+ "nonce" : "1e0ac4c77cfb4944",
"number" : "0x0b",
- "parentHash" : "4393786b2b4d2cba2bac8f8c0d38ba2a00c270d685f0b61c643c386981668cee",
+ "parentHash" : "245310bb4b10cb54e594627e089006a6d9f86c4bd406730e72099930e0743fb0",
"receiptTrie" : "74da98cf55b2a8c3376b9ab866daeecf11ab2707e5811c675e97eafe90ed6366",
"stateRoot" : "1ab62fae338274ec04fd250c732d5655c0e97312e30f7a8808344d9872f74828",
- "timestamp" : "0x5534c555",
- "transactionsTrie" : "cfe9cf95bb9c3b2a6d93a359ad79f32e64f108c5dd1548ddf1e282a925fc8e0f",
+ "timestamp" : "0x553a1b18",
+ "transactionsTrie" : "1265f76f9afc7d647e080b7bf5056a3ff6ef71f7bb192ada2e30e78869e5b108",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90285f901f9a04393786b2b4d2cba2bac8f8c0d38ba2a00c270d685f0b61c643c386981668ceea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a01ab62fae338274ec04fd250c732d5655c0e97312e30f7a8808344d9872f74828a0cfe9cf95bb9c3b2a6d93a359ad79f32e64f108c5dd1548ddf1e282a925fc8e0fa074da98cf55b2a8c3376b9ab866daeecf11ab2707e5811c675e97eafe90ed6366b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202800b832fefd8827103845534c55580a01f6148bf9a7a211140fa0cf781e5ddaa369b3070c0cd0e5f1af81b0065d803a388eb641238be657fd8f886f8840a018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa49a19a953fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa1ba00ad6f0f4cfec4adb86426e8800f07954b301a4a40d57dea4b10d2260679181bfa0d5381f0f6e929db8863be9154e1759e6eccf3c93aafc33c89e50e9de32254919c0",
+ "rlp" : "0xf90285f901f9a0245310bb4b10cb54e594627e089006a6d9f86c4bd406730e72099930e0743fb0a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a01ab62fae338274ec04fd250c732d5655c0e97312e30f7a8808344d9872f74828a01265f76f9afc7d647e080b7bf5056a3ff6ef71f7bb192ada2e30e78869e5b108a074da98cf55b2a8c3376b9ab866daeecf11ab2707e5811c675e97eafe90ed6366b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830201800b832fefd882710384553a1b1880a08aaa2ba249f0f6ac6b7b4883e6401f253a742e91e19c94494ecc2594827957f1881e0ac4c77cfb4944f886f8840a018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa49a19a953fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa1ca01fd1ab50a8d8587a0aeeb71047c88f1511bc6652c05e5bb5871965ad10849921a0121a6514d03417bfbb9f444c124fc99f1307fd23920cb4a65d8894cc6e465e2ac0",
"transactions" : [
{
"data" : "0x9a19a953fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x0a",
- "r" : "0x0ad6f0f4cfec4adb86426e8800f07954b301a4a40d57dea4b10d2260679181bf",
- "s" : "0xd5381f0f6e929db8863be9154e1759e6eccf3c93aafc33c89e50e9de32254919",
+ "r" : "0x1fd1ab50a8d8587a0aeeb71047c88f1511bc6652c05e5bb5871965ad10849921",
+ "s" : "0x121a6514d03417bfbb9f444c124fc99f1307fd23920cb4a65d8894cc6e465e2a",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1b",
+ "v" : "0x1c",
"value" : "0x0a"
}
],
@@ -437,30 +437,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x0202c0",
+ "difficulty" : "0x0201c0",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x6854",
- "hash" : "a7bd8fa39182e5fd3243b4a563f3541aa3153c38f2238a91c8b9e65e69c60fa5",
- "mixHash" : "2534ca4fd655acf0e97a3bd0ae79b98cc9c1b60ba024342449192fb25dfba703",
- "nonce" : "b49de944850d3283",
+ "hash" : "717f9726b8b87eb4d91be4c6557ac5b9bff9a343abdedee7cb1b3c000c99ebb6",
+ "mixHash" : "8823405e0b4d6a5511d4e9eeba7506ab6c9aad856847cb45933b94228bfd92c5",
+ "nonce" : "13357c2aae943c0d",
"number" : "0x0c",
- "parentHash" : "acbc21529a83269f4f190d73e093bbdafe94f32f843a2ffa0e72de182f58e048",
+ "parentHash" : "3c5169a6974478a1a8e6e8de5f942a67837d91abfd28c9a2231e5f7113fe6e22",
"receiptTrie" : "27225877542d8990dc50b6eeb24cd96465e82fbb76d161f250b8148674f98b78",
"stateRoot" : "2ba54f2871185522061e06b2565c8bf923db4de01d2c4ca30ce958be76934e4f",
- "timestamp" : "0x5534c556",
- "transactionsTrie" : "cf401dd52338ddbe71ac487f102f584150b1a574d7f58d7f9482483bed6c988a",
+ "timestamp" : "0x553a1b1d",
+ "transactionsTrie" : "60b301dccd7bcd2fd1a816436f885cc4cbed6fd1f83c70fc45e74529c26e5a0c",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90285f901f9a0acbc21529a83269f4f190d73e093bbdafe94f32f843a2ffa0e72de182f58e048a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a02ba54f2871185522061e06b2565c8bf923db4de01d2c4ca30ce958be76934e4fa0cf401dd52338ddbe71ac487f102f584150b1a574d7f58d7f9482483bed6c988aa027225877542d8990dc50b6eeb24cd96465e82fbb76d161f250b8148674f98b78b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202c00c832fefd8826854845534c55680a02534ca4fd655acf0e97a3bd0ae79b98cc9c1b60ba024342449192fb25dfba70388b49de944850d3283f886f8840b018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa41774e64600000000000000000000000000000000000000000000000000000000000000081ba01c7bb7b3f62b2937d353814615aa09047731f8a3aeca4ac951160254976c5893a06ffe181f392252d2e2e09beef5543c7a57398c80ec93885e38f9ad282f1b534bc0",
+ "rlp" : "0xf90285f901f9a03c5169a6974478a1a8e6e8de5f942a67837d91abfd28c9a2231e5f7113fe6e22a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a02ba54f2871185522061e06b2565c8bf923db4de01d2c4ca30ce958be76934e4fa060b301dccd7bcd2fd1a816436f885cc4cbed6fd1f83c70fc45e74529c26e5a0ca027225877542d8990dc50b6eeb24cd96465e82fbb76d161f250b8148674f98b78b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830201c00c832fefd882685484553a1b1d80a08823405e0b4d6a5511d4e9eeba7506ab6c9aad856847cb45933b94228bfd92c58813357c2aae943c0df886f8840b018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa41774e64600000000000000000000000000000000000000000000000000000000000000081ba0822b861c319b411d935cad77d270c1775ac80f6dfd1c71bb6115c5d083f9c1c3a0d55ec36b8276709003690f524b65240b3d73fdf300fcf84ed2a11e29d8887fb9c0",
"transactions" : [
{
"data" : "0x1774e6460000000000000000000000000000000000000000000000000000000000000008",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x0b",
- "r" : "0x1c7bb7b3f62b2937d353814615aa09047731f8a3aeca4ac951160254976c5893",
- "s" : "0x6ffe181f392252d2e2e09beef5543c7a57398c80ec93885e38f9ad282f1b534b",
+ "r" : "0x822b861c319b411d935cad77d270c1775ac80f6dfd1c71bb6115c5d083f9c1c3",
+ "s" : "0xd55ec36b8276709003690f524b65240b3d73fdf300fcf84ed2a11e29d8887fb9",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -473,32 +473,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020300",
+ "difficulty" : "0x020200",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0xab4e",
- "hash" : "b34714f73fe981d44a730c44c3fca3c8e92be139396efa5f15e90e777702602e",
- "mixHash" : "c9738acf0e5a0b525d4495e988ff56f0576bc154ff3407a3ebe70bc6841e4780",
- "nonce" : "e1c6f4a13d63cc97",
+ "hash" : "6110fdb4eb62e7be998451a7f8b2b64f58345ca1a2fa10724ebec46ed2c2da37",
+ "mixHash" : "87ac6316439df1f11d3cb437596170de5a67e3b3cd2877658154d6ab4a0deb74",
+ "nonce" : "e5290f2d114004f9",
"number" : "0x0d",
- "parentHash" : "a7bd8fa39182e5fd3243b4a563f3541aa3153c38f2238a91c8b9e65e69c60fa5",
+ "parentHash" : "717f9726b8b87eb4d91be4c6557ac5b9bff9a343abdedee7cb1b3c000c99ebb6",
"receiptTrie" : "97bccf681ea6d6a178ea68933942fc2352bc7396e0bfa07f553bcf4b738141fd",
"stateRoot" : "018714953e28428a08630f5ac0a3e00d9842e422825b283ea5b49670c7c134eb",
- "timestamp" : "0x5534c557",
- "transactionsTrie" : "32cf580698e38972f47350771bc8d77faef08d7b7f222c70b90eace291c48c6e",
+ "timestamp" : "0x553a1b20",
+ "transactionsTrie" : "b5940c63fd7989841a79b7d5fd76c78a2e97026dffc51bb05f4a1d19f9260cd7",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90285f901f9a0a7bd8fa39182e5fd3243b4a563f3541aa3153c38f2238a91c8b9e65e69c60fa5a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0018714953e28428a08630f5ac0a3e00d9842e422825b283ea5b49670c7c134eba032cf580698e38972f47350771bc8d77faef08d7b7f222c70b90eace291c48c6ea097bccf681ea6d6a178ea68933942fc2352bc7396e0bfa07f553bcf4b738141fdb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830203000d832fefd882ab4e845534c55780a0c9738acf0e5a0b525d4495e988ff56f0576bc154ff3407a3ebe70bc6841e478088e1c6f4a13d63cc97f886f8840c018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa4a53b1c1effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee1ba09f70a1f92a15d05ed75877b16d8fd06a1b6992b60b88e21de0d3106a2d7cca6ba0744a29ee77756639449fb7c356f588119778f4fc2f282c5cf30f9978afe4c1e3c0",
+ "rlp" : "0xf90285f901f9a0717f9726b8b87eb4d91be4c6557ac5b9bff9a343abdedee7cb1b3c000c99ebb6a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0018714953e28428a08630f5ac0a3e00d9842e422825b283ea5b49670c7c134eba0b5940c63fd7989841a79b7d5fd76c78a2e97026dffc51bb05f4a1d19f9260cd7a097bccf681ea6d6a178ea68933942fc2352bc7396e0bfa07f553bcf4b738141fdb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202000d832fefd882ab4e84553a1b2080a087ac6316439df1f11d3cb437596170de5a67e3b3cd2877658154d6ab4a0deb7488e5290f2d114004f9f886f8840c018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa4a53b1c1effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee1ca099734b88caadd2e835537b568630a3e5ebc6fb56bf44d5577c929872b707c81aa0faefc92ccef716b6e9a1903ee4294d953d75bd7b6a47ca1a38a20a61be56d67ac0",
"transactions" : [
{
"data" : "0xa53b1c1effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x0c",
- "r" : "0x9f70a1f92a15d05ed75877b16d8fd06a1b6992b60b88e21de0d3106a2d7cca6b",
- "s" : "0x744a29ee77756639449fb7c356f588119778f4fc2f282c5cf30f9978afe4c1e3",
+ "r" : "0x99734b88caadd2e835537b568630a3e5ebc6fb56bf44d5577c929872b707c81a",
+ "s" : "0xfaefc92ccef716b6e9a1903ee4294d953d75bd7b6a47ca1a38a20a61be56d67a",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1b",
+ "v" : "0x1c",
"value" : "0x0a"
}
],
@@ -509,32 +509,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020340",
+ "difficulty" : "0x020240",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0xaba6",
- "hash" : "c3fdd59ac6b56f168f158d378de0433b9a416138d734aa959ef5a5cd77d38f2e",
- "mixHash" : "8edf7e9c3708bb4df64e7c3a0c4136b6b88963f89e19b81ee34a551764fe7cf9",
- "nonce" : "0c177ecf0bf1a8f2",
+ "hash" : "703ad65827a044b72c538376cd3de9b60bdafa44a6abc735a0539fbf3c2eda0a",
+ "mixHash" : "1694fa01dc36ed4a5c2d8bd1d02d14a5f3d3d28c7dafccbda5e0bf16fcef6bf1",
+ "nonce" : "424225a8be230fc1",
"number" : "0x0e",
- "parentHash" : "b34714f73fe981d44a730c44c3fca3c8e92be139396efa5f15e90e777702602e",
+ "parentHash" : "6110fdb4eb62e7be998451a7f8b2b64f58345ca1a2fa10724ebec46ed2c2da37",
"receiptTrie" : "4dd52b5266416165e8a724878585b2e427db7d7fbc0c783c070de34f76c1e8a8",
"stateRoot" : "c16882972e1e84132ed2565729dd757538e45e63550da4354b94c938c4ef4d6c",
- "timestamp" : "0x5534c55a",
- "transactionsTrie" : "750e94e7a7c1b4766f8b4a3a90728146b074a60ca48c103c1747079c2bde6414",
+ "timestamp" : "0x553a1b22",
+ "transactionsTrie" : "13e22eb2177477ff526b988b3156d4f1eae2941c9f0225ed76976e938390dad2",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90285f901f9a0b34714f73fe981d44a730c44c3fca3c8e92be139396efa5f15e90e777702602ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0c16882972e1e84132ed2565729dd757538e45e63550da4354b94c938c4ef4d6ca0750e94e7a7c1b4766f8b4a3a90728146b074a60ca48c103c1747079c2bde6414a04dd52b5266416165e8a724878585b2e427db7d7fbc0c783c070de34f76c1e8a8b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830203400e832fefd882aba6845534c55a80a08edf7e9c3708bb4df64e7c3a0c4136b6b88963f89e19b81ee34a551764fe7cf9880c177ecf0bf1a8f2f886f8840d018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa4d2282dc5ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee1ca060b2083f795a383415877d900898b3785ccc79f6f6c62814edcbf6a6f88c65b0a0d0cf5d13cbf971d50dcd6794d93917fec9c86790655d1725b2cb2e248414eab5c0",
+ "rlp" : "0xf90285f901f9a06110fdb4eb62e7be998451a7f8b2b64f58345ca1a2fa10724ebec46ed2c2da37a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0c16882972e1e84132ed2565729dd757538e45e63550da4354b94c938c4ef4d6ca013e22eb2177477ff526b988b3156d4f1eae2941c9f0225ed76976e938390dad2a04dd52b5266416165e8a724878585b2e427db7d7fbc0c783c070de34f76c1e8a8b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202400e832fefd882aba684553a1b2280a01694fa01dc36ed4a5c2d8bd1d02d14a5f3d3d28c7dafccbda5e0bf16fcef6bf188424225a8be230fc1f886f8840d018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa4d2282dc5ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee1ba0d967317560234c79006e70fe5c21f1a49bf5468a41985d95036131241d26cbd3a0d425f9c32657945bf06fcdbfc96ca561187116cf2eb92e86aba9419128682a35c0",
"transactions" : [
{
"data" : "0xd2282dc5ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x0d",
- "r" : "0x60b2083f795a383415877d900898b3785ccc79f6f6c62814edcbf6a6f88c65b0",
- "s" : "0xd0cf5d13cbf971d50dcd6794d93917fec9c86790655d1725b2cb2e248414eab5",
+ "r" : "0xd967317560234c79006e70fe5c21f1a49bf5468a41985d95036131241d26cbd3",
+ "s" : "0xd425f9c32657945bf06fcdbfc96ca561187116cf2eb92e86aba9419128682a35",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1c",
+ "v" : "0x1b",
"value" : "0x0a"
}
],
@@ -545,30 +545,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020380",
+ "difficulty" : "0x020280",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0xabd8",
- "hash" : "fc11ef9b3b84a7ab7e2a3e7771106c591fc75339cc5653c03dfd6bcf13ece915",
- "mixHash" : "0eaaf7addf17921f267c8a82d71bdb9602b9a86dddcf8b14bba28a01178b00eb",
- "nonce" : "a6b993acc50dd648",
+ "hash" : "dcbd2a5cb48a1743e9cf45333dd93e51fc2dc2c02d3e4427c801f34a66152b02",
+ "mixHash" : "4b3735d1148cc8d35b88141a2bff6708a03901814c6528bbf1ed9e0ef25b4268",
+ "nonce" : "a32c31e4f69e69f4",
"number" : "0x0f",
- "parentHash" : "c3fdd59ac6b56f168f158d378de0433b9a416138d734aa959ef5a5cd77d38f2e",
+ "parentHash" : "703ad65827a044b72c538376cd3de9b60bdafa44a6abc735a0539fbf3c2eda0a",
"receiptTrie" : "3b49b06cd78d6ae74b75898b4bb8f06a4ccbbc3efde37346c26b16eaee3e7ac3",
"stateRoot" : "dc67cfcfbb430e581431424d4fb1e3b4df9dd8db498a60259fbd2bfdfcb9fe38",
- "timestamp" : "0x5534c55b",
- "transactionsTrie" : "0aadfd4539f9059da36c6646ef565c69ed078985fe0069820a5bb09b6112b9f5",
+ "timestamp" : "0x553a1b24",
+ "transactionsTrie" : "4fd1b88ece193cded9f50d31647817c72f7771acc0b46e8053d507f9cebab3a5",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90285f901f9a0c3fdd59ac6b56f168f158d378de0433b9a416138d734aa959ef5a5cd77d38f2ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0dc67cfcfbb430e581431424d4fb1e3b4df9dd8db498a60259fbd2bfdfcb9fe38a00aadfd4539f9059da36c6646ef565c69ed078985fe0069820a5bb09b6112b9f5a03b49b06cd78d6ae74b75898b4bb8f06a4ccbbc3efde37346c26b16eaee3e7ac3b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830203800f832fefd882abd8845534c55b80a00eaaf7addf17921f267c8a82d71bdb9602b9a86dddcf8b14bba28a01178b00eb88a6b993acc50dd648f886f8840e018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa4e30081a0aabbccffffffffffffffffffffffffffffffffffffffffffffffffffffffffee1ca04814756819851a83e492651fec08d49e5e67155fc220115bf7e12333f2589770a0084ab983bdb91b534927f728d783c5522dfa8a27af75de2eb5e0d7e884683286c0",
+ "rlp" : "0xf90285f901f9a0703ad65827a044b72c538376cd3de9b60bdafa44a6abc735a0539fbf3c2eda0aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0dc67cfcfbb430e581431424d4fb1e3b4df9dd8db498a60259fbd2bfdfcb9fe38a04fd1b88ece193cded9f50d31647817c72f7771acc0b46e8053d507f9cebab3a5a03b49b06cd78d6ae74b75898b4bb8f06a4ccbbc3efde37346c26b16eaee3e7ac3b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202800f832fefd882abd884553a1b2480a04b3735d1148cc8d35b88141a2bff6708a03901814c6528bbf1ed9e0ef25b426888a32c31e4f69e69f4f886f8840e018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa4e30081a0aabbccffffffffffffffffffffffffffffffffffffffffffffffffffffffffee1ca09f5dc5e33744b7475046b3e6f1b0fa4ce1fc899441d9b75ef6ecc6c8a3a84796a0142d1bfdf70c44de98561b7ba062a9f1b1d17eb174ae066f40b63716667a732ac0",
"transactions" : [
{
"data" : "0xe30081a0aabbccffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x0e",
- "r" : "0x4814756819851a83e492651fec08d49e5e67155fc220115bf7e12333f2589770",
- "s" : "0x084ab983bdb91b534927f728d783c5522dfa8a27af75de2eb5e0d7e884683286",
+ "r" : "0x9f5dc5e33744b7475046b3e6f1b0fa4ce1fc899441d9b75ef6ecc6c8a3a84796",
+ "s" : "0x142d1bfdf70c44de98561b7ba062a9f1b1d17eb174ae066f40b63716667a732a",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1c",
"value" : "0x0a"
@@ -581,30 +581,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x0203c0",
+ "difficulty" : "0x0202c0",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0xab90",
- "hash" : "6e7d063a881e9786dc038c7864fcecb10d30178ce5ca94535208f6e730b73109",
- "mixHash" : "e93f0a64e3bdc1979582a05dbeaf439179233e2280bcb0dc9308714643d7637d",
- "nonce" : "347961a5cbaae889",
+ "hash" : "78c9c1449e9a55e2d8b73cd9a6efef264ed84a86e295f6cde2a81ab7e854e464",
+ "mixHash" : "42d21b287c8e21ad4fcefa3f56c9ade18b64d7bd89a686ba7fc1a467db08e3af",
+ "nonce" : "9afcf9b165a393c8",
"number" : "0x10",
- "parentHash" : "fc11ef9b3b84a7ab7e2a3e7771106c591fc75339cc5653c03dfd6bcf13ece915",
+ "parentHash" : "dcbd2a5cb48a1743e9cf45333dd93e51fc2dc2c02d3e4427c801f34a66152b02",
"receiptTrie" : "aee9ce41f25cdf0985c64015659434c05cfac046480ce9a1a21bfe49203a731e",
"stateRoot" : "93899270093534acc8bda5d5090e01a0007a31575fd2784f5bfe21c3ccc67055",
- "timestamp" : "0x5534c55d",
- "transactionsTrie" : "f20a173a62497127412f73a06f239e79da9d111bf247b20dcb4aaeb2da2b2e2f",
+ "timestamp" : "0x553a1b26",
+ "transactionsTrie" : "c94ad7fce5faeb9cdf7eb51d60514ee7dff4daba674f318f5b708be13bcd1e2c",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90285f901f9a0fc11ef9b3b84a7ab7e2a3e7771106c591fc75339cc5653c03dfd6bcf13ece915a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a093899270093534acc8bda5d5090e01a0007a31575fd2784f5bfe21c3ccc67055a0f20a173a62497127412f73a06f239e79da9d111bf247b20dcb4aaeb2da2b2e2fa0aee9ce41f25cdf0985c64015659434c05cfac046480ce9a1a21bfe49203a731eb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830203c010832fefd882ab90845534c55d80a0e93f0a64e3bdc1979582a05dbeaf439179233e2280bcb0dc9308714643d7637d88347961a5cbaae889f886f8840f018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa4c2b12a73aabbccffffffffffffffffffffffffffffffffffffffffffffffffffffffffee1ba0a42131ab49971faaabd695febd9f311fe0140fa07adb5e3892403681c2a8a389a0b5cfe62808b1c4dd070c5c0b22fbefe560eb98c978a268a7364139b3696954d9c0",
+ "rlp" : "0xf90285f901f9a0dcbd2a5cb48a1743e9cf45333dd93e51fc2dc2c02d3e4427c801f34a66152b02a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a093899270093534acc8bda5d5090e01a0007a31575fd2784f5bfe21c3ccc67055a0c94ad7fce5faeb9cdf7eb51d60514ee7dff4daba674f318f5b708be13bcd1e2ca0aee9ce41f25cdf0985c64015659434c05cfac046480ce9a1a21bfe49203a731eb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830202c010832fefd882ab9084553a1b2680a042d21b287c8e21ad4fcefa3f56c9ade18b64d7bd89a686ba7fc1a467db08e3af889afcf9b165a393c8f886f8840f018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0aa4c2b12a73aabbccffffffffffffffffffffffffffffffffffffffffffffffffffffffffee1ba0777ac634b4d7c2323e260f850c8228dc1571b2270c933aa8ead1766a9476b911a05e2c46faff7b8a0f1223778004cce98640f1d5e03be80c870cd7490b8ac185e8c0",
"transactions" : [
{
"data" : "0xc2b12a73aabbccffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x0f",
- "r" : "0xa42131ab49971faaabd695febd9f311fe0140fa07adb5e3892403681c2a8a389",
- "s" : "0xb5cfe62808b1c4dd070c5c0b22fbefe560eb98c978a268a7364139b3696954d9",
+ "r" : "0x777ac634b4d7c2323e260f850c8228dc1571b2270c933aa8ead1766a9476b911",
+ "s" : "0x5e2c46faff7b8a0f1223778004cce98640f1d5e03be80c870cd7490b8ac185e8",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -617,32 +617,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020400",
+ "difficulty" : "0x020300",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x549e",
- "hash" : "6a0f7c075630466d1cf16a644c167a3801736baca809605ba08970d1cd9dd152",
- "mixHash" : "645c7e1572d95e5ecd867b403125114d0f86126a484355e9e465bca07a815183",
- "nonce" : "6ec127171c6de662",
+ "hash" : "b17a877a3afdd748b8016dbbd8361a3f88c48cce703a49c728793bb62725da11",
+ "mixHash" : "12f990e9135432c1f53ee9bbf95c7b91cf52bc3bfce1ba17776568f263db9f16",
+ "nonce" : "36ce0ef92bc363a2",
"number" : "0x11",
- "parentHash" : "6e7d063a881e9786dc038c7864fcecb10d30178ce5ca94535208f6e730b73109",
+ "parentHash" : "78c9c1449e9a55e2d8b73cd9a6efef264ed84a86e295f6cde2a81ab7e854e464",
"receiptTrie" : "d00039d362a5c6415eb9636ebe9284a6038b1c7c68f017eb425028eac2bf01e3",
"stateRoot" : "b027e2f22116dd4d9fb509215348d0518d3bf55ce3b3a045bcad066788727be4",
- "timestamp" : "0x5534c55e",
- "transactionsTrie" : "2e765c5552ce30c56c2af4c5f8c4ad5ef10a51e3d3a85554fd9936323c738e24",
+ "timestamp" : "0x553a1b28",
+ "transactionsTrie" : "fa5ec0e8ace9d9bf2ce507265e098d7774b779f90438e30a0758e4a33f963a89",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a06e7d063a881e9786dc038c7864fcecb10d30178ce5ca94535208f6e730b73109a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0b027e2f22116dd4d9fb509215348d0518d3bf55ce3b3a045bcad066788727be4a02e765c5552ce30c56c2af4c5f8c4ad5ef10a51e3d3a85554fd9936323c738e24a0d00039d362a5c6415eb9636ebe9284a6038b1c7c68f017eb425028eac2bf01e3b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302040011832fefd882549e845534c55e80a0645c7e1572d95e5ecd867b403125114d0f86126a484355e9e465bca07a815183886ec127171c6de662f866f86410018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8457cb2fc41ca0b6c15e9a13fb684000bb61aed60b9039e88b9caeaaae828fdf7bf68736060f3aa05b522b53a44f42fa0f80bd983b510f50a30c8efd66c1d57602bcaf748da76bc6c0",
+ "rlp" : "0xf90265f901f9a078c9c1449e9a55e2d8b73cd9a6efef264ed84a86e295f6cde2a81ab7e854e464a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0b027e2f22116dd4d9fb509215348d0518d3bf55ce3b3a045bcad066788727be4a0fa5ec0e8ace9d9bf2ce507265e098d7774b779f90438e30a0758e4a33f963a89a0d00039d362a5c6415eb9636ebe9284a6038b1c7c68f017eb425028eac2bf01e3b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302030011832fefd882549e84553a1b2880a012f990e9135432c1f53ee9bbf95c7b91cf52bc3bfce1ba17776568f263db9f168836ce0ef92bc363a2f866f86410018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8457cb2fc41ba0f5a38f22bd0cd640cce4acd459ec094d1bbd23b462eee5b123a1cfc42abbdabfa0b3f392a84179bb324f4ffec5995cb23f9e814ae4cf52448c59d17e57bed30185c0",
"transactions" : [
{
"data" : "0x57cb2fc4",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x10",
- "r" : "0xb6c15e9a13fb684000bb61aed60b9039e88b9caeaaae828fdf7bf68736060f3a",
- "s" : "0x5b522b53a44f42fa0f80bd983b510f50a30c8efd66c1d57602bcaf748da76bc6",
+ "r" : "0xf5a38f22bd0cd640cce4acd459ec094d1bbd23b462eee5b123a1cfc42abbdabf",
+ "s" : "0xb3f392a84179bb324f4ffec5995cb23f9e814ae4cf52448c59d17e57bed30185",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1c",
+ "v" : "0x1b",
"value" : "0x0a"
}
],
@@ -653,30 +653,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020440",
+ "difficulty" : "0x020340",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5458",
- "hash" : "c74e84fcf9b05a5e2291643ef09d753aaecbaf31c5277f95bf36a9ccdf0f859a",
- "mixHash" : "381b76a05032ab04959b37800f5ebecdffd445a0b66e91218c4cb000b9730a6b",
- "nonce" : "fa46df04875a02d0",
+ "hash" : "6db890e5d3abdc120f5a7bead3bed3f31b13c9d02072b78acd9f945133861b23",
+ "mixHash" : "e65f87d73a4fb22ac3d89f9a72718cd69edc0066e356024d0ba08c920f745ee4",
+ "nonce" : "31b9ad7ff7f852a2",
"number" : "0x12",
- "parentHash" : "6a0f7c075630466d1cf16a644c167a3801736baca809605ba08970d1cd9dd152",
+ "parentHash" : "b17a877a3afdd748b8016dbbd8361a3f88c48cce703a49c728793bb62725da11",
"receiptTrie" : "6a3255d8655bffccd98f2f09f2b5e0909f3ce0dafd3b3ea190aec98478fb5c69",
"stateRoot" : "1697d53ad7dc409245bdefd38d1aa26e29a4a6f6bff7cbde5e2443f2abf9803e",
- "timestamp" : "0x5534c560",
- "transactionsTrie" : "8027697fbe613d921d521ef32e5604cafdeb746ec595807c5e8278bd9b0f5e57",
+ "timestamp" : "0x553a1b2a",
+ "transactionsTrie" : "47a08c6c0c3512b9da4ad93f17a4adfc2d8b6bba71e98a6584b6629a1fe05090",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a06a0f7c075630466d1cf16a644c167a3801736baca809605ba08970d1cd9dd152a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a01697d53ad7dc409245bdefd38d1aa26e29a4a6f6bff7cbde5e2443f2abf9803ea08027697fbe613d921d521ef32e5604cafdeb746ec595807c5e8278bd9b0f5e57a06a3255d8655bffccd98f2f09f2b5e0909f3ce0dafd3b3ea190aec98478fb5c69b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302044012832fefd8825458845534c56080a0381b76a05032ab04959b37800f5ebecdffd445a0b66e91218c4cb000b9730a6b88fa46df04875a02d0f866f86411018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84343a875d1ca0404f97505f84040fcb88010cf96f5e21406683c3b8113b2a527a571826f9a083a00de3b9e243904bcdd18a19fbd4334d759b5e341e988612ad3f2a55a8738ec5edc0",
+ "rlp" : "0xf90265f901f9a0b17a877a3afdd748b8016dbbd8361a3f88c48cce703a49c728793bb62725da11a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a01697d53ad7dc409245bdefd38d1aa26e29a4a6f6bff7cbde5e2443f2abf9803ea047a08c6c0c3512b9da4ad93f17a4adfc2d8b6bba71e98a6584b6629a1fe05090a06a3255d8655bffccd98f2f09f2b5e0909f3ce0dafd3b3ea190aec98478fb5c69b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302034012832fefd882545884553a1b2a80a0e65f87d73a4fb22ac3d89f9a72718cd69edc0066e356024d0ba08c920f745ee48831b9ad7ff7f852a2f866f86411018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84343a875d1ca0a2c019e62a5f2b3ce75f6237129a6ba25021e4190a1271f8b43aa99f52e18558a0baddcd92bf29dbca23dd51ff62e4ae6f0a8727e21c26ce3f2b1609604a4d8faac0",
"transactions" : [
{
"data" : "0x343a875d",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x11",
- "r" : "0x404f97505f84040fcb88010cf96f5e21406683c3b8113b2a527a571826f9a083",
- "s" : "0x0de3b9e243904bcdd18a19fbd4334d759b5e341e988612ad3f2a55a8738ec5ed",
+ "r" : "0xa2c019e62a5f2b3ce75f6237129a6ba25021e4190a1271f8b43aa99f52e18558",
+ "s" : "0xbaddcd92bf29dbca23dd51ff62e4ae6f0a8727e21c26ce3f2b1609604a4d8faa",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1c",
"value" : "0x0a"
@@ -689,32 +689,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020480",
+ "difficulty" : "0x020300",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x559f",
- "hash" : "9f06fda911416d8f6076b7edda13676e12cce84d102c8fe7a563fb4c1b64e83c",
- "mixHash" : "d650b688157ee13af15c8fb4f921b885a6cda2015b5def4c95ec431050896f42",
- "nonce" : "b1e76522474fee6d",
+ "hash" : "520cbc2a7df1b9c7d8ab85e5104be709954bb7f5aa2aacbbb761027597577187",
+ "mixHash" : "39b7b4e23f200ace4a92960e585e0b5bb4d6c0ceb6120f527771bc6183d3430f",
+ "nonce" : "480ddf1bbbc17fd0",
"number" : "0x13",
- "parentHash" : "c74e84fcf9b05a5e2291643ef09d753aaecbaf31c5277f95bf36a9ccdf0f859a",
+ "parentHash" : "6db890e5d3abdc120f5a7bead3bed3f31b13c9d02072b78acd9f945133861b23",
"receiptTrie" : "0449a7bf0b8d6ddd7a9971b479e4f25b17775fd6d5824116802a1457f2fef15c",
"stateRoot" : "9c20b665a4728c222b7131738851c6538c5f195e658bf18bf4a26656ee70b06c",
- "timestamp" : "0x5534c562",
- "transactionsTrie" : "9e9ca9f4c38e568749d06f545794afa42fc18df1af0ca0882ba92b2a89c47aa9",
+ "timestamp" : "0x553a1b35",
+ "transactionsTrie" : "279c5b112080d16893ccf44f85bf950bd44deaa886bfdead20278195cbd27473",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a0c74e84fcf9b05a5e2291643ef09d753aaecbaf31c5277f95bf36a9ccdf0f859aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a09c20b665a4728c222b7131738851c6538c5f195e658bf18bf4a26656ee70b06ca09e9ca9f4c38e568749d06f545794afa42fc18df1af0ca0882ba92b2a89c47aa9a00449a7bf0b8d6ddd7a9971b479e4f25b17775fd6d5824116802a1457f2fef15cb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302048013832fefd882559f845534c56280a0d650b688157ee13af15c8fb4f921b885a6cda2015b5def4c95ec431050896f4288b1e76522474fee6df866f86412018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84f5b53e171ca0f232239ee3e2ac123b885af12a72af5cb433b08d3c9eafd0b6387c91f9ad2d5ea011434088150f7aa69c54f63c9aa8de71fd4dafc58ea5e70bde0443cca9381b2ac0",
+ "rlp" : "0xf90265f901f9a06db890e5d3abdc120f5a7bead3bed3f31b13c9d02072b78acd9f945133861b23a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a09c20b665a4728c222b7131738851c6538c5f195e658bf18bf4a26656ee70b06ca0279c5b112080d16893ccf44f85bf950bd44deaa886bfdead20278195cbd27473a00449a7bf0b8d6ddd7a9971b479e4f25b17775fd6d5824116802a1457f2fef15cb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302030013832fefd882559f84553a1b3580a039b7b4e23f200ace4a92960e585e0b5bb4d6c0ceb6120f527771bc6183d3430f88480ddf1bbbc17fd0f866f86412018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84f5b53e171ba008c014909ce3e24b6ee71bf4d862961bde18bcfb2352e94f7d54a4a3b6a99b7fa04bc547ee276d9e91402f08e6d72329c221f2bf7978190327a81a3cdf57bc96c3c0",
"transactions" : [
{
"data" : "0xf5b53e17",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x12",
- "r" : "0xf232239ee3e2ac123b885af12a72af5cb433b08d3c9eafd0b6387c91f9ad2d5e",
- "s" : "0x11434088150f7aa69c54f63c9aa8de71fd4dafc58ea5e70bde0443cca9381b2a",
+ "r" : "0x08c014909ce3e24b6ee71bf4d862961bde18bcfb2352e94f7d54a4a3b6a99b7f",
+ "s" : "0x4bc547ee276d9e91402f08e6d72329c221f2bf7978190327a81a3cdf57bc96c3",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1c",
+ "v" : "0x1b",
"value" : "0x0a"
}
],
@@ -725,32 +725,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x0204c0",
+ "difficulty" : "0x020340",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5497",
- "hash" : "98a1574109947873b677a662861e6c7a9cfd7e0bb6475a50fbbf2b73d30e3d9b",
- "mixHash" : "3e08f6a2dbb8a5909a25d74d07f88e5f962ebb81ce1b231081401e93725284e0",
- "nonce" : "cb6ec3075134d10c",
+ "hash" : "2b35d9b2bb764826ce577eea9e2d80f7ef349874055c33af4a3de75a0bc89657",
+ "mixHash" : "a23284214a6042725253a2b5d2290479fc65e7943233ab21c3205548c47f6e51",
+ "nonce" : "ae8a83e1af53de9d",
"number" : "0x14",
- "parentHash" : "9f06fda911416d8f6076b7edda13676e12cce84d102c8fe7a563fb4c1b64e83c",
+ "parentHash" : "520cbc2a7df1b9c7d8ab85e5104be709954bb7f5aa2aacbbb761027597577187",
"receiptTrie" : "c348bead08ac910f10f445129eb4a6b6aefd64a58f0b5b029717f8ebe4c0492e",
"stateRoot" : "1140349caa83eb9b871b48f5ef394c6f9b6c3f73f961f5e0f6a99cb20476b3e1",
- "timestamp" : "0x5534c563",
- "transactionsTrie" : "77aeca3be69a05daaced4ed4659b46b8ea909f1475ec5e1132fdc17694064837",
+ "timestamp" : "0x553a1b37",
+ "transactionsTrie" : "0cc8bab2ec27a244b676be79f417a95c7e287342a93cede879b11cde4d713286",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a09f06fda911416d8f6076b7edda13676e12cce84d102c8fe7a563fb4c1b64e83ca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a01140349caa83eb9b871b48f5ef394c6f9b6c3f73f961f5e0f6a99cb20476b3e1a077aeca3be69a05daaced4ed4659b46b8ea909f1475ec5e1132fdc17694064837a0c348bead08ac910f10f445129eb4a6b6aefd64a58f0b5b029717f8ebe4c0492eb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830204c014832fefd8825497845534c56380a03e08f6a2dbb8a5909a25d74d07f88e5f962ebb81ce1b231081401e93725284e088cb6ec3075134d10cf866f86413018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84688959791ba0d38b7e0c45465ac503f31f811a1e52de63efe7ce1367e731dc0b750989c5ae1fa0506a32aee181ce8b203f28e40a2edf1d968cfbc6f56e0b69eafbffb6d9e7aee6c0",
+ "rlp" : "0xf90265f901f9a0520cbc2a7df1b9c7d8ab85e5104be709954bb7f5aa2aacbbb761027597577187a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a01140349caa83eb9b871b48f5ef394c6f9b6c3f73f961f5e0f6a99cb20476b3e1a00cc8bab2ec27a244b676be79f417a95c7e287342a93cede879b11cde4d713286a0c348bead08ac910f10f445129eb4a6b6aefd64a58f0b5b029717f8ebe4c0492eb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302034014832fefd882549784553a1b3780a0a23284214a6042725253a2b5d2290479fc65e7943233ab21c3205548c47f6e5188ae8a83e1af53de9df866f86413018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84688959791ca01e90fc522e6dedabe169e131bbe7a7289481aaf3ae808ec25f9c89bf71daf9e4a014def4c70f2b52833809e05dfd5bcd7d2c76b04ca91b084c47085ec6611f1b15c0",
"transactions" : [
{
"data" : "0x68895979",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x13",
- "r" : "0xd38b7e0c45465ac503f31f811a1e52de63efe7ce1367e731dc0b750989c5ae1f",
- "s" : "0x506a32aee181ce8b203f28e40a2edf1d968cfbc6f56e0b69eafbffb6d9e7aee6",
+ "r" : "0x1e90fc522e6dedabe169e131bbe7a7289481aaf3ae808ec25f9c89bf71daf9e4",
+ "s" : "0x14def4c70f2b52833809e05dfd5bcd7d2c76b04ca91b084c47085ec6611f1b15",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1b",
+ "v" : "0x1c",
"value" : "0x0a"
}
],
@@ -761,32 +761,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020500",
+ "difficulty" : "0x020380",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5464",
- "hash" : "d4b905f7f945beca75e6ba5ff591e1cafa6d5e5cc5de3a87a5d680b1416f0ea1",
- "mixHash" : "16bd26d7368826e8eb299d4d55ee1c9cbb183a246fc067f7ed3f3b53d61d148a",
- "nonce" : "266261c3fd875276",
+ "hash" : "587a1d7f3350ca5fba8021cd4a758171d15951d861e8c4348177ca5b5eaf03e7",
+ "mixHash" : "c8f67e6435438c6e51c6fb888a17791d320a3edac5335f8562e43fe6610e30ef",
+ "nonce" : "a94a46e711322b93",
"number" : "0x15",
- "parentHash" : "98a1574109947873b677a662861e6c7a9cfd7e0bb6475a50fbbf2b73d30e3d9b",
+ "parentHash" : "2b35d9b2bb764826ce577eea9e2d80f7ef349874055c33af4a3de75a0bc89657",
"receiptTrie" : "044097f6b0128b8e9bb7bab10807226db7d16623984e51e65faf25cfdbdbd0ad",
"stateRoot" : "f084cfb8e3b42408c22b277233a1a96e6a131e7153d00f6e5660f6bb6a03c950",
- "timestamp" : "0x5534c564",
- "transactionsTrie" : "d7d8b70294fc27804eaa069631dfdb75ca8413d1f0edc6bece410af96b255b83",
+ "timestamp" : "0x553a1b3b",
+ "transactionsTrie" : "66b5c7724765bb87a7776b72356a748075b546cb3a15a8bb4ff1efcd78f205c6",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a098a1574109947873b677a662861e6c7a9cfd7e0bb6475a50fbbf2b73d30e3d9ba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0f084cfb8e3b42408c22b277233a1a96e6a131e7153d00f6e5660f6bb6a03c950a0d7d8b70294fc27804eaa069631dfdb75ca8413d1f0edc6bece410af96b255b83a0044097f6b0128b8e9bb7bab10807226db7d16623984e51e65faf25cfdbdbd0adb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302050015832fefd8825464845534c56480a016bd26d7368826e8eb299d4d55ee1c9cbb183a246fc067f7ed3f3b53d61d148a88266261c3fd875276f866f86414018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8438cc48311ca0f23586ccc7e75b6161b5f8ece1dff8acc1e2e3a1baa2f0c93837c3e6f683bd5fa0876be82a0ed6f4c42243a5a91792698fe392b5900bfaa6e50606d808624bc4a1c0",
+ "rlp" : "0xf90265f901f9a02b35d9b2bb764826ce577eea9e2d80f7ef349874055c33af4a3de75a0bc89657a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0f084cfb8e3b42408c22b277233a1a96e6a131e7153d00f6e5660f6bb6a03c950a066b5c7724765bb87a7776b72356a748075b546cb3a15a8bb4ff1efcd78f205c6a0044097f6b0128b8e9bb7bab10807226db7d16623984e51e65faf25cfdbdbd0adb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302038015832fefd882546484553a1b3b80a0c8f67e6435438c6e51c6fb888a17791d320a3edac5335f8562e43fe6610e30ef88a94a46e711322b93f866f86414018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8438cc48311ba0bbaeeaeb3161ad86d9eec10c91e9dcf22389eabccc64529f7cb3608e9d321fd1a0492222141ecfaafcf4cd4a3bb346788b722a19493b736f3d7a51264fa85eaa05c0",
"transactions" : [
{
"data" : "0x38cc4831",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x14",
- "r" : "0xf23586ccc7e75b6161b5f8ece1dff8acc1e2e3a1baa2f0c93837c3e6f683bd5f",
- "s" : "0x876be82a0ed6f4c42243a5a91792698fe392b5900bfaa6e50606d808624bc4a1",
+ "r" : "0xbbaeeaeb3161ad86d9eec10c91e9dcf22389eabccc64529f7cb3608e9d321fd1",
+ "s" : "0x492222141ecfaafcf4cd4a3bb346788b722a19493b736f3d7a51264fa85eaa05",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1c",
+ "v" : "0x1b",
"value" : "0x0a"
}
],
@@ -797,30 +797,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020540",
+ "difficulty" : "0x0203c0",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5413",
- "hash" : "339cf9f5c483f77b4a2a051d80bf88766b9df16b38f112d26e58e6e647480374",
- "mixHash" : "26e662287ae422d108f49ded4a2083c44a9bc4d7626fdce37aff2c471e35ad2a",
- "nonce" : "0d2e0efd55194fd3",
+ "hash" : "14b0f66c6f430818dc17dc245d8a43059027b06f651ba86260e01af3bf945776",
+ "mixHash" : "6896781b7ff793c821c28217021d2c90d656a8f99aeeec7c0b741903a0a2e9c3",
+ "nonce" : "fa8bd416304e01b0",
"number" : "0x16",
- "parentHash" : "d4b905f7f945beca75e6ba5ff591e1cafa6d5e5cc5de3a87a5d680b1416f0ea1",
+ "parentHash" : "587a1d7f3350ca5fba8021cd4a758171d15951d861e8c4348177ca5b5eaf03e7",
"receiptTrie" : "fe82ed596e679b78eb505783cbe3306ef1921656135a6cd940e2d6f08d77494b",
"stateRoot" : "f5c1031829c5480356705d4e2592f8a6f734be06aae5583ffdb19d954138952b",
- "timestamp" : "0x5534c566",
- "transactionsTrie" : "c45b33f183e69504ea2e8fbeb076e54e74ac5e3d606151d7eb8ed41e2cbfd365",
+ "timestamp" : "0x553a1b3f",
+ "transactionsTrie" : "ad34df785593b263d639efcedb147592b326b8baa95abfa726572635de7022b9",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a0d4b905f7f945beca75e6ba5ff591e1cafa6d5e5cc5de3a87a5d680b1416f0ea1a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0f5c1031829c5480356705d4e2592f8a6f734be06aae5583ffdb19d954138952ba0c45b33f183e69504ea2e8fbeb076e54e74ac5e3d606151d7eb8ed41e2cbfd365a0fe82ed596e679b78eb505783cbe3306ef1921656135a6cd940e2d6f08d77494bb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302054016832fefd8825413845534c56680a026e662287ae422d108f49ded4a2083c44a9bc4d7626fdce37aff2c471e35ad2a880d2e0efd55194fd3f866f86415018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a841f9030371ba093adafbddaedc687452e4ca3f63eac9b5cf7ad51624ab2343c822b6a5b71e8a1a00ff2e15f7463fc08bdf24a3f1e79a072faeb1882b195d98bdaecf5b05a6fd689c0",
+ "rlp" : "0xf90265f901f9a0587a1d7f3350ca5fba8021cd4a758171d15951d861e8c4348177ca5b5eaf03e7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0f5c1031829c5480356705d4e2592f8a6f734be06aae5583ffdb19d954138952ba0ad34df785593b263d639efcedb147592b326b8baa95abfa726572635de7022b9a0fe82ed596e679b78eb505783cbe3306ef1921656135a6cd940e2d6f08d77494bb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830203c016832fefd882541384553a1b3f80a06896781b7ff793c821c28217021d2c90d656a8f99aeeec7c0b741903a0a2e9c388fa8bd416304e01b0f866f86415018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a841f9030371ba0cdf9581ac0fac62b42a5ebc9433a8d1c8c96bee3526bff088acde46734c51fc6a0589b033800e6dcf83bbff29c9bbbdc628b233898a2963df13562d0eb3f6207c3c0",
"transactions" : [
{
"data" : "0x1f903037",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x15",
- "r" : "0x93adafbddaedc687452e4ca3f63eac9b5cf7ad51624ab2343c822b6a5b71e8a1",
- "s" : "0x0ff2e15f7463fc08bdf24a3f1e79a072faeb1882b195d98bdaecf5b05a6fd689",
+ "r" : "0xcdf9581ac0fac62b42a5ebc9433a8d1c8c96bee3526bff088acde46734c51fc6",
+ "s" : "0x589b033800e6dcf83bbff29c9bbbdc628b233898a2963df13562d0eb3f6207c3",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -833,30 +833,30 @@
"blockHeader" : {
"bloom" : "00000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000002000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000020000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020580",
+ "difficulty" : "0x020400",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x582e",
- "hash" : "84752783c27f0117e71dee9a48c59fa24b76fa30fd249a058e17fc958127f96d",
- "mixHash" : "95785381a82fabb0cb8d95b68c320d8175193e7fa98357909fc93274cb2f14e3",
- "nonce" : "73598b7e6e8fd137",
+ "hash" : "d3336805f07457a6a44c4f09c1548092aa32ac3f026025157a2cc1af7d1996cc",
+ "mixHash" : "c7d467f151251c43795500a47c8530879882a5e79986969e828929713cb76763",
+ "nonce" : "ad32f51f301dff55",
"number" : "0x17",
- "parentHash" : "339cf9f5c483f77b4a2a051d80bf88766b9df16b38f112d26e58e6e647480374",
+ "parentHash" : "14b0f66c6f430818dc17dc245d8a43059027b06f651ba86260e01af3bf945776",
"receiptTrie" : "c667dc6b41d8ea8731caf54152788aeecfc7ec096f5484c421e6a2e7edbc593f",
"stateRoot" : "cb53b9f20460ef47656d23a141e1c85564f9dac9f1bc4c8b1217b63a1510635c",
- "timestamp" : "0x5534c568",
- "transactionsTrie" : "bda571301e04b1b9278d2b30654377fe48803a83bb92d1b6f7be1aca7a2dbfb8",
+ "timestamp" : "0x553a1b42",
+ "transactionsTrie" : "022bf917fdf0d09743801dee9e5f3111850fad37d6409dec806c52b8e11079ba",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a0339cf9f5c483f77b4a2a051d80bf88766b9df16b38f112d26e58e6e647480374a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0cb53b9f20460ef47656d23a141e1c85564f9dac9f1bc4c8b1217b63a1510635ca0bda571301e04b1b9278d2b30654377fe48803a83bb92d1b6f7be1aca7a2dbfb8a0c667dc6b41d8ea8731caf54152788aeecfc7ec096f5484c421e6a2e7edbc593fb90100000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000020000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000200000000008302058017832fefd882582e845534c56880a095785381a82fabb0cb8d95b68c320d8175193e7fa98357909fc93274cb2f14e38873598b7e6e8fd137f866f86416018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8465538c731ca01cc47962617017a55590f6f61e97bc46789174e32d49ca00f7be48d144a8a100a04201793f16ea143353bf98eeb6b0fc4120cacfde8ee16187920671d1fe22921ac0",
+ "rlp" : "0xf90265f901f9a014b0f66c6f430818dc17dc245d8a43059027b06f651ba86260e01af3bf945776a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0cb53b9f20460ef47656d23a141e1c85564f9dac9f1bc4c8b1217b63a1510635ca0022bf917fdf0d09743801dee9e5f3111850fad37d6409dec806c52b8e11079baa0c667dc6b41d8ea8731caf54152788aeecfc7ec096f5484c421e6a2e7edbc593fb90100000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000020000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000200000000008302040017832fefd882582e84553a1b4280a0c7d467f151251c43795500a47c8530879882a5e79986969e828929713cb7676388ad32f51f301dff55f866f86416018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8465538c731ca06d8ea9f24309b4b72a61d7a05cf704324dad5a44d4f66d556d42efab6c4a1b8aa08b68d796fc8bb066e7cc8fcabc566fc6e51896ecb85bf94a1b765113bef8ecb4c0",
"transactions" : [
{
"data" : "0x65538c73",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x16",
- "r" : "0x1cc47962617017a55590f6f61e97bc46789174e32d49ca00f7be48d144a8a100",
- "s" : "0x4201793f16ea143353bf98eeb6b0fc4120cacfde8ee16187920671d1fe22921a",
+ "r" : "0x6d8ea9f24309b4b72a61d7a05cf704324dad5a44d4f66d556d42efab6c4a1b8a",
+ "s" : "0x8b68d796fc8bb066e7cc8fcabc566fc6e51896ecb85bf94a1b765113bef8ecb4",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1c",
"value" : "0x0a"
@@ -869,32 +869,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x0205c0",
+ "difficulty" : "0x020440",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5738",
- "hash" : "e8014da18a004dca198cba842783a6b7109383103126f71a5213cf3cb55a48b9",
- "mixHash" : "95af2b9f721a4a8532c18efb417c784b511f650cab059ccfb22756319d37a06a",
- "nonce" : "57fa282d640acf3b",
+ "hash" : "ae707b8676e57311a51b1feed590fa170ff32b9ef98ad9f8ff29c7cd7cdea168",
+ "mixHash" : "c83369968e4032cb665e1944036cbfb63589fcd75fd66f3753a5f786f2d45d1a",
+ "nonce" : "b1e73a306b9ed091",
"number" : "0x18",
- "parentHash" : "84752783c27f0117e71dee9a48c59fa24b76fa30fd249a058e17fc958127f96d",
+ "parentHash" : "d3336805f07457a6a44c4f09c1548092aa32ac3f026025157a2cc1af7d1996cc",
"receiptTrie" : "181307e1d7796e3bcb467ce57441efe9f6e50f2161af66b21806ab7ffcc9ec1b",
"stateRoot" : "f5c10182486d29a22b8b24d36ee948fdd8c17db91ebbf8c41995cb1456dfea57",
- "timestamp" : "0x5534c56a",
- "transactionsTrie" : "b1243810d5e1cf0ab170e2d52645bca09250d479a2f0ba41e8ee79afba586a46",
+ "timestamp" : "0x553a1b46",
+ "transactionsTrie" : "665ca7bfb2c995b0363bdf9907a91c760aec6758c4e3837c9e97fc7f617af433",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a084752783c27f0117e71dee9a48c59fa24b76fa30fd249a058e17fc958127f96da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0f5c10182486d29a22b8b24d36ee948fdd8c17db91ebbf8c41995cb1456dfea57a0b1243810d5e1cf0ab170e2d52645bca09250d479a2f0ba41e8ee79afba586a46a0181307e1d7796e3bcb467ce57441efe9f6e50f2161af66b21806ab7ffcc9ec1bb9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000830205c018832fefd8825738845534c56a80a095af2b9f721a4a8532c18efb417c784b511f650cab059ccfb22756319d37a06a8857fa282d640acf3bf866f86417018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84a67808571ba095a11d00e0911602facdf4a7e350a8c0ec10683ec1474e01316fcafb3020499ca0ab8c3fb0afe7519ca8c850c0a2c725c59c1855183c812ca3149ca367a2653fe5c0",
+ "rlp" : "0xf90265f901f9a0d3336805f07457a6a44c4f09c1548092aa32ac3f026025157a2cc1af7d1996cca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0f5c10182486d29a22b8b24d36ee948fdd8c17db91ebbf8c41995cb1456dfea57a0665ca7bfb2c995b0363bdf9907a91c760aec6758c4e3837c9e97fc7f617af433a0181307e1d7796e3bcb467ce57441efe9f6e50f2161af66b21806ab7ffcc9ec1bb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000008302044018832fefd882573884553a1b4680a0c83369968e4032cb665e1944036cbfb63589fcd75fd66f3753a5f786f2d45d1a88b1e73a306b9ed091f866f86417018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84a67808571ca0bbdeb52c1d81752e672de5acfd55b0ea36766b7d3f3461c52e9a46240c226f81a040f01629f5768bd52183115acad2f36ef3f7b964a4634289461f1c0e5a4096d6c0",
"transactions" : [
{
"data" : "0xa6780857",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x17",
- "r" : "0x95a11d00e0911602facdf4a7e350a8c0ec10683ec1474e01316fcafb3020499c",
- "s" : "0xab8c3fb0afe7519ca8c850c0a2c725c59c1855183c812ca3149ca367a2653fe5",
+ "r" : "0xbbdeb52c1d81752e672de5acfd55b0ea36766b7d3f3461c52e9a46240c226f81",
+ "s" : "0x40f01629f5768bd52183115acad2f36ef3f7b964a4634289461f1c0e5a4096d6",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1b",
+ "v" : "0x1c",
"value" : "0x0a"
}
],
@@ -905,32 +905,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000002000000000000000000000000000000000002000000000000000100000000000000000000000000000000000000000000000000000000000800000000040000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020600",
+ "difficulty" : "0x020480",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5a42",
- "hash" : "7e3ac3494f6e913e9b2ca8e59a40a1b3db0027efccc447f14f5fa9cc33e2812a",
- "mixHash" : "6efdb118be55d464bf9475a11b04ee6bcb9c8238d7c3158c4e911f540d034aae",
- "nonce" : "83701fca1267ac6d",
+ "hash" : "480722b0eac8201dad9ebd9947aea7a238737e49015806b03d288fcaeac2406a",
+ "mixHash" : "c3db822c42b702faac9aaa17aa2f00003fa9d01b71ea08209bfcb654eb946797",
+ "nonce" : "6b45447ed7f1d9be",
"number" : "0x19",
- "parentHash" : "e8014da18a004dca198cba842783a6b7109383103126f71a5213cf3cb55a48b9",
+ "parentHash" : "ae707b8676e57311a51b1feed590fa170ff32b9ef98ad9f8ff29c7cd7cdea168",
"receiptTrie" : "c08df662cdedd41d4c76d25c9db1036aac4572b52b8a7c6b332479cb41981eed",
"stateRoot" : "2a623945454824b5bd3b14254ec60e3e3c379f676a537b253bb84a1e70b797a4",
- "timestamp" : "0x5534c56b",
- "transactionsTrie" : "4f465193d7d127bd1023cd1fa3e0769f7f769c9986836ba2aa6c92c6e3627124",
+ "timestamp" : "0x553a1b4b",
+ "transactionsTrie" : "73dc53021684d9b645ab4278e5f8f6dd3b0077377dffe7919ffd2aee970cca27",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a0e8014da18a004dca198cba842783a6b7109383103126f71a5213cf3cb55a48b9a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a02a623945454824b5bd3b14254ec60e3e3c379f676a537b253bb84a1e70b797a4a04f465193d7d127bd1023cd1fa3e0769f7f769c9986836ba2aa6c92c6e3627124a0c08df662cdedd41d4c76d25c9db1036aac4572b52b8a7c6b332479cb41981eedb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000020000000000000000000000000000000000020000000000000001000000000000000000000000000000000000000000000000000000000008000000000400000000000000000000000000000000000000000000000000000000000000000008302060019832fefd8825a42845534c56b80a06efdb118be55d464bf9475a11b04ee6bcb9c8238d7c3158c4e911f540d034aae8883701fca1267ac6df866f86418018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84b61c05031ba0f13029035b159c203d673a17a0fade005afecd92475911e6626b8d1d578a96e9a067154293d360e6a88f20a3fbe92ff0456732a718836e53dc4b60070d1c8208dcc0",
+ "rlp" : "0xf90265f901f9a0ae707b8676e57311a51b1feed590fa170ff32b9ef98ad9f8ff29c7cd7cdea168a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a02a623945454824b5bd3b14254ec60e3e3c379f676a537b253bb84a1e70b797a4a073dc53021684d9b645ab4278e5f8f6dd3b0077377dffe7919ffd2aee970cca27a0c08df662cdedd41d4c76d25c9db1036aac4572b52b8a7c6b332479cb41981eedb90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000020000000000000000000000000000000000020000000000000001000000000000000000000000000000000000000000000000000000000008000000000400000000000000000000000000000000000000000000000000000000000000000008302048019832fefd8825a4284553a1b4b80a0c3db822c42b702faac9aaa17aa2f00003fa9d01b71ea08209bfcb654eb946797886b45447ed7f1d9bef866f86418018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84b61c05031ca0384cc3ed79d090df94bc11e12b2ff782f59313e080200721bf4047727509c1aca03ff8f6807c96dafc12128453a4b44050e8a030de42386979cb9a4dcd36cb4e7bc0",
"transactions" : [
{
"data" : "0xb61c0503",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x18",
- "r" : "0xf13029035b159c203d673a17a0fade005afecd92475911e6626b8d1d578a96e9",
- "s" : "0x67154293d360e6a88f20a3fbe92ff0456732a718836e53dc4b60070d1c8208dc",
+ "r" : "0x384cc3ed79d090df94bc11e12b2ff782f59313e080200721bf4047727509c1ac",
+ "s" : "0x3ff8f6807c96dafc12128453a4b44050e8a030de42386979cb9a4dcd36cb4e7b",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1b",
+ "v" : "0x1c",
"value" : "0x0a"
}
],
@@ -941,32 +941,32 @@
"blockHeader" : {
"bloom" : "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000040000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020640",
+ "difficulty" : "0x020440",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5802",
- "hash" : "5952a7e91a34c2fb1d3e997ebf2d3c42e6c067e60c2f24eb966514dc5e694412",
- "mixHash" : "0b132dfb0b8cd7880823d5610852376b97750e4a7b6a8157892cd3af0724baef",
- "nonce" : "1c15f1ed87d633dd",
+ "hash" : "08ec22cd37de62f2258619fff391091503c8904a13afae05b716e9a5aba888a4",
+ "mixHash" : "dbaf204d073383189ba74e196b98570529510cf9fc7912ab87040517b6b2033e",
+ "nonce" : "57ec950b08646a02",
"number" : "0x1a",
- "parentHash" : "7e3ac3494f6e913e9b2ca8e59a40a1b3db0027efccc447f14f5fa9cc33e2812a",
+ "parentHash" : "480722b0eac8201dad9ebd9947aea7a238737e49015806b03d288fcaeac2406a",
"receiptTrie" : "eb58c04360a8707c47317e726f0497e8ee256d8925512071281b92eded677ac4",
"stateRoot" : "ef49df1d8b209193b24bd68de9919da0e05804295ae0804df0bfc172beba8bb8",
- "timestamp" : "0x5534c56d",
- "transactionsTrie" : "ee93bd9e331b2ba80c933dc3da36bf2e2673e067de18fa0d89ba256543706cca",
+ "timestamp" : "0x553a1b53",
+ "transactionsTrie" : "8027c24744eaf0c10d936371174f79343431c956099aba2e85bd4754b46d5400",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a07e3ac3494f6e913e9b2ca8e59a40a1b3db0027efccc447f14f5fa9cc33e2812aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef49df1d8b209193b24bd68de9919da0e05804295ae0804df0bfc172beba8bb8a0ee93bd9e331b2ba80c933dc3da36bf2e2673e067de18fa0d89ba256543706ccaa0eb58c04360a8707c47317e726f0497e8ee256d8925512071281b92eded677ac4b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000040000000000000000000000000000000000000000000000000000000000000000000830206401a832fefd8825802845534c56d80a00b132dfb0b8cd7880823d5610852376b97750e4a7b6a8157892cd3af0724baef881c15f1ed87d633ddf866f86419018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a844e7ad3671ca0def610a586c108827ee66278ccfffeb73d3feb3e51c41dfa8dd23930329fc7b2a0939db842c48bec7ba67ad90b3527dbf2cdbb1a84ddd71abf996c9e4ae76edd96c0",
+ "rlp" : "0xf90265f901f9a0480722b0eac8201dad9ebd9947aea7a238737e49015806b03d288fcaeac2406aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef49df1d8b209193b24bd68de9919da0e05804295ae0804df0bfc172beba8bb8a08027c24744eaf0c10d936371174f79343431c956099aba2e85bd4754b46d5400a0eb58c04360a8707c47317e726f0497e8ee256d8925512071281b92eded677ac4b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000040000000000000000000000000000000000000000000000000000000000000000000830204401a832fefd882580284553a1b5380a0dbaf204d073383189ba74e196b98570529510cf9fc7912ab87040517b6b2033e8857ec950b08646a02f866f86419018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a844e7ad3671ba05b0770ae37df4cbbd6da2e502b1d572bb2bc72fae9fae4cc488605b277385587a0bd8727444ede984e2bfd9184a1ac9340f5c16127be355147511ac685b3a370d4c0",
"transactions" : [
{
"data" : "0x4e7ad367",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x19",
- "r" : "0xdef610a586c108827ee66278ccfffeb73d3feb3e51c41dfa8dd23930329fc7b2",
- "s" : "0x939db842c48bec7ba67ad90b3527dbf2cdbb1a84ddd71abf996c9e4ae76edd96",
+ "r" : "0x5b0770ae37df4cbbd6da2e502b1d572bb2bc72fae9fae4cc488605b277385587",
+ "s" : "0xbd8727444ede984e2bfd9184a1ac9340f5c16127be355147511ac685b3a370d4",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
- "v" : "0x1c",
+ "v" : "0x1b",
"value" : "0x0a"
}
],
@@ -977,30 +977,30 @@
"blockHeader" : {
"bloom" : "00200000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000008000800000000040000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020680",
+ "difficulty" : "0x020480",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5a61",
- "hash" : "43fc62971c4a178b4c2fe000b92b797d59bbb5f5c0dd72d361e3efdd21591234",
- "mixHash" : "af86e913f2cbc3437f69bb2e27ccbfaea180561ff5e3d7b20946807f907029e9",
- "nonce" : "39472edb7c3ae4ae",
+ "hash" : "41fa955b3425bcacc96e995bf7b8b55c4a4788581f151e56b5ce25abb023dfbb",
+ "mixHash" : "c2e2b3f0cf085846b3b6c7f1c700c7072984fd36c17320ee68d20d0c43a27559",
+ "nonce" : "14d9d1f8d1f508cc",
"number" : "0x1b",
- "parentHash" : "5952a7e91a34c2fb1d3e997ebf2d3c42e6c067e60c2f24eb966514dc5e694412",
+ "parentHash" : "08ec22cd37de62f2258619fff391091503c8904a13afae05b716e9a5aba888a4",
"receiptTrie" : "76e8b8f5d3f286863894b00ae5a303f8f81d34c89de8caed6929a3b1fae7b911",
"stateRoot" : "7a56ddcde169a9aec1a427d86d5e2dd62d9d717fe5046994d1aa4c30152f4064",
- "timestamp" : "0x5534c56e",
- "transactionsTrie" : "7d9a561fa9eaf16c04e846c9176034540e1d56d62842b49d9adec512860b07bb",
+ "timestamp" : "0x553a1b55",
+ "transactionsTrie" : "789f27f3026e996a2db7ffb2ed406f125bc768848ec1e2eec690c917dbb3999b",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a05952a7e91a34c2fb1d3e997ebf2d3c42e6c067e60c2f24eb966514dc5e694412a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07a56ddcde169a9aec1a427d86d5e2dd62d9d717fe5046994d1aa4c30152f4064a07d9a561fa9eaf16c04e846c9176034540e1d56d62842b49d9adec512860b07bba076e8b8f5d3f286863894b00ae5a303f8f81d34c89de8caed6929a3b1fae7b911b9010000200000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000008000800000000040000000000000000000000000000000000000000000000000000000000000000000830206801b832fefd8825a61845534c56e80a0af86e913f2cbc3437f69bb2e27ccbfaea180561ff5e3d7b20946807f907029e98839472edb7c3ae4aef866f8641a018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84102accc11ba058d38bab40f0657fe1a6b06c819b74e567ce434ce62fe3040703783b580ca5c5a0deb85218cd4b0d5afbdfee4de225481dee0595968f16770b3804fbef66fc607ac0",
+ "rlp" : "0xf90265f901f9a008ec22cd37de62f2258619fff391091503c8904a13afae05b716e9a5aba888a4a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07a56ddcde169a9aec1a427d86d5e2dd62d9d717fe5046994d1aa4c30152f4064a0789f27f3026e996a2db7ffb2ed406f125bc768848ec1e2eec690c917dbb3999ba076e8b8f5d3f286863894b00ae5a303f8f81d34c89de8caed6929a3b1fae7b911b9010000200000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000008000800000000040000000000000000000000000000000000000000000000000000000000000000000830204801b832fefd8825a6184553a1b5580a0c2e2b3f0cf085846b3b6c7f1c700c7072984fd36c17320ee68d20d0c43a275598814d9d1f8d1f508ccf866f8641a018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84102accc11ba0565067b016a1d3553ed9f5723cc006881602464380095341a55b830b5248963ba0ec1ace5b778ddbcbcb684a10624d9a869e099376b48bede894a4712eafd561d6c0",
"transactions" : [
{
"data" : "0x102accc1",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x1a",
- "r" : "0x58d38bab40f0657fe1a6b06c819b74e567ce434ce62fe3040703783b580ca5c5",
- "s" : "0xdeb85218cd4b0d5afbdfee4de225481dee0595968f16770b3804fbef66fc607a",
+ "r" : "0x565067b016a1d3553ed9f5723cc006881602464380095341a55b830b5248963b",
+ "s" : "0xec1ace5b778ddbcbcb684a10624d9a869e099376b48bede894a4712eafd561d6",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -1013,30 +1013,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000040000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x0206c0",
+ "difficulty" : "0x0204c0",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x59d9",
- "hash" : "47daaf847d4f036c38ec7f3d8fcf61018eed34df83d477846451d1f2b40b9038",
- "mixHash" : "a92c23a10512cc6bd13b2bd7e429e2b691864c03805e55854f0f897f360b2bb4",
- "nonce" : "f0773be68eb505fa",
+ "hash" : "78e97c4bafaa550e6ca2716c4cca7b34cd80034b0c89aabb1cec761a4178be6a",
+ "mixHash" : "800a60b10ebc30e787d95c51c5ee8ea98a5ad55c082e2cdd7933c4427f717bc1",
+ "nonce" : "0b2cf1efc5d8305e",
"number" : "0x1c",
- "parentHash" : "43fc62971c4a178b4c2fe000b92b797d59bbb5f5c0dd72d361e3efdd21591234",
+ "parentHash" : "41fa955b3425bcacc96e995bf7b8b55c4a4788581f151e56b5ce25abb023dfbb",
"receiptTrie" : "65d713278408fc3a3edf8f155300e7a06c59bf358c29a23f105c392d225ec5ee",
"stateRoot" : "c0353d0d5da74d43b9d424ab4972b30104586d64cb82ddf2c03a3f877678a781",
- "timestamp" : "0x5534c570",
- "transactionsTrie" : "7afa39c9f0fcc6a460a982add4600bd350aa54a008d6142a467a78e03d7af681",
+ "timestamp" : "0x553a1b5c",
+ "transactionsTrie" : "36e50447ca86edc0df207cc74f9f161daf3d4feccc9ca4dd4559291317ba69ef",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a043fc62971c4a178b4c2fe000b92b797d59bbb5f5c0dd72d361e3efdd21591234a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0c0353d0d5da74d43b9d424ab4972b30104586d64cb82ddf2c03a3f877678a781a07afa39c9f0fcc6a460a982add4600bd350aa54a008d6142a467a78e03d7af681a065d713278408fc3a3edf8f155300e7a06c59bf358c29a23f105c392d225ec5eeb9010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000040000000000000000000000000000000000000000000000000000000000000000000830206c01c832fefd88259d9845534c57080a0a92c23a10512cc6bd13b2bd7e429e2b691864c03805e55854f0f897f360b2bb488f0773be68eb505faf866f8641b018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8476bc21d91ba0c1f4120f5c71d7db04dd9ef9aa8a02bf5c1435f0553bb2590fbe43fa80d89749a0ef250209a53d1e9d9503572ddd6da4b422ef7f4a4634cea150545dd41f4c793fc0",
+ "rlp" : "0xf90265f901f9a041fa955b3425bcacc96e995bf7b8b55c4a4788581f151e56b5ce25abb023dfbba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0c0353d0d5da74d43b9d424ab4972b30104586d64cb82ddf2c03a3f877678a781a036e50447ca86edc0df207cc74f9f161daf3d4feccc9ca4dd4559291317ba69efa065d713278408fc3a3edf8f155300e7a06c59bf358c29a23f105c392d225ec5eeb9010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000040000000000000000000000000000000000000000000000000000000000000000000830204c01c832fefd88259d984553a1b5c80a0800a60b10ebc30e787d95c51c5ee8ea98a5ad55c082e2cdd7933c4427f717bc1880b2cf1efc5d8305ef866f8641b018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a8476bc21d91ba0237cd54d6191727bca5f7270db7fbc6a138fa6fa4f9653a7507e42a6c4c930f5a09da79a8953c5e048ee12374743fbdbd74abdbcebd8bd18d45c3fbd930ff3fb49c0",
"transactions" : [
{
"data" : "0x76bc21d9",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x1b",
- "r" : "0xc1f4120f5c71d7db04dd9ef9aa8a02bf5c1435f0553bb2590fbe43fa80d89749",
- "s" : "0xef250209a53d1e9d9503572ddd6da4b422ef7f4a4634cea150545dd41f4c793f",
+ "r" : "0x237cd54d6191727bca5f7270db7fbc6a138fa6fa4f9653a7507e42a6c4c930f5",
+ "s" : "0x9da79a8953c5e048ee12374743fbdbd74abdbcebd8bd18d45c3fbd930ff3fb49",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -1049,30 +1049,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000240000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200080000000000000002000000000000000000000000000000000000000000000000000800000000000000000000000200000000000000000000000000000000000800000000040000000000000000000000000000000000000000010000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020700",
+ "difficulty" : "0x020500",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5d71",
- "hash" : "4f1466f6366728a6bd6015641777e339e92d1aa22f759cea7086f0e37c6b429c",
- "mixHash" : "66bdf98cb8f369796aa091665edb0ac264f59a4d826e8b0002cf4b7a5d88c792",
- "nonce" : "931c1152e320e3cd",
+ "hash" : "815ab3b219c8fec53c583affcda77bdae5c7f4cd16a8d127a72e101abf659948",
+ "mixHash" : "127b2a374152d144a6c5818e1a873185ca71ce3d67076d4df2b4a88bbb956775",
+ "nonce" : "3638bc45e39f352c",
"number" : "0x1d",
- "parentHash" : "47daaf847d4f036c38ec7f3d8fcf61018eed34df83d477846451d1f2b40b9038",
+ "parentHash" : "78e97c4bafaa550e6ca2716c4cca7b34cd80034b0c89aabb1cec761a4178be6a",
"receiptTrie" : "8498f39de18410f7bcbe74c00a66af847d535e07503c007cea15910862415379",
"stateRoot" : "a3b09d9fecda245dadfc9a4f5471307f063300910d07ec18d33efa767a52d84f",
- "timestamp" : "0x5534c572",
- "transactionsTrie" : "cd89fc94b35307cf57efc239933ea7ba420946dbaf865dc5b23ec2644f873312",
+ "timestamp" : "0x553a1b5d",
+ "transactionsTrie" : "fc3a34c3d85a6b3406727ff64aea4de89dc3d958e85da8dd38a13b3bb7bf3450",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a047daaf847d4f036c38ec7f3d8fcf61018eed34df83d477846451d1f2b40b9038a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0a3b09d9fecda245dadfc9a4f5471307f063300910d07ec18d33efa767a52d84fa0cd89fc94b35307cf57efc239933ea7ba420946dbaf865dc5b23ec2644f873312a08498f39de18410f7bcbe74c00a66af847d535e07503c007cea15910862415379b9010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000240000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200080000000000000002000000000000000000000000000000000000000000000000000800000000000000000000000200000000000000000000000000000000000800000000040000000000000000000000000000000000000000010000000000000000000000000830207001d832fefd8825d71845534c57280a066bdf98cb8f369796aa091665edb0ac264f59a4d826e8b0002cf4b7a5d88c79288931c1152e320e3cdf866f8641c018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84f38b06001ca00cd83d985dd8e1887ad1c4153bc216c93d078fb59e1443ea73c871f6aeee5bf7a049ff3a8b497682e9b58e6272497f9105c76b957c2c2a1f1c43be12b6400ccc47c0",
+ "rlp" : "0xf90265f901f9a078e97c4bafaa550e6ca2716c4cca7b34cd80034b0c89aabb1cec761a4178be6aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0a3b09d9fecda245dadfc9a4f5471307f063300910d07ec18d33efa767a52d84fa0fc3a34c3d85a6b3406727ff64aea4de89dc3d958e85da8dd38a13b3bb7bf3450a08498f39de18410f7bcbe74c00a66af847d535e07503c007cea15910862415379b9010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000240000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200080000000000000002000000000000000000000000000000000000000000000000000800000000000000000000000200000000000000000000000000000000000800000000040000000000000000000000000000000000000000010000000000000000000000000830205001d832fefd8825d7184553a1b5d80a0127b2a374152d144a6c5818e1a873185ca71ce3d67076d4df2b4a88bbb956775883638bc45e39f352cf866f8641c018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84f38b06001ca084bd487a08249befeca7e57a6e8ccc54b948b1f8a12605386d26931acda99727a0a410fce7795ea2c91c0b3ebd6a2c8f85ca7828dfbbfb959f8f65b04593cbb12ec0",
"transactions" : [
{
"data" : "0xf38b0600",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x1c",
- "r" : "0x0cd83d985dd8e1887ad1c4153bc216c93d078fb59e1443ea73c871f6aeee5bf7",
- "s" : "0x49ff3a8b497682e9b58e6272497f9105c76b957c2c2a1f1c43be12b6400ccc47",
+ "r" : "0x84bd487a08249befeca7e57a6e8ccc54b948b1f8a12605386d26931acda99727",
+ "s" : "0xa410fce7795ea2c91c0b3ebd6a2c8f85ca7828dfbbfb959f8f65b04593cbb12e",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1c",
"value" : "0x0a"
@@ -1085,30 +1085,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000800000000040000000000000000000000000000000000000000010000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020740",
+ "difficulty" : "0x020540",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5c21",
- "hash" : "a6e40cd9b784c5e54295b6abf14e9f92a689cc3e57a86dcac3f4728f007f509b",
- "mixHash" : "f8e4b8625a17b619f67e0d574ea7c6a0f2b5f056f032e5e13c2b4ded4b2d04cb",
- "nonce" : "f86ee916c97c2206",
+ "hash" : "143b15610d95d496d57a47cd509e41a8c45e01a31e7002549b838f7b9e0f8938",
+ "mixHash" : "97c2af84b6ed20bae0f9cf4ac124eacb77c8b10aca3ab1bdadf83e98c9443e2b",
+ "nonce" : "bdf576bbdd0e92ab",
"number" : "0x1e",
- "parentHash" : "4f1466f6366728a6bd6015641777e339e92d1aa22f759cea7086f0e37c6b429c",
+ "parentHash" : "815ab3b219c8fec53c583affcda77bdae5c7f4cd16a8d127a72e101abf659948",
"receiptTrie" : "4c151deed91a8ab9fb1517a9fb47003a8faf6ed5efd0c48c8b985490a7dadc20",
"stateRoot" : "ee62a0359cc4cb19f62503b6c918e78956c21141138dcae5026074aa89052769",
- "timestamp" : "0x5534c573",
- "transactionsTrie" : "f777556d7039f06bb28c1661a16e1883b8d6888131ac7c9013533d111bd05307",
+ "timestamp" : "0x553a1b60",
+ "transactionsTrie" : "e19c76c245dda4a111187cb0c5eaaa5d8661d8660b3fbadc6fecd2464ee1f654",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a04f1466f6366728a6bd6015641777e339e92d1aa22f759cea7086f0e37c6b429ca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ee62a0359cc4cb19f62503b6c918e78956c21141138dcae5026074aa89052769a0f777556d7039f06bb28c1661a16e1883b8d6888131ac7c9013533d111bd05307a04c151deed91a8ab9fb1517a9fb47003a8faf6ed5efd0c48c8b985490a7dadc20b9010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000800000000040000000000000000000000000000000000000000010000000000000000000000000830207401e832fefd8825c21845534c57380a0f8e4b8625a17b619f67e0d574ea7c6a0f2b5f056f032e5e13c2b4ded4b2d04cb88f86ee916c97c2206f866f8641d018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84e8beef5b1ba0391581f8f55a600a9b31f2c6d1a169c1547890fe721095c7d3478b7195500981a068e04915b219c736bc5ee7578080c8d0d5d8af5272145db2e5f42c3f9e0f03f7c0",
+ "rlp" : "0xf90265f901f9a0815ab3b219c8fec53c583affcda77bdae5c7f4cd16a8d127a72e101abf659948a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ee62a0359cc4cb19f62503b6c918e78956c21141138dcae5026074aa89052769a0e19c76c245dda4a111187cb0c5eaaa5d8661d8660b3fbadc6fecd2464ee1f654a04c151deed91a8ab9fb1517a9fb47003a8faf6ed5efd0c48c8b985490a7dadc20b9010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000800000000040000000000000000000000000000000000000000010000000000000000000000000830205401e832fefd8825c2184553a1b6080a097c2af84b6ed20bae0f9cf4ac124eacb77c8b10aca3ab1bdadf83e98c9443e2b88bdf576bbdd0e92abf866f8641d018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84e8beef5b1ba090f54598b876b4d4b0620ee85c0d049aaa77be5c8081927152991685a42f36e5a0138aabb91f7d93334aa79896120c61d0a04f3a091a0204ec42ba1ad5dd3d829fc0",
"transactions" : [
{
"data" : "0xe8beef5b",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x1d",
- "r" : "0x391581f8f55a600a9b31f2c6d1a169c1547890fe721095c7d3478b7195500981",
- "s" : "0x68e04915b219c736bc5ee7578080c8d0d5d8af5272145db2e5f42c3f9e0f03f7",
+ "r" : "0x90f54598b876b4d4b0620ee85c0d049aaa77be5c8081927152991685a42f36e5",
+ "s" : "0x138aabb91f7d93334aa79896120c61d0a04f3a091a0204ec42ba1ad5dd3d829f",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -1121,30 +1121,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200000000000000000002000000000000000000000001000000000000000000000000000000000000000000000000000200000000000000000000000000000000000800000000040000000000001000000000000000000000000000010000000000000000000400000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x020780",
+ "difficulty" : "0x020580",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5eef",
- "hash" : "be25f1be50da9f7226d7bf952531c2b36e881464aa75cb4460461fd02e00ea46",
- "mixHash" : "82a80fc028507ba0c25d46903363484c164f47834c5d65f43cac01c253b1a0a5",
- "nonce" : "f8dcc3cef52e59ff",
+ "hash" : "9208f12904f14166da28160c47d4c25a42a9588e1d4e3d302d50109c827272ea",
+ "mixHash" : "4008a6fa7efd92ecf45da379537f2a938a4c839d6d812467818262cfa3081d49",
+ "nonce" : "8197bce79a944542",
"number" : "0x1f",
- "parentHash" : "a6e40cd9b784c5e54295b6abf14e9f92a689cc3e57a86dcac3f4728f007f509b",
+ "parentHash" : "143b15610d95d496d57a47cd509e41a8c45e01a31e7002549b838f7b9e0f8938",
"receiptTrie" : "1697903d9a0ed2794f7bf5ed04eeae12037ae12024f3da59a6e2f2956c5fa9ab",
"stateRoot" : "65c6edf7081080d3cc1d7895e70eb12d897cb3c95ae3e66fe2a351d8891f01ba",
- "timestamp" : "0x5534c576",
- "transactionsTrie" : "6ec9e8ce0f468158d4f629ce6438fdf51467e5d2b01e7120f43b9af80c1d879c",
+ "timestamp" : "0x553a1b61",
+ "transactionsTrie" : "c57a7eee7b0a12de09021d9a5b4982e7fa7a8f0672c53dd1d576b1238dee1e34",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a0a6e40cd9b784c5e54295b6abf14e9f92a689cc3e57a86dcac3f4728f007f509ba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a065c6edf7081080d3cc1d7895e70eb12d897cb3c95ae3e66fe2a351d8891f01baa06ec9e8ce0f468158d4f629ce6438fdf51467e5d2b01e7120f43b9af80c1d879ca01697903d9a0ed2794f7bf5ed04eeae12037ae12024f3da59a6e2f2956c5fa9abb9010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200000000000000000002000000000000000000000001000000000000000000000000000000000000000000000000000200000000000000000000000000000000000800000000040000000000001000000000000000000000000000010000000000000000000400000830207801f832fefd8825eef845534c57680a082a80fc028507ba0c25d46903363484c164f47834c5d65f43cac01c253b1a0a588f8dcc3cef52e59fff866f8641e018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84fd4087671ba0b9e9e071e5ba9c2997a642985c27e20cebd0dcbb40c249291c019033001951a8a0040efe0a297c9cfb4fac18d1bebb63015777c9aec1eccaf297049d8947c52896c0",
+ "rlp" : "0xf90265f901f9a0143b15610d95d496d57a47cd509e41a8c45e01a31e7002549b838f7b9e0f8938a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a065c6edf7081080d3cc1d7895e70eb12d897cb3c95ae3e66fe2a351d8891f01baa0c57a7eee7b0a12de09021d9a5b4982e7fa7a8f0672c53dd1d576b1238dee1e34a01697903d9a0ed2794f7bf5ed04eeae12037ae12024f3da59a6e2f2956c5fa9abb9010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200000000000000000002000000000000000000000001000000000000000000000000000000000000000000000000000200000000000000000000000000000000000800000000040000000000001000000000000000000000000000010000000000000000000400000830205801f832fefd8825eef84553a1b6180a04008a6fa7efd92ecf45da379537f2a938a4c839d6d812467818262cfa3081d49888197bce79a944542f866f8641e018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a84fd4087671ba00a930d4d88e105d2e45944d0bbb2df437dbd3d84d748a2611e1674f59b94068da0fbc24f43010cc519224f3014a6a037a9ed32719328662f05c75634db6286d3fcc0",
"transactions" : [
{
"data" : "0xfd408767",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x1e",
- "r" : "0xb9e9e071e5ba9c2997a642985c27e20cebd0dcbb40c249291c019033001951a8",
- "s" : "0x040efe0a297c9cfb4fac18d1bebb63015777c9aec1eccaf297049d8947c52896",
+ "r" : "0x0a930d4d88e105d2e45944d0bbb2df437dbd3d84d748a2611e1674f59b94068d",
+ "s" : "0xfbc24f43010cc519224f3014a6a037a9ed32719328662f05c75634db6286d3fc",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -1157,30 +1157,30 @@
"blockHeader" : {
"bloom" : "00000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000800000000040000000000000000000000000000000000000000010000000000000000000000000",
"coinbase" : "8888f1f195afa192cfee860698584c030f4c9db1",
- "difficulty" : "0x0207c0",
+ "difficulty" : "0x0205c0",
"extraData" : "0x",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x5c99",
- "hash" : "fa1f5bd36066b12b181d62caf8ec98c764beab18448e3b23beb351e9ae4b2d4e",
- "mixHash" : "3edb0a2ecbdac61c7117fd3b7d0e91822ccd4365cbc2bb72dd35a823b38c66a0",
- "nonce" : "cec7d3651fd46eb2",
+ "hash" : "9189026494323cd572434e5acc21b36c9304558ba23b54d4ecadd0f4967797e6",
+ "mixHash" : "aabd9a191dd2e5b79368319c1b6df0dfb7f1c7df8abf7909f12bd11d7e48b4d3",
+ "nonce" : "679f7172c2857ced",
"number" : "0x20",
- "parentHash" : "be25f1be50da9f7226d7bf952531c2b36e881464aa75cb4460461fd02e00ea46",
+ "parentHash" : "9208f12904f14166da28160c47d4c25a42a9588e1d4e3d302d50109c827272ea",
"receiptTrie" : "0c0de6a71f4890c734921d5a7f9cb99217d00e082ff82e04c8dda6d5c11400bc",
"stateRoot" : "54dda68af07643f68739a6e9612ad157a26ae7e2ce81f77842bb5835fbcde583",
- "timestamp" : "0x5534c577",
- "transactionsTrie" : "10cc817cfb7062cd8e1b14bd0b87356b6d31ef6ec768457d5adc0e2106f23409",
+ "timestamp" : "0x553a1b66",
+ "transactionsTrie" : "eb52c48b53e3ef1cbd6231f680d6d1ed453a9a8ba2d3ab4ff2112173b0d5b015",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "rlp" : "0xf90265f901f9a0be25f1be50da9f7226d7bf952531c2b36e881464aa75cb4460461fd02e00ea46a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a054dda68af07643f68739a6e9612ad157a26ae7e2ce81f77842bb5835fbcde583a010cc817cfb7062cd8e1b14bd0b87356b6d31ef6ec768457d5adc0e2106f23409a00c0de6a71f4890c734921d5a7f9cb99217d00e082ff82e04c8dda6d5c11400bcb9010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000800000000040000000000000000000000000000000000000000010000000000000000000000000830207c020832fefd8825c99845534c57780a03edb0a2ecbdac61c7117fd3b7d0e91822ccd4365cbc2bb72dd35a823b38c66a088cec7d3651fd46eb2f866f8641f018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a849dc2c8f51ba058140f6ae0955e89301708f2cd9a10dd0e061bfcf75b2338a9b4f05b33ffa7d3a0654baa73574e2916437c0e537b1fa47a9ff291f337cf79e98d6e243d72610d0ec0",
+ "rlp" : "0xf90265f901f9a09208f12904f14166da28160c47d4c25a42a9588e1d4e3d302d50109c827272eaa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a054dda68af07643f68739a6e9612ad157a26ae7e2ce81f77842bb5835fbcde583a0eb52c48b53e3ef1cbd6231f680d6d1ed453a9a8ba2d3ab4ff2112173b0d5b015a00c0de6a71f4890c734921d5a7f9cb99217d00e082ff82e04c8dda6d5c11400bcb9010000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000080000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000400000000000000000200000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000800000000040000000000000000000000000000000000000000010000000000000000000000000830205c020832fefd8825c9984553a1b6680a0aabd9a191dd2e5b79368319c1b6df0dfb7f1c7df8abf7909f12bd11d7e48b4d388679f7172c2857cedf866f8641f018304cb2f946295ee1b4f6dd65047762f924ecd367c17eabf8f0a849dc2c8f51ba03b3791e0d7840dbb21fc080828a5d619bd6691c1822e0cdf52dc047ca3781aeaa0836973b87f9e48969a62e8c0a738d0d60094572295676a2fedd6acb1c9fbd841c0",
"transactions" : [
{
"data" : "0x9dc2c8f5",
"gasLimit" : "0x04cb2f",
"gasPrice" : "0x01",
"nonce" : "0x1f",
- "r" : "0x58140f6ae0955e89301708f2cd9a10dd0e061bfcf75b2338a9b4f05b33ffa7d3",
- "s" : "0x654baa73574e2916437c0e537b1fa47a9ff291f337cf79e98d6e243d72610d0e",
+ "r" : "0x3b3791e0d7840dbb21fc080828a5d619bd6691c1822e0cdf52dc047ca3781aea",
+ "s" : "0x836973b87f9e48969a62e8c0a738d0d60094572295676a2fedd6acb1c9fbd841",
"to" : "6295ee1b4f6dd65047762f924ecd367c17eabf8f",
"v" : "0x1b",
"value" : "0x0a"
@@ -1197,9 +1197,9 @@
"extraData" : "0x42",
"gasLimit" : "0x2fefd8",
"gasUsed" : "0x00",
- "hash" : "54f4cd6188cf2fa089d60c64fe5498d16c35f56ef063a5c0d1d640d51e8a5d1d",
- "mixHash" : "b5cf162f473f15a360b25fddfc3821ce2839a151c53450daaace4e934dd99065",
- "nonce" : "b80936ea9b913f76",
+ "hash" : "1fdd5d3e8797e20cb596be04f2378f74a8ca756e85336efa97e6db5bdc2aaa01",
+ "mixHash" : "8fd5be84e48ed46e5e3c58558dc399f3c33ff1db866e42f23543d8d6dcbb6038",
+ "nonce" : "5fece04ddaeb9244",
"number" : "0x00",
"parentHash" : "0000000000000000000000000000000000000000000000000000000000000000",
"receiptTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
@@ -1208,14 +1208,14 @@
"transactionsTrie" : "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"uncleHash" : "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
- "genesisRLP" : "0xf901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07dba07d6b448a186e9612e5f737d1c909dce473e53199901a302c00646d523c1a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8808454c98c8142a0b5cf162f473f15a360b25fddfc3821ce2839a151c53450daaace4e934dd9906588b80936ea9b913f76c0c0",
+ "genesisRLP" : "0xf901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a07dba07d6b448a186e9612e5f737d1c909dce473e53199901a302c00646d523c1a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8808454c98c8142a08fd5be84e48ed46e5e3c58558dc399f3c33ff1db866e42f23543d8d6dcbb6038885fece04ddaeb9244c0c0",
"postState" : {
"6295ee1b4f6dd65047762f924ecd367c17eabf8f" : {
"balance" : "0x0140",
"code" : "0x6000357c010000000000000000000000000000000000000000000000000000000090048063102accc11461012c57806312a7b9141461013a5780631774e6461461014c5780631e26fd331461015d5780631f9030371461016e578063343a875d1461018057806338cc4831146101955780634e7ad367146101bd57806357cb2fc4146101cb57806365538c73146101e057806368895979146101ee57806376bc21d9146102005780639a19a9531461020e5780639dc2c8f51461021f578063a53b1c1e1461022d578063a67808571461023e578063b61c05031461024c578063c2b12a731461025a578063d2282dc51461026b578063e30081a01461027c578063e8beef5b1461028d578063f38b06001461029b578063f5b53e17146102a9578063fd408767146102bb57005b6101346104d6565b60006000f35b61014261039b565b8060005260206000f35b610157600435610326565b60006000f35b6101686004356102c9565b60006000f35b610176610442565b8060005260206000f35b6101886103d3565b8060ff1660005260206000f35b61019d610413565b8073ffffffffffffffffffffffffffffffffffffffff1660005260206000f35b6101c56104c5565b60006000f35b6101d36103b7565b8060000b60005260206000f35b6101e8610454565b60006000f35b6101f6610401565b8060005260206000f35b61020861051f565b60006000f35b6102196004356102e5565b60006000f35b610227610693565b60006000f35b610238600435610342565b60006000f35b610246610484565b60006000f35b610254610493565b60006000f35b61026560043561038d565b60006000f35b610276600435610350565b60006000f35b61028760043561035e565b60006000f35b6102956105b4565b60006000f35b6102a3610547565b60006000f35b6102b16103ef565b8060005260206000f35b6102c3610600565b60006000f35b80600060006101000a81548160ff021916908302179055505b50565b80600060016101000a81548160ff02191690837f01000000000000000000000000000000000000000000000000000000000000009081020402179055505b50565b80600060026101000a81548160ff021916908302179055505b50565b806001600050819055505b50565b806002600050819055505b50565b80600360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b50565b806004600050819055505b50565b6000600060009054906101000a900460ff1690506103b4565b90565b6000600060019054906101000a900460000b90506103d0565b90565b6000600060029054906101000a900460ff1690506103ec565b90565b600060016000505490506103fe565b90565b60006002600050549050610410565b90565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905061043f565b90565b60006004600050549050610451565b90565b7f65c9ac8011e286e89d02a269890f41d67ca2cc597b2c76c7c69321ff492be5806000602a81526020016000a15b565b6000602a81526020016000a05b565b60017f81933b308056e7e85668661dcd102b1f22795b4431f9cf4625794f381c271c6b6000602a81526020016000a25b565b60016000602a81526020016000a15b565b3373ffffffffffffffffffffffffffffffffffffffff1660017f0e216b62efbb97e751a2ce09f607048751720397ecfb9eef1e48a6644948985b6000602a81526020016000a35b565b3373ffffffffffffffffffffffffffffffffffffffff1660016000602a81526020016000a25b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660017f317b31292193c2a4f561cc40a95ea0d97a2733f14af6d6d59522473e1f3ae65f6000602a81526020016000a45b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660016000602a81526020016000a35b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff1660017fd5f0a30e4be0c6be577a71eceb7464245a796a7e6a55c0d971837b250de05f4e60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe98152602001602a81526020016000a45b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6001023373ffffffffffffffffffffffffffffffffffffffff16600160007ffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffe98152602001602a81526020016000a35b56",
"nonce" : "0x00",
"storage" : {
- "0x" : "0x08fa01",
+ "0x00" : "0x08fa01",
"0x01" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
"0x02" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
"0x03" : "0xaabbccffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
@@ -1250,8 +1250,9 @@
"code" : "0x",
"nonce" : "0x00",
"storage" : {
- }
+ },
+ "privateKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"
}
}
}
-} \ No newline at end of file
+}
diff --git a/tests/files/StateTests/RandomTests/st201504131821CPPJIT.json b/tests/files/StateTests/RandomTests/st201504131821CPPJIT.json
new file mode 100644
index 000000000..01f87ad96
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504131821CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "5623894562375",
+ "currentGasLimit" : "115792089237316195423570985008687907853269984665640564039457584007913129639935",
+ "currentNumber" : "0",
+ "currentTimestamp" : "1",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0",
+ "code" : "0x057fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000100000000000000000000000000000000000000007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe3c7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9d0718f377825843028dfa02158878",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "1410816787",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "999999998589183259",
+ "code" : "0x",
+ "nonce" : "1",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "f8bdbcffd96ce72abd3cd418b18faf596fb05e65184326457f9f3e5bc7527642",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0",
+ "code" : "0x057fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000100000000000000000000000000000000000000007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe3c7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9d0718f377825843028dfa02158878",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "46",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "1000000000000000000",
+ "code" : "0x",
+ "nonce" : "0",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x057fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000100000000000000000000000000000000000000007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe3c7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9d0718f377825843028dfa02158878",
+ "gasLimit" : "0x54175ae5",
+ "gasPrice" : "1",
+ "nonce" : "0",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "1741399653"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504150854CPPJIT.json b/tests/files/StateTests/RandomTests/st201504150854CPPJIT.json
new file mode 100644
index 000000000..47051aae4
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504150854CPPJIT.json
@@ -0,0 +1,72 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "5623894562375",
+ "currentGasLimit" : "115792089237316195423570985008687907853269984665640564039457584007913129639935",
+ "currentNumber" : "0",
+ "currentTimestamp" : "1",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "433564740",
+ "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff047f000000000000000000000000000000000000000000000000000000000000000105133641010b811160005155",
+ "nonce" : "0",
+ "storage" : {
+ "0x" : "0x01"
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "48531",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "999999999566386775",
+ "code" : "0x",
+ "nonce" : "1",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "50e2af1155fb85652e8831e0bee5ad482a4d4ca1e5eb37320c4e9ed5dacb6ba1",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0",
+ "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff047f000000000000000000000000000000000000000000000000000000000000000105133641010b811160005155",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "46",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "1000000000000000000",
+ "code" : "0x",
+ "nonce" : "0",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f00000000000000000000000000000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff047f000000000000000000000000000000000000000000000000000000000000000105133641010b8111",
+ "gasLimit" : "0x3a2ba3d9",
+ "gasPrice" : "1",
+ "nonce" : "0",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "433564740"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504151057CPPJIT.json b/tests/files/StateTests/RandomTests/st201504151057CPPJIT.json
new file mode 100644
index 000000000..75fe43511
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504151057CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "5623894562375",
+ "currentGasLimit" : "115792089237316195423570985008687907853269984665640564039457584007913129639935",
+ "currentNumber" : "0",
+ "currentTimestamp" : "1",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0",
+ "code" : "0x7f000000000000000000000000000000000000000000000000000000000000c3507f0000000000000000000000010000000000000000000000000000000000000000417f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0075a0b64319218663016870455",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "1135806539",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "999999998864193507",
+ "code" : "0x",
+ "nonce" : "1",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "fdfe1d801240a43ff8708f16b426826801ecb0a34ee05d3b6b93937cf359b2b4",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0",
+ "code" : "0x7f000000000000000000000000000000000000000000000000000000000000c3507f0000000000000000000000010000000000000000000000000000000000000000417f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0075a0b64319218663016870455",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "46",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "1000000000000000000",
+ "code" : "0x",
+ "nonce" : "0",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f000000000000000000000000000000000000000000000000000000000000c3507f0000000000000000000000010000000000000000000000000000000000000000417f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0075a0b643192186630168704",
+ "gasLimit" : "0x43b3081d",
+ "gasPrice" : "1",
+ "nonce" : "0",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "1650167023"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504202124CPPJIT.json b/tests/files/StateTests/RandomTests/st201504202124CPPJIT.json
new file mode 100644
index 000000000..0638ca1da
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504202124CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f0000000000000000000000000000000000000000000000000000000000000000447f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000000086053a0b43890710810651116e1555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x06420934",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a121f6fa",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "b9eb731c28b05ecb2edf808265c21744852ae3061758f9351393b9174682d96e",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f0000000000000000000000000000000000000000000000000000000000000000447f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000000086053a0b43890710810651116e1555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f0000000000000000000000000000000000000000000000000000000000000000447f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000000086053a0b43890710810651116e15",
+ "gasLimit" : "0x06420906",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x2dcb28af"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504210245CPPJIT.json b/tests/files/StateTests/RandomTests/st201504210245CPPJIT.json
new file mode 100644
index 000000000..22539fd15
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504210245CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x2217865b",
+ "code" : "0x7f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000000000000000000000000000000000000000c3503b7f000000000000000000000000000000000000000000000000000000000000c3506a7f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000010000000000000000000000000000000000000000745b9b824070397f921960005155",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x611e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3854c18b5",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "404dde61aad41090e3d66c5d042237aed1302501a8da14fb725c3a8d1778f847",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000000000000000000000000000000000000000c3503b7f000000000000000000000000000000000000000000000000000000000000c3506a7f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000010000000000000000000000000000000000000000745b9b824070397f921960005155",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000000000000000000000000000000000000000c3503b7f000000000000000000000000000000000000000000000000000000000000c3506a7f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000010000000000000000000000000000000000000000745b9b824070397f9219",
+ "gasLimit" : "0x6c4dfece",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x2217865b"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504210957CPPJIT.json b/tests/files/StateTests/RandomTests/st201504210957CPPJIT.json
new file mode 100644
index 000000000..e71bea3ac
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504210957CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x025dcd0a",
+ "code" : "0x7f00000000000000000000000100000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f00000000000000000000000000000000000000000000000000000000000000004586",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x68b4",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a505ca70",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "009cdfe12bb06588c7382e66facc4058eb08d3cacaa89e128a7cf367b31d0682",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f00000000000000000000000100000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f00000000000000000000000000000000000000000000000000000000000000004586",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f00000000000000000000000100000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f00000000000000000000000000000000000000000000000000000000000000004586",
+ "gasLimit" : "0x41aec609",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x025dcd0a"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504211739CPPJIT.json b/tests/files/StateTests/RandomTests/st201504211739CPPJIT.json
new file mode 100644
index 000000000..f1dec45e4
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504211739CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000017f00000000000000000000000100000000000000000000000000000000000000007f0000000000000000000000000000000000000000000000000000000000000001077f00000000000000000000000100000000000000000000000000000000000000007f000000000000000000000000000000000000000000000000000000000000c3504419860b9754998d503105a033436e67133a60005155",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x3f263119",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3683dcf15",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "83d3dca909b55515e4a851828b28e38cac53e65c0db3b53997a2c0dda1222de7",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000017f00000000000000000000000100000000000000000000000000000000000000007f0000000000000000000000000000000000000000000000000000000000000001077f00000000000000000000000100000000000000000000000000000000000000007f000000000000000000000000000000000000000000000000000000000000c3504419860b9754998d503105a033436e67133a60005155",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f00000000000000000000000000000000000000000000000000000000000000017f00000000000000000000000100000000000000000000000000000000000000007f0000000000000000000000000000000000000000000000000000000000000001077f00000000000000000000000100000000000000000000000000000000000000007f000000000000000000000000000000000000000000000000000000000000c3504419860b9754998d503105a033436e67133a",
+ "gasLimit" : "0x3f2630eb",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x6ec35ec4"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504212038CPPJIT.json b/tests/files/StateTests/RandomTests/st201504212038CPPJIT.json
new file mode 100644
index 000000000..9a24f6097
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504212038CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f0000000000000000000000000000000000000000000000000000000000000000447fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000017f00000000000000000000000000000000000000000000000000000000000000017f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000100000000000000000000000000000000000000001231b61993",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x5436a791",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3532d589d",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "9f7393391be46ce93394f3c033bcee51a83392d9e1c5b0bdc14e9907194c86d4",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f0000000000000000000000000000000000000000000000000000000000000000447fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000017f00000000000000000000000000000000000000000000000000000000000000017f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000100000000000000000000000000000000000000001231b61993",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f0000000000000000000000000000000000000000000000000000000000000000447fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000017f00000000000000000000000000000000000000000000000000000000000000017f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000100000000000000000000000000000000000000001231b61993",
+ "gasLimit" : "0x5436a763",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x14fb56ac"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504230729CPPJIT.json b/tests/files/StateTests/RandomTests/st201504230729CPPJIT.json
new file mode 100644
index 000000000..d46c405c8
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504230729CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f00000000000000000000000000000000000000000000000000000000000000003a457f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b513428284f28a980b4539a39d1408",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x15c86d88",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3919b92a6",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "8c2f569f43a35beb2ec39814565061fa8a58760b6a8cbd8912dd3af6392add3e",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f00000000000000000000000000000000000000000000000000000000000000003a457f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b513428284f28a980b4539a39d1408",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f00000000000000000000000000000000000000000000000000000000000000003a457f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b513428284f28a980b4539a39d1408",
+ "gasLimit" : "0x15c86d5a",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x014eb9ae"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504231639CPPJIT.json b/tests/files/StateTests/RandomTests/st201504231639CPPJIT.json
new file mode 100644
index 000000000..a2d63f37d
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504231639CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f0000000000000000000000000000000000000000000000000000000000000001937fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000c350a094448744",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x7f8db131",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b327d64efd",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "fe6e569b97acd94e53ee5ff7f687b912625f587fc7b4efefba311fe07df2a9c5",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f0000000000000000000000000000000000000000000000000000000000000001937fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000c350a094448744",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f00000000000000000000000000000000000000000000000000000000000000017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f0000000000000000000000000000000000000000000000000000000000000001937fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000c350a094448744",
+ "gasLimit" : "0x7f8db103",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x591bc129"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504231710CPPJIT.json b/tests/files/StateTests/RandomTests/st201504231710CPPJIT.json
new file mode 100644
index 000000000..9af306e4a
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504231710CPPJIT.json
@@ -0,0 +1,72 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x70d690f4",
+ "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000001847f00000000000000000000000100000000000000000000000000000000000000003a0761525560005155",
+ "nonce" : "0x00",
+ "storage" : {
+ "0x" : "0x5255"
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0xbef1",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3368cb049",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "d522c2441010ceff4a7042baa0b1578fdb453046c0e2c523cc4c24fafaf906a6",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000001847f00000000000000000000000100000000000000000000000000000000000000003a0761525560005155",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f00000000000000000000000000000000000000000000000000000000000000017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000001847f00000000000000000000000100000000000000000000000000000000000000003a076152",
+ "gasLimit" : "0x74d0d631",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x70d690f4"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504231742CPPJIT.json b/tests/files/StateTests/RandomTests/st201504231742CPPJIT.json
new file mode 100644
index 000000000..24170eab7
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504231742CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f00000000000000000000000100000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5967f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3a7e737d40070a156482930a0875",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x113ba572",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b396285abc",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "5fa1c5ec4d844c9b52a412710cd5a21585ec367319aa37c7cfbf8d1207f46afb",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f00000000000000000000000100000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5967f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3a7e737d40070a156482930a0875",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f00000000000000000000000100000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5967f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3a7e737d40070a156482930a0875",
+ "gasLimit" : "0x113ba544",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x70062d18"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504232350CPPJIT.json b/tests/files/StateTests/RandomTests/st201504232350CPPJIT.json
new file mode 100644
index 000000000..d25336a8c
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504232350CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5457f000000000000000000000000000000000000000000000000000000000000c3507f0000000000000000000000000000000000000000000000000000000000000001666b56e87c5a499d5389306e55",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2b37c310",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b37c2c3d1e",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "2fb3e00745e030bc746636fd0ec453ba1694911050dccf7ed5f469a66aefc9c2",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5457f000000000000000000000000000000000000000000000000000000000000c3507f0000000000000000000000000000000000000000000000000000000000000001666b56e87c5a499d5389306e55",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000007f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5457f000000000000000000000000000000000000000000000000000000000000c3507f0000000000000000000000000000000000000000000000000000000000000001666b56e87c5a499d5389306e",
+ "gasLimit" : "0x2b37c2e2",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x275009c5"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504240140CPPJIT.json b/tests/files/StateTests/RandomTests/st201504240140CPPJIT.json
new file mode 100644
index 000000000..4001a8228
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504240140CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x4ca4d183",
+ "code" : "0x7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507f0000000000000000000000000000000000000000000000000000000000000001719f197c5560005155",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x7b41",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b35abeb36a",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "03d2799b460b160c9f55e5b7e817535b133363d8eb548afffc359d8beeff6d41",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507f0000000000000000000000000000000000000000000000000000000000000001719f197c5560005155",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507f0000000000000000000000000000000000000000000000000000000000000001719f197c",
+ "gasLimit" : "0x1517ae45",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x4ca4d183"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504240220CPPJIT.json b/tests/files/StateTests/RandomTests/st201504240220CPPJIT.json
new file mode 100644
index 000000000..7f78e67cb
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504240220CPPJIT.json
@@ -0,0 +1,72 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x69d65f4b",
+ "code" : "0x7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000010000000000000000000000000000000000000000807f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000000000000000000000000000000000000000c3507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b561859c55",
+ "nonce" : "0x00",
+ "storage" : {
+ "0x859c" : "0x945304eb96065b2a98b57a48a06ae28d285a71b5"
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0xbedd",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b33d8ce206",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "a265c829988541b742b10dcb192bbe97f8c2c06e1a14bd88d479f50e7110aa24",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000010000000000000000000000000000000000000000807f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000000000000000000000000000000000000000c3507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b561859c55",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000010000000000000000000000000000000000000000807f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f00000000000000000000000000000000000000000000000000000000000000017f000000000000000000000000000000000000000000000000000000000000c3507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b561859c",
+ "gasLimit" : "0x09249356",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x69d65f4b"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504240351CPPJIT.json b/tests/files/StateTests/RandomTests/st201504240351CPPJIT.json
new file mode 100644
index 000000000..f2f2fd05f
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504240351CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x731ee4d0",
+ "code" : "0x427f00000000000000000000000000000000000000000000000000000000000000007f0000000000000000000000010000000000000000000000000000000000000000437f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff807f00000000000000000000000100000000000000000000000000000000000000007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe6844f1389163a444405b123678749c55",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x6873",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b33444b2eb",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "7b8dc9863790422e0b6796b54b923b0a1b7ab71d682312209423b17979319dcf",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x427f00000000000000000000000000000000000000000000000000000000000000007f0000000000000000000000010000000000000000000000000000000000000000437f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff807f00000000000000000000000100000000000000000000000000000000000000007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe6844f1389163a444405b123678749c55",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x427f00000000000000000000000000000000000000000000000000000000000000007f0000000000000000000000010000000000000000000000000000000000000000437f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff807f00000000000000000000000100000000000000000000000000000000000000007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe6844f1389163a444405b123678749c",
+ "gasLimit" : "0x2d34dd42",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x731ee4d0"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504240817CPPJIT.json b/tests/files/StateTests/RandomTests/st201504240817CPPJIT.json
new file mode 100644
index 000000000..afa13f3e0
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504240817CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000010000000000000000000000000000000000000000971a7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff447f00000000000000000000000000000000000000000000000000000000000000006989206c0b8a01867bf155",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x10412aa1",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b39722d58d",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "e5fb82c5770b521740b1410ab3d5acd21b3b7ac5665d3ef83f61f5a73c5ad50b",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000010000000000000000000000000000000000000000971a7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff447f00000000000000000000000000000000000000000000000000000000000000006989206c0b8a01867bf155",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000010000000000000000000000000000000000000000971a7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff447f00000000000000000000000000000000000000000000000000000000000000006989206c0b8a01867bf1",
+ "gasLimit" : "0x10412a73",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x774e407c"
+ }
+ }
+}
diff --git a/tests/files/StateTests/RandomTests/st201504241118CPPJIT.json b/tests/files/StateTests/RandomTests/st201504241118CPPJIT.json
new file mode 100644
index 000000000..e59c74d36
--- /dev/null
+++ b/tests/files/StateTests/RandomTests/st201504241118CPPJIT.json
@@ -0,0 +1,71 @@
+{
+ "randomStatetest" : {
+ "env" : {
+ "currentCoinbase" : "945304eb96065b2a98b57a48a06ae28d285a71b5",
+ "currentDifficulty" : "0x051d6a3cd647",
+ "currentGasLimit" : "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "currentNumber" : "0x00",
+ "currentTimestamp" : "0x01",
+ "previousHash" : "5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
+ },
+ "logs" : [
+ ],
+ "out" : "0x",
+ "post" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x5ded2cc8",
+ "code" : "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff417f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe65688dff579830091304",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x7de1",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b349765585",
+ "code" : "0x",
+ "nonce" : "0x01",
+ "storage" : {
+ }
+ }
+ },
+ "postStateRoot" : "92cbb047161785b64b135b84317e4172222e8460510e8d10983eb040f9007336",
+ "pre" : {
+ "095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
+ "balance" : "0x00",
+ "code" : "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff417f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe65688dff579830091304",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "945304eb96065b2a98b57a48a06ae28d285a71b5" : {
+ "balance" : "0x2e",
+ "code" : "0x6000355415600957005b60203560003555",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ },
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
+ "balance" : "0x0de0b6b3a7640000",
+ "code" : "0x",
+ "nonce" : "0x00",
+ "storage" : {
+ }
+ }
+ },
+ "transaction" : {
+ "data" : "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff417f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000ffffffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe65688dff579830091304",
+ "gasLimit" : "0x38323082",
+ "gasPrice" : "0x01",
+ "nonce" : "0x00",
+ "secretKey" : "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
+ "to" : "095e7baea6a6c7c4c2dfeb977efac326af552d87",
+ "value" : "0x5ded2cc8"
+ }
+ }
+}
diff --git a/ui/qt/qwhisper/whisper.go b/ui/qt/qwhisper/whisper.go
index 50b0626f5..4ab6d2e5a 100644
--- a/ui/qt/qwhisper/whisper.go
+++ b/ui/qt/qwhisper/whisper.go
@@ -106,7 +106,7 @@ func filterFromMap(opts map[string]interface{}) (f whisper.Filter) {
if topicList, ok := opts["topics"].(*qml.List); ok {
var topics []string
topicList.Convert(&topics)
- f.Topics = whisper.NewTopicsFromStrings(topics...)
+ f.Topics = whisper.NewFilterTopicsFromStringsFlat(topics...)
}
return
diff --git a/whisper/envelope.go b/whisper/envelope.go
index 07762c300..a4e2fa031 100644
--- a/whisper/envelope.go
+++ b/whisper/envelope.go
@@ -72,6 +72,9 @@ func (self *Envelope) Open(key *ecdsa.PrivateKey) (msg *Message, err error) {
message := &Message{
Flags: data[0],
+ Sent: time.Unix(int64(self.Expiry-self.TTL), 0),
+ TTL: time.Duration(self.TTL) * time.Second,
+ Hash: self.Hash(),
}
data = data[1:]
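
The Open hunk above now stamps each opened message with its posting time, lifetime and envelope hash. Only Expiry and TTL travel with an envelope, so the posting time is back-computed as Sent = Expiry - TTL. A minimal standalone sketch of that recovery, using made-up Expiry and TTL values (not taken from the patch):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical envelope metadata (illustration only).
	expiry := uint32(1431000050) // absolute expiry, unix seconds
	ttl := uint32(50)            // time to live, seconds

	// Same recovery as in Envelope.Open above: Sent = Expiry - TTL.
	sent := time.Unix(int64(expiry-ttl), 0)
	fmt.Println("sent:", sent, "lives for:", time.Duration(ttl)*time.Second)
}
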
diff --git a/whisper/envelope_test.go b/whisper/envelope_test.go
new file mode 100644
index 000000000..b64767b2e
--- /dev/null
+++ b/whisper/envelope_test.go
@@ -0,0 +1,142 @@
+package whisper
+
+import (
+ "bytes"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/crypto/ecies"
+)
+
+func TestEnvelopeOpen(t *testing.T) {
+ payload := []byte("hello world")
+ message := NewMessage(payload)
+
+ envelope, err := message.Wrap(DefaultPoW, Options{})
+ if err != nil {
+ t.Fatalf("failed to wrap message: %v", err)
+ }
+ opened, err := envelope.Open(nil)
+ if err != nil {
+ t.Fatalf("failed to open envelope: %v", err)
+ }
+ if opened.Flags != message.Flags {
+ t.Fatalf("flags mismatch: have %d, want %d", opened.Flags, message.Flags)
+ }
+ if bytes.Compare(opened.Signature, message.Signature) != 0 {
+ t.Fatalf("signature mismatch: have 0x%x, want 0x%x", opened.Signature, message.Signature)
+ }
+ if bytes.Compare(opened.Payload, message.Payload) != 0 {
+ t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, message.Payload)
+ }
+ if opened.Sent.Unix() != message.Sent.Unix() {
+ t.Fatalf("send time mismatch: have %d, want %d", opened.Sent, message.Sent)
+ }
+ if opened.TTL/time.Second != DefaultTTL/time.Second {
+ t.Fatalf("message TTL mismatch: have %v, want %v", opened.TTL, DefaultTTL)
+ }
+
+ if opened.Hash != envelope.Hash() {
+ t.Fatalf("message hash mismatch: have 0x%x, want 0x%x", opened.Hash, envelope.Hash())
+ }
+}
+
+func TestEnvelopeAnonymousOpenUntargeted(t *testing.T) {
+ payload := []byte("hello envelope")
+ envelope, err := NewMessage(payload).Wrap(DefaultPoW, Options{})
+ if err != nil {
+ t.Fatalf("failed to wrap message: %v", err)
+ }
+ opened, err := envelope.Open(nil)
+ if err != nil {
+ t.Fatalf("failed to open envelope: %v", err)
+ }
+ if opened.To != nil {
+ t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To)
+ }
+ if bytes.Compare(opened.Payload, payload) != 0 {
+ t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload)
+ }
+}
+
+func TestEnvelopeAnonymousOpenTargeted(t *testing.T) {
+ key, err := crypto.GenerateKey()
+ if err != nil {
+ t.Fatalf("failed to generate test identity: %v", err)
+ }
+
+ payload := []byte("hello envelope")
+ envelope, err := NewMessage(payload).Wrap(DefaultPoW, Options{
+ To: &key.PublicKey,
+ })
+ if err != nil {
+ t.Fatalf("failed to wrap message: %v", err)
+ }
+ opened, err := envelope.Open(nil)
+ if err != nil {
+ t.Fatalf("failed to open envelope: %v", err)
+ }
+ if opened.To != nil {
+ t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To)
+ }
+ if bytes.Compare(opened.Payload, payload) == 0 {
+ t.Fatalf("payload match, should have been encrypted: 0x%x", opened.Payload)
+ }
+}
+
+func TestEnvelopeIdentifiedOpenUntargeted(t *testing.T) {
+ key, err := crypto.GenerateKey()
+ if err != nil {
+ t.Fatalf("failed to generate test identity: %v", err)
+ }
+
+ payload := []byte("hello envelope")
+ envelope, err := NewMessage(payload).Wrap(DefaultPoW, Options{})
+ if err != nil {
+ t.Fatalf("failed to wrap message: %v", err)
+ }
+ opened, err := envelope.Open(key)
+ switch err {
+ case nil:
+ t.Fatalf("envelope opened with bad key: %v", opened)
+
+ case ecies.ErrInvalidPublicKey:
+ // Ok, key mismatch but opened
+
+ default:
+ t.Fatalf("failed to open envelope: %v", err)
+ }
+
+ if opened.To != nil {
+ t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To)
+ }
+ if bytes.Compare(opened.Payload, payload) != 0 {
+ t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload)
+ }
+}
+
+func TestEnvelopeIdentifiedOpenTargeted(t *testing.T) {
+ key, err := crypto.GenerateKey()
+ if err != nil {
+ t.Fatalf("failed to generate test identity: %v", err)
+ }
+
+ payload := []byte("hello envelope")
+ envelope, err := NewMessage(payload).Wrap(DefaultPoW, Options{
+ To: &key.PublicKey,
+ })
+ if err != nil {
+ t.Fatalf("failed to wrap message: %v", err)
+ }
+ opened, err := envelope.Open(key)
+ if err != nil {
+ t.Fatalf("failed to open envelope: %v", err)
+ }
+ if opened.To != nil {
+ t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To)
+ }
+ if bytes.Compare(opened.Payload, payload) != 0 {
+ t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload)
+ }
+}
diff --git a/whisper/filter.go b/whisper/filter.go
index 8fcc45afd..c946d9380 100644
--- a/whisper/filter.go
+++ b/whisper/filter.go
@@ -2,12 +2,115 @@
package whisper
-import "crypto/ecdsa"
+import (
+ "crypto/ecdsa"
+
+ "github.com/ethereum/go-ethereum/event/filter"
+)
// Filter is used to subscribe to specific types of whisper messages.
type Filter struct {
- To *ecdsa.PublicKey // Recipient of the message
- From *ecdsa.PublicKey // Sender of the message
- Topics []Topic // Topics to watch messages on
- Fn func(*Message) // Handler in case of a match
+ To *ecdsa.PublicKey // Recipient of the message
+ From *ecdsa.PublicKey // Sender of the message
+ Topics [][]Topic // Topics to filter messages with
+ Fn func(msg *Message) // Handler in case of a match
+}
+
+// NewFilterTopics creates a 2D topic array used by whisper.Filter from binary
+// data elements.
+func NewFilterTopics(data ...[][]byte) [][]Topic {
+ filter := make([][]Topic, len(data))
+ for i, condition := range data {
+ // Handle the special case of condition == [[]byte{}]
+ if len(condition) == 1 && len(condition[0]) == 0 {
+ filter[i] = []Topic{}
+ continue
+ }
+ // Otherwise flatten normally
+ filter[i] = NewTopics(condition...)
+ }
+ return filter
+}
+
+// NewFilterTopicsFlat creates a 2D topic array used by whisper.Filter from flat
+// binary data elements.
+func NewFilterTopicsFlat(data ...[]byte) [][]Topic {
+ filter := make([][]Topic, len(data))
+ for i, element := range data {
+ // Only add non-wildcard topics
+ filter[i] = make([]Topic, 0, 1)
+ if len(element) > 0 {
+ filter[i] = append(filter[i], NewTopic(element))
+ }
+ }
+ return filter
+}
+
+// NewFilterTopicsFromStrings creates a 2D topic array used by whisper.Filter
+// from textual data elements.
+func NewFilterTopicsFromStrings(data ...[]string) [][]Topic {
+ filter := make([][]Topic, len(data))
+ for i, condition := range data {
+ // Handle the special case of condition == [""]
+ if len(condition) == 1 && condition[0] == "" {
+ filter[i] = []Topic{}
+ continue
+ }
+ // Otherwise flatten normally
+ filter[i] = NewTopicsFromStrings(condition...)
+ }
+ return filter
+}
+
+// NewFilterTopicsFromStringsFlat creates a 2D topic array used by whisper.Filter from flat
+// textual data elements.
+func NewFilterTopicsFromStringsFlat(data ...string) [][]Topic {
+ filter := make([][]Topic, len(data))
+ for i, element := range data {
+ // Only add non-wildcard topics
+ filter[i] = make([]Topic, 0, 1)
+ if element != "" {
+ filter[i] = append(filter[i], NewTopicFromString(element))
+ }
+ }
+ return filter
+}
+
+// filterer is the internal, fully initialized filter ready to match inbound
+// messages to a variety of criteria.
+type filterer struct {
+ to string // Recipient of the message
+ from string // Sender of the message
+ matcher *topicMatcher // Topics to filter messages with
+ fn func(data interface{}) // Handler in case of a match
+}
+
+// Compare checks if the specified filter matches the current one.
+func (self filterer) Compare(f filter.Filter) bool {
+ filter := f.(filterer)
+
+ // Check the message sender and recipient
+ if len(self.to) > 0 && self.to != filter.to {
+ return false
+ }
+ if len(self.from) > 0 && self.from != filter.from {
+ return false
+ }
+ // Check the topic filtering
+ topics := make([]Topic, len(filter.matcher.conditions))
+ for i, group := range filter.matcher.conditions {
+ // Message should contain a single topic entry, extract
+ for topics[i], _ = range group {
+ break
+ }
+ }
+ if !self.matcher.Matches(topics) {
+ return false
+ }
+ return true
+}
+
+// Trigger is called when a filter successfully matches an inbound message.
+func (self filterer) Trigger(data interface{}) {
+ self.fn(data)
}
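
All of the constructors above produce the same 2D shape consumed by whisper.Filter: one inner slice per topic position, with an empty inner slice acting as the wild-card for that position. A minimal standalone sketch of wiring such a filter from strings; the topic names and handler body are made up for illustration, only the types and constructors come from this patch:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/whisper"
)

func main() {
	filter := whisper.Filter{
		// Position 1 must hash-match "deals", position 2 is a wild-card,
		// position 3 may be either "bid" or "ask".
		Topics: whisper.NewFilterTopicsFromStrings(
			[]string{"deals"},
			[]string{""},
			[]string{"bid", "ask"},
		),
		Fn: func(msg *whisper.Message) {
			fmt.Printf("matched: 0x%x\n", msg.Payload)
		},
	}
	fmt.Println("conditions:", len(filter.Topics)) // 3, the middle one empty (wild-card)
}
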
diff --git a/whisper/filter_test.go b/whisper/filter_test.go
new file mode 100644
index 000000000..ca28fd83c
--- /dev/null
+++ b/whisper/filter_test.go
@@ -0,0 +1,199 @@
+package whisper
+
+import (
+ "bytes"
+
+ "testing"
+)
+
+var filterTopicsCreationTests = []struct {
+ topics [][]string
+ filter [][][4]byte
+}{
+ { // Simple topic filter
+ topics: [][]string{
+ {"abc", "def", "ghi"},
+ {"def"},
+ {"ghi", "abc"},
+ },
+ filter: [][][4]byte{
+ {{0x4e, 0x03, 0x65, 0x7a}, {0x34, 0x60, 0x7c, 0x9b}, {0x21, 0x41, 0x7d, 0xf9}},
+ {{0x34, 0x60, 0x7c, 0x9b}},
+ {{0x21, 0x41, 0x7d, 0xf9}, {0x4e, 0x03, 0x65, 0x7a}},
+ },
+ },
+ { // Wild-carded topic filter
+ topics: [][]string{
+ {"abc", "def", "ghi"},
+ {},
+ {""},
+ {"def"},
+ },
+ filter: [][][4]byte{
+ {{0x4e, 0x03, 0x65, 0x7a}, {0x34, 0x60, 0x7c, 0x9b}, {0x21, 0x41, 0x7d, 0xf9}},
+ {},
+ {},
+ {{0x34, 0x60, 0x7c, 0x9b}},
+ },
+ },
+}
+
+var filterTopicsCreationFlatTests = []struct {
+ topics []string
+ filter [][][4]byte
+}{
+ { // Simple topic list
+ topics: []string{"abc", "def", "ghi"},
+ filter: [][][4]byte{
+ {{0x4e, 0x03, 0x65, 0x7a}},
+ {{0x34, 0x60, 0x7c, 0x9b}},
+ {{0x21, 0x41, 0x7d, 0xf9}},
+ },
+ },
+ { // Wild-carded topic list
+ topics: []string{"abc", "", "ghi"},
+ filter: [][][4]byte{
+ {{0x4e, 0x03, 0x65, 0x7a}},
+ {},
+ {{0x21, 0x41, 0x7d, 0xf9}},
+ },
+ },
+}
+
+func TestFilterTopicsCreation(t *testing.T) {
+ // Check full filter creation
+ for i, tt := range filterTopicsCreationTests {
+ // Check the textual creation
+ filter := NewFilterTopicsFromStrings(tt.topics...)
+ if len(filter) != len(tt.topics) {
+ t.Errorf("test %d: condition count mismatch: have %v, want %v", i, len(filter), len(tt.topics))
+ continue
+ }
+ for j, condition := range filter {
+ if len(condition) != len(tt.filter[j]) {
+ t.Errorf("test %d, condition %d: size mismatch: have %v, want %v", i, j, len(condition), len(tt.filter[j]))
+ continue
+ }
+ for k := 0; k < len(condition); k++ {
+ if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 {
+ t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k])
+ }
+ }
+ }
+ // Check the binary creation
+ binary := make([][][]byte, len(tt.topics))
+ for j, condition := range tt.topics {
+ binary[j] = make([][]byte, len(condition))
+ for k, segment := range condition {
+ binary[j][k] = []byte(segment)
+ }
+ }
+ filter = NewFilterTopics(binary...)
+ if len(filter) != len(tt.topics) {
+ t.Errorf("test %d: condition count mismatch: have %v, want %v", i, len(filter), len(tt.topics))
+ continue
+ }
+ for j, condition := range filter {
+ if len(condition) != len(tt.filter[j]) {
+ t.Errorf("test %d, condition %d: size mismatch: have %v, want %v", i, j, len(condition), len(tt.filter[j]))
+ continue
+ }
+ for k := 0; k < len(condition); k++ {
+ if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 {
+ t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k])
+ }
+ }
+ }
+ }
+ // Check flat filter creation
+ for i, tt := range filterTopicsCreationFlatTests {
+ // Check the textual creation
+ filter := NewFilterTopicsFromStringsFlat(tt.topics...)
+ if len(filter) != len(tt.topics) {
+ t.Errorf("test %d: condition count mismatch: have %v, want %v", i, len(filter), len(tt.topics))
+ continue
+ }
+ for j, condition := range filter {
+ if len(condition) != len(tt.filter[j]) {
+ t.Errorf("test %d, condition %d: size mismatch: have %v, want %v", i, j, len(condition), len(tt.filter[j]))
+ continue
+ }
+ for k := 0; k < len(condition); k++ {
+ if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 {
+ t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k])
+ }
+ }
+ }
+ // Check the binary creation
+ binary := make([][]byte, len(tt.topics))
+ for j, topic := range tt.topics {
+ binary[j] = []byte(topic)
+ }
+ filter = NewFilterTopicsFlat(binary...)
+ if len(filter) != len(tt.topics) {
+ t.Errorf("test %d: condition count mismatch: have %v, want %v", i, len(filter), len(tt.topics))
+ continue
+ }
+ for j, condition := range filter {
+ if len(condition) != len(tt.filter[j]) {
+ t.Errorf("test %d, condition %d: size mismatch: have %v, want %v", i, j, len(condition), len(tt.filter[j]))
+ continue
+ }
+ for k := 0; k < len(condition); k++ {
+ if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 {
+ t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k])
+ }
+ }
+ }
+ }
+}
+
+var filterCompareTests = []struct {
+ matcher filterer
+ message filterer
+ match bool
+}{
+ { // Wild-card filter matching anything
+ matcher: filterer{to: "", from: "", matcher: newTopicMatcher()},
+ message: filterer{to: "to", from: "from", matcher: newTopicMatcher(NewFilterTopicsFromStringsFlat("topic")...)},
+ match: true,
+ },
+ { // Filter matching the to field
+ matcher: filterer{to: "to", from: "", matcher: newTopicMatcher()},
+ message: filterer{to: "to", from: "from", matcher: newTopicMatcher(NewFilterTopicsFromStringsFlat("topic")...)},
+ match: true,
+ },
+ { // Filter rejecting the to field
+ matcher: filterer{to: "to", from: "", matcher: newTopicMatcher()},
+ message: filterer{to: "", from: "from", matcher: newTopicMatcher(NewFilterTopicsFromStringsFlat("topic")...)},
+ match: false,
+ },
+ { // Filter matching the from field
+ matcher: filterer{to: "", from: "from", matcher: newTopicMatcher()},
+ message: filterer{to: "to", from: "from", matcher: newTopicMatcher(NewFilterTopicsFromStringsFlat("topic")...)},
+ match: true,
+ },
+ { // Filter rejecting the from field
+ matcher: filterer{to: "", from: "from", matcher: newTopicMatcher()},
+ message: filterer{to: "to", from: "", matcher: newTopicMatcher(NewFilterTopicsFromStringsFlat("topic")...)},
+ match: false,
+ },
+ { // Filter matching the topic field
+ matcher: filterer{to: "", from: "from", matcher: newTopicMatcher(NewFilterTopicsFromStringsFlat("topic")...)},
+ message: filterer{to: "to", from: "from", matcher: newTopicMatcher(NewFilterTopicsFromStringsFlat("topic")...)},
+ match: true,
+ },
+ { // Filter rejecting the topic field
+ matcher: filterer{to: "", from: "", matcher: newTopicMatcher(NewFilterTopicsFromStringsFlat("topic")...)},
+ message: filterer{to: "to", from: "from", matcher: newTopicMatcher()},
+ match: false,
+ },
+}
+
+func TestFilterCompare(t *testing.T) {
+ for i, tt := range filterCompareTests {
+ if match := tt.matcher.Compare(tt.message); match != tt.match {
+ t.Errorf("test %d: match mismatch: have %v, want %v", i, match, tt.match)
+ }
+ }
+}
diff --git a/whisper/message.go b/whisper/message.go
index 07c673567..a80380a92 100644
--- a/whisper/message.go
+++ b/whisper/message.go
@@ -8,21 +8,25 @@ import (
"math/rand"
"time"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
)
-// Message represents an end-user data packet to trasmit through the Whisper
+// Message represents an end-user data packet to transmit through the Whisper
// protocol. These are wrapped into Envelopes that need not be understood by
// intermediate nodes, just forwarded.
type Message struct {
Flags byte // First bit is signature presence, rest reserved and should be random
Signature []byte
Payload []byte
- Sent int64
- To *ecdsa.PublicKey
+ Sent time.Time // Time when the message was posted into the network
+ TTL time.Duration // Maximum time to live allowed for the message
+
+ To *ecdsa.PublicKey // Message recipient (identity used to decode the message)
+ Hash common.Hash // Message envelope hash to act as a unique id
}
// Options specifies the exact way a message should be wrapped into an Envelope.
@@ -43,7 +47,7 @@ func NewMessage(payload []byte) *Message {
return &Message{
Flags: flags,
Payload: payload,
- Sent: time.Now().Unix(),
+ Sent: time.Now(),
}
}
@@ -64,6 +68,8 @@ func (self *Message) Wrap(pow time.Duration, options Options) (*Envelope, error)
if options.TTL == 0 {
options.TTL = DefaultTTL
}
+ self.TTL = options.TTL
+
// Sign and encrypt the message if requested
if options.From != nil {
if err := self.sign(options.From); err != nil {
@@ -114,9 +120,12 @@ func (self *Message) encrypt(key *ecdsa.PublicKey) (err error) {
}
// decrypt decrypts an encrypted payload with a private key.
-func (self *Message) decrypt(key *ecdsa.PrivateKey) (err error) {
- self.Payload, err = crypto.Decrypt(key, self.Payload)
- return
+func (self *Message) decrypt(key *ecdsa.PrivateKey) error {
+ cleartext, err := crypto.Decrypt(key, self.Payload)
+ if err == nil {
+ self.Payload = cleartext
+ }
+ return err
}
// hash calculates the SHA3 checksum of the message flags and payload.
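
The reworked decrypt above only swaps in the cleartext once decryption has actually succeeded, so a failed attempt leaves the original payload bytes untouched; the identified/untargeted envelope test further up relies on exactly that. A hedged standalone sketch of the caller-visible effect (the payload string is made up; the calls are the same ones exercised by the tests above):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/crypto/ecies"
	"github.com/ethereum/go-ethereum/whisper"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	// Wrap a plain, untargeted message (no recipient, hence no encryption).
	envelope, err := whisper.NewMessage([]byte("not addressed to anyone")).Wrap(whisper.DefaultPoW, whisper.Options{})
	if err != nil {
		panic(err)
	}
	// Probing it with an identity fails to decrypt, but the guarded decrypt
	// above keeps the payload intact instead of discarding it.
	msg, err := envelope.Open(key)
	if err == ecies.ErrInvalidPublicKey {
		fmt.Printf("not decryptable with this identity, payload kept: %q\n", msg.Payload)
	}
}
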
diff --git a/whisper/message_test.go b/whisper/message_test.go
index 18a254e5c..0b4a24c24 100644
--- a/whisper/message_test.go
+++ b/whisper/message_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"crypto/elliptic"
"testing"
+ "time"
"github.com/ethereum/go-ethereum/crypto"
)
@@ -25,6 +26,9 @@ func TestMessageSimpleWrap(t *testing.T) {
if bytes.Compare(msg.Payload, payload) != 0 {
t.Fatalf("payload mismatch after wrapping: have 0x%x, want 0x%x", msg.Payload, payload)
}
+ if msg.TTL/time.Second != DefaultTTL/time.Second {
+ t.Fatalf("message TTL mismatch: have %v, want %v", msg.TTL, DefaultTTL)
+ }
}
// Tests whether a message can be signed, and wrapped in plain-text.
diff --git a/whisper/peer.go b/whisper/peer.go
index 28abf4260..9fdc28434 100644
--- a/whisper/peer.go
+++ b/whisper/peer.go
@@ -21,20 +21,15 @@ type peer struct {
quit chan struct{}
}
-// newPeer creates and initializes a new whisper peer connection, returning either
-// the newly constructed link or a failure reason.
-func newPeer(host *Whisper, remote *p2p.Peer, rw p2p.MsgReadWriter) (*peer, error) {
- p := &peer{
+// newPeer creates a new whisper peer object, but does not run the handshake itself.
+func newPeer(host *Whisper, remote *p2p.Peer, rw p2p.MsgReadWriter) *peer {
+ return &peer{
host: host,
peer: remote,
ws: rw,
known: set.New(),
quit: make(chan struct{}),
}
- if err := p.handshake(); err != nil {
- return nil, err
- }
- return p, nil
}
// start initiates the peer updater, periodically broadcasting the whisper packets
diff --git a/whisper/topic.go b/whisper/topic.go
index a965c7cc2..c47c94ae1 100644
--- a/whisper/topic.go
+++ b/whisper/topic.go
@@ -11,6 +11,8 @@ import "github.com/ethereum/go-ethereum/crypto"
type Topic [4]byte
// NewTopic creates a topic from the 4 byte prefix of the SHA3 hash of the data.
+//
+// Note, empty topics are considered the wildcard, and cannot be used in messages.
func NewTopic(data []byte) Topic {
prefix := [4]byte{}
copy(prefix[:], crypto.Sha3(data)[:4])
@@ -48,14 +50,75 @@ func (self *Topic) String() string {
return string(self[:])
}
-// TopicSet represents a hash set to check if a topic exists or not.
-type topicSet map[string]struct{}
+// topicMatcher is a filter expression to verify if a list of topics contained
+// in an arriving message matches some topic conditions. The topic matcher is
+// built up of a list of conditions, each of which must be satisfied by the
+// corresponding topic in the message. Each condition may require: a) an exact
+// topic match; b) a match from a set of topics; or c) a wild-card matching all.
+//
+// If a message contains more topics than required by the matcher, those beyond
+// the condition count are ignored and assumed to match.
+//
+// Consider the following sample topic matcher:
+// sample := {
+// {TopicA1, TopicA2, TopicA3},
+// {TopicB},
+// nil,
+// {TopicD1, TopicD2}
+// }
+// In order for a message to pass this filter, it must enumerate at least 4
+// topics: the first must be any of [TopicA1, TopicA2, TopicA3], the second must
+// be "TopicB", the third is ignored (wild-card), and the fourth must be either
+// "TopicD1" or "TopicD2". If the message contains further topics, the filter
+// will match them too.
+type topicMatcher struct {
+ conditions []map[Topic]struct{}
+}
+
+// newTopicMatcher creates a topic matcher from a list of topic conditions.
+func newTopicMatcher(topics ...[]Topic) *topicMatcher {
+ matcher := make([]map[Topic]struct{}, len(topics))
+ for i, condition := range topics {
+ matcher[i] = make(map[Topic]struct{})
+ for _, topic := range condition {
+ matcher[i][topic] = struct{}{}
+ }
+ }
+ return &topicMatcher{conditions: matcher}
+}
-// NewTopicSet creates a topic hash set from a slice of topics.
-func newTopicSet(topics []Topic) topicSet {
- set := make(map[string]struct{})
- for _, topic := range topics {
- set[topic.String()] = struct{}{}
+// newTopicMatcherFromBinary creates a topic matcher from a list of binary conditions.
+func newTopicMatcherFromBinary(data ...[][]byte) *topicMatcher {
+ topics := make([][]Topic, len(data))
+ for i, condition := range data {
+ topics[i] = NewTopics(condition...)
+ }
+ return newTopicMatcher(topics...)
+}
+
+// newTopicMatcherFromStrings creates a topic matcher from a list of textual
+// conditions.
+func newTopicMatcherFromStrings(data ...[]string) *topicMatcher {
+ topics := make([][]Topic, len(data))
+ for i, condition := range data {
+ topics[i] = NewTopicsFromStrings(condition...)
+ }
+ return newTopicMatcher(topics...)
+}
+
+// Matches checks if a list of topics matches this particular condition set.
+func (self *topicMatcher) Matches(topics []Topic) bool {
+ // Mismatch if there aren't enough topics
+ if len(self.conditions) > len(topics) {
+ return false
+ }
+ // Check each topic condition for existence (skip wild-cards)
+ for i := 0; i < len(topics) && i < len(self.conditions); i++ {
+ if len(self.conditions[i]) > 0 {
+ if _, ok := self.conditions[i][topics[i]]; !ok {
+ return false
+ }
+ }
}
- return topicSet(set)
+ return true
}
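To make the prefix-matching semantics concrete, a package-internal sketch (topicMatcher and its constructors are unexported) mirroring the sample from the comment above:

    // matcherSketch builds the documented sample matcher; the nil condition acts
    // as the wild-card for the third topic position.
    func matcherSketch() (bool, bool) {
        matcher := newTopicMatcher(
            NewTopicsFromStrings("TopicA1", "TopicA2", "TopicA3"),
            NewTopicsFromStrings("TopicB"),
            nil,
            NewTopicsFromStrings("TopicD1", "TopicD2"),
        )
        // Every condition satisfied; extra trailing topics are accepted.
        pass := matcher.Matches(NewTopicsFromStrings("TopicA2", "TopicB", "anything", "TopicD1", "extra"))
        // Fewer topics than conditions is always a mismatch.
        fail := matcher.Matches(NewTopicsFromStrings("TopicA2", "TopicB"))
        return pass, fail // true, false
    }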
diff --git a/whisper/topic_test.go b/whisper/topic_test.go
index 4015079dc..976f3e88d 100644
--- a/whisper/topic_test.go
+++ b/whisper/topic_test.go
@@ -9,9 +9,8 @@ var topicCreationTests = []struct {
data []byte
hash [4]byte
}{
- {hash: [4]byte{0xc5, 0xd2, 0x46, 0x01}, data: nil},
- {hash: [4]byte{0xc5, 0xd2, 0x46, 0x01}, data: []byte{}},
{hash: [4]byte{0x8f, 0x9a, 0x2b, 0x7d}, data: []byte("test name")},
+ {hash: [4]byte{0xf2, 0x6e, 0x77, 0x79}, data: []byte("some other test")},
}
func TestTopicCreation(t *testing.T) {
@@ -52,16 +51,149 @@ func TestTopicCreation(t *testing.T) {
}
}
-func TestTopicSetCreation(t *testing.T) {
- topics := make([]Topic, len(topicCreationTests))
- for i, tt := range topicCreationTests {
- topics[i] = NewTopic(tt.data)
+var topicMatcherCreationTest = struct {
+ binary [][][]byte
+ textual [][]string
+ matcher []map[[4]byte]struct{}
+}{
+ binary: [][][]byte{
+ [][]byte{},
+ [][]byte{
+ []byte("Topic A"),
+ },
+ [][]byte{
+ []byte("Topic B1"),
+ []byte("Topic B2"),
+ []byte("Topic B3"),
+ },
+ },
+ textual: [][]string{
+ []string{},
+ []string{"Topic A"},
+ []string{"Topic B1", "Topic B2", "Topic B3"},
+ },
+ matcher: []map[[4]byte]struct{}{
+ map[[4]byte]struct{}{},
+ map[[4]byte]struct{}{
+ [4]byte{0x25, 0xfc, 0x95, 0x66}: struct{}{},
+ },
+ map[[4]byte]struct{}{
+ [4]byte{0x93, 0x6d, 0xec, 0x09}: struct{}{},
+ [4]byte{0x25, 0x23, 0x34, 0xd3}: struct{}{},
+ [4]byte{0x6b, 0xc2, 0x73, 0xd1}: struct{}{},
+ },
+ },
+}
+
+func TestTopicMatcherCreation(t *testing.T) {
+ test := topicMatcherCreationTest
+
+ matcher := newTopicMatcherFromBinary(test.binary...)
+ for i, cond := range matcher.conditions {
+ for topic := range cond {
+ if _, ok := test.matcher[i][topic]; !ok {
+ t.Errorf("condition %d; extra topic found: 0x%x", i, topic[:])
+ }
+ }
}
- set := newTopicSet(topics)
- for i, tt := range topicCreationTests {
- topic := NewTopic(tt.data)
- if _, ok := set[topic.String()]; !ok {
- t.Errorf("topic %d: not found in set", i)
+ for i, cond := range test.matcher {
+ for topic := range cond {
+ if _, ok := matcher.conditions[i][topic]; !ok {
+ t.Errorf("condition %d; topic not found: 0x%x", i, topic[:])
+ }
+ }
+ }
+
+ matcher = newTopicMatcherFromStrings(test.textual...)
+ for i, cond := range matcher.conditions {
+ for topic := range cond {
+ if _, ok := test.matcher[i][topic]; !ok {
+ t.Errorf("condition %d; extra topic found: 0x%x", i, topic[:])
+ }
+ }
+ }
+ for i, cond := range test.matcher {
+ for topic := range cond {
+ if _, ok := matcher.conditions[i][topic]; !ok {
+ t.Errorf("condition %d; topic not found: 0x%x", i, topic[:])
+ }
+ }
+ }
+}
+
+var topicMatcherTests = []struct {
+ filter [][]string
+ topics []string
+ match bool
+}{
+ // Empty topic matcher should match everything
+ {
+ filter: [][]string{},
+ topics: []string{},
+ match: true,
+ },
+ {
+ filter: [][]string{},
+ topics: []string{"a", "b", "c"},
+ match: true,
+ },
+ // Fixed topic matcher should match strictly, but only on the condition prefix
+ {
+ filter: [][]string{[]string{"a"}, []string{"b"}},
+ topics: []string{"a"},
+ match: false,
+ },
+ {
+ filter: [][]string{[]string{"a"}, []string{"b"}},
+ topics: []string{"a", "b"},
+ match: true,
+ },
+ {
+ filter: [][]string{[]string{"a"}, []string{"b"}},
+ topics: []string{"a", "b", "c"},
+ match: true,
+ },
+ // Multi-matcher should match any from a sub-group
+ {
+ filter: [][]string{[]string{"a1", "a2"}},
+ topics: []string{"a"},
+ match: false,
+ },
+ {
+ filter: [][]string{[]string{"a1", "a2"}},
+ topics: []string{"a1"},
+ match: true,
+ },
+ {
+ filter: [][]string{[]string{"a1", "a2"}},
+ topics: []string{"a2"},
+ match: true,
+ },
+ // Wild-card condition should match anything
+ {
+ filter: [][]string{[]string{}, []string{"b"}},
+ topics: []string{"a"},
+ match: false,
+ },
+ {
+ filter: [][]string{[]string{}, []string{"b"}},
+ topics: []string{"a", "b"},
+ match: true,
+ },
+ {
+ filter: [][]string{[]string{}, []string{"b"}},
+ topics: []string{"b", "b"},
+ match: true,
+ },
+}
+
+func TestTopicMatcher(t *testing.T) {
+ for i, tt := range topicMatcherTests {
+ topics := NewTopicsFromStrings(tt.topics...)
+
+ matcher := newTopicMatcherFromStrings(tt.filter...)
+ if match := matcher.Matches(topics); match != tt.match {
+ t.Errorf("test %d: match mismatch: have %v, want %v", i, match, tt.match)
}
}
}
diff --git a/whisper/whisper.go b/whisper/whisper.go
index 9317fad50..a48e1e380 100644
--- a/whisper/whisper.go
+++ b/whisper/whisper.go
@@ -58,6 +58,8 @@ type Whisper struct {
quit chan struct{}
}
+// New creates a Whisper client ready to communicate through the Ethereum P2P
+// network.
func New() *Whisper {
whisper := &Whisper{
filters: filter.New(),
@@ -116,11 +118,11 @@ func (self *Whisper) GetIdentity(key *ecdsa.PublicKey) *ecdsa.PrivateKey {
// Watch installs a new message handler to run in case a matching packet arrives
// from the whisper network.
func (self *Whisper) Watch(options Filter) int {
- filter := filter.Generic{
- Str1: string(crypto.FromECDSAPub(options.To)),
- Str2: string(crypto.FromECDSAPub(options.From)),
- Data: newTopicSet(options.Topics),
- Fn: func(data interface{}) {
+ filter := filterer{
+ to: string(crypto.FromECDSAPub(options.To)),
+ from: string(crypto.FromECDSAPub(options.From)),
+ matcher: newTopicMatcher(options.Topics...),
+ fn: func(data interface{}) {
options.Fn(data.(*Message))
},
}
@@ -148,7 +150,7 @@ func (self *Whisper) Stop() {
glog.V(logger.Info).Infoln("Whisper stopped")
}
-// Messages retrieves the currently pooled messages matching a filter id.
+// Messages retrieves all the currently pooled messages matching a filter id.
func (self *Whisper) Messages(id int) []*Message {
messages := make([]*Message, 0)
if filter := self.filters.Get(id); filter != nil {
@@ -163,27 +165,12 @@ func (self *Whisper) Messages(id int) []*Message {
return messages
}
-// func (self *Whisper) RemoveIdentity(key *ecdsa.PublicKey) bool {
-// k := string(crypto.FromECDSAPub(key))
-// if _, ok := self.keys[k]; ok {
-// delete(self.keys, k)
-// return true
-// }
-// return false
-// }
-
// handlePeer is called by the underlying P2P layer when the whisper sub-protocol
// connection is negotiated.
func (self *Whisper) handlePeer(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
- // Create, initialize and start the whisper peer
- whisperPeer, err := newPeer(self, peer, rw)
- if err != nil {
- return err
- }
- whisperPeer.start()
- defer whisperPeer.stop()
+ // Create the new peer and start tracking it
+ whisperPeer := newPeer(self, peer, rw)
- // Start tracking the active peer
self.peerMu.Lock()
self.peers[whisperPeer] = struct{}{}
self.peerMu.Unlock()
@@ -193,6 +180,14 @@ func (self *Whisper) handlePeer(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
delete(self.peers, whisperPeer)
self.peerMu.Unlock()
}()
+
+ // Run the peer handshake and state updates
+ if err := whisperPeer.handshake(); err != nil {
+ return err
+ }
+ whisperPeer.start()
+ defer whisperPeer.stop()
+
// Read and process inbound messages directly to merge into client-global state
for {
// Fetch the next packet and decode the contained envelopes
@@ -267,9 +262,11 @@ func (self *Whisper) open(envelope *Envelope) *Message {
// Iterate over the keys and try to decrypt the message
for _, key := range self.keys {
message, err := envelope.Open(key)
- if err == nil || err == ecies.ErrInvalidPublicKey {
+ if err == nil {
message.To = &key.PublicKey
return message
+ } else if err == ecies.ErrInvalidPublicKey {
+ return message
}
}
// Failed to decrypt, don't return anything
@@ -278,10 +275,14 @@ func (self *Whisper) open(envelope *Envelope) *Message {
// createFilter creates a message filter to check against installed handlers.
func createFilter(message *Message, topics []Topic) filter.Filter {
- return filter.Generic{
- Str1: string(crypto.FromECDSAPub(message.To)),
- Str2: string(crypto.FromECDSAPub(message.Recover())),
- Data: newTopicSet(topics),
+ matcher := make([][]Topic, len(topics))
+ for i, topic := range topics {
+ matcher[i] = []Topic{topic}
+ }
+ return filterer{
+ to: string(crypto.FromECDSAPub(message.To)),
+ from: string(crypto.FromECDSAPub(message.Recover())),
+ matcher: newTopicMatcher(matcher...),
}
}
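A short usage sketch of the reworked filter installation (assuming this revision of the whisper package; NewFilterTopicsFromStringsFlat is the helper the updated broadcast test uses for single-topic conditions, imports omitted):

    // watchSketch installs a handler for one topic condition and later drains
    // whatever the node has pooled for it; To/From are left nil for a broadcast.
    func watchSketch() {
        shh := whisper.New()
        id := shh.Watch(whisper.Filter{
            Topics: whisper.NewFilterTopicsFromStringsFlat("my topic"),
            Fn: func(msg *whisper.Message) {
                fmt.Printf("matched payload: 0x%x\n", msg.Payload)
            },
        })
        _ = shh.Messages(id) // everything currently pooled for this filter
    }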
diff --git a/whisper/whisper_test.go b/whisper/whisper_test.go
index def8e68d8..7c5067f51 100644
--- a/whisper/whisper_test.go
+++ b/whisper/whisper_test.go
@@ -129,7 +129,7 @@ func testBroadcast(anonymous bool, t *testing.T) {
dones[i] = done
targets[i].Watch(Filter{
- Topics: NewTopicsFromStrings("broadcast topic"),
+ Topics: NewFilterTopicsFromStringsFlat("broadcast topic"),
Fn: func(msg *Message) {
close(done)
},
diff --git a/xeth/whisper.go b/xeth/whisper.go
index 342910b5c..edb62c748 100644
--- a/xeth/whisper.go
+++ b/xeth/whisper.go
@@ -1,7 +1,9 @@
+// Contains the external API to the whisper sub-protocol.
+
package xeth
import (
- "errors"
+ "fmt"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -12,109 +14,92 @@ import (
var qlogger = logger.NewLogger("XSHH")
+// Whisper represents the API wrapper around the internal whisper implementation.
type Whisper struct {
*whisper.Whisper
}
+// NewWhisper wraps an internal whisper client into an external API version.
func NewWhisper(w *whisper.Whisper) *Whisper {
return &Whisper{w}
}
-func (self *Whisper) Post(payload string, to, from string, topics []string, priority, ttl uint32) error {
- if priority == 0 {
- priority = 1000
- }
-
- if ttl == 0 {
- ttl = 100
- }
-
- pk := crypto.ToECDSAPub(common.FromHex(from))
- if key := self.Whisper.GetIdentity(pk); key != nil || len(from) == 0 {
- msg := whisper.NewMessage(common.FromHex(payload))
- envelope, err := msg.Wrap(time.Duration(priority*100000), whisper.Options{
- TTL: time.Duration(ttl) * time.Second,
- To: crypto.ToECDSAPub(common.FromHex(to)),
- From: key,
- Topics: whisper.NewTopicsFromStrings(topics...),
- })
-
- if err != nil {
- return err
- }
-
- if err := self.Whisper.Send(envelope); err != nil {
- return err
- }
- } else {
- return errors.New("unmatched pub / priv for seal")
- }
-
- return nil
-}
-
+// NewIdentity generates a new cryptographic identity for the client, and injects
+// it into the known identities for message decryption.
func (self *Whisper) NewIdentity() string {
- key := self.Whisper.NewIdentity()
-
- return common.ToHex(crypto.FromECDSAPub(&key.PublicKey))
+ identity := self.Whisper.NewIdentity()
+ return common.ToHex(crypto.FromECDSAPub(&identity.PublicKey))
}
+// HasIdentity checks whether the whisper node is configured with the private key
+// of the specified public key.
func (self *Whisper) HasIdentity(key string) bool {
return self.Whisper.HasIdentity(crypto.ToECDSAPub(common.FromHex(key)))
}
-// func (self *Whisper) RemoveIdentity(key string) bool {
-// return self.Whisper.RemoveIdentity(crypto.ToECDSAPub(common.FromHex(key)))
-// }
-
-func (self *Whisper) Watch(opts *Options) int {
- filter := whisper.Filter{
- To: crypto.ToECDSAPub(common.FromHex(opts.To)),
- From: crypto.ToECDSAPub(common.FromHex(opts.From)),
- Topics: whisper.NewTopicsFromStrings(opts.Topics...),
+// Post injects a message into the whisper network for distribution.
+func (self *Whisper) Post(payload string, to, from string, topics []string, priority, ttl uint32) error {
+ // Decode the topic strings
+ topicsDecoded := make([][]byte, len(topics))
+ for i, topic := range topics {
+ topicsDecoded[i] = common.FromHex(topic)
}
-
- var i int
- filter.Fn = func(msg *whisper.Message) {
- opts.Fn(NewWhisperMessage(msg))
+ // Construct the whisper message and transmission options
+ message := whisper.NewMessage(common.FromHex(payload))
+ options := whisper.Options{
+ To: crypto.ToECDSAPub(common.FromHex(to)),
+ TTL: time.Duration(ttl) * time.Second,
+ Topics: whisper.NewTopics(topicsDecoded...),
}
-
- i = self.Whisper.Watch(filter)
-
- return i
-}
-
-func (self *Whisper) Messages(id int) (messages []WhisperMessage) {
- msgs := self.Whisper.Messages(id)
- messages = make([]WhisperMessage, len(msgs))
- for i, message := range msgs {
- messages[i] = NewWhisperMessage(message)
+ if len(from) != 0 {
+ if key := self.Whisper.GetIdentity(crypto.ToECDSAPub(common.FromHex(from))); key != nil {
+ options.From = key
+ } else {
+ return fmt.Errorf("unknown identity to send from: %s", from)
+ }
}
-
- return
+ // Wrap and send the message
+ pow := time.Duration(priority) * time.Millisecond
+ envelope, err := message.Wrap(pow, options)
+ if err != nil {
+ return err
+ }
+ if err := self.Whisper.Send(envelope); err != nil {
+ return err
+ }
+ return nil
}
-type Options struct {
- To string
- From string
- Topics []string
- Fn func(msg WhisperMessage)
+// Watch installs a new message handler to run in case a matching packet arrives
+// from the whisper network.
+func (self *Whisper) Watch(to, from string, topics [][]string, fn func(WhisperMessage)) int {
+ // Decode the topic strings
+ topicsDecoded := make([][][]byte, len(topics))
+ for i, condition := range topics {
+ topicsDecoded[i] = make([][]byte, len(condition))
+ for j, topic := range condition {
+ topicsDecoded[i][j] = common.FromHex(topic)
+ }
+ }
+ // Assemble and inject the filter into the whisper client
+ filter := whisper.Filter{
+ To: crypto.ToECDSAPub(common.FromHex(to)),
+ From: crypto.ToECDSAPub(common.FromHex(from)),
+ Topics: whisper.NewFilterTopics(topicsDecoded...),
+ }
+ filter.Fn = func(message *whisper.Message) {
+ fn(NewWhisperMessage(message))
+ }
+ return self.Whisper.Watch(filter)
}
-type WhisperMessage struct {
- ref *whisper.Message
- Payload string `json:"payload"`
- To string `json:"to"`
- From string `json:"from"`
- Sent int64 `json:"sent"`
-}
+// Messages retrieves all the currently pooled messages matching a filter id.
+func (self *Whisper) Messages(id int) []WhisperMessage {
+ pool := self.Whisper.Messages(id)
-func NewWhisperMessage(msg *whisper.Message) WhisperMessage {
- return WhisperMessage{
- ref: msg,
- Payload: common.ToHex(msg.Payload),
- From: common.ToHex(crypto.FromECDSAPub(msg.Recover())),
- To: common.ToHex(crypto.FromECDSAPub(msg.To)),
- Sent: msg.Sent,
+ messages := make([]WhisperMessage, len(pool))
+ for i, message := range pool {
+ messages[i] = NewWhisperMessage(message)
}
+ return messages
}
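Roughly, the reworked external API is driven as below (a sketch only; shh is an existing *whisper.Whisper, all string parameters are hex encoded as the code above expects, and per the conversions above ttl is in seconds while priority is milliseconds of proof-of-work time):

    // xethSketch posts an unsigned, unencrypted broadcast and watches a single
    // topic condition; the "order book" topic name is illustrative only.
    func xethSketch(shh *whisper.Whisper) error {
        xshh := xeth.NewWhisper(shh)
        topic := common.ToHex([]byte("order book")) // hex of the raw topic data

        xshh.Watch("", "", [][]string{{topic}}, func(msg xeth.WhisperMessage) {
            fmt.Println("received:", msg.Payload)
        })
        return xshh.Post(common.ToHex([]byte("hello")), "", "", []string{topic}, 1000, 100)
    }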
diff --git a/xeth/whisper_filter.go b/xeth/whisper_filter.go
new file mode 100644
index 000000000..52e70e041
--- /dev/null
+++ b/xeth/whisper_filter.go
@@ -0,0 +1,84 @@
+// Contains the external API side message filter for watching, pooling and polling
+// matched whisper messages, serializing data access to avoid duplicate deliveries.
+
+package xeth
+
+import (
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// whisperFilter is the message cache matching a specific filter, accumulating
+// inbound messages until they are requested by the client.
+type whisperFilter struct {
+ id int // Filter identifier for old message retrieval
+ ref *Whisper // Whisper reference for old message retrieval
+
+ cache []WhisperMessage // Cache of messages not yet polled
+ skip map[common.Hash]struct{} // List of retrieved messages to avoid duplication
+ update time.Time // Time of the last message query
+
+ lock sync.RWMutex // Lock protecting the filter internals
+}
+
+// newWhisperFilter creates a new serialized, poll-based whisper topic filter.
+func newWhisperFilter(id int, ref *Whisper) *whisperFilter {
+ return &whisperFilter{
+ id: id,
+ ref: ref,
+
+ update: time.Now(),
+ skip: make(map[common.Hash]struct{}),
+ }
+}
+
+// messages retrieves all the cached messages from the entire pool matching the
+// filter, resetting the filter's change buffer.
+func (w *whisperFilter) messages() []WhisperMessage {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ w.cache = nil
+ w.update = time.Now()
+
+ w.skip = make(map[common.Hash]struct{})
+ messages := w.ref.Messages(w.id)
+ for _, message := range messages {
+ w.skip[message.ref.Hash] = struct{}{}
+ }
+ return messages
+}
+
+// insert injects a new batch of messages into the filter cache.
+func (w *whisperFilter) insert(messages ...WhisperMessage) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ for _, message := range messages {
+ if _, ok := w.skip[message.ref.Hash]; !ok {
+ w.cache = append(w.cache, message) // only cache messages not yet delivered
+ }
+ }
+}
+
+// retrieve fetches all the cached messages from the filter.
+func (w *whisperFilter) retrieve() (messages []WhisperMessage) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ messages, w.cache = w.cache, nil
+ w.update = time.Now()
+
+ return
+}
+
+// activity returns the time when client requests were last executed on the
+// filter.
+func (w *whisperFilter) activity() time.Time {
+ w.lock.RLock()
+ defer w.lock.RUnlock()
+
+ return w.update
+}
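The intended call pattern for the new filter cache, as a package-internal sketch (whisperFilter is unexported; the id, wrapper and fresh batch are assumed to come from an already installed watch and its arrival callback):

    // filterSketch walks through the poll/dedup cycle of the cache above.
    func filterSketch(id int, xshh *Whisper, fresh []WhisperMessage) {
        w := newWhisperFilter(id, xshh)

        all := w.messages()   // full pool for the id; remembers the returned hashes
        w.insert(fresh...)    // arrival callback path; already-seen hashes are dropped
        delta := w.retrieve() // only what was inserted since the last poll
        _, _, _ = all, delta, w.activity() // activity() feeds the idle-filter expiry
    }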
diff --git a/xeth/whisper_message.go b/xeth/whisper_message.go
new file mode 100644
index 000000000..c8195cec1
--- /dev/null
+++ b/xeth/whisper_message.go
@@ -0,0 +1,37 @@
+// Contains the external API representation of a whisper message.
+
+package xeth
+
+import (
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/whisper"
+)
+
+// WhisperMessage is the external API representation of a whisper.Message.
+type WhisperMessage struct {
+ ref *whisper.Message
+
+ Payload string `json:"payload"`
+ To string `json:"to"`
+ From string `json:"from"`
+ Sent int64 `json:"sent"`
+ TTL int64 `json:"ttl"`
+ Hash string `json:"hash"`
+}
+
+// NewWhisperMessage converts an internal message into an API version.
+func NewWhisperMessage(message *whisper.Message) WhisperMessage {
+ return WhisperMessage{
+ ref: message,
+
+ Payload: common.ToHex(message.Payload),
+ From: common.ToHex(crypto.FromECDSAPub(message.Recover())),
+ To: common.ToHex(crypto.FromECDSAPub(message.To)),
+ Sent: message.Sent.Unix(),
+ TTL: int64(message.TTL / time.Second),
+ Hash: common.ToHex(message.Hash.Bytes()),
+ }
+}
diff --git a/xeth/xeth.go b/xeth/xeth.go
index 251b070e4..692fb338c 100644
--- a/xeth/xeth.go
+++ b/xeth/xeth.go
@@ -97,7 +97,7 @@ done:
}
for id, filter := range self.messages {
- if time.Since(filter.timeout) > filterTickerTime {
+ if time.Since(filter.activity()) > filterTickerTime {
self.Whisper().Unwatch(id)
delete(self.messages, id)
}
@@ -236,6 +236,10 @@ func (self *XEth) CurrentBlock() *types.Block {
return self.backend.ChainManager().CurrentBlock()
}
+func (self *XEth) GasLimit() *big.Int {
+ return self.backend.ChainManager().GasLimit()
+}
+
func (self *XEth) Block(v interface{}) *Block {
if n, ok := v.(int32); ok {
return self.BlockByNumber(int64(n))
@@ -276,6 +280,10 @@ func (self *XEth) IsMining() bool {
return self.backend.IsMining()
}
+func (self *XEth) HashRate() int64 {
+ return self.backend.Miner().HashRate()
+}
+
func (self *XEth) EthVersion() string {
return fmt.Sprintf("%d", self.backend.EthVersion())
}
@@ -448,35 +456,61 @@ func (self *XEth) AllLogs(earliest, latest int64, skip, max int, address []strin
return filter.Find()
}
-func (p *XEth) NewWhisperFilter(opts *Options) int {
+// NewWhisperFilter creates and registers a new message filter to watch for
+// inbound whisper messages. All parameters at this point are assumed to be
+// HEX encoded.
+func (p *XEth) NewWhisperFilter(to, from string, topics [][]string) int {
+ // Pre-define the id to be filled later
var id int
- opts.Fn = func(msg WhisperMessage) {
- p.messagesMut.Lock()
- defer p.messagesMut.Unlock()
- p.messages[id].add(msg) // = append(p.messages[id], msg)
+
+ // Callback to delegate core whisper messages to this xeth filter
+ callback := func(msg WhisperMessage) {
+ p.messagesMut.RLock() // Only read lock to the filter pool
+ defer p.messagesMut.RUnlock()
+ p.messages[id].insert(msg)
}
- id = p.Whisper().Watch(opts)
- p.messages[id] = &whisperFilter{timeout: time.Now()}
+ // Initialize the core whisper filter and wrap into xeth
+ id = p.Whisper().Watch(to, from, topics, callback)
+
+ p.messagesMut.Lock()
+ p.messages[id] = newWhisperFilter(id, p.Whisper())
+ p.messagesMut.Unlock()
+
return id
}
+// UninstallWhisperFilter disables and removes an existing filter.
func (p *XEth) UninstallWhisperFilter(id int) bool {
+ p.messagesMut.Lock()
+ defer p.messagesMut.Unlock()
+
if _, ok := p.messages[id]; ok {
delete(p.messages, id)
return true
}
-
return false
}
-func (self *XEth) MessagesChanged(id int) []WhisperMessage {
- self.messagesMut.Lock()
- defer self.messagesMut.Unlock()
+// WhisperMessages retrieves all the known messages that match a specific filter.
+func (self *XEth) WhisperMessages(id int) []WhisperMessage {
+ self.messagesMut.RLock()
+ defer self.messagesMut.RUnlock()
if self.messages[id] != nil {
- return self.messages[id].get()
+ return self.messages[id].messages()
}
+ return nil
+}
+
+// WhisperMessagesChanged retrieves all the new messages matched by a filter
+// since the last retrieval.
+func (self *XEth) WhisperMessagesChanged(id int) []WhisperMessage {
+ self.messagesMut.RLock()
+ defer self.messagesMut.RUnlock()
+ if self.messages[id] != nil {
+ return self.messages[id].retrieve()
+ }
return nil
}
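A sketch of how a consumer (for instance the RPC layer) might drive these calls; x is assumed to be an initialized *xeth.XEth and the topic hex is illustrative only (imports omitted):

    // pollSketch installs a filter, polls the incremental changes a few times
    // and removes the filter again.
    func pollSketch(x *xeth.XEth) {
        fid := x.NewWhisperFilter("", "", [][]string{{common.ToHex([]byte("order book"))}})
        defer x.UninstallWhisperFilter(fid)

        for i := 0; i < 3; i++ {
            for _, msg := range x.WhisperMessagesChanged(fid) { // only new arrivals
                fmt.Println("new message:", msg.Payload)
            }
            time.Sleep(time.Second)
        }
        // x.WhisperMessages(fid) would instead return the full pool for the filter.
    }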
@@ -727,22 +761,6 @@ func (m callmsg) Gas() *big.Int { return m.gas }
func (m callmsg) Value() *big.Int { return m.value }
func (m callmsg) Data() []byte { return m.data }
-type whisperFilter struct {
- messages []WhisperMessage
- timeout time.Time
- id int
-}
-
-func (w *whisperFilter) add(msgs ...WhisperMessage) {
- w.messages = append(w.messages, msgs...)
-}
-func (w *whisperFilter) get() []WhisperMessage {
- w.timeout = time.Now()
- tmp := w.messages
- w.messages = nil
- return tmp
-}
-
type logFilter struct {
logs state.Logs
timeout time.Time