Diffstat (limited to 'core')
-rw-r--r--  core/asm/lex_test.go            |  45
-rw-r--r--  core/asm/lexer.go               |   2
-rw-r--r--  core/bench_test.go              |   1
-rw-r--r--  core/block_validator_test.go    |   5
-rw-r--r--  core/blockchain.go              |   5
-rw-r--r--  core/blockchain_test.go         |  57
-rw-r--r--  core/chain_indexer.go           | 396
-rw-r--r--  core/chain_indexer_test.go      | 234
-rw-r--r--  core/chain_makers.go            |   1
-rw-r--r--  core/chain_makers_test.go       |   3
-rw-r--r--  core/dao_test.go                |  10
-rw-r--r--  core/filter_test.go             |  17
-rw-r--r--  core/gen_genesis.go             |  10
-rw-r--r--  core/genesis.go                 |   1
-rw-r--r--  core/genesis_test.go            |   2
-rw-r--r--  core/state_processor.go         |  10
-rw-r--r--  core/tx_journal.go              | 150
-rw-r--r--  core/tx_pool.go                 | 171
-rw-r--r--  core/tx_pool_test.go            | 229
-rw-r--r--  core/types/gen_receipt_json.go  |   9
-rw-r--r--  core/types/receipt.go           |  75
-rw-r--r--  core/vm/common.go               |  16
-rw-r--r--  core/vm/contracts.go            | 304
-rw-r--r--  core/vm/contracts_test.go       | 390
-rw-r--r--  core/vm/evm.go                  |  99
-rw-r--r--  core/vm/gas_table.go            |  55
-rw-r--r--  core/vm/instructions.go         | 109
-rw-r--r--  core/vm/instructions_test.go    | 159
-rw-r--r--  core/vm/interpreter.go          |  43
-rw-r--r--  core/vm/jump_table.go           |  55
-rw-r--r--  core/vm/logger.go               |  25
-rw-r--r--  core/vm/memory.go               |   1
-rw-r--r--  core/vm/memory_table.go         |  13
-rw-r--r--  core/vm/opcodes.go              | 308
-rw-r--r--  core/vm/runtime/env.go          |   5
-rw-r--r--  core/vm/runtime/runtime.go      |   6
36 files changed, 2483 insertions, 538 deletions
diff --git a/core/asm/lex_test.go b/core/asm/lex_test.go
index 36e67bcf7..e6901d4e3 100644
--- a/core/asm/lex_test.go
+++ b/core/asm/lex_test.go
@@ -16,7 +16,10 @@
package asm
-import "testing"
+import (
+ "reflect"
+ "testing"
+)
func lexAll(src string) []token {
ch := Lex("test.asm", []byte(src), false)
@@ -28,9 +31,41 @@ func lexAll(src string) []token {
return tokens
}
-func TestComment(t *testing.T) {
- tokens := lexAll(";; this is a comment")
- if len(tokens) != 2 { // {new line, EOF}
- t.Error("expected no tokens")
+func TestLexer(t *testing.T) {
+ tests := []struct {
+ input string
+ tokens []token
+ }{
+ {
+ input: ";; this is a comment",
+ tokens: []token{{typ: lineStart}, {typ: eof}},
+ },
+ {
+ input: "0x12345678",
+ tokens: []token{{typ: lineStart}, {typ: number, text: "0x12345678"}, {typ: eof}},
+ },
+ {
+ input: "0x123ggg",
+ tokens: []token{{typ: lineStart}, {typ: number, text: "0x123"}, {typ: element, text: "ggg"}, {typ: eof}},
+ },
+ {
+ input: "12345678",
+ tokens: []token{{typ: lineStart}, {typ: number, text: "12345678"}, {typ: eof}},
+ },
+ {
+ input: "123abc",
+ tokens: []token{{typ: lineStart}, {typ: number, text: "123"}, {typ: element, text: "abc"}, {typ: eof}},
+ },
+ {
+ input: "0123abc",
+ tokens: []token{{typ: lineStart}, {typ: number, text: "0123"}, {typ: element, text: "abc"}, {typ: eof}},
+ },
+ }
+
+ for _, test := range tests {
+ tokens := lexAll(test.input)
+ if !reflect.DeepEqual(tokens, test.tokens) {
+ t.Errorf("input %q\ngot: %+v\nwant: %+v", test.input, tokens, test.tokens)
+ }
}
}
diff --git a/core/asm/lexer.go b/core/asm/lexer.go
index 2770bd35f..d784e5d50 100644
--- a/core/asm/lexer.go
+++ b/core/asm/lexer.go
@@ -254,7 +254,7 @@ func lexInsideString(l *lexer) stateFn {
func lexNumber(l *lexer) stateFn {
acceptance := Numbers
- if l.accept("0") && l.accept("xX") {
+ if l.accept("0") || l.accept("xX") {
acceptance = HexadecimalNumbers
}
l.acceptRun(acceptance)
diff --git a/core/bench_test.go b/core/bench_test.go
index 20676fc97..b9250f7d3 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -300,6 +300,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
}
}
+ chain.Stop()
db.Close()
}
}
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index abe1766b4..c0afc2955 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -44,6 +44,7 @@ func TestHeaderVerification(t *testing.T) {
}
// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ defer chain.Stop()
for i := 0; i < len(blocks); i++ {
for j, valid := range []bool{true, false} {
@@ -108,9 +109,11 @@ func testHeaderConcurrentVerification(t *testing.T, threads int) {
if valid {
chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
+ chain.Stop()
} else {
chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), new(event.TypeMux), vm.Config{})
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
+ chain.Stop()
}
// Wait for all the verification results
checks := make(map[int]error)
@@ -172,6 +175,8 @@ func testHeaderConcurrentAbortion(t *testing.T, threads int) {
// Start the verifications and immediately abort
chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), new(event.TypeMux), vm.Config{})
+ defer chain.Stop()
+
abort, results := chain.engine.VerifyHeaders(chain, headers, seals)
close(abort)
diff --git a/core/blockchain.go b/core/blockchain.go
index bb1c14f43..3fb8be15f 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -827,6 +827,11 @@ func (bc *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err er
bc.mu.Lock()
defer bc.mu.Unlock()
+ if bc.HasBlock(block.Hash()) {
+ log.Trace("Block existed", "hash", block.Hash())
+ return
+ }
+
localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
externTd := new(big.Int).Add(block.Difficulty(), ptd)
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 5fa671e2b..4a0f44940 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -20,6 +20,7 @@ import (
"fmt"
"math/big"
"math/rand"
+ "sync"
"testing"
"time"
@@ -61,6 +62,8 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
if err != nil {
t.Fatal("could not make new canonical in testFork", err)
}
+ defer blockchain2.Stop()
+
// Assert the chains have the same header/block at #i
var hash1, hash2 common.Hash
if full {
@@ -182,6 +185,8 @@ func insertChain(done chan bool, blockchain *BlockChain, chain types.Blocks, t *
func TestLastBlock(t *testing.T) {
bchain := newTestBlockChain(false)
+ defer bchain.Stop()
+
block := makeBlockChain(bchain.CurrentBlock(), 1, bchain.chainDb, 0)[0]
bchain.insert(block)
if block.Hash() != GetHeadBlockHash(bchain.chainDb) {
@@ -202,6 +207,8 @@ func testExtendCanonical(t *testing.T, full bool) {
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
+ defer processor.Stop()
+
// Define the difficulty comparator
better := func(td1, td2 *big.Int) {
if td2.Cmp(td1) <= 0 {
@@ -228,6 +235,8 @@ func testShorterFork(t *testing.T, full bool) {
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
+ defer processor.Stop()
+
// Define the difficulty comparator
worse := func(td1, td2 *big.Int) {
if td2.Cmp(td1) >= 0 {
@@ -256,6 +265,8 @@ func testLongerFork(t *testing.T, full bool) {
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
+ defer processor.Stop()
+
// Define the difficulty comparator
better := func(td1, td2 *big.Int) {
if td2.Cmp(td1) <= 0 {
@@ -284,6 +295,8 @@ func testEqualFork(t *testing.T, full bool) {
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
+ defer processor.Stop()
+
// Define the difficulty comparator
equal := func(td1, td2 *big.Int) {
if td2.Cmp(td1) != 0 {
@@ -309,6 +322,8 @@ func testBrokenChain(t *testing.T, full bool) {
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
+ defer blockchain.Stop()
+
// Create a forked chain, and try to insert with a missing link
if full {
chain := makeBlockChain(blockchain.CurrentBlock(), 5, db, forkSeed)[1:]
@@ -385,6 +400,7 @@ func testReorgShort(t *testing.T, full bool) {
func testReorg(t *testing.T, first, second []int, td int64, full bool) {
bc := newTestBlockChain(true)
+ defer bc.Stop()
// Insert an easy and a difficult chain afterwards
if full {
@@ -429,6 +445,7 @@ func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }
func testBadHashes(t *testing.T, full bool) {
bc := newTestBlockChain(true)
+ defer bc.Stop()
// Create a chain, ban a hash and try to import
var err error
@@ -453,6 +470,7 @@ func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
func testReorgBadHashes(t *testing.T, full bool) {
bc := newTestBlockChain(true)
+ defer bc.Stop()
// Create a chain, import and ban afterwards
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)
@@ -483,6 +501,8 @@ func testReorgBadHashes(t *testing.T, full bool) {
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
+ defer ncm.Stop()
+
if full {
if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
@@ -508,6 +528,8 @@ func testInsertNonceError(t *testing.T, full bool) {
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
+ defer blockchain.Stop()
+
// Create and insert a chain with a failing nonce
var (
failAt int
@@ -589,15 +611,16 @@ func TestFastVsFullChains(t *testing.T) {
archiveDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(archiveDb)
archive, _ := NewBlockChain(archiveDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ defer archive.Stop()
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
-
// Fast import the chain as a non-archive node to test
fastDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(fastDb)
fast, _ := NewBlockChain(fastDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ defer fast.Stop()
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -678,6 +701,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
+ defer archive.Stop()
+
assert(t, "archive", archive, height, height, height)
archive.Rollback(remove)
assert(t, "archive", archive, height/2, height/2, height/2)
@@ -686,6 +711,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
fastDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(fastDb)
fast, _ := NewBlockChain(fastDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ defer fast.Stop()
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -709,6 +735,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
+ defer light.Stop()
+
assert(t, "light", light, height, 0, 0)
light.Rollback(remove)
assert(t, "light", light, height/2, 0, 0)
@@ -777,6 +805,7 @@ func TestChainTxReorgs(t *testing.T) {
if i, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
+ defer blockchain.Stop()
// overwrite the old chain
chain, _ = GenerateChain(gspec.Config, genesis, db, 5, func(i int, gen *BlockGen) {
@@ -845,6 +874,7 @@ func TestLogReorgs(t *testing.T) {
var evmux event.TypeMux
blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), &evmux, vm.Config{})
+ defer blockchain.Stop()
subs := evmux.Subscribe(RemovedLogsEvent{})
chain, _ := GenerateChain(params.TestChainConfig, genesis, db, 2, func(i int, gen *BlockGen) {
@@ -886,6 +916,7 @@ func TestReorgSideEvent(t *testing.T) {
evmux := &event.TypeMux{}
blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
+ defer blockchain.Stop()
chain, _ := GenerateChain(gspec.Config, genesis, db, 3, func(i int, gen *BlockGen) {})
if _, err := blockchain.InsertChain(chain); err != nil {
@@ -955,11 +986,18 @@ done:
// Tests if the canonical block can be fetched from the database during chain insertion.
func TestCanonicalBlockRetrieval(t *testing.T) {
- bc := newTestBlockChain(false)
+ bc := newTestBlockChain(true)
+ defer bc.Stop()
+
chain, _ := GenerateChain(bc.config, bc.genesisBlock, bc.chainDb, 10, func(i int, gen *BlockGen) {})
+ var pend sync.WaitGroup
+ pend.Add(len(chain))
+
for i := range chain {
go func(block *types.Block) {
+ defer pend.Done()
+
// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
for {
ch := GetCanonicalHash(bc.chainDb, block.NumberU64())
@@ -980,8 +1018,11 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
}
}(chain[i])
- bc.InsertChain(types.Blocks{chain[i]})
+ if _, err := bc.InsertChain(types.Blocks{chain[i]}); err != nil {
+ t.Fatalf("failed to insert block %d: %v", i, err)
+ }
}
+ pend.Wait()
}
func TestEIP155Transition(t *testing.T) {
@@ -1001,6 +1042,8 @@ func TestEIP155Transition(t *testing.T) {
)
blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), &mux, vm.Config{})
+ defer blockchain.Stop()
+
blocks, _ := GenerateChain(gspec.Config, genesis, db, 4, func(i int, block *BlockGen) {
var (
tx *types.Transaction
@@ -1104,10 +1147,12 @@ func TestEIP161AccountRemoval(t *testing.T) {
},
Alloc: GenesisAlloc{address: {Balance: funds}},
}
- genesis = gspec.MustCommit(db)
- mux event.TypeMux
- blockchain, _ = NewBlockChain(db, gspec.Config, ethash.NewFaker(), &mux, vm.Config{})
+ genesis = gspec.MustCommit(db)
+ mux event.TypeMux
)
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), &mux, vm.Config{})
+ defer blockchain.Stop()
+
blocks, _ := GenerateChain(gspec.Config, genesis, db, 3, func(i int, block *BlockGen) {
var (
tx *types.Transaction
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
new file mode 100644
index 000000000..9a88a5b1b
--- /dev/null
+++ b/core/chain_indexer.go
@@ -0,0 +1,396 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// ChainIndexerBackend defines the methods needed to process chain segments in
+// the background and write the segment results into the database. These can be
+// used to create filter blooms or CHTs.
+type ChainIndexerBackend interface {
+ // Reset initiates the processing of a new chain segment, potentially terminating
+ // any partially completed operations (in case of a reorg).
+ Reset(section uint64)
+
+ // Process crunches through the next header in the chain segment. The caller
+ // will ensure a sequential order of headers.
+ Process(header *types.Header)
+
+ // Commit finalizes the section metadata and stores it into the database. This
+ // interface will usually be a batch writer.
+ Commit(db ethdb.Database) error
+}
+
+// ChainIndexer does a post-processing job for equally sized sections of the
+// canonical chain (like BloomBits and CHT structures). A ChainIndexer is
+// connected to the blockchain through the event system by starting a
+// ChainEventLoop in a goroutine.
+//
+// Further child ChainIndexers can be added which use the output of the parent
+// section indexer. These child indexers receive new head notifications only
+// after an entire section has been finished or in case of rollbacks that might
+// affect already finished sections.
+type ChainIndexer struct {
+ chainDb ethdb.Database // Chain database to index the data from
+ indexDb ethdb.Database // Prefixed table-view of the db to write index metadata into
+ backend ChainIndexerBackend // Background processor generating the index data content
+ children []*ChainIndexer // Child indexers to cascade chain updates to
+
+ active uint32 // Flag whether the event loop was started
+ update chan struct{} // Notification channel that headers should be processed
+ quit chan chan error // Quit channel to tear down running goroutines
+
+ sectionSize uint64 // Number of blocks in a single chain segment to process
+ confirmsReq uint64 // Number of confirmations before processing a completed segment
+
+ storedSections uint64 // Number of sections successfully indexed into the database
+ knownSections uint64 // Number of sections known to be complete (block wise)
+ cascadedHead uint64 // Block number of the last completed section cascaded to subindexers
+
+ throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources
+
+ log log.Logger
+ lock sync.RWMutex
+}
+
+// NewChainIndexer creates a new chain indexer to do background processing on
+// chain segments of a given size after a certain number of confirmations have passed.
+// The throttling parameter might be used to prevent database thrashing.
+func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
+ c := &ChainIndexer{
+ chainDb: chainDb,
+ indexDb: indexDb,
+ backend: backend,
+ update: make(chan struct{}, 1),
+ quit: make(chan chan error),
+ sectionSize: section,
+ confirmsReq: confirm,
+ throttling: throttling,
+ log: log.New("type", kind),
+ }
+ // Initialize database dependent fields and start the updater
+ c.loadValidSections()
+ go c.updateLoop()
+
+ return c
+}
+
+// Start creates a goroutine to feed chain head events into the indexer for
+// cascading background processing.
+func (c *ChainIndexer) Start(currentHeader *types.Header, eventMux *event.TypeMux) {
+ go c.eventLoop(currentHeader, eventMux)
+}
+
+// Close tears down all goroutines belonging to the indexer and returns any error
+// that might have occurred internally.
+func (c *ChainIndexer) Close() error {
+ var errs []error
+
+ // Tear down the primary update loop
+ errc := make(chan error)
+ c.quit <- errc
+ if err := <-errc; err != nil {
+ errs = append(errs, err)
+ }
+ // If needed, tear down the secondary event loop
+ if atomic.LoadUint32(&c.active) != 0 {
+ c.quit <- errc
+ if err := <-errc; err != nil {
+ errs = append(errs, err)
+ }
+ }
+ // Return any failures
+ switch {
+ case len(errs) == 0:
+ return nil
+
+ case len(errs) == 1:
+ return errs[0]
+
+ default:
+ return fmt.Errorf("%v", errs)
+ }
+}
+
+// eventLoop is a secondary - optional - event loop of the indexer which is only
+// started for the outermost indexer to push chain head events into a processing
+// queue.
+func (c *ChainIndexer) eventLoop(currentHeader *types.Header, eventMux *event.TypeMux) {
+ // Mark the chain indexer as active, requiring an additional teardown
+ atomic.StoreUint32(&c.active, 1)
+
+ // Subscribe to chain head events
+ sub := eventMux.Subscribe(ChainEvent{})
+ defer sub.Unsubscribe()
+
+ // Fire the initial new head event to start any outstanding processing
+ c.newHead(currentHeader.Number.Uint64(), false)
+
+ var (
+ prevHeader = currentHeader
+ prevHash = currentHeader.Hash()
+ )
+ for {
+ select {
+ case errc := <-c.quit:
+ // Chain indexer terminating, report no failure and abort
+ errc <- nil
+ return
+
+ case ev, ok := <-sub.Chan():
+ // Received a new event, ensure it's not nil (closing) and update
+ if !ok {
+ errc := <-c.quit
+ errc <- nil
+ return
+ }
+ header := ev.Data.(ChainEvent).Block.Header()
+ if header.ParentHash != prevHash {
+ c.newHead(FindCommonAncestor(c.chainDb, prevHeader, header).Number.Uint64(), true)
+ }
+ c.newHead(header.Number.Uint64(), false)
+
+ prevHeader, prevHash = header, header.Hash()
+ }
+ }
+}
+
+// newHead notifies the indexer about new chain heads and/or reorgs.
+func (c *ChainIndexer) newHead(head uint64, reorg bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // If a reorg happened, invalidate all sections until that point
+ if reorg {
+ // Revert the known section number to the reorg point
+ changed := head / c.sectionSize
+ if changed < c.knownSections {
+ c.knownSections = changed
+ }
+ // Revert the stored sections from the database to the reorg point
+ if changed < c.storedSections {
+ c.setValidSections(changed)
+ }
+ // Update the new head number to the finalized section end and notify children
+ head = changed * c.sectionSize
+
+ if head < c.cascadedHead {
+ c.cascadedHead = head
+ for _, child := range c.children {
+ child.newHead(c.cascadedHead, true)
+ }
+ }
+ return
+ }
+ // No reorg, calculate the number of newly known sections and update if high enough
+ var sections uint64
+ if head >= c.confirmsReq {
+ sections = (head + 1 - c.confirmsReq) / c.sectionSize
+ if sections > c.knownSections {
+ c.knownSections = sections
+
+ select {
+ case c.update <- struct{}{}:
+ default:
+ }
+ }
+ }
+}
+
+// updateLoop is the main event loop of the indexer which pushes chain segments
+// down into the processing backend.
+func (c *ChainIndexer) updateLoop() {
+ var updated time.Time
+
+ for {
+ select {
+ case errc := <-c.quit:
+ // Chain indexer terminating, report no failure and abort
+ errc <- nil
+ return
+
+ case <-c.update:
+ // Section headers completed (or rolled back), update the index
+ c.lock.Lock()
+ if c.knownSections > c.storedSections {
+ // Periodically print an upgrade log message to the user
+ if time.Since(updated) > 8*time.Second {
+ if c.knownSections > c.storedSections+1 {
+ c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
+ }
+ updated = time.Now()
+ }
+ // Cache the current section count and head to allow unlocking the mutex
+ section := c.storedSections
+ var oldHead common.Hash
+ if section > 0 {
+ oldHead = c.sectionHead(section - 1)
+ }
+ // Process the newly defined section in the background
+ c.lock.Unlock()
+ newHead, err := c.processSection(section, oldHead)
+ c.lock.Lock()
+
+ // If processing succeeded and no reorgs occurred, mark the section completed
+ if err == nil && oldHead == c.sectionHead(section-1) {
+ c.setSectionHead(section, newHead)
+ c.setValidSections(section + 1)
+
+ c.cascadedHead = c.storedSections*c.sectionSize - 1
+ for _, child := range c.children {
+ c.log.Trace("Cascading chain index update", "head", c.cascadedHead)
+ child.newHead(c.cascadedHead, false)
+ }
+ } else {
+ // If processing failed, don't retry until further notification
+ c.log.Debug("Chain index processing failed", "section", section, "err", err)
+ c.knownSections = c.storedSections
+ }
+ }
+ // If there are still further sections to process, reschedule
+ if c.knownSections > c.storedSections {
+ time.AfterFunc(c.throttling, func() {
+ select {
+ case c.update <- struct{}{}:
+ default:
+ }
+ })
+ }
+ c.lock.Unlock()
+ }
+ }
+}
+
+// processSection processes an entire section by calling backend functions while
+// ensuring the continuity of the passed headers. Since the chain mutex is not
+// held while processing, the continuity can be broken by a long reorg, in which
+// case the function returns with an error.
+func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (common.Hash, error) {
+ c.log.Trace("Processing new chain section", "section", section)
+
+ // Reset and partial processing
+ c.backend.Reset(section)
+
+ for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
+ hash := GetCanonicalHash(c.chainDb, number)
+ if hash == (common.Hash{}) {
+ return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
+ }
+ header := GetHeader(c.chainDb, hash, number)
+ if header == nil {
+ return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4])
+ } else if header.ParentHash != lastHead {
+ return common.Hash{}, fmt.Errorf("chain reorged during section processing")
+ }
+ c.backend.Process(header)
+ lastHead = header.Hash()
+ }
+ if err := c.backend.Commit(c.chainDb); err != nil {
+ return common.Hash{}, err
+ }
+ return lastHead, nil
+}
+
+// Sections returns the number of processed sections maintained by the indexer
+// and also the information about the last header indexed for potential canonical
+// verifications.
+func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ return c.storedSections, c.storedSections*c.sectionSize - 1, c.sectionHead(c.storedSections - 1)
+}
+
+// AddChildIndexer adds a child ChainIndexer that can use the output of this one
+func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ c.children = append(c.children, indexer)
+
+ // Cascade any pending updates to new children too
+ if c.storedSections > 0 {
+ indexer.newHead(c.storedSections*c.sectionSize-1, false)
+ }
+}
+
+// loadValidSections reads the number of valid sections from the index database
+// and caches it into the local state.
+func (c *ChainIndexer) loadValidSections() {
+ data, _ := c.indexDb.Get([]byte("count"))
+ if len(data) == 8 {
+ c.storedSections = binary.BigEndian.Uint64(data[:])
+ }
+}
+
+// setValidSections writes the number of valid sections to the index database
+func (c *ChainIndexer) setValidSections(sections uint64) {
+ // Set the current number of valid sections in the database
+ var data [8]byte
+ binary.BigEndian.PutUint64(data[:], sections)
+ c.indexDb.Put([]byte("count"), data[:])
+
+ // Remove any reorged sections, caching the valid ones in the meantime
+ for c.storedSections > sections {
+ c.storedSections--
+ c.removeSectionHead(c.storedSections)
+ }
+ c.storedSections = sections // needed if new > old
+}
+
+// sectionHead retrieves the last block hash of a processed section from the
+// index database.
+func (c *ChainIndexer) sectionHead(section uint64) common.Hash {
+ var data [8]byte
+ binary.BigEndian.PutUint64(data[:], section)
+
+ hash, _ := c.indexDb.Get(append([]byte("shead"), data[:]...))
+ if len(hash) == len(common.Hash{}) {
+ return common.BytesToHash(hash)
+ }
+ return common.Hash{}
+}
+
+// setSectionHead writes the last block hash of a processed section to the index
+// database.
+func (c *ChainIndexer) setSectionHead(section uint64, hash common.Hash) {
+ var data [8]byte
+ binary.BigEndian.PutUint64(data[:], section)
+
+ c.indexDb.Put(append([]byte("shead"), data[:]...), hash.Bytes())
+}
+
+// removeSectionHead removes the reference to a processed section from the index
+// database.
+func (c *ChainIndexer) removeSectionHead(section uint64) {
+ var data [8]byte
+ binary.BigEndian.PutUint64(data[:], section)
+
+ c.indexDb.Delete(append([]byte("shead"), data[:]...))
+}
diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go
new file mode 100644
index 000000000..780e46e43
--- /dev/null
+++ b/core/chain_indexer_test.go
@@ -0,0 +1,234 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "fmt"
+ "math/big"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// Runs multiple tests with randomized parameters.
+func TestChainIndexerSingle(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ testChainIndexer(t, 1)
+ }
+}
+
+// Runs multiple tests with randomized parameters and different number of
+// chain backends.
+func TestChainIndexerWithChildren(t *testing.T) {
+ for i := 2; i < 8; i++ {
+ testChainIndexer(t, i)
+ }
+}
+
+// testChainIndexer runs a test with either a single chain indexer or a chain of
+// multiple backends. The section size and required confirmation count parameters
+// are randomized.
+func testChainIndexer(t *testing.T, count int) {
+ db, _ := ethdb.NewMemDatabase()
+ defer db.Close()
+
+ // Create a chain of indexers and ensure they all report empty
+ backends := make([]*testChainIndexBackend, count)
+ for i := 0; i < count; i++ {
+ var (
+ sectionSize = uint64(rand.Intn(100) + 1)
+ confirmsReq = uint64(rand.Intn(10))
+ )
+ backends[i] = &testChainIndexBackend{t: t, processCh: make(chan uint64)}
+ backends[i].indexer = NewChainIndexer(db, ethdb.NewTable(db, string([]byte{byte(i)})), backends[i], sectionSize, confirmsReq, 0, fmt.Sprintf("indexer-%d", i))
+ defer backends[i].indexer.Close()
+
+ if sections, _, _ := backends[i].indexer.Sections(); sections != 0 {
+ t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, 0)
+ }
+ if i > 0 {
+ backends[i-1].indexer.AddChildIndexer(backends[i].indexer)
+ }
+ }
+ // notify pings the root indexer about a new head or reorg, then expects
+ // processed blocks if a section is processable
+ notify := func(headNum, failNum uint64, reorg bool) {
+ backends[0].indexer.newHead(headNum, reorg)
+ if reorg {
+ for _, backend := range backends {
+ headNum = backend.reorg(headNum)
+ backend.assertSections()
+ }
+ return
+ }
+ var cascade bool
+ for _, backend := range backends {
+ headNum, cascade = backend.assertBlocks(headNum, failNum)
+ if !cascade {
+ break
+ }
+ backend.assertSections()
+ }
+ }
+ // inject inserts a new random canonical header into the database directly
+ inject := func(number uint64) {
+ header := &types.Header{Number: big.NewInt(int64(number)), Extra: big.NewInt(rand.Int63()).Bytes()}
+ if number > 0 {
+ header.ParentHash = GetCanonicalHash(db, number-1)
+ }
+ WriteHeader(db, header)
+ WriteCanonicalHash(db, header.Hash(), number)
+ }
+ // Start indexer with an already existing chain
+ for i := uint64(0); i <= 100; i++ {
+ inject(i)
+ }
+ notify(100, 100, false)
+
+ // Add new blocks one by one
+ for i := uint64(101); i <= 1000; i++ {
+ inject(i)
+ notify(i, i, false)
+ }
+ // Do a reorg
+ notify(500, 500, true)
+
+ // Create new fork
+ for i := uint64(501); i <= 1000; i++ {
+ inject(i)
+ notify(i, i, false)
+ }
+ for i := uint64(1001); i <= 1500; i++ {
+ inject(i)
+ }
+ // Failed processing scenario where fewer blocks are available than notified
+ notify(2000, 1500, false)
+
+ // Notify about a reorg (which could have caused the missing blocks if happened during processing)
+ notify(1500, 1500, true)
+
+ // Create new fork
+ for i := uint64(1501); i <= 2000; i++ {
+ inject(i)
+ notify(i, i, false)
+ }
+}
+
+// testChainIndexBackend implements ChainIndexerBackend
+type testChainIndexBackend struct {
+ t *testing.T
+ indexer *ChainIndexer
+ section, headerCnt, stored uint64
+ processCh chan uint64
+}
+
+// assertSections verifies that a chain indexer has the correct number of sections.
+func (b *testChainIndexBackend) assertSections() {
+ // Keep trying for 3 seconds if it does not match
+ var sections uint64
+ for i := 0; i < 300; i++ {
+ sections, _, _ = b.indexer.Sections()
+ if sections == b.stored {
+ return
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ b.t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, b.stored)
+}
+
+// assertBlocks expects processing calls after new blocks have arrived. If the
+// failNum < headNum then we are simulating a scenario where a reorg has happened
+// after the processing has started and the processing of a section fails.
+func (b *testChainIndexBackend) assertBlocks(headNum, failNum uint64) (uint64, bool) {
+ var sections uint64
+ if headNum >= b.indexer.confirmsReq {
+ sections = (headNum + 1 - b.indexer.confirmsReq) / b.indexer.sectionSize
+ if sections > b.stored {
+ // expect processed blocks
+ for expectd := b.stored * b.indexer.sectionSize; expectd < sections*b.indexer.sectionSize; expectd++ {
+ if expectd > failNum {
+ // rolled back after processing started, no more process calls expected
+ // wait until updating is done to make sure that processing actually fails
+ var updating bool
+ for i := 0; i < 300; i++ {
+ b.indexer.lock.Lock()
+ updating = b.indexer.knownSections > b.indexer.storedSections
+ b.indexer.lock.Unlock()
+ if !updating {
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ if updating {
+ b.t.Fatalf("update did not finish")
+ }
+ sections = expectd / b.indexer.sectionSize
+ break
+ }
+ select {
+ case <-time.After(10 * time.Second):
+ b.t.Fatalf("Expected processed block #%d, got nothing", expectd)
+ case processed := <-b.processCh:
+ if processed != expectd {
+ b.t.Errorf("Expected processed block #%d, got #%d", expectd, processed)
+ }
+ }
+ }
+ b.stored = sections
+ }
+ }
+ if b.stored == 0 {
+ return 0, false
+ }
+ return b.stored*b.indexer.sectionSize - 1, true
+}
+
+func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
+ firstChanged := headNum / b.indexer.sectionSize
+ if firstChanged < b.stored {
+ b.stored = firstChanged
+ }
+ return b.stored * b.indexer.sectionSize
+}
+
+func (b *testChainIndexBackend) Reset(section uint64) {
+ b.section = section
+ b.headerCnt = 0
+}
+
+func (b *testChainIndexBackend) Process(header *types.Header) {
+ b.headerCnt++
+ if b.headerCnt > b.indexer.sectionSize {
+ b.t.Error("Processing too many headers")
+ }
+ //t.processCh <- header.Number.Uint64()
+ select {
+ case <-time.After(10 * time.Second):
+ b.t.Fatal("Unexpected call to Process")
+ case b.processCh <- header.Number.Uint64():
+ }
+}
+
+func (b *testChainIndexBackend) Commit(db ethdb.Database) error {
+ if b.headerCnt != b.indexer.sectionSize {
+ b.t.Error("Not enough headers processed")
+ }
+ return nil
+}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 38a69d42a..976a8114d 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -218,6 +218,7 @@ func makeHeader(config *params.ChainConfig, parent *types.Block, state *state.St
Number: parent.Number(),
Time: new(big.Int).Sub(time, big.NewInt(10)),
Difficulty: parent.Difficulty(),
+ UncleHash: parent.UncleHash(),
}),
GasLimit: CalcGasLimit(parent),
GasUsed: new(big.Int),
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 3a7c62396..28eb76c63 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -81,7 +81,10 @@ func ExampleGenerateChain() {
// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
+
blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
+ defer blockchain.Stop()
+
if i, err := blockchain.InsertChain(chain); err != nil {
fmt.Printf("insert error (block %d): %v\n", chain[i].NumberU64(), err)
return
diff --git a/core/dao_test.go b/core/dao_test.go
index bc9f3f394..99bf1ecae 100644
--- a/core/dao_test.go
+++ b/core/dao_test.go
@@ -43,11 +43,13 @@ func TestDAOForkRangeExtradata(t *testing.T) {
gspec.MustCommit(proDb)
proConf := &params.ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: true}
proBc, _ := NewBlockChain(proDb, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ defer proBc.Stop()
conDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(conDb)
conConf := &params.ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: false}
conBc, _ := NewBlockChain(conDb, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ defer conBc.Stop()
if _, err := proBc.InsertChain(prefix); err != nil {
t.Fatalf("pro-fork: failed to import chain prefix: %v", err)
@@ -60,7 +62,9 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a pro-fork block, and try to feed into the no-fork chain
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
+
bc, _ := NewBlockChain(db, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
for j := 0; j < len(blocks)/2; j++ {
@@ -81,7 +85,9 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a no-fork block, and try to feed into the pro-fork chain
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
+
bc, _ = NewBlockChain(db, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
for j := 0; j < len(blocks)/2; j++ {
@@ -103,7 +109,9 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
+
bc, _ := NewBlockChain(db, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
for j := 0; j < len(blocks)/2; j++ {
@@ -119,7 +127,9 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
+
bc, _ = NewBlockChain(db, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
for j := 0; j < len(blocks)/2; j++ {
diff --git a/core/filter_test.go b/core/filter_test.go
deleted file mode 100644
index 58e71e305..000000000
--- a/core/filter_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
diff --git a/core/gen_genesis.go b/core/gen_genesis.go
index 1f3b4a8aa..4d75704a6 100644
--- a/core/gen_genesis.go
+++ b/core/gen_genesis.go
@@ -13,8 +13,6 @@ import (
"github.com/ethereum/go-ethereum/params"
)
-var _ = (*genesisSpecMarshaling)(nil)
-
func (g Genesis) MarshalJSON() ([]byte, error) {
type Genesis struct {
Config *params.ChainConfig `json:"config"`
@@ -26,7 +24,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
- Number uint64 `json:"number"`
+ Number math.HexOrDecimal64 `json:"number"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
ParentHash common.Hash `json:"parentHash"`
}
@@ -45,7 +43,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.Alloc[common.UnprefixedAddress(k)] = v
}
}
- enc.Number = g.Number
+ enc.Number = math.HexOrDecimal64(g.Number)
enc.GasUsed = math.HexOrDecimal64(g.GasUsed)
enc.ParentHash = g.ParentHash
return json.Marshal(&enc)
@@ -62,7 +60,7 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
Mixhash *common.Hash `json:"mixHash"`
Coinbase *common.Address `json:"coinbase"`
Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
- Number *uint64 `json:"number"`
+ Number *math.HexOrDecimal64 `json:"number"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
ParentHash *common.Hash `json:"parentHash"`
}
@@ -104,7 +102,7 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
g.Alloc[common.Address(k)] = v
}
if dec.Number != nil {
- g.Number = *dec.Number
+ g.Number = uint64(*dec.Number)
}
if dec.GasUsed != nil {
g.GasUsed = uint64(*dec.GasUsed)
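
To make the gencodec change above concrete: the Number field now round-trips through JSON as a hex quantity rather than a bare decimal. A minimal sketch (the helper name exampleGenesisNumberJSON is invented for illustration):

package core

import (
	"encoding/json"
	"fmt"
)

// exampleGenesisNumberJSON marshals a Genesis with a non-zero block number and
// prints the result; the "number" field is emitted as a hex string (e.g. "0x20"
// for 32) because it is now encoded via math.HexOrDecimal64.
func exampleGenesisNumberJSON() {
	g := Genesis{Number: 32}
	out, _ := json.Marshal(&g)
	fmt.Println(string(out)) // contains ... "number":"0x20" ...
}
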
diff --git a/core/genesis.go b/core/genesis.go
index a507d522b..fd6ed6115 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -92,6 +92,7 @@ type genesisSpecMarshaling struct {
ExtraData hexutil.Bytes
GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64
+ Number math.HexOrDecimal64
Difficulty *math.HexOrDecimal256
Alloc map[common.UnprefixedAddress]GenesisAccount
}
diff --git a/core/genesis_test.go b/core/genesis_test.go
index bc82fe54e..8b193759f 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -120,6 +120,8 @@ func TestSetupGenesis(t *testing.T) {
// Advance to block #4, past the homestead transition block of customg.
genesis := oldcustomg.MustCommit(db)
bc, _ := NewBlockChain(db, oldcustomg.Config, ethash.NewFullFaker(), new(event.TypeMux), vm.Config{})
+ defer bc.Stop()
+
bc.SetValidator(bproc{})
bc.InsertChain(makeBlockChainWithDiff(genesis, []int{2, 3, 4, 5}, 0))
bc.CurrentBlock()
diff --git a/core/state_processor.go b/core/state_processor.go
index 90f5a4f60..4489cfce2 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -104,11 +104,17 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
}
// Update the state with pending changes
+ var root []byte
+ if config.IsMetropolis(header.Number) {
+ statedb.Finalise()
+ } else {
+ root = statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes()
+ }
usedGas.Add(usedGas, gas)
+
// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx
// based on the eip phase, we're passing whether the root touch-delete accounts.
- root := statedb.IntermediateRoot(config.IsEIP158(header.Number))
- receipt := types.NewReceipt(root.Bytes(), usedGas)
+ receipt := types.NewReceipt(root, usedGas)
receipt.TxHash = tx.Hash()
receipt.GasUsed = new(big.Int).Set(gas)
// if the transaction created a contract, store the creation address in the receipt.
diff --git a/core/tx_journal.go b/core/tx_journal.go
new file mode 100644
index 000000000..94a9ff9b8
--- /dev/null
+++ b/core/tx_journal.go
@@ -0,0 +1,150 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "errors"
+ "io"
+ "os"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// errNoActiveJournal is returned if a transaction is attempted to be inserted
+// into the journal, but no such file is currently open.
+var errNoActiveJournal = errors.New("no active journal")
+
+// txJournal is a rotating log of transactions with the aim of storing locally
+// created transactions to allow non-executed ones to survive node restarts.
+type txJournal struct {
+ path string // Filesystem path to store the transactions at
+ writer io.WriteCloser // Output stream to write new transactions into
+}
+
+// newTxJournal creates a new transaction journal to store transactions at the given filesystem path.
+func newTxJournal(path string) *txJournal {
+ return &txJournal{
+ path: path,
+ }
+}
+
+// load parses a transaction journal dump from disk, loading its contents into
+// the specified pool.
+func (journal *txJournal) load(add func(*types.Transaction) error) error {
+ // Skip the parsing if the journal file doesn't exist at all
+ if _, err := os.Stat(journal.path); os.IsNotExist(err) {
+ return nil
+ }
+ // Open the journal for loading any past transactions
+ input, err := os.Open(journal.path)
+ if err != nil {
+ return err
+ }
+ defer input.Close()
+
+ // Inject all transactions from the journal into the pool
+ stream := rlp.NewStream(input, 0)
+ total, dropped := 0, 0
+
+ var failure error
+ for {
+ // Parse the next transaction and terminate on error
+ tx := new(types.Transaction)
+ if err = stream.Decode(tx); err != nil {
+ if err != io.EOF {
+ failure = err
+ }
+ break
+ }
+ // Import the transaction and bump the appropriate progress counters
+ total++
+ if err = add(tx); err != nil {
+ log.Debug("Failed to add journaled transaction", "err", err)
+ dropped++
+ continue
+ }
+ }
+ log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped)
+
+ return failure
+}
+
+// insert adds the specified transaction to the local disk journal.
+func (journal *txJournal) insert(tx *types.Transaction) error {
+ if journal.writer == nil {
+ return errNoActiveJournal
+ }
+ if err := rlp.Encode(journal.writer, tx); err != nil {
+ return err
+ }
+ return nil
+}
+
+// rotate regenerates the transaction journal based on the current contents of
+// the transaction pool.
+func (journal *txJournal) rotate(all map[common.Address]types.Transactions) error {
+ // Close the current journal (if any is open)
+ if journal.writer != nil {
+ if err := journal.writer.Close(); err != nil {
+ return err
+ }
+ journal.writer = nil
+ }
+ // Generate a new journal with the contents of the current pool
+ replacement, err := os.OpenFile(journal.path+".new", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
+ if err != nil {
+ return err
+ }
+ journaled := 0
+ for _, txs := range all {
+ for _, tx := range txs {
+ if err = rlp.Encode(replacement, tx); err != nil {
+ replacement.Close()
+ return err
+ }
+ }
+ journaled += len(txs)
+ }
+ replacement.Close()
+
+ // Replace the live journal with the newly generated one
+ if err = os.Rename(journal.path+".new", journal.path); err != nil {
+ return err
+ }
+ sink, err := os.OpenFile(journal.path, os.O_WRONLY|os.O_APPEND, 0755)
+ if err != nil {
+ return err
+ }
+ journal.writer = sink
+ log.Info("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all))
+
+ return nil
+}
+
+// close flushes the transaction journal contents to disk and closes the file.
+func (journal *txJournal) close() error {
+ var err error
+
+ if journal.writer != nil {
+ err = journal.writer.Close()
+ journal.writer = nil
+ }
+ return err
+}
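
The tx_pool.go changes that follow drive this type in a fixed order: load on startup, rotate to compact the file and open the writer (insert returns errNoActiveJournal before that), insert for each accepted local transaction, and close on shutdown. A condensed sketch of that lifecycle, with the helper name and its parameters invented for illustration:

package core

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

// runJournal shows the load -> rotate -> insert -> close order that the pool
// uses below; the function and its parameters are illustrative only.
func runJournal(path string, addLocal func(*types.Transaction) error, local func() map[common.Address]types.Transactions, incoming <-chan *types.Transaction) {
	journal := newTxJournal(path)

	// Replay previously journaled transactions into the pool (best effort).
	if err := journal.load(addLocal); err != nil {
		log.Warn("Failed to load transaction journal", "err", err)
	}
	// Rewrite the journal from the pool's surviving local transactions; this
	// also opens the writer that insert appends to.
	if err := journal.rotate(local()); err != nil {
		log.Warn("Failed to rotate transaction journal", "err", err)
	}
	// Append newly accepted local transactions as they arrive.
	for tx := range incoming {
		if err := journal.insert(tx); err != nil {
			log.Warn("Failed to journal local transaction", "err", err)
		}
	}
	// Flush and release the journal file on shutdown.
	journal.close()
}
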
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 8e2d1b31d..b0c251f92 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -99,7 +99,9 @@ type stateFn func() (*state.StateDB, error)
// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
- NoLocals bool // Whether local transaction handling should be disabled
+ NoLocals bool // Whether local transaction handling should be disabled
+ Journal string // Journal of local transactions to survive node restarts
+ Rejournal time.Duration // Time interval to regenerate the local transaction journal
PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
@@ -115,6 +117,9 @@ type TxPoolConfig struct {
// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
+ Journal: "transactions.rlp",
+ Rejournal: time.Hour,
+
PriceLimit: 1,
PriceBump: 10,
@@ -130,6 +135,10 @@ var DefaultTxPoolConfig = TxPoolConfig{
// unreasonable or unworkable.
func (config *TxPoolConfig) sanitize() TxPoolConfig {
conf := *config
+ if conf.Rejournal < time.Second {
+ log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
+ conf.Rejournal = time.Second
+ }
if conf.PriceLimit < 1 {
log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
@@ -157,18 +166,19 @@ type TxPool struct {
gasPrice *big.Int
eventMux *event.TypeMux
events *event.TypeMuxSubscription
- locals *accountSet
signer types.Signer
mu sync.RWMutex
+ locals *accountSet // Set of local transactions to exempt from eviction rules
+ journal *txJournal // Journal of local transactions to back up to disk
+
pending map[common.Address]*txList // All currently processable transactions
queue map[common.Address]*txList // Queued but non-processable transactions
beats map[common.Address]time.Time // Last heartbeat from each known account
all map[common.Hash]*types.Transaction // All transactions to allow lookups
priced *txPricedList // All transactions sorted by price
- wg sync.WaitGroup // for shutdown sync
- quit chan struct{}
+ wg sync.WaitGroup // for shutdown sync
homestead bool
}
@@ -194,32 +204,48 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, eventMux *e
gasPrice: new(big.Int).SetUint64(config.PriceLimit),
pendingState: nil,
events: eventMux.Subscribe(ChainHeadEvent{}, RemovedTransactionEvent{}),
- quit: make(chan struct{}),
}
pool.locals = newAccountSet(pool.signer)
pool.priced = newTxPricedList(&pool.all)
- pool.resetState()
+ pool.reset()
- // Start the various events loops and return
- pool.wg.Add(2)
- go pool.eventLoop()
- go pool.expirationLoop()
+ // If local transactions and journaling is enabled, load from disk
+ if !config.NoLocals && config.Journal != "" {
+ pool.journal = newTxJournal(config.Journal)
+
+ if err := pool.journal.load(pool.AddLocal); err != nil {
+ log.Warn("Failed to load transaction journal", "err", err)
+ }
+ if err := pool.journal.rotate(pool.local()); err != nil {
+ log.Warn("Failed to rotate transaction journal", "err", err)
+ }
+ }
+ // Start the event loop and return
+ pool.wg.Add(1)
+ go pool.loop()
return pool
}
-func (pool *TxPool) eventLoop() {
+// loop is the transaction pool's main event loop, waiting for and reacting to
+// outside blockchain events as well as for various reporting and transaction
+// eviction events.
+func (pool *TxPool) loop() {
defer pool.wg.Done()
- // Start a ticker and keep track of interesting pool stats to report
+ // Start the stats reporting and transaction eviction tickers
var prevPending, prevQueued, prevStales int
report := time.NewTicker(statsReportInterval)
defer report.Stop()
- // Track chain events. When a chain events occurs (new chain canon block)
- // we need to know the new state. The new state will help us determine
- // the nonces in the managed state
+ evict := time.NewTicker(evictionInterval)
+ defer evict.Stop()
+
+ journal := time.NewTicker(pool.config.Rejournal)
+ defer journal.Stop()
+
+ // Keep waiting for and reacting to the various events
for {
select {
// Handle any events fired by the system
@@ -235,7 +261,7 @@ func (pool *TxPool) eventLoop() {
pool.homestead = true
}
}
- pool.resetState()
+ pool.reset()
pool.mu.Unlock()
case RemovedTransactionEvent:
@@ -253,11 +279,49 @@ func (pool *TxPool) eventLoop() {
log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
prevPending, prevQueued, prevStales = pending, queued, stales
}
+
+ // Handle inactive account transaction eviction
+ case <-evict.C:
+ pool.mu.Lock()
+ for addr := range pool.queue {
+ // Skip local transactions from the eviction mechanism
+ if pool.locals.contains(addr) {
+ continue
+ }
+ // Any non-locals old enough should be removed
+ if time.Since(pool.beats[addr]) > pool.config.Lifetime {
+ for _, tx := range pool.queue[addr].Flatten() {
+ pool.removeTx(tx.Hash())
+ }
+ }
+ }
+ pool.mu.Unlock()
+
+ // Handle local transaction journal rotation
+ case <-journal.C:
+ if pool.journal != nil {
+ pool.mu.Lock()
+ if err := pool.journal.rotate(pool.local()); err != nil {
+ log.Warn("Failed to rotate local tx journal", "err", err)
+ }
+ pool.mu.Unlock()
+ }
}
}
}
-func (pool *TxPool) resetState() {
+// lockedReset is a wrapper around reset to allow calling it in a thread safe
+// manner. This method is only ever used in the tester!
+func (pool *TxPool) lockedReset() {
+ pool.mu.Lock()
+ defer pool.mu.Unlock()
+
+ pool.reset()
+}
+
+// reset retrieves the current state of the blockchain and ensures the content
+// of the transaction pool is valid with regard to the chain state.
+func (pool *TxPool) reset() {
currentState, err := pool.currentState()
if err != nil {
log.Error("Failed reset txpool state", "err", err)
@@ -284,9 +348,11 @@ func (pool *TxPool) resetState() {
// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
pool.events.Unsubscribe()
- close(pool.quit)
pool.wg.Wait()
+ if pool.journal != nil {
+ pool.journal.close()
+ }
log.Info("Transaction pool stopped")
}
@@ -373,6 +439,22 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
return pending, nil
}
+// local retrieves all currently known local transactions, grouped by origin
+// account and sorted by nonce. The returned transaction set is a copy and can be
+// freely modified by calling code.
+func (pool *TxPool) local() map[common.Address]types.Transactions {
+ txs := make(map[common.Address]types.Transactions)
+ for addr := range pool.locals.accounts {
+ if pending := pool.pending[addr]; pending != nil {
+ txs[addr] = append(txs[addr], pending.Flatten()...)
+ }
+ if queued := pool.queue[addr]; queued != nil {
+ txs[addr] = append(txs[addr], queued.Flatten()...)
+ }
+ }
+ return txs
+}
+
// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
@@ -473,18 +555,22 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) {
}
pool.all[tx.Hash()] = tx
pool.priced.Put(tx)
+ pool.journalTx(from, tx)
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
return old != nil, nil
}
- // New transaction isn't replacing a pending one, push into queue and potentially mark local
+ // New transaction isn't replacing a pending one, push into queue
replace, err := pool.enqueueTx(hash, tx)
if err != nil {
return false, err
}
+ // Mark local addresses and journal local transactions
if local {
pool.locals.add(from)
}
+ pool.journalTx(from, tx)
+
log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
return replace, nil
}
@@ -515,6 +601,18 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er
return old != nil, nil
}
+// journalTx adds the specified transaction to the local disk journal if it is
+// deemed to have been sent from a local account.
+func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
+ // Only journal if it's enabled and the transaction is local
+ if pool.journal == nil || !pool.locals.contains(from) {
+ return
+ }
+ if err := pool.journal.insert(tx); err != nil {
+ log.Warn("Failed to journal local transaction", "err", err)
+ }
+}
+
// promoteTx adds a transaction to the pending (processable) list of transactions.
//
// Note, this method assumes the pool lock is held!
@@ -910,39 +1008,6 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
}
}
-// expirationLoop is a loop that periodically iterates over all accounts with
-// queued transactions and drop all that have been inactive for a prolonged amount
-// of time.
-func (pool *TxPool) expirationLoop() {
- defer pool.wg.Done()
-
- evict := time.NewTicker(evictionInterval)
- defer evict.Stop()
-
- for {
- select {
- case <-evict.C:
- pool.mu.Lock()
- for addr := range pool.queue {
- // Skip local transactions from the eviction mechanism
- if pool.locals.contains(addr) {
- continue
- }
- // Any non-locals old enough should be removed
- if time.Since(pool.beats[addr]) > pool.config.Lifetime {
- for _, tx := range pool.queue[addr].Flatten() {
- pool.removeTx(tx.Hash())
- }
- }
- }
- pool.mu.Unlock()
-
- case <-pool.quit:
- return
- }
- }
-}
-
// addressByHeartbeat is an account address tagged with its last activity timestamp.
type addressByHeartbeat struct {
address common.Address
@@ -955,7 +1020,7 @@ func (a addresssByHeartbeat) Len() int { return len(a) }
func (a addresssByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
func (a addresssByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-// accountSet is simply a set of addresses to check for existance, and a signer
+// accountSet is simply a set of addresses to check for existence, and a signer
// capable of deriving addresses from transactions.
type accountSet struct {
accounts map[common.Address]struct{}
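The accountSet introduced above is the piece that decides which senders count as local. A minimal self-contained sketch of the same set-of-addresses pattern (addrSet and its methods are illustrative names, not the exact geth implementation):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// addrSet is a bare-bones set of addresses with existence checks.
type addrSet struct {
	accounts map[common.Address]struct{}
}

func newAddrSet() *addrSet {
	return &addrSet{accounts: make(map[common.Address]struct{})}
}

// contains reports whether the address is already tracked.
func (s *addrSet) contains(addr common.Address) bool {
	_, ok := s.accounts[addr]
	return ok
}

// add inserts the address into the set.
func (s *addrSet) add(addr common.Address) {
	s.accounts[addr] = struct{}{}
}

func main() {
	set := newAddrSet()
	addr := common.HexToAddress("0x0000000000000000000000000000000000000001")
	set.add(addr)
	fmt.Println(set.contains(addr)) // true
}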
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index 03ece3886..fcb330051 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -19,8 +19,10 @@ package core
import (
"crypto/ecdsa"
"fmt"
+ "io/ioutil"
"math/big"
"math/rand"
+ "os"
"testing"
"time"
@@ -33,6 +35,15 @@ import (
"github.com/ethereum/go-ethereum/params"
)
+// testTxPoolConfig is a transaction pool configuration without stateful disk
+// side effects used during testing.
+var testTxPoolConfig TxPoolConfig
+
+func init() {
+ testTxPoolConfig = DefaultTxPoolConfig
+ testTxPoolConfig.Journal = ""
+}
+
func transaction(nonce uint64, gaslimit *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
return pricedTransaction(nonce, gaslimit, big.NewInt(1), key)
}
@@ -47,8 +58,7 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
key, _ := crypto.GenerateKey()
- pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
- pool.resetState()
+ pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
return pool, key
}
@@ -125,9 +135,8 @@ func TestStateChangeDuringPoolReset(t *testing.T) {
gasLimitFunc := func() *big.Int { return big.NewInt(1000000000) }
- pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, mux, stateFunc, gasLimitFunc)
+ pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, mux, stateFunc, gasLimitFunc)
defer pool.Stop()
- pool.resetState()
nonce := pool.State().GetNonce(address)
if nonce != 0 {
@@ -144,7 +153,7 @@ func TestStateChangeDuringPoolReset(t *testing.T) {
// trigger state change in the background
trigger = true
- pool.resetState()
+ pool.lockedReset()
pendingTx, err := pool.Pending()
if err != nil {
@@ -204,7 +213,7 @@ func TestTransactionQueue(t *testing.T) {
from, _ := deriveSender(tx)
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1000))
- pool.resetState()
+ pool.lockedReset()
pool.enqueueTx(tx.Hash(), tx)
pool.promoteExecutables(currentState, []common.Address{from})
@@ -226,13 +235,15 @@ func TestTransactionQueue(t *testing.T) {
}
pool, key = setupTxPool()
+ defer pool.Stop()
+
tx1 := transaction(0, big.NewInt(100), key)
tx2 := transaction(10, big.NewInt(100), key)
tx3 := transaction(11, big.NewInt(100), key)
from, _ = deriveSender(tx1)
currentState, _ = pool.currentState()
currentState.AddBalance(from, big.NewInt(1000))
- pool.resetState()
+ pool.lockedReset()
pool.enqueueTx(tx1.Hash(), tx1)
pool.enqueueTx(tx2.Hash(), tx2)
@@ -303,7 +314,7 @@ func TestTransactionChainFork(t *testing.T) {
pool.currentState = func() (*state.StateDB, error) { return statedb, nil }
currentState, _ := pool.currentState()
currentState.AddBalance(addr, big.NewInt(100000000000000))
- pool.resetState()
+ pool.lockedReset()
}
resetState()
@@ -331,7 +342,7 @@ func TestTransactionDoubleNonce(t *testing.T) {
pool.currentState = func() (*state.StateDB, error) { return statedb, nil }
currentState, _ := pool.currentState()
currentState.AddBalance(addr, big.NewInt(100000000000000))
- pool.resetState()
+ pool.lockedReset()
}
resetState()
@@ -401,14 +412,14 @@ func TestNonceRecovery(t *testing.T) {
currentState, _ := pool.currentState()
currentState.SetNonce(addr, n)
currentState.AddBalance(addr, big.NewInt(100000000000000))
- pool.resetState()
+ pool.lockedReset()
tx := transaction(n, big.NewInt(100000), key)
if err := pool.AddRemote(tx); err != nil {
t.Error(err)
}
// simulate some weird re-order of transactions and missing nonce(s)
currentState.SetNonce(addr, n-1)
- pool.resetState()
+ pool.lockedReset()
if fn := pool.pendingState.GetNonce(addr); fn != n+1 {
t.Errorf("expected nonce to be %d, got %d", n+1, fn)
}
@@ -422,7 +433,7 @@ func TestRemovedTxEvent(t *testing.T) {
from, _ := deriveSender(tx)
currentState, _ := pool.currentState()
currentState.AddBalance(from, big.NewInt(1000000000000))
- pool.resetState()
+ pool.lockedReset()
pool.eventMux.Post(RemovedTransactionEvent{types.Transactions{tx}})
pool.eventMux.Post(ChainHeadEvent{nil})
if pool.pending[from].Len() != 1 {
@@ -471,7 +482,7 @@ func TestTransactionDropping(t *testing.T) {
if len(pool.all) != 6 {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 6)
}
- pool.resetState()
+ pool.lockedReset()
if pool.pending[account].Len() != 3 {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
}
@@ -483,7 +494,7 @@ func TestTransactionDropping(t *testing.T) {
}
// Reduce the balance of the account, and check that invalidated transactions are dropped
state.AddBalance(account, big.NewInt(-650))
- pool.resetState()
+ pool.lockedReset()
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
t.Errorf("funded pending transaction missing: %v", tx0)
@@ -508,7 +519,7 @@ func TestTransactionDropping(t *testing.T) {
}
// Reduce the block gas limit, check that invalidated transactions are dropped
pool.gasLimit = func() *big.Int { return big.NewInt(100) }
- pool.resetState()
+ pool.lockedReset()
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
t.Errorf("funded pending transaction missing: %v", tx0)
@@ -562,7 +573,7 @@ func TestTransactionPostponing(t *testing.T) {
if len(pool.all) != len(txns) {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txns))
}
- pool.resetState()
+ pool.lockedReset()
if pool.pending[account].Len() != len(txns) {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), len(txns))
}
@@ -574,7 +585,7 @@ func TestTransactionPostponing(t *testing.T) {
}
// Reduce the balance of the account, and check that transactions are reorganised
state.AddBalance(account, big.NewInt(-750))
- pool.resetState()
+ pool.lockedReset()
if _, ok := pool.pending[account].txs.items[txns[0].Nonce()]; !ok {
t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txns[0])
@@ -615,28 +626,28 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
state, _ := pool.currentState()
state.AddBalance(account, big.NewInt(1000000))
- pool.resetState()
+ pool.lockedReset()
// Keep queuing up transactions and make sure all above a limit are dropped
- for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue+5; i++ {
+ for i := uint64(1); i <= testTxPoolConfig.AccountQueue+5; i++ {
if err := pool.AddRemote(transaction(i, big.NewInt(100000), key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
if len(pool.pending) != 0 {
t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0)
}
- if i <= DefaultTxPoolConfig.AccountQueue {
+ if i <= testTxPoolConfig.AccountQueue {
if pool.queue[account].Len() != int(i) {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i)
}
} else {
- if pool.queue[account].Len() != int(DefaultTxPoolConfig.AccountQueue) {
- t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), DefaultTxPoolConfig.AccountQueue)
+ if pool.queue[account].Len() != int(testTxPoolConfig.AccountQueue) {
+ t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), testTxPoolConfig.AccountQueue)
}
}
}
- if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue) {
- t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue)
+ if len(pool.all) != int(testTxPoolConfig.AccountQueue) {
+ t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), testTxPoolConfig.AccountQueue)
}
}
@@ -657,13 +668,12 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
- config := DefaultTxPoolConfig
+ config := testTxPoolConfig
config.NoLocals = nolocals
config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)
pool := NewTxPool(config, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
defer pool.Stop()
- pool.resetState()
// Create a number of test accounts and fund them (last one will be the local)
state, _ := pool.currentState()
@@ -742,19 +752,18 @@ func TestTransactionQueueTimeLimitingNoLocals(t *testing.T) { testTransactionQue
func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
// Reduce the eviction interval to a testable amount
defer func(old time.Duration) { evictionInterval = old }(evictionInterval)
- evictionInterval = 250 * time.Millisecond
+ evictionInterval = time.Second
// Create the pool to test the non-expiration enforcement
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
- config := DefaultTxPoolConfig
- config.Lifetime = 250 * time.Millisecond
+ config := testTxPoolConfig
+ config.Lifetime = time.Second
config.NoLocals = nolocals
pool := NewTxPool(config, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
defer pool.Stop()
- pool.resetState()
// Create two test accounts to ensure remotes expire but locals do not
local, _ := crypto.GenerateKey()
@@ -771,7 +780,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
if err := pool.AddRemote(pricedTransaction(1, big.NewInt(100000), big.NewInt(1), remote)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err)
}
- pending, queued := pool.stats()
+ pending, queued := pool.Stats()
if pending != 0 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
}
@@ -784,7 +793,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
// Wait a bit for eviction to run and clean up any leftovers, and ensure only the local remains
time.Sleep(2 * config.Lifetime)
- pending, queued = pool.stats()
+ pending, queued = pool.Stats()
if pending != 0 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
}
@@ -814,10 +823,10 @@ func TestTransactionPendingLimiting(t *testing.T) {
state, _ := pool.currentState()
state.AddBalance(account, big.NewInt(1000000))
- pool.resetState()
+ pool.lockedReset()
// Keep queuing up transactions and make sure all above a limit are dropped
- for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
+ for i := uint64(0); i < testTxPoolConfig.AccountQueue+5; i++ {
if err := pool.AddRemote(transaction(i, big.NewInt(100000), key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
@@ -828,8 +837,8 @@ func TestTransactionPendingLimiting(t *testing.T) {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0)
}
}
- if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue+5) {
- t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue+5)
+ if len(pool.all) != int(testTxPoolConfig.AccountQueue+5) {
+ t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), testTxPoolConfig.AccountQueue+5)
}
}
@@ -841,23 +850,27 @@ func TestTransactionPendingLimitingEquivalency(t *testing.T) { testTransactionLi
func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
// Add a batch of transactions to a pool one by one
pool1, key1 := setupTxPool()
+ defer pool1.Stop()
+
account1, _ := deriveSender(transaction(0, big.NewInt(0), key1))
state1, _ := pool1.currentState()
state1.AddBalance(account1, big.NewInt(1000000))
- for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
+ for i := uint64(0); i < testTxPoolConfig.AccountQueue+5; i++ {
if err := pool1.AddRemote(transaction(origin+i, big.NewInt(100000), key1)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
}
// Add a batch of transactions to a pool in one big batch
pool2, key2 := setupTxPool()
+ defer pool2.Stop()
+
account2, _ := deriveSender(transaction(0, big.NewInt(0), key2))
state2, _ := pool2.currentState()
state2.AddBalance(account2, big.NewInt(1000000))
txns := []*types.Transaction{}
- for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
+ for i := uint64(0); i < testTxPoolConfig.AccountQueue+5; i++ {
txns = append(txns, transaction(origin+i, big.NewInt(100000), key2))
}
pool2.AddRemotes(txns)
@@ -888,12 +901,11 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
- config := DefaultTxPoolConfig
+ config := testTxPoolConfig
config.GlobalSlots = config.AccountSlots * 10
pool := NewTxPool(config, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
defer pool.Stop()
- pool.resetState()
// Create a number of test accounts and fund them
state, _ := pool.currentState()
@@ -935,14 +947,13 @@ func TestTransactionCapClearsFromAll(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
- config := DefaultTxPoolConfig
+ config := testTxPoolConfig
config.AccountSlots = 2
config.AccountQueue = 2
config.GlobalSlots = 8
pool := NewTxPool(config, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
defer pool.Stop()
- pool.resetState()
// Create a number of test accounts and fund them
state, _ := pool.currentState()
@@ -970,12 +981,11 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
- config := DefaultTxPoolConfig
+ config := testTxPoolConfig
config.GlobalSlots = 0
pool := NewTxPool(config, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
defer pool.Stop()
- pool.resetState()
// Create a number of test accounts and fund them
state, _ := pool.currentState()
@@ -1019,9 +1029,8 @@ func TestTransactionPoolRepricing(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
- pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+ pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
defer pool.Stop()
- pool.resetState()
// Create a number of test accounts and fund them
state, _ := pool.currentState()
@@ -1048,7 +1057,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
pool.AddRemotes(txs)
pool.AddLocal(ltx)
- pending, queued := pool.stats()
+ pending, queued := pool.Stats()
if pending != 4 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4)
}
@@ -1061,7 +1070,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
// Reprice the pool and check that underpriced transactions get dropped
pool.SetGasPrice(big.NewInt(2))
- pending, queued = pool.stats()
+ pending, queued = pool.Stats()
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
@@ -1086,7 +1095,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
if err := pool.AddLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
- if pending, _ = pool.stats(); pending != 3 {
+ if pending, _ = pool.Stats(); pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
if err := validateTxPoolInternals(pool); err != nil {
@@ -1104,13 +1113,12 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
- config := DefaultTxPoolConfig
+ config := testTxPoolConfig
config.GlobalSlots = 2
config.GlobalQueue = 2
pool := NewTxPool(config, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
defer pool.Stop()
- pool.resetState()
// Create a number of test accounts and fund them
state, _ := pool.currentState()
@@ -1134,7 +1142,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
pool.AddRemotes(txs)
pool.AddLocal(ltx)
- pending, queued := pool.stats()
+ pending, queued := pool.Stats()
if pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
@@ -1158,7 +1166,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
if err := pool.AddRemote(pricedTransaction(3, big.NewInt(100000), big.NewInt(5), keys[1])); err != nil {
t.Fatalf("failed to add well priced transaction: %v", err)
}
- pending, queued = pool.stats()
+ pending, queued = pool.Stats()
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
@@ -1173,7 +1181,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
if err := pool.AddLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
- pending, queued = pool.stats()
+ pending, queued = pool.Stats()
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
@@ -1192,9 +1200,8 @@ func TestTransactionReplacement(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
- pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+ pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
defer pool.Stop()
- pool.resetState()
// Create a test account to add transactions with
key, _ := crypto.GenerateKey()
@@ -1204,7 +1211,7 @@ func TestTransactionReplacement(t *testing.T) {
// Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
price := int64(100)
- threshold := (price * (100 + int64(DefaultTxPoolConfig.PriceBump))) / 100
+ threshold := (price * (100 + int64(testTxPoolConfig.PriceBump))) / 100
if err := pool.AddRemote(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), key)); err != nil {
t.Fatalf("failed to add original cheap pending transaction: %v", err)
@@ -1250,6 +1257,114 @@ func TestTransactionReplacement(t *testing.T) {
}
}
+// Tests that local transactions are journaled to disk, but remote transactions
+// get discarded between restarts.
+func TestTransactionJournaling(t *testing.T) { testTransactionJournaling(t, false) }
+func TestTransactionJournalingNoLocals(t *testing.T) { testTransactionJournaling(t, true) }
+
+func testTransactionJournaling(t *testing.T, nolocals bool) {
+ // Create a temporary file for the journal
+ file, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("failed to create temporary journal: %v", err)
+ }
+ journal := file.Name()
+ defer os.Remove(journal)
+
+ // Clean up the temporary file, we only need the path for now
+ file.Close()
+ os.Remove(journal)
+
+	// Create the original pool to inject transactions into the journal
+ db, _ := ethdb.NewMemDatabase()
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
+
+ config := testTxPoolConfig
+ config.NoLocals = nolocals
+ config.Journal = journal
+ config.Rejournal = time.Second
+
+ pool := NewTxPool(config, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+
+	// Create two test accounts to ensure remotes are discarded but locals survive restarts
+ local, _ := crypto.GenerateKey()
+ remote, _ := crypto.GenerateKey()
+
+ statedb, _ = pool.currentState()
+ statedb.AddBalance(crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000))
+
+	// Add three local and one remote transaction and ensure they are queued up
+ if err := pool.AddLocal(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), local)); err != nil {
+ t.Fatalf("failed to add local transaction: %v", err)
+ }
+ if err := pool.AddLocal(pricedTransaction(1, big.NewInt(100000), big.NewInt(1), local)); err != nil {
+ t.Fatalf("failed to add local transaction: %v", err)
+ }
+ if err := pool.AddLocal(pricedTransaction(2, big.NewInt(100000), big.NewInt(1), local)); err != nil {
+ t.Fatalf("failed to add local transaction: %v", err)
+ }
+ if err := pool.AddRemote(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), remote)); err != nil {
+ t.Fatalf("failed to add remote transaction: %v", err)
+ }
+ pending, queued := pool.Stats()
+ if pending != 4 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4)
+ }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if err := validateTxPoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+	// Terminate the old pool, bump the local nonce, create a new pool and ensure the relevant transactions survive
+ pool.Stop()
+ statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
+ pool = NewTxPool(config, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+
+ pending, queued = pool.Stats()
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if nolocals {
+ if pending != 0 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
+ }
+ } else {
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ }
+ if err := validateTxPoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Bump the nonce temporarily and ensure the newly invalidated transaction is removed
+ statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
+ pool.lockedReset()
+ time.Sleep(2 * config.Rejournal)
+ pool.Stop()
+ statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
+ pool = NewTxPool(config, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+
+ pending, queued = pool.Stats()
+ if pending != 0 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
+ }
+ if nolocals {
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ } else {
+ if queued != 1 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+ }
+ }
+ if err := validateTxPoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ pool.Stop()
+}
+
// Benchmarks the speed of validating the contents of the pending queue of the
// transaction pool.
func BenchmarkPendingDemotion100(b *testing.B) { benchmarkPendingDemotion(b, 100) }
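The new journaling test above exercises the knobs that control on-disk persistence of local transactions. A configuration sketch mirroring what the test sets up; the journal path is a hypothetical example, and stateFn/gasLimitFn stand for the state and gas-limit callbacks the tests pass to NewTxPool:

	config := core.DefaultTxPoolConfig
	config.Journal = "transactions.rlp" // hypothetical path for the local-transaction journal
	config.Rejournal = time.Hour        // how often the journal is regenerated
	config.NoLocals = false             // keep the local-sender exemptions enabled

	pool := core.NewTxPool(config, params.TestChainConfig, new(event.TypeMux), stateFn, gasLimitFn)
	defer pool.Stop() // Stop closes the journal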
diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go
index edbd64ba4..eb2e5d42b 100644
--- a/core/types/gen_receipt_json.go
+++ b/core/types/gen_receipt_json.go
@@ -13,7 +13,7 @@ import (
func (r Receipt) MarshalJSON() ([]byte, error) {
type Receipt struct {
- PostState hexutil.Bytes `json:"root" gencodec:"required"`
+ PostState hexutil.Bytes `json:"root"`
CumulativeGasUsed *hexutil.Big `json:"cumulativeGasUsed" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Logs []*Log `json:"logs" gencodec:"required"`
@@ -34,7 +34,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
func (r *Receipt) UnmarshalJSON(input []byte) error {
type Receipt struct {
- PostState hexutil.Bytes `json:"root" gencodec:"required"`
+ PostState hexutil.Bytes `json:"root"`
CumulativeGasUsed *hexutil.Big `json:"cumulativeGasUsed" gencodec:"required"`
Bloom *Bloom `json:"logsBloom" gencodec:"required"`
Logs []*Log `json:"logs" gencodec:"required"`
@@ -46,10 +46,9 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
- if dec.PostState == nil {
- return errors.New("missing required field 'root' for Receipt")
+ if dec.PostState != nil {
+ r.PostState = dec.PostState
}
- r.PostState = dec.PostState
if dec.CumulativeGasUsed == nil {
return errors.New("missing required field 'cumulativeGasUsed' for Receipt")
}
diff --git a/core/types/receipt.go b/core/types/receipt.go
index ef6f6a2bb..c9906b015 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -31,7 +31,7 @@ import (
// Receipt represents the results of a transaction.
type Receipt struct {
// Consensus fields
- PostState []byte `json:"root" gencodec:"required"`
+ PostState []byte `json:"root"`
CumulativeGasUsed *big.Int `json:"cumulativeGasUsed" gencodec:"required"`
Bloom Bloom `json:"logsBloom" gencodec:"required"`
Logs []*Log `json:"logs" gencodec:"required"`
@@ -48,35 +48,88 @@ type receiptMarshaling struct {
GasUsed *hexutil.Big
}
+// homesteadReceiptRLP contains the receipt's Homestead consensus fields, used
+// during RLP serialization.
+type homesteadReceiptRLP struct {
+ PostState []byte
+ CumulativeGasUsed *big.Int
+ Bloom Bloom
+ Logs []*Log
+}
+
+// metropolisReceiptRLP contains the receipt's Metropolis consensus fields, used
+// during RLP serialization.
+type metropolisReceiptRLP struct {
+ CumulativeGasUsed *big.Int
+ Bloom Bloom
+ Logs []*Log
+}
+
// NewReceipt creates a barebone transaction receipt, copying the init fields.
func NewReceipt(root []byte, cumulativeGasUsed *big.Int) *Receipt {
return &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: new(big.Int).Set(cumulativeGasUsed)}
}
// EncodeRLP implements rlp.Encoder, and flattens the consensus fields of a receipt
-// into an RLP stream.
+// into an RLP stream. If no post state is present, the Metropolis fork is assumed.
func (r *Receipt) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, []interface{}{r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs})
+ if r.PostState == nil {
+ return rlp.Encode(w, &metropolisReceiptRLP{r.CumulativeGasUsed, r.Bloom, r.Logs})
+ }
+ return rlp.Encode(w, &homesteadReceiptRLP{r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs})
}
// DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt
// from an RLP stream.
func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
- var receipt struct {
- PostState []byte
- CumulativeGasUsed *big.Int
- Bloom Bloom
- Logs []*Log
+ // Load the raw bytes since we have multiple possible formats
+ raw, err := s.Raw()
+ if err != nil {
+ return err
}
- if err := s.Decode(&receipt); err != nil {
+ list, _, err := rlp.SplitList(raw)
+ if err != nil {
return err
}
- r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs = receipt.PostState, receipt.CumulativeGasUsed, receipt.Bloom, receipt.Logs
- return nil
+ items, err := rlp.CountValues(list)
+ if err != nil {
+ return err
+ }
+ // Deserialize based on the number of content items
+ switch items {
+ case 3:
+ // Metropolis receipts have 3 components
+ var metro metropolisReceiptRLP
+ if err := rlp.DecodeBytes(raw, &metro); err != nil {
+ return err
+ }
+ r.CumulativeGasUsed = metro.CumulativeGasUsed
+ r.Bloom = metro.Bloom
+ r.Logs = metro.Logs
+ return nil
+
+ case 4:
+ // Homestead receipts have 4 components
+ var home homesteadReceiptRLP
+ if err := rlp.DecodeBytes(raw, &home); err != nil {
+ return err
+ }
+ r.PostState = home.PostState[:]
+ r.CumulativeGasUsed = home.CumulativeGasUsed
+ r.Bloom = home.Bloom
+ r.Logs = home.Logs
+ return nil
+
+ default:
+ return fmt.Errorf("invalid receipt components: %v", items)
+ }
}
// String implements the Stringer interface.
func (r *Receipt) String() string {
+ if r.PostState == nil {
+ return fmt.Sprintf("receipt{cgas=%v bloom=%x logs=%v}", r.CumulativeGasUsed, r.Bloom, r.Logs)
+ }
return fmt.Sprintf("receipt{med=%x cgas=%v bloom=%x logs=%v}", r.PostState, r.CumulativeGasUsed, r.Bloom, r.Logs)
}
diff --git a/core/vm/common.go b/core/vm/common.go
index 779cee006..17de38dec 100644
--- a/core/vm/common.go
+++ b/core/vm/common.go
@@ -34,7 +34,21 @@ func calcMemSize(off, l *big.Int) *big.Int {
// getData returns a slice from the data based on the start and size and pads
up to size with zeros. This function is overflow safe.
-func getData(data []byte, start, size *big.Int) []byte {
+func getData(data []byte, start uint64, size uint64) []byte {
+ length := uint64(len(data))
+ if start > length {
+ start = length
+ }
+ end := start + size
+ if end > length {
+ end = length
+ }
+ return common.RightPadBytes(data[start:end], int(size))
+}
+
+// getDataBig returns a slice from the data based on the start and size and pads
+// up to size with zeros. This function is overflow safe.
+func getDataBig(data []byte, start *big.Int, size *big.Int) []byte {
dlen := big.NewInt(int64(len(data)))
s := math.BigMin(start, dlen)
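The uint64 variant of getData above clamps the requested window to the available data and right-pads the result with zeros. A self-contained restatement of that clamp-then-pad pattern (padRead is an illustrative name; the extra wrap-around guard is an addition for standalone use):

package main

import "fmt"

// padRead returns size bytes starting at start, zero-padding anything that
// falls outside data instead of panicking on out-of-range reads.
func padRead(data []byte, start, size uint64) []byte {
	length := uint64(len(data))
	if start > length {
		start = length
	}
	end := start + size
	if end < start || end > length { // also guards against uint64 wrap-around
		end = length
	}
	out := make([]byte, size)
	copy(out, data[start:end])
	return out
}

func main() {
	fmt.Printf("%x\n", padRead([]byte{0xaa, 0xbb}, 1, 4)) // bb000000
}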
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 90b2f913e..790d42bbe 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -22,15 +22,14 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/crypto/bn256"
"github.com/ethereum/go-ethereum/params"
"golang.org/x/crypto/ripemd160"
)
-var errBadPrecompileInput = errors.New("bad pre compile input")
-
-// Precompiled contract is the basic interface for native Go contracts. The implementation
+// PrecompiledContract is the basic interface for native Go contracts. The implementation
// requires a deterministic gas count based on the input size of the Run method of the
// contract.
type PrecompiledContract interface {
@@ -38,52 +37,63 @@ type PrecompiledContract interface {
Run(input []byte) ([]byte, error) // Run runs the precompiled contract
}
-// PrecompiledContracts contains the default set of ethereum contracts
-var PrecompiledContracts = map[common.Address]PrecompiledContract{
+// PrecompiledContractsHomestead contains the default set of pre-compiled Ethereum
+// contracts used in the Frontier and Homestead releases.
+var PrecompiledContractsHomestead = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{1}): &ecrecover{},
common.BytesToAddress([]byte{2}): &sha256hash{},
common.BytesToAddress([]byte{3}): &ripemd160hash{},
common.BytesToAddress([]byte{4}): &dataCopy{},
}
-// RunPrecompile runs and evaluate the output of a precompiled contract defined in contracts.go
+// PrecompiledContractsMetropolis contains the default set of pre-compiled Ethereum
+// contracts used in the Metropolis release.
+var PrecompiledContractsMetropolis = map[common.Address]PrecompiledContract{
+ common.BytesToAddress([]byte{1}): &ecrecover{},
+ common.BytesToAddress([]byte{2}): &sha256hash{},
+ common.BytesToAddress([]byte{3}): &ripemd160hash{},
+ common.BytesToAddress([]byte{4}): &dataCopy{},
+ common.BytesToAddress([]byte{5}): &bigModExp{},
+ common.BytesToAddress([]byte{6}): &bn256Add{},
+ common.BytesToAddress([]byte{7}): &bn256ScalarMul{},
+ common.BytesToAddress([]byte{8}): &bn256Pairing{},
+}
+
+// RunPrecompiledContract runs and evaluates the output of a precompiled contract.
func RunPrecompiledContract(p PrecompiledContract, input []byte, contract *Contract) (ret []byte, err error) {
gas := p.RequiredGas(input)
if contract.UseGas(gas) {
return p.Run(input)
- } else {
- return nil, ErrOutOfGas
}
+ return nil, ErrOutOfGas
}
-// ECRECOVER implemented as a native contract
+// ECRECOVER implemented as a native contract.
type ecrecover struct{}
func (c *ecrecover) RequiredGas(input []byte) uint64 {
return params.EcrecoverGas
}
-func (c *ecrecover) Run(in []byte) ([]byte, error) {
+func (c *ecrecover) Run(input []byte) ([]byte, error) {
const ecRecoverInputLength = 128
- in = common.RightPadBytes(in, ecRecoverInputLength)
- // "in" is (hash, v, r, s), each 32 bytes
+ input = common.RightPadBytes(input, ecRecoverInputLength)
+ // "input" is (hash, v, r, s), each 32 bytes
// but for ecrecover we want (r, s, v)
- r := new(big.Int).SetBytes(in[64:96])
- s := new(big.Int).SetBytes(in[96:128])
- v := in[63] - 27
+ r := new(big.Int).SetBytes(input[64:96])
+ s := new(big.Int).SetBytes(input[96:128])
+ v := input[63] - 27
- // tighter sig s values in homestead only apply to tx sigs
- if !allZero(in[32:63]) || !crypto.ValidateSignatureValues(v, r, s, false) {
- log.Trace("ECRECOVER error: v, r or s value invalid")
+	// tighter sig s values in homestead only apply to tx sigs
+ if !allZero(input[32:63]) || !crypto.ValidateSignatureValues(v, r, s, false) {
return nil, nil
}
// v needs to be at the end for libsecp256k1
- pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v))
+ pubKey, err := crypto.Ecrecover(input[:32], append(input[64:128], v))
// make sure the public key is a valid one
if err != nil {
- log.Trace("ECRECOVER failed", "err", err)
return nil, nil
}
@@ -91,7 +101,7 @@ func (c *ecrecover) Run(in []byte) ([]byte, error) {
return common.LeftPadBytes(crypto.Keccak256(pubKey[1:])[12:], 32), nil
}
-// SHA256 implemented as a native contract
+// SHA256 implemented as a native contract.
type sha256hash struct{}
// RequiredGas returns the gas required to execute the pre-compiled contract.
@@ -99,14 +109,14 @@ type sha256hash struct{}
// This method does not require any overflow checking as the input size gas costs
// required for anything significant is so high it's impossible to pay for.
func (c *sha256hash) RequiredGas(input []byte) uint64 {
- return uint64(len(input)+31)/32*params.Sha256WordGas + params.Sha256Gas
+ return uint64(len(input)+31)/32*params.Sha256PerWordGas + params.Sha256BaseGas
}
-func (c *sha256hash) Run(in []byte) ([]byte, error) {
- h := sha256.Sum256(in)
+func (c *sha256hash) Run(input []byte) ([]byte, error) {
+ h := sha256.Sum256(input)
return h[:], nil
}
-// RIPMED160 implemented as a native contract
+// RIPEMD160 implemented as a native contract.
type ripemd160hash struct{}
// RequiredGas returns the gas required to execute the pre-compiled contract.
@@ -114,15 +124,15 @@ type ripemd160hash struct{}
// This method does not require any overflow checking as the input size gas costs
// required for anything significant is so high it's impossible to pay for.
func (c *ripemd160hash) RequiredGas(input []byte) uint64 {
- return uint64(len(input)+31)/32*params.Ripemd160WordGas + params.Ripemd160Gas
+ return uint64(len(input)+31)/32*params.Ripemd160PerWordGas + params.Ripemd160BaseGas
}
-func (c *ripemd160hash) Run(in []byte) ([]byte, error) {
+func (c *ripemd160hash) Run(input []byte) ([]byte, error) {
ripemd := ripemd160.New()
- ripemd.Write(in)
+ ripemd.Write(input)
return common.LeftPadBytes(ripemd.Sum(nil), 32), nil
}
-// data copy implemented as a native contract
+// data copy implemented as a native contract.
type dataCopy struct{}
// RequiredGas returns the gas required to execute the pre-compiled contract.
@@ -130,8 +140,240 @@ type dataCopy struct{}
// This method does not require any overflow checking as the input size gas costs
// required for anything significant is so high it's impossible to pay for.
func (c *dataCopy) RequiredGas(input []byte) uint64 {
- return uint64(len(input)+31)/32*params.IdentityWordGas + params.IdentityGas
+ return uint64(len(input)+31)/32*params.IdentityPerWordGas + params.IdentityBaseGas
}
func (c *dataCopy) Run(in []byte) ([]byte, error) {
return in, nil
}
+
+// bigModExp implements a native big integer exponential modular operation.
+type bigModExp struct{}
+
+var (
+ big1 = big.NewInt(1)
+ big4 = big.NewInt(4)
+ big8 = big.NewInt(8)
+ big16 = big.NewInt(16)
+ big32 = big.NewInt(32)
+ big64 = big.NewInt(64)
+ big96 = big.NewInt(96)
+ big480 = big.NewInt(480)
+ big1024 = big.NewInt(1024)
+ big3072 = big.NewInt(3072)
+ big199680 = big.NewInt(199680)
+)
+
+// RequiredGas returns the gas required to execute the pre-compiled contract.
+func (c *bigModExp) RequiredGas(input []byte) uint64 {
+ var (
+ baseLen = new(big.Int).SetBytes(getData(input, 0, 32))
+ expLen = new(big.Int).SetBytes(getData(input, 32, 32))
+ modLen = new(big.Int).SetBytes(getData(input, 64, 32))
+ )
+ if len(input) > 96 {
+ input = input[96:]
+ } else {
+ input = input[:0]
+ }
+ // Retrieve the head 32 bytes of exp for the adjusted exponent length
+ var expHead *big.Int
+ if big.NewInt(int64(len(input))).Cmp(baseLen) <= 0 {
+ expHead = new(big.Int)
+ } else {
+ if expLen.Cmp(big32) > 0 {
+ expHead = new(big.Int).SetBytes(getData(input, baseLen.Uint64(), 32))
+ } else {
+ expHead = new(big.Int).SetBytes(getData(input, baseLen.Uint64(), expLen.Uint64()))
+ }
+ }
+ // Calculate the adjusted exponent length
+ var msb int
+ if bitlen := expHead.BitLen(); bitlen > 0 {
+ msb = bitlen - 1
+ }
+ adjExpLen := new(big.Int)
+ if expLen.Cmp(big32) > 0 {
+ adjExpLen.Sub(expLen, big32)
+ adjExpLen.Mul(big8, adjExpLen)
+ }
+ adjExpLen.Add(adjExpLen, big.NewInt(int64(msb)))
+
+ // Calculate the gas cost of the operation
+ gas := new(big.Int).Set(math.BigMax(modLen, baseLen))
+ switch {
+ case gas.Cmp(big64) <= 0:
+ gas.Mul(gas, gas)
+ case gas.Cmp(big1024) <= 0:
+ gas = new(big.Int).Add(
+ new(big.Int).Div(new(big.Int).Mul(gas, gas), big4),
+ new(big.Int).Sub(new(big.Int).Mul(big96, gas), big3072),
+ )
+ default:
+ gas = new(big.Int).Add(
+ new(big.Int).Div(new(big.Int).Mul(gas, gas), big16),
+ new(big.Int).Sub(new(big.Int).Mul(big480, gas), big199680),
+ )
+ }
+ gas.Mul(gas, math.BigMax(adjExpLen, big1))
+ gas.Div(gas, new(big.Int).SetUint64(params.ModExpQuadCoeffDiv))
+
+ if gas.BitLen() > 64 {
+ return math.MaxUint64
+ }
+ return gas.Uint64()
+}
+
+func (c *bigModExp) Run(input []byte) ([]byte, error) {
+ var (
+ baseLen = new(big.Int).SetBytes(getData(input, 0, 32)).Uint64()
+ expLen = new(big.Int).SetBytes(getData(input, 32, 32)).Uint64()
+ modLen = new(big.Int).SetBytes(getData(input, 64, 32)).Uint64()
+ )
+ if len(input) > 96 {
+ input = input[96:]
+ } else {
+ input = input[:0]
+ }
+	// Handle a special case when both the base and mod lengths are zero
+ if baseLen == 0 && modLen == 0 {
+ return []byte{}, nil
+ }
+ // Retrieve the operands and execute the exponentiation
+ var (
+ base = new(big.Int).SetBytes(getData(input, 0, baseLen))
+ exp = new(big.Int).SetBytes(getData(input, baseLen, expLen))
+ mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen))
+ )
+ if mod.BitLen() == 0 {
+ // Modulo 0 is undefined, return zero
+ return common.LeftPadBytes([]byte{}, int(modLen)), nil
+ }
+ return common.LeftPadBytes(base.Exp(base, exp, mod).Bytes(), int(modLen)), nil
+}
+
+var (
+ // errNotOnCurve is returned if a point being unmarshalled as a bn256 elliptic
+ // curve point is not on the curve.
+ errNotOnCurve = errors.New("point not on elliptic curve")
+
+ // errInvalidCurvePoint is returned if a point being unmarshalled as a bn256
+ // elliptic curve point is invalid.
+ errInvalidCurvePoint = errors.New("invalid elliptic curve point")
+)
+
+// newCurvePoint unmarshals a binary blob into a bn256 elliptic curve point,
+// returning it, or an error if the point is invalid.
+func newCurvePoint(blob []byte) (*bn256.G1, error) {
+ p, onCurve := new(bn256.G1).Unmarshal(blob)
+ if !onCurve {
+ return nil, errNotOnCurve
+ }
+ gx, gy, _, _ := p.CurvePoints()
+ if gx.Cmp(bn256.P) >= 0 || gy.Cmp(bn256.P) >= 0 {
+ return nil, errInvalidCurvePoint
+ }
+ return p, nil
+}
+
+// newTwistPoint unmarshals a binary blob into a bn256 elliptic curve point,
+// returning it, or an error if the point is invalid.
+func newTwistPoint(blob []byte) (*bn256.G2, error) {
+ p, onCurve := new(bn256.G2).Unmarshal(blob)
+ if !onCurve {
+ return nil, errNotOnCurve
+ }
+ x2, y2, _, _ := p.CurvePoints()
+ if x2.Real().Cmp(bn256.P) >= 0 || x2.Imag().Cmp(bn256.P) >= 0 ||
+ y2.Real().Cmp(bn256.P) >= 0 || y2.Imag().Cmp(bn256.P) >= 0 {
+ return nil, errInvalidCurvePoint
+ }
+ return p, nil
+}
+
+// bn256Add implements a native elliptic curve point addition.
+type bn256Add struct{}
+
+// RequiredGas returns the gas required to execute the pre-compiled contract.
+func (c *bn256Add) RequiredGas(input []byte) uint64 {
+ return params.Bn256AddGas
+}
+
+func (c *bn256Add) Run(input []byte) ([]byte, error) {
+ x, err := newCurvePoint(getData(input, 0, 64))
+ if err != nil {
+ return nil, err
+ }
+ y, err := newCurvePoint(getData(input, 64, 64))
+ if err != nil {
+ return nil, err
+ }
+ res := new(bn256.G1)
+ res.Add(x, y)
+ return res.Marshal(), nil
+}
+
+// bn256ScalarMul implements a native elliptic curve scalar multiplication.
+type bn256ScalarMul struct{}
+
+// RequiredGas returns the gas required to execute the pre-compiled contract.
+func (c *bn256ScalarMul) RequiredGas(input []byte) uint64 {
+ return params.Bn256ScalarMulGas
+}
+
+func (c *bn256ScalarMul) Run(input []byte) ([]byte, error) {
+ p, err := newCurvePoint(getData(input, 0, 64))
+ if err != nil {
+ return nil, err
+ }
+ res := new(bn256.G1)
+ res.ScalarMult(p, new(big.Int).SetBytes(getData(input, 64, 32)))
+ return res.Marshal(), nil
+}
+
+var (
+ // true32Byte is returned if the bn256 pairing check succeeds.
+ true32Byte = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
+
+ // false32Byte is returned if the bn256 pairing check fails.
+ false32Byte = make([]byte, 32)
+
+ // errBadPairingInput is returned if the bn256 pairing input is invalid.
+ errBadPairingInput = errors.New("bad elliptic curve pairing size")
+)
+
+// bn256Pairing implements a pairing pre-compile for the bn256 curve
+type bn256Pairing struct{}
+
+// RequiredGas returns the gas required to execute the pre-compiled contract.
+func (c *bn256Pairing) RequiredGas(input []byte) uint64 {
+ return params.Bn256PairingBaseGas + uint64(len(input)/192)*params.Bn256PairingPerPointGas
+}
+
+func (c *bn256Pairing) Run(input []byte) ([]byte, error) {
+ // Handle some corner cases cheaply
+ if len(input)%192 > 0 {
+ return nil, errBadPairingInput
+ }
+ // Convert the input into a set of coordinates
+ var (
+ cs []*bn256.G1
+ ts []*bn256.G2
+ )
+ for i := 0; i < len(input); i += 192 {
+ c, err := newCurvePoint(input[i : i+64])
+ if err != nil {
+ return nil, err
+ }
+ t, err := newTwistPoint(input[i+64 : i+192])
+ if err != nil {
+ return nil, err
+ }
+ cs = append(cs, c)
+ ts = append(ts, t)
+ }
+ // Execute the pairing checks and return the results
+ if bn256.PairingCheck(cs, ts) {
+ return true32Byte, nil
+ }
+ return false32Byte, nil
+}
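The bigModExp precompile above parses its input as three 32-byte big-endian lengths followed by the base, exponent and modulus concatenated, while the bn256 pairing check expects a multiple of 192 bytes (a 64-byte G1 point plus a 128-byte G2 point per pair). A sketch of packing a modexp call; modexpInput is an illustrative helper, not part of the package:

package main

import (
	"encoding/binary"
	"fmt"
)

// modexpInput builds the length-prefixed payload the bigModExp precompile
// expects: len(base), len(exp) and len(mod) each as a 32-byte word, then the
// three operands back to back.
func modexpInput(base, exp, mod []byte) []byte {
	buf := make([]byte, 0, 96+len(base)+len(exp)+len(mod))
	for _, n := range []int{len(base), len(exp), len(mod)} {
		word := make([]byte, 32)
		binary.BigEndian.PutUint64(word[24:], uint64(n))
		buf = append(buf, word...)
	}
	buf = append(buf, base...)
	buf = append(buf, exp...)
	buf = append(buf, mod...)
	return buf
}

func main() {
	// 3^5 mod 7 = 5; feeding this to the contract at address 0x05 should
	// return 0x05 left-padded to the modulus length (one byte here).
	input := modexpInput([]byte{3}, []byte{5}, []byte{7})
	fmt.Printf("%x\n", input)
}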
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index 830a8f69d..022070ab8 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -1 +1,391 @@
package vm
+
+import (
+ "fmt"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// precompiledTest defines the input/output pairs for precompiled contract tests.
+type precompiledTest struct {
+ input, expected string
+ gas uint64
+ name string
+}
+
+// modexpTests are the test and benchmark data for the modexp precompiled contract.
+var modexpTests = []precompiledTest{
+ {
+ input: "0000000000000000000000000000000000000000000000000000000000000001" +
+ "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000020" +
+ "03" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "eip_example1",
+ }, {
+ input: "0000000000000000000000000000000000000000000000000000000000000000" +
+ "0000000000000000000000000000000000000000000000000000000000000020" +
+ "0000000000000000000000000000000000000000000000000000000000000020" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
+ expected: "0000000000000000000000000000000000000000000000000000000000000000",
+ name: "eip_example2",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb502fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b",
+ expected: "60008f1614cc01dcfb6bfb09c625cf90b47d4468db81b5f8b7a39d42f332eab9b2da8f2d95311648a8f243f4bb13cfb3d8f7f2a3c014122ebb3ed41b02783adc",
+ name: "nagydani-1-square",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb503fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b",
+ expected: "4834a46ba565db27903b1c720c9d593e84e4cbd6ad2e64b31885d944f68cd801f92225a8961c952ddf2797fa4701b330c85c4b363798100b921a1a22a46a7fec",
+ name: "nagydani-1-qube",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5010001fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b",
+ expected: "c36d804180c35d4426b57b50c5bfcca5c01856d104564cd513b461d3c8b8409128a5573e416d0ebe38f5f736766d9dc27143e4da981dfa4d67f7dc474cbee6d2",
+ name: "nagydani-1-pow0x10001",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5102e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087",
+ expected: "981dd99c3b113fae3e3eaa9435c0dc96779a23c12a53d1084b4f67b0b053a27560f627b873e3f16ad78f28c94f14b6392def26e4d8896c5e3c984e50fa0b3aa44f1da78b913187c6128baa9340b1e9c9a0fd02cb78885e72576da4a8f7e5a113e173a7a2889fde9d407bd9f06eb05bc8fc7b4229377a32941a02bf4edcc06d70",
+ name: "nagydani-2-square",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5103e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087",
+ expected: "d89ceb68c32da4f6364978d62aaa40d7b09b59ec61eb3c0159c87ec3a91037f7dc6967594e530a69d049b64adfa39c8fa208ea970cfe4b7bcd359d345744405afe1cbf761647e32b3184c7fbe87cee8c6c7ff3b378faba6c68b83b6889cb40f1603ee68c56b4c03d48c595c826c041112dc941878f8c5be828154afd4a16311f",
+ name: "nagydani-2-qube",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51010001e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087",
+ expected: "ad85e8ef13fd1dd46eae44af8b91ad1ccae5b7a1c92944f92a19f21b0b658139e0cabe9c1f679507c2de354bf2c91ebd965d1e633978a830d517d2f6f8dd5fd58065d58559de7e2334a878f8ec6992d9b9e77430d4764e863d77c0f87beede8f2f7f2ab2e7222f85cc9d98b8467f4bb72e87ef2882423ebdb6daf02dddac6db2",
+ name: "nagydani-2-pow0x10001",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb02d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d",
+ expected: "affc7507ea6d84751ec6b3f0d7b99dbcc263f33330e450d1b3ff0bc3d0874320bf4edd57debd587306988157958cb3cfd369cc0c9c198706f635c9e0f15d047df5cb44d03e2727f26b083c4ad8485080e1293f171c1ed52aef5993a5815c35108e848c951cf1e334490b4a539a139e57b68f44fee583306f5b85ffa57206b3ee5660458858534e5386b9584af3c7f67806e84c189d695e5eb96e1272d06ec2df5dc5fabc6e94b793718c60c36be0a4d031fc84cd658aa72294b2e16fc240aef70cb9e591248e38bd49c5a554d1afa01f38dab72733092f7555334bbef6c8c430119840492380aa95fa025dcf699f0a39669d812b0c6946b6091e6e235337b6f8",
+ name: "nagydani-3-square",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb03d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d",
+ expected: "1b280ecd6a6bf906b806d527c2a831e23b238f89da48449003a88ac3ac7150d6a5e9e6b3be4054c7da11dd1e470ec29a606f5115801b5bf53bc1900271d7c3ff3cd5ed790d1c219a9800437a689f2388ba1a11d68f6a8e5b74e9a3b1fac6ee85fc6afbac599f93c391f5dc82a759e3c6c0ab45ce3f5d25d9b0c1bf94cf701ea6466fc9a478dacc5754e593172b5111eeba88557048bceae401337cd4c1182ad9f700852bc8c99933a193f0b94cf1aedbefc48be3bc93ef5cb276d7c2d5462ac8bb0c8fe8923a1db2afe1c6b90d59c534994a6a633f0ead1d638fdc293486bb634ff2c8ec9e7297c04241a61c37e3ae95b11d53343d4ba2b4cc33d2cfa7eb705e",
+ name: "nagydani-3-qube",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb010001d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d",
+ expected: "37843d7c67920b5f177372fa56e2a09117df585f81df8b300fba245b1175f488c99476019857198ed459ed8d9799c377330e49f4180c4bf8e8f66240c64f65ede93d601f957b95b83efdee1e1bfde74169ff77002eaf078c71815a9220c80b2e3b3ff22c2f358111d816ebf83c2999026b6de50bfc711ff68705d2f40b753424aefc9f70f08d908b5a20276ad613b4ab4309a3ea72f0c17ea9df6b3367d44fb3acab11c333909e02e81ea2ed404a712d3ea96bba87461720e2d98723e7acd0520ac1a5212dbedcd8dc0c1abf61d4719e319ff4758a774790b8d463cdfe131d1b2dcfee52d002694e98e720cb6ae7ccea353bc503269ba35f0f63bf8d7b672a76",
+ name: "nagydani-3-pow0x10001",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8102df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f",
+ expected: "8a5aea5f50dcc03dc7a7a272b5aeebc040554dbc1ffe36753c4fc75f7ed5f6c2cc0de3a922bf96c78bf0643a73025ad21f45a4a5cadd717612c511ab2bff1190fe5f1ae05ba9f8fe3624de1de2a817da6072ddcdb933b50216811dbe6a9ca79d3a3c6b3a476b079fd0d05f04fb154e2dd3e5cb83b148a006f2bcbf0042efb2ae7b916ea81b27aac25c3bf9a8b6d35440062ad8eae34a83f3ffa2cc7b40346b62174a4422584f72f95316f6b2bee9ff232ba9739301c97c99a9ded26c45d72676eb856ad6ecc81d36a6de36d7f9dafafee11baa43a4b0d5e4ecffa7b9b7dcefd58c397dd373e6db4acd2b2c02717712e6289bed7c813b670c4a0c6735aa7f3b0f1ce556eae9fcc94b501b2c8781ba50a8c6220e8246371c3c7359fe4ef9da786ca7d98256754ca4e496be0a9174bedbecb384bdf470779186d6a833f068d2838a88d90ef3ad48ff963b67c39cc5a3ee123baf7bf3125f64e77af7f30e105d72c4b9b5b237ed251e4c122c6d8c1405e736299c3afd6db16a28c6a9cfa68241e53de4cd388271fe534a6a9b0dbea6171d170db1b89858468885d08fecbd54c8e471c3e25d48e97ba450b96d0d87e00ac732aaa0d3ce4309c1064bd8a4c0808a97e0143e43a24cfa847635125cd41c13e0574487963e9d725c01375db99c31da67b4cf65eff555f0c0ac416c727ff8d438ad7c42030551d68c2e7adda0abb1ca7c10",
+ name: "nagydani-4-square",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8103df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f",
+ expected: "5a2664252aba2d6e19d9600da582cdd1f09d7a890ac48e6b8da15ae7c6ff1856fc67a841ac2314d283ffa3ca81a0ecf7c27d89ef91a5a893297928f5da0245c99645676b481b7e20a566ee6a4f2481942bee191deec5544600bb2441fd0fb19e2ee7d801ad8911c6b7750affec367a4b29a22942c0f5f4744a4e77a8b654da2a82571037099e9c6d930794efe5cdca73c7b6c0844e386bdca8ea01b3d7807146bb81365e2cdc6475f8c23e0ff84463126189dc9789f72bbce2e3d2d114d728a272f1345122de23df54c922ec7a16e5c2a8f84da8871482bd258c20a7c09bbcd64c7a96a51029bbfe848736a6ba7bf9d931a9b7de0bcaf3635034d4958b20ae9ab3a95a147b0421dd5f7ebff46c971010ebfc4adbbe0ad94d5498c853e7142c450d8c71de4b2f84edbf8acd2e16d00c8115b150b1c30e553dbb82635e781379fe2a56360420ff7e9f70cc64c00aba7e26ed13c7c19622865ae07248daced36416080f35f8cc157a857ed70ea4f347f17d1bee80fa038abd6e39b1ba06b97264388b21364f7c56e192d4b62d9b161405f32ab1e2594e86243e56fcf2cb30d21adef15b9940f91af681da24328c883d892670c6aa47940867a81830a82b82716895db810df1b834640abefb7db2092dd92912cb9a735175bc447be40a503cf22dfe565b4ed7a3293ca0dfd63a507430b323ee248ec82e843b673c97ad730728cebc",
+ name: "nagydani-4-qube",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81010001df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f",
+ expected: "bed8b970c4a34849fc6926b08e40e20b21c15ed68d18f228904878d4370b56322d0da5789da0318768a374758e6375bfe4641fca5285ec7171828922160f48f5ca7efbfee4d5148612c38ad683ae4e3c3a053d2b7c098cf2b34f2cb19146eadd53c86b2d7ccf3d83b2c370bfb840913ee3879b1057a6b4e07e110b6bcd5e958bc71a14798c91d518cc70abee264b0d25a4110962a764b364ac0b0dd1ee8abc8426d775ec0f22b7e47b32576afaf1b5a48f64573ed1c5c29f50ab412188d9685307323d990802b81dacc06c6e05a1e901830ba9fcc67688dc29c5e27bde0a6e845ca925f5454b6fb3747edfaa2a5820838fb759eadf57f7cb5cec57fc213ddd8a4298fa079c3c0f472b07fb15aa6a7f0a3780bd296ff6a62e58ef443870b02260bd4fd2bbc98255674b8e1f1f9f8d33c7170b0ebbea4523b695911abbf26e41885344823bd0587115fdd83b721a4e8457a31c9a84b3d3520a07e0e35df7f48e5a9d534d0ec7feef1ff74de6a11e7f93eab95175b6ce22c68d78a642ad642837897ec11349205d8593ac19300207572c38d29ca5dfa03bc14cdbc32153c80e5cc3e739403d34c75915e49beb43094cc6dcafb3665b305ddec9286934ae66ec6b777ca528728c851318eb0f207b39f1caaf96db6eeead6b55ed08f451939314577d42bcc9f97c0b52d0234f88fd07e4c1d7780fdebc025cfffcb572cb27a8c33963",
+ name: "nagydani-4-pow0x10001",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf02e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d08972
7ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad",
+ expected: "d61fe4e3f32ac260915b5b03b78a86d11bfc41d973fce5b0cc59035cf8289a8a2e3878ea15fa46565b0d806e2f85b53873ea20ed653869b688adf83f3ef444535bf91598ff7e80f334fb782539b92f39f55310cc4b35349ab7b278346eda9bc37c0d8acd3557fae38197f412f8d9e57ce6a76b7205c23564cab06e5615be7c6f05c3d05ec690cba91da5e89d55b152ff8dd2157dc5458190025cf94b1ad98f7cbe64e9482faba95e6b33844afc640892872b44a9932096508f4a782a4805323808f23e54b6ff9b841dbfa87db3505ae4f687972c18ea0f0d0af89d36c1c2a5b14560c153c3fee406f5cf15cfd1c0bb45d767426d465f2f14c158495069d0c5955a00150707862ecaae30624ebacdd8ac33e4e6aab3ff90b6ba445a84689386b9e945d01823a65874444316e83767290fcff630d2477f49d5d8ffdd200e08ee1274270f86ed14c687895f6caf5ce528bd970c20d2408a9ba66216324c6a011ac4999098362dbd98a038129a2d40c8da6ab88318aa3046cb660327cc44236d9e5d2163bd0959062195c51ed93d0088b6f92051fc99050ece2538749165976233697ab4b610385366e5ce0b02ad6b61c168ecfbedcdf74278a38de340fd7a5fead8e588e294795f9b011e2e60377a89e25c90e145397cdeabc60fd32444a6b7642a611a83c464d8b8976666351b4865c37b02e6dc21dbcdf5f930341707b618cc0f03c3122646b3385c9df9f2ec730eec9d49e7dfc9153b6e6289da8c4f0ebea9ccc1b751948e3bb7171c9e4d57423b0eeeb79095c030cb52677b3f7e0b45c30f645391f3f9c957afa549c4e0b2465b03c67993cd200b1af01035962edbc4c9e89b31c82ac121987d6529dafdeef67a132dc04b6dc68e77f22862040b75e2ceb9ff16da0fca534e6db7bd12fa7b7f51b6c08c1e23dfcdb7acbd2da0b51c87ffbced065a612e9b1c8bba9b7e2d8d7a2f04fcc4aaf355b60d764879a76b5e16762d5f2f55d585d0c8e82df6940960cddfb72c91dfa71f6b4e1c6ca25dfc39a878e998a663c04fe29d5e83b9586d047b4d7ff70a9f0d44f127e7d741685ca75f11629128d916a0ffef4be586a30c4b70389cc746e84ebf177c01ee8a4511cfbb9d1ecf7f7b33c7dd8177896e10bbc82f838dcd6db7ac67de62bf46b6a640fb580c5d1d2708f3862e3d2b645d0d18e49ef088053e3a220adc0e033c2afcfe61c90e32151152eb3caaf746c5e377d541cafc6cbb0cc0fa48b5caf1728f2e1957f5addfc234f1a9d89e40d49356c9172d0561a695fce6dab1d412321bbf407f63766ffd7b6b3d79bcfa07991c5a9709849c1008689e3b47c50d613980bec239fb64185249d055b30375ccb4354d71fe4d05648fbf6c80634dfc3575f2f24abb714c1e4c95e8896763bf4316e954c7ad19e5780ab7a040ca6fb9271f90a8b22ae738daf6cb",
+ name: "nagydani-5-square",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf03e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d08972
7ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad",
+ expected: "5f9c70ec884926a89461056ad20ac4c30155e817f807e4d3f5bb743d789c83386762435c3627773fa77da5144451f2a8aad8adba88e0b669f5377c5e9bad70e45c86fe952b613f015a9953b8a5de5eaee4566acf98d41e327d93a35bd5cef4607d025e58951167957df4ff9b1627649d3943805472e5e293d3efb687cfd1e503faafeb2840a3e3b3f85d016051a58e1c9498aab72e63b748d834b31eb05d85dcde65e27834e266b85c75cc4ec0135135e0601cb93eeeb6e0010c8ceb65c4c319623c5e573a2c8c9fbbf7df68a930beb412d3f4dfd146175484f45d7afaa0d2e60684af9b34730f7c8438465ad3e1d0c3237336722f2aa51095bd5759f4b8ab4dda111b684aa3dac62a761722e7ae43495b7709933512c81c4e3c9133a51f7ce9f2b51fcec064f65779666960b4e45df3900f54311f5613e8012dd1b8efd359eda31a778264c72aa8bb419d862734d769076bce2810011989a45374e5c5d8729fec21427f0bf397eacbb4220f603cf463a4b0c94efd858ffd9768cd60d6ce68d755e0fbad007ce5c2223d70c7018345a102e4ab3c60a13a9e7794303156d4c2063e919f2153c13961fb324c80b240742f47773a7a8e25b3e3fb19b00ce839346c6eb3c732fbc6b888df0b1fe0a3d07b053a2e9402c267b2d62f794d8a2840526e3ade15ce2264496ccd7519571dfde47f7a4bb16292241c20b2be59f3f8fb4f6383f232d838c5a22d8c95b6834d9d2ca493f5a505ebe8899503b0e8f9b19e6e2dd81c1628b80016d02097e0134de51054c4e7674824d4d758760fc52377d2cad145e259aa2ffaf54139e1a66b1e0c1c191e32ac59474c6b526f5b3ba07d3e5ec286eddf531fcd5292869be58c9f22ef91026159f7cf9d05ef66b4299f4da48cc1635bf2243051d342d378a22c83390553e873713c0454ce5f3234397111ac3fe3207b86f0ed9fc025c81903e1748103692074f83824fda6341be4f95ff00b0a9a208c267e12fa01825054cc0513629bf3dbb56dc5b90d4316f87654a8be18227978ea0a8a522760cad620d0d14fd38920fb7321314062914275a5f99f677145a6979b156bd82ecd36f23f8e1273cc2759ecc0b2c69d94dad5211d1bed939dd87ed9e07b91d49713a6e16ade0a98aea789f04994e318e4ff2c8a188cd8d43aeb52c6daa3bc29b4af50ea82a247c5cd67b573b34cbadcc0a376d3bbd530d50367b42705d870f2e27a8197ef46070528bfe408360faa2ebb8bf76e9f388572842bcb119f4d84ee34ae31f5cc594f23705a49197b181fb78ed1ec99499c690f843a4d0cf2e226d118e9372271054fbabdcc5c92ae9fefaef0589cd0e722eaf30c1703ec4289c7fd81beaa8a455ccee5298e31e2080c10c366a6fcf56f7d13582ad0bcad037c612b710fc595b70fbefaaca23623b60c6c39b11beb8e5843b6b3dac60f",
+ name: "nagydani-5-qube",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf010001e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d0
89727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad",
+ expected: "5a0eb2bdf0ac1cae8e586689fa16cd4b07dfdedaec8a110ea1fdb059dd5253231b6132987598dfc6e11f86780428982d50cf68f67ae452622c3b336b537ef3298ca645e8f89ee39a26758206a5a3f6409afc709582f95274b57b71fae5c6b74619ae6f089a5393c5b79235d9caf699d23d88fb873f78379690ad8405e34c19f5257d596580c7a6a7206a3712825afe630c76b31cdb4a23e7f0632e10f14f4e282c81a66451a26f8df2a352b5b9f607a7198449d1b926e27036810368e691a74b91c61afa73d9d3b99453e7c8b50fd4f09c039a2f2feb5c419206694c31b92df1d9586140cb3417b38d0c503c7b508cc2ed12e813a1c795e9829eb39ee78eeaf360a169b491a1d4e419574e712402de9d48d54c1ae5e03739b7156615e8267e1fb0a897f067afd11fb33f6e24182d7aaaaa18fe5bc1982f20d6b871e5a398f0f6f718181d31ec225cfa9a0a70124ed9a70031bdf0c1c7829f708b6e17d50419ef361cf77d99c85f44607186c8d683106b8bd38a49b5d0fb503b397a83388c5678dcfcc737499d84512690701ed621a6f0172aecf037184ddf0f2453e4053024018e5ab2e30d6d5363b56e8b41509317c99042f517247474ab3abc848e00a07f69c254f46f2a05cf6ed84e5cc906a518fdcfdf2c61ce731f24c5264f1a25fc04934dc28aec112134dd523f70115074ca34e3807aa4cb925147f3a0ce152d323bd8c675ace446d0fd1ae30c4b57f0eb2c23884bc18f0964c0114796c5b6d080c3d89175665fbf63a6381a6a9da39ad070b645c8bb1779506da14439a9f5b5d481954764ea114fac688930bc68534d403cff4210673b6a6ff7ae416b7cd41404c3d3f282fcd193b86d0f54d0006c2a503b40d5c3930da980565b8f9630e9493a79d1c03e74e5f93ac8e4dc1a901ec5e3b3e57049124c7b72ea345aa359e782285d9e6a5c144a378111dd02c40855ff9c2be9b48425cb0b2fd62dc8678fd151121cf26a65e917d65d8e0dacfae108eb5508b601fb8ffa370be1f9a8b749a2d12eeab81f41079de87e2d777994fa4d28188c579ad327f9957fb7bdecec5c680844dd43cb57cf87aeb763c003e65011f73f8c63442df39a92b946a6bd968a1c1e4d5fa7d88476a68bd8e20e5b70a99259c7d3f85fb1b65cd2e93972e6264e74ebf289b8b6979b9b68a85cd5b360c1987f87235c3c845d62489e33acf85d53fa3561fe3a3aee18924588d9c6eba4edb7a4d106b31173e42929f6f0c48c80ce6a72d54eca7c0fe870068b7a7c89c63cdda593f5b32d3cb4ea8a32c39f00ab449155757172d66763ed9527019d6de6c9f2416aa6203f4d11c9ebee1e1d3845099e55504446448027212616167eb36035726daa7698b075286f5379cd3e93cb3e0cf4f9cb8d017facbb5550ed32d5ec5400ae57e47e2bf78d1eaeff9480cc765ceff39db500",
+ name: "nagydani-5-pow0x10001",
+ },
+}
+
+// bn256AddTests are the test and benchmark data for the bn256 addition precompiled
+// contract.
+var bn256AddTests = []precompiledTest{
+ {
+ input: "18b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f3726607c2b7f58a84bd6145f00c9c2bc0bb1a187f20ff2c92963a88019e7c6a014eed06614e20c147e940f2d70da3f74c9a17df361706a4485c742bd6788478fa17d7",
+ expected: "2243525c5efd4b9c3d3c45ac0ca3fe4dd85e830a4ce6b65fa1eeaee202839703301d1d33be6da8e509df21cc35964723180eed7532537db9ae5e7d48f195c915",
+ name: "chfast1",
+ }, {
+ input: "2243525c5efd4b9c3d3c45ac0ca3fe4dd85e830a4ce6b65fa1eeaee202839703301d1d33be6da8e509df21cc35964723180eed7532537db9ae5e7d48f195c91518b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f37266",
+ expected: "2bd3e6d0f3b142924f5ca7b49ce5b9d54c4703d7ae5648e61d02268b1a0a9fb721611ce0a6af85915e2f1d70300909ce2e49dfad4a4619c8390cae66cefdb204",
+ name: "chfast2",
+ }, {
+ input: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ expected: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ name: "cdetrio1",
+ }, {
+ input: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ expected: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ name: "cdetrio2",
+ }, {
+ input: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ expected: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ name: "cdetrio3",
+ }, {
+ input: "",
+ expected: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ name: "cdetrio4",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ expected: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ name: "cdetrio5",
+ }, {
+ input: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ expected: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ name: "cdetrio6",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ expected: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ name: "cdetrio7",
+ }, {
+ input: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ expected: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ name: "cdetrio8",
+ }, {
+ input: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ expected: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ name: "cdetrio9",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ expected: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ name: "cdetrio10",
+ }, {
+ input: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
+ expected: "030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd315ed738c0e0a7c92e7845f96b2ae9c0a68a6a449e3538fc7ff3ebf7a5a18a2c4",
+ name: "cdetrio11",
+ }, {
+ input: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ expected: "030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd315ed738c0e0a7c92e7845f96b2ae9c0a68a6a449e3538fc7ff3ebf7a5a18a2c4",
+ name: "cdetrio12",
+ }, {
+ input: "17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d98",
+ expected: "15bf2bb17880144b5d1cd2b1f46eff9d617bffd1ca57c37fb5a49bd84e53cf66049c797f9ce0d17083deb32b5e36f2ea2a212ee036598dd7624c168993d1355f",
+ name: "cdetrio13",
+ }, {
+ input: "17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa92e83f8d734803fc370eba25ed1f6b8768bd6d83887b87165fc2434fe11a830cb00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ expected: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ name: "cdetrio14",
+ },
+}
+
+// bn256ScalarMulTests are the test and benchmark data for the bn256 scalar
+// multiplication precompiled contract.
+var bn256ScalarMulTests = []precompiledTest{
+ {
+ input: "2bd3e6d0f3b142924f5ca7b49ce5b9d54c4703d7ae5648e61d02268b1a0a9fb721611ce0a6af85915e2f1d70300909ce2e49dfad4a4619c8390cae66cefdb20400000000000000000000000000000000000000000000000011138ce750fa15c2",
+ expected: "070a8d6a982153cae4be29d434e8faef8a47b274a053f5a4ee2a6c9c13c31e5c031b8ce914eba3a9ffb989f9cdd5b0f01943074bf4f0f315690ec3cec6981afc",
+ name: "chfast1",
+ }, {
+ input: "070a8d6a982153cae4be29d434e8faef8a47b274a053f5a4ee2a6c9c13c31e5c031b8ce914eba3a9ffb989f9cdd5b0f01943074bf4f0f315690ec3cec6981afc30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd46",
+ expected: "025a6f4181d2b4ea8b724290ffb40156eb0adb514c688556eb79cdea0752c2bb2eff3f31dea215f1eb86023a133a996eb6300b44da664d64251d05381bb8a02e",
+ name: "chfast2",
+ }, {
+ input: "025a6f4181d2b4ea8b724290ffb40156eb0adb514c688556eb79cdea0752c2bb2eff3f31dea215f1eb86023a133a996eb6300b44da664d64251d05381bb8a02e183227397098d014dc2822db40c0ac2ecbc0b548b438e5469e10460b6c3e7ea3",
+ expected: "14789d0d4a730b354403b5fac948113739e276c23e0258d8596ee72f9cd9d3230af18a63153e0ec25ff9f2951dd3fa90ed0197bfef6e2a1a62b5095b9d2b4a27",
+ name: "chfast3",
+ },
+}
+
+// bn256PairingTests are the test and benchmark data for the bn256 pairing check
+// precompiled contract.
+var bn256PairingTests = []precompiledTest{
+ {
+ input: "1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "jeff1",
+ }, {
+ input: "2eca0c7238bf16e83e7a1e6c5d49540685ff51380f309842a98561558019fc0203d3260361bb8451de5ff5ecd17f010ff22f5c31cdf184e9020b06fa5997db841213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f06967a1237ebfeca9aaae0d6d0bab8e28c198c5a339ef8a2407e31cdac516db922160fa257a5fd5b280642ff47b65eca77e626cb685c84fa6d3b6882a283ddd1198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "jeff2",
+ }, {
+ input: "0f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd216da2f5cb6be7a0aa72c440c53c9bbdfec6c36c7d515536431b3a865468acbba2e89718ad33c8bed92e210e81d1853435399a271913a6520736a4729cf0d51eb01a9e2ffa2e92599b68e44de5bcf354fa2642bd4f26b259daa6f7ce3ed57aeb314a9a87b789a58af499b314e13c3d65bede56c07ea2d418d6874857b70763713178fb49a2d6cd347dc58973ff49613a20757d0fcc22079f9abd10c3baee245901b9e027bd5cfc2cb5db82d4dc9677ac795ec500ecd47deee3b5da006d6d049b811d7511c78158de484232fc68daf8a45cf217d1c2fae693ff5871e8752d73b21198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "jeff3",
+ }, {
+ input: "2f2ea0b3da1e8ef11914acf8b2e1b32d99df51f5f4f206fc6b947eae860eddb6068134ddb33dc888ef446b648d72338684d678d2eb2371c61a50734d78da4b7225f83c8b6ab9de74e7da488ef02645c5a16a6652c3c71a15dc37fe3a5dcb7cb122acdedd6308e3bb230d226d16a105295f523a8a02bfc5e8bd2da135ac4c245d065bbad92e7c4e31bf3757f1fe7362a63fbfee50e7dc68da116e67d600d9bf6806d302580dc0661002994e7cd3a7f224e7ddc27802777486bf80f40e4ca3cfdb186bac5188a98c45e6016873d107f5cd131f3a3e339d0375e58bd6219347b008122ae2b09e539e152ec5364e7e2204b03d11d3caa038bfc7cd499f8176aacbee1f39e4e4afc4bc74790a4a028aff2c3d2538731fb755edefd8cb48d6ea589b5e283f150794b6736f670d6a1033f9b46c6f5204f50813eb85c8dc4b59db1c5d39140d97ee4d2b36d99bc49974d18ecca3e7ad51011956051b464d9e27d46cc25e0764bb98575bd466d32db7b15f582b2d5c452b36aa394b789366e5e3ca5aabd415794ab061441e51d01e94640b7e3084a07e02c78cf3103c542bc5b298669f211b88da1679b0b64a63b7e0e7bfe52aae524f73a55be7fe70c7e9bfc94b4cf0da1213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "jeff4",
+ }, {
+ input: "20a754d2071d4d53903e3b31a7e98ad6882d58aec240ef981fdf0a9d22c5926a29c853fcea789887315916bbeb89ca37edb355b4f980c9a12a94f30deeed30211213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f1abb4a25eb9379ae96c84fff9f0540abcfc0a0d11aeda02d4f37e4baf74cb0c11073b3ff2cdbb38755f8691ea59e9606696b3ff278acfc098fa8226470d03869217cee0a9ad79a4493b5253e2e4e3a39fc2df38419f230d341f60cb064a0ac290a3d76f140db8418ba512272381446eb73958670f00cf46f1d9e64cba057b53c26f64a8ec70387a13e41430ed3ee4a7db2059cc5fc13c067194bcc0cb49a98552fd72bd9edb657346127da132e5b82ab908f5816c826acb499e22f2412d1a2d70f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd2198a1f162a73261f112401aa2db79c7dab1533c9935c77290a6ce3b191f2318d198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "jeff5",
+ }, {
+ input: "1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c103188585e2364128fe25c70558f1560f4f9350baf3959e603cc91486e110936198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
+ expected: "0000000000000000000000000000000000000000000000000000000000000000",
+ name: "jeff6",
+ }, { // ecpairing_empty_data_insufficient_gas
+ input: "",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "empty_data",
+ }, { // ecpairing_one_point_insufficient_gas
+ input: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
+ expected: "0000000000000000000000000000000000000000000000000000000000000000",
+ name: "one_point",
+ }, { // ecpairing_two_point_match_2
+ input: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "two_point_match_2",
+ }, { // ecpairing_two_point_match_3
+ input: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "two_point_match_3",
+ }, { // ecpairing_two_point_match_4
+ input: "105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "two_point_match_4",
+ }, {
+ input: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000
000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "ten_point_match_1",
+ }, {
+ input: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fc
ed2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "ten_point_match_2",
+ }, { // ecpairing_two_point_match_4
+ input: "105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75",
+ expected: "0000000000000000000000000000000000000000000000000000000000000001",
+ name: "ten_point_match_3",
+ },
+}
+
+func testPrecompiled(addr string, test precompiledTest, t *testing.T) {
+ p := PrecompiledContractsMetropolis[common.HexToAddress(addr)]
+ in := common.Hex2Bytes(test.input)
+ contract := NewContract(AccountRef(common.HexToAddress("1337")),
+ nil, new(big.Int), p.RequiredGas(in))
+ t.Run(fmt.Sprintf("%s-Gas=%d", test.name, contract.Gas), func(t *testing.T) {
+ if res, err := RunPrecompiledContract(p, in, contract); err != nil {
+ t.Error(err)
+ } else if common.Bytes2Hex(res) != test.expected {
+ t.Errorf("Expected %v, got %v", test.expected, common.Bytes2Hex(res))
+ }
+ })
+}
+
+func benchmarkPrecompiled(addr string, test precompiledTest, bench *testing.B) {
+ p := PrecompiledContractsMetropolis[common.HexToAddress(addr)]
+ in := common.Hex2Bytes(test.input)
+ reqGas := p.RequiredGas(in)
+ contract := NewContract(AccountRef(common.HexToAddress("1337")),
+ nil, new(big.Int), reqGas)
+
+ var (
+ res []byte
+ err error
+ data = make([]byte, len(in))
+ )
+
+ bench.Run(fmt.Sprintf("%s-Gas=%d", test.name, contract.Gas), func(bench *testing.B) {
+ bench.ResetTimer()
+ for i := 0; i < bench.N; i++ {
+ contract.Gas = reqGas
+ copy(data, in)
+ res, err = RunPrecompiledContract(p, data, contract)
+ }
+ bench.StopTimer()
+ // Check that the result is correct
+ if err != nil {
+ bench.Error(err)
+ return
+ }
+ if common.Bytes2Hex(res) != test.expected {
+ bench.Errorf("Expected %v, got %v", test.expected, common.Bytes2Hex(res))
+ return
+ }
+ })
+}
+
+// Benchmarks the sample inputs from the ECRECOVER precompile.
+func BenchmarkPrecompiledEcrecover(bench *testing.B) {
+ t := precompiledTest{
+ input: "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
+ expected: "000000000000000000000000ceaccac640adf55b2028469bd36ba501f28b699d",
+ name: "",
+ }
+ benchmarkPrecompiled("01", t, bench)
+}
+
+// Benchmarks the sample inputs from the SHA256 precompile.
+func BenchmarkPrecompiledSha256(bench *testing.B) {
+ t := precompiledTest{
+ input: "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
+ expected: "811c7003375852fabd0d362e40e68607a12bdabae61a7d068fe5fdd1dbbf2a5d",
+ name: "128",
+ }
+ benchmarkPrecompiled("02", t, bench)
+}
+
+// Benchmarks the sample inputs from the RIPEMD precompile.
+func BenchmarkPrecompiledRipeMD(bench *testing.B) {
+ t := precompiledTest{
+ input: "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
+ expected: "0000000000000000000000009215b8d9882ff46f0dfde6684d78e831467f65e6",
+ name: "128",
+ }
+ benchmarkPrecompiled("03", t, bench)
+}
+
+// Benchmarks the sample inputs from the identity precompile.
+func BenchmarkPrecompiledIdentity(bench *testing.B) {
+ t := precompiledTest{
+ input: "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
+ expected: "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
+ name: "128",
+ }
+ benchmarkPrecompiled("04", t, bench)
+}
+
+// Tests the sample inputs from the ModExp EIP 198.
+func TestPrecompiledModExp(t *testing.T) {
+ for _, test := range modexpTests {
+ testPrecompiled("05", test, t)
+ }
+}
+
+// Benchmarks the sample inputs from the ModExp EIP 198.
+func BenchmarkPrecompiledModExp(bench *testing.B) {
+ for _, test := range modexpTests {
+ benchmarkPrecompiled("05", test, bench)
+ }
+}
+
+// Tests the sample inputs from the elliptic curve addition EIP 213.
+func TestPrecompiledBn256Add(t *testing.T) {
+ for _, test := range bn256AddTests {
+ testPrecompiled("06", test, t)
+ }
+}
+
+// Benchmarks the sample inputs from the elliptic curve addition EIP 213.
+func BenchmarkPrecompiledBn256Add(bench *testing.B) {
+ for _, test := range bn256AddTests {
+ benchmarkPrecompiled("06", test, bench)
+ }
+}
+
+// Tests the sample inputs from the elliptic curve scalar multiplication EIP 213.
+func TestPrecompiledBn256ScalarMul(t *testing.T) {
+ for _, test := range bn256ScalarMulTests {
+ testPrecompiled("07", test, t)
+ }
+}
+
+// Benchmarks the sample inputs from the elliptic curve scalar multiplication EIP 213.
+func BenchmarkPrecompiledBn256ScalarMul(bench *testing.B) {
+ for _, test := range bn256ScalarMulTests {
+ benchmarkPrecompiled("07", test, bench)
+ }
+}
+
+// Tests the sample inputs from the elliptic curve pairing check EIP 197.
+func TestPrecompiledBn256Pairing(t *testing.T) {
+ for _, test := range bn256PairingTests {
+ testPrecompiled("08", test, t)
+ }
+}
+
+// Benchmarks the sample inputs from the elliptic curve pairing check EIP 197.
+func BenchmarkPrecompiledBn256Pairing(bench *testing.B) {
+ for _, test := range bn256PairingTests {
+ benchmarkPrecompiled("08", test, bench)
+ }
+}
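The table-driven harness above makes new precompile vectors cheap to add. A minimal sketch (hypothetical test name and vector; the identity precompile at address 0x04 simply echoes its input), reusing the precompiledTest struct and testPrecompiled helper introduced in this diff:

func TestPrecompiledIdentityExample(t *testing.T) {
	// Illustrative vector only: identity returns its input unchanged.
	test := precompiledTest{
		input:    "deadbeef",
		expected: "deadbeef",
		name:     "example_echo",
	}
	testPrecompiled("04", test, t)
}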
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 9296cc7ca..448acd469 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -36,12 +36,14 @@ type (
// run runs the given contract and takes care of running precompiles with a fallback to the byte code interpreter.
func run(evm *EVM, snapshot int, contract *Contract, input []byte) ([]byte, error) {
if contract.CodeAddr != nil {
- precompiledContracts := PrecompiledContracts
- if p := precompiledContracts[*contract.CodeAddr]; p != nil {
+ precompiles := PrecompiledContractsHomestead
+ if evm.ChainConfig().IsMetropolis(evm.BlockNumber) {
+ precompiles = PrecompiledContractsMetropolis
+ }
+ if p := precompiles[*contract.CodeAddr]; p != nil {
return RunPrecompiledContract(p, input, contract)
}
}
-
return evm.interpreter.Run(snapshot, contract, input)
}
@@ -100,8 +102,8 @@ type EVM struct {
abort int32
}
-// NewEVM retutrns a new EVM evmironment. The returned EVM is not thread safe
-// and should only ever be used *once*.
+// NewEVM returns a new EVM. The returned EVM is not thread safe and should
+// only ever be used *once*.
func NewEVM(ctx Context, statedb StateDB, chainConfig *params.ChainConfig, vmConfig Config) *EVM {
evm := &EVM{
Context: ctx,
@@ -121,19 +123,20 @@ func (evm *EVM) Cancel() {
atomic.StoreInt32(&evm.abort, 1)
}
-// Call executes the contract associated with the addr with the given input as parameters. It also handles any
-// necessary value transfer required and takes the necessary steps to create accounts and reverses the state in
-// case of an execution error or failed value transfer.
+// Call executes the contract associated with the addr with the given input as
+// parameters. It also handles any necessary value transfer required and takes
+// the necessary steps to create accounts and reverses the state in case of an
+// execution error or failed value transfer.
func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) {
if evm.vmConfig.NoRecursion && evm.depth > 0 {
return nil, gas, nil
}
- // Depth check execution. Fail if we're trying to execute above the
- // limit.
+ // Fail if we're trying to execute above the call depth limit
if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth
}
+ // Fail if we're trying to transfer more than the available balance
if !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
return nil, gas, ErrInsufficientBalance
}
@@ -143,10 +146,13 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
snapshot = evm.StateDB.Snapshot()
)
if !evm.StateDB.Exist(addr) {
- if PrecompiledContracts[addr] == nil && evm.ChainConfig().IsEIP158(evm.BlockNumber) && value.Sign() == 0 {
+ precompiles := PrecompiledContractsHomestead
+ if evm.ChainConfig().IsMetropolis(evm.BlockNumber) {
+ precompiles = PrecompiledContractsMetropolis
+ }
+ if precompiles[addr] == nil && evm.ChainConfig().IsEIP158(evm.BlockNumber) && value.Sign() == 0 {
return nil, gas, nil
}
-
evm.StateDB.CreateAccount(addr)
}
evm.Transfer(evm.StateDB, caller.Address(), to.Address(), value)
@@ -168,21 +174,23 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
return ret, contract.Gas, err
}
-// CallCode executes the contract associated with the addr with the given input as parameters. It also handles any
-// necessary value transfer required and takes the necessary steps to create accounts and reverses the state in
-// case of an execution error or failed value transfer.
+// CallCode executes the contract associated with the addr with the given input
+// as parameters. It also handles any necessary value transfer required and takes
+// the necessary steps to create accounts and reverses the state in case of an
+// execution error or failed value transfer.
//
-// CallCode differs from Call in the sense that it executes the given address' code with the caller as context.
+// CallCode differs from Call in the sense that it executes the given address'
+// code with the caller as context.
func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) {
if evm.vmConfig.NoRecursion && evm.depth > 0 {
return nil, gas, nil
}
- // Depth check execution. Fail if we're trying to execute above the
- // limit.
+ // Fail if we're trying to execute above the call depth limit
if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth
}
+ // Fail if we're trying to transfer more than the available balance
if !evm.CanTransfer(evm.StateDB, caller.Address(), value) {
return nil, gas, ErrInsufficientBalance
}
@@ -206,18 +214,16 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
return ret, contract.Gas, err
}
-// DelegateCall executes the contract associated with the addr with the given input as parameters.
-// It reverses the state in case of an execution error.
+// DelegateCall executes the contract associated with the addr with the given input
+// as parameters. It reverses the state in case of an execution error.
//
-// DelegateCall differs from CallCode in the sense that it executes the given address' code with the caller as context
-// and the caller is set to the caller of the caller.
+// DelegateCall differs from CallCode in the sense that it executes the given address'
+// code with the caller as context and the caller is set to the caller of the caller.
func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
if evm.vmConfig.NoRecursion && evm.depth > 0 {
return nil, gas, nil
}
-
- // Depth check execution. Fail if we're trying to execute above the
- // limit.
+ // Fail if we're trying to execute above the call depth limit
if evm.depth > int(params.CallCreateDepth) {
return nil, gas, ErrDepth
}
@@ -227,7 +233,7 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
to = AccountRef(caller.Address())
)
- // Iinitialise a new contract and make initialise the delegate values
+ // Initialise a new contract and initialise the delegate values
contract := NewContract(caller, to, nil, gas).AsDelegate()
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
@@ -240,6 +246,47 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
return ret, contract.Gas, err
}
+// StaticCall executes the contract associated with the addr with the given input
+// as parameters while disallowing any modifications to the state during the call.
+// Opcodes that attempt to perform such modifications will result in exceptions
+// instead of performing the modifications.
+func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
+ if evm.vmConfig.NoRecursion && evm.depth > 0 {
+ return nil, gas, nil
+ }
+ // Fail if we're trying to execute above the call depth limit
+ if evm.depth > int(params.CallCreateDepth) {
+ return nil, gas, ErrDepth
+ }
+ // Make sure the readOnly flag is only set if we aren't in read-only mode yet.
+ // This also makes sure that the readOnly flag isn't removed for child
+ // calls.
+ if !evm.interpreter.readOnly {
+ evm.interpreter.readOnly = true
+ defer func() { evm.interpreter.readOnly = false }()
+ }
+
+ var (
+ to = AccountRef(addr)
+ snapshot = evm.StateDB.Snapshot()
+ )
+ // Initialise a new contract and set the code that is to be used by the
+ // EVM. The contract is a scoped environment for this execution context
+ // only.
+ contract := NewContract(caller, to, new(big.Int), gas)
+ contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
+
+ // When an error was returned by the EVM or when setting the creation code
+ // above we revert to the snapshot and consume any gas remaining. Additionally
+ // when we're in Homestead this also counts for code storage gas errors.
+ ret, err = run(evm, snapshot, contract, input)
+ if err != nil {
+ contract.UseGas(contract.Gas)
+ evm.StateDB.RevertToSnapshot(snapshot)
+ }
+ return ret, contract.Gas, err
+}
+
// Create creates a new contract using code as deployment code.
func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
if evm.vmConfig.NoRecursion && evm.depth > 0 {
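The fork-dependent precompile lookup now appears in both run() and Call(). A minimal sketch of that selection, factored into a hypothetical helper (not part of this patch), using only names defined in this diff:

func activePrecompiles(evm *EVM) map[common.Address]PrecompiledContract {
	// Metropolis adds the modexp, bn256 and pairing precompiles on top of
	// the original Homestead set.
	if evm.ChainConfig().IsMetropolis(evm.BlockNumber) {
		return PrecompiledContractsMetropolis
	}
	return PrecompiledContractsHomestead
}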
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go
index 761ca4450..a6346bd80 100644
--- a/core/vm/gas_table.go
+++ b/core/vm/gas_table.go
@@ -65,7 +65,33 @@ func constGasFunc(gas uint64) gasFunc {
}
}
-func gasCalldataCopy(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+func gasCallDataCopy(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ gas, err := memoryGasCost(mem, memorySize)
+ if err != nil {
+ return 0, err
+ }
+
+ var overflow bool
+ if gas, overflow = math.SafeAdd(gas, GasFastestStep); overflow {
+ return 0, errGasUintOverflow
+ }
+
+ words, overflow := bigUint64(stack.Back(2))
+ if overflow {
+ return 0, errGasUintOverflow
+ }
+
+ if words, overflow = math.SafeMul(toWordSize(words), params.CopyGas); overflow {
+ return 0, errGasUintOverflow
+ }
+
+ if gas, overflow = math.SafeAdd(gas, words); overflow {
+ return 0, errGasUintOverflow
+ }
+ return gas, nil
+}
+
+func gasReturnDataCopy(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
gas, err := memoryGasCost(mem, memorySize)
if err != nil {
return 0, err
@@ -423,6 +449,33 @@ func gasDelegateCall(gt params.GasTable, evm *EVM, contract *Contract, stack *St
return gas, nil
}
+func gasStaticCall(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ gas, err := memoryGasCost(mem, memorySize)
+ if err != nil {
+ return 0, err
+ }
+ var overflow bool
+ if gas, overflow = math.SafeAdd(gas, gt.Calls); overflow {
+ return 0, errGasUintOverflow
+ }
+
+ cg, err := callGas(gt, contract.Gas, gas, stack.Back(0))
+ if err != nil {
+ return 0, err
+ }
+ // Replace the stack item with the new gas calculation. This means that
+ // either the original item is left on the stack or the item is replaced by:
+ // (availableGas - gas) * 63 / 64
+ // We replace the stack item so that it's available when the opStaticCall
+ // instruction is called.
+ stack.data[stack.len()-1] = new(big.Int).SetUint64(cg)
+
+ if gas, overflow = math.SafeAdd(gas, cg); overflow {
+ return 0, errGasUintOverflow
+ }
+ return gas, nil
+}
+
func gasPush(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
return GasFastestStep, nil
}
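The callGas invocation in gasStaticCall (and its siblings) applies the EIP-150 "all but one 64th" rule when the gas table asks for it. A rough standalone sketch of just that arithmetic, ignoring the pre-EIP-150 branch, the requested-gas comparison and the overflow handling that the real callGas performs:

func allButOne64th(availableGas, baseCost uint64) uint64 {
	// Subtract the fixed cost of the call itself, then forward everything
	// except 1/64th of what remains to the callee.
	availableGas -= baseCost
	return availableGas - availableGas/64
}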
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index f5164fcdd..4d6197912 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -17,6 +17,7 @@
package vm
import (
+ "errors"
"fmt"
"math/big"
@@ -28,7 +29,9 @@ import (
)
var (
- bigZero = new(big.Int)
+ bigZero = new(big.Int)
+ errWriteProtection = errors.New("evm: write protection")
+ errReturnDataOutOfBounds = errors.New("evm: return data out of bounds")
)
func opAdd(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
@@ -240,6 +243,7 @@ func opAnd(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stac
evm.interpreter.intPool.put(y)
return nil, nil
}
+
func opOr(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
x, y := stack.pop(), stack.pop()
stack.push(x.Or(x, y))
@@ -247,6 +251,7 @@ func opOr(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack
evm.interpreter.intPool.put(y)
return nil, nil
}
+
func opXor(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
x, y := stack.pop(), stack.pop()
stack.push(x.Xor(x, y))
@@ -266,6 +271,7 @@ func opByte(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Sta
evm.interpreter.intPool.put(th)
return nil, nil
}
+
func opAddmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
x, y, z := stack.pop(), stack.pop(), stack.pop()
if z.Cmp(bigZero) > 0 {
@@ -279,6 +285,7 @@ func opAddmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S
evm.interpreter.intPool.put(y, z)
return nil, nil
}
+
func opMulmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
x, y, z := stack.pop(), stack.pop(), stack.pop()
if z.Cmp(bigZero) > 0 {
@@ -336,25 +343,47 @@ func opCallValue(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack
return nil, nil
}
-func opCalldataLoad(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
- stack.push(new(big.Int).SetBytes(getData(contract.Input, stack.pop(), common.Big32)))
+func opCallDataLoad(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+ stack.push(new(big.Int).SetBytes(getDataBig(contract.Input, stack.pop(), big32)))
return nil, nil
}
-func opCalldataSize(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opCallDataSize(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
stack.push(evm.interpreter.intPool.get().SetInt64(int64(len(contract.Input))))
return nil, nil
}
-func opCalldataCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+func opCallDataCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+ var (
+ memOffset = stack.pop()
+ dataOffset = stack.pop()
+ length = stack.pop()
+ )
+ memory.Set(memOffset.Uint64(), length.Uint64(), getDataBig(contract.Input, dataOffset, length))
+
+ evm.interpreter.intPool.put(memOffset, dataOffset, length)
+ return nil, nil
+}
+
+func opReturnDataSize(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+ stack.push(evm.interpreter.intPool.get().SetUint64(uint64(len(evm.interpreter.returnData))))
+ return nil, nil
+}
+
+func opReturnDataCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
var (
- mOff = stack.pop()
- cOff = stack.pop()
- l = stack.pop()
+ memOffset = stack.pop()
+ dataOffset = stack.pop()
+ length = stack.pop()
)
- memory.Set(mOff.Uint64(), l.Uint64(), getData(contract.Input, cOff, l))
+ defer evm.interpreter.intPool.put(memOffset, dataOffset, length)
+
+ end := new(big.Int).Add(dataOffset, length)
+ if end.BitLen() > 64 || uint64(len(evm.interpreter.returnData)) < end.Uint64() {
+ return nil, errReturnDataOutOfBounds
+ }
+ memory.Set(memOffset.Uint64(), length.Uint64(), evm.interpreter.returnData[dataOffset.Uint64():end.Uint64()])
- evm.interpreter.intPool.put(mOff, cOff, l)
return nil, nil
}
@@ -376,31 +405,28 @@ func opCodeSize(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack
func opCodeCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
var (
- mOff = stack.pop()
- cOff = stack.pop()
- l = stack.pop()
+ memOffset = stack.pop()
+ codeOffset = stack.pop()
+ length = stack.pop()
)
- codeCopy := getData(contract.Code, cOff, l)
-
- memory.Set(mOff.Uint64(), l.Uint64(), codeCopy)
+ codeCopy := getDataBig(contract.Code, codeOffset, length)
+ memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
- evm.interpreter.intPool.put(mOff, cOff, l)
+ evm.interpreter.intPool.put(memOffset, codeOffset, length)
return nil, nil
}
func opExtCodeCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
var (
- addr = common.BigToAddress(stack.pop())
- mOff = stack.pop()
- cOff = stack.pop()
- l = stack.pop()
+ addr = common.BigToAddress(stack.pop())
+ memOffset = stack.pop()
+ codeOffset = stack.pop()
+ length = stack.pop()
)
- codeCopy := getData(evm.StateDB.GetCode(addr), cOff, l)
-
- memory.Set(mOff.Uint64(), l.Uint64(), codeCopy)
-
- evm.interpreter.intPool.put(mOff, cOff, l)
+ codeCopy := getDataBig(evm.StateDB.GetCode(addr), codeOffset, length)
+ memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
+ evm.interpreter.intPool.put(memOffset, codeOffset, length)
return nil, nil
}
@@ -505,6 +531,7 @@ func opJump(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Sta
evm.interpreter.intPool.put(pos)
return nil, nil
}
+
func opJumpi(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
pos, cond := stack.pop(), stack.pop()
if cond.Sign() != 0 {
@@ -520,6 +547,7 @@ func opJumpi(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *St
evm.interpreter.intPool.put(pos, cond)
return nil, nil
}
+
func opJumpdest(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
return nil, nil
}
@@ -656,6 +684,35 @@ func opDelegateCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, st
return ret, nil
}
+func opStaticCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+ // pop gas
+ gas := stack.pop().Uint64()
+ // pop address
+ addr := stack.pop()
+ // pop input size and offset
+ inOffset, inSize := stack.pop(), stack.pop()
+ // pop return size and offset
+ retOffset, retSize := stack.pop(), stack.pop()
+
+ address := common.BigToAddress(addr)
+
+ // Get the arguments from the memory
+ args := memory.Get(inOffset.Int64(), inSize.Int64())
+
+ ret, returnGas, err := evm.StaticCall(contract, address, args, gas)
+ if err != nil {
+ stack.push(new(big.Int))
+ } else {
+ stack.push(big.NewInt(1))
+
+ memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
+ }
+ contract.Gas += returnGas
+
+ evm.interpreter.intPool.put(addr, inOffset, inSize, retOffset, retSize)
+ return ret, nil
+}
+
func opReturn(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
offset, size := stack.pop(), stack.pop()
ret := memory.GetPtr(offset.Int64(), size.Int64())
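The bounds check in opReturnDataCopy is what distinguishes RETURNDATACOPY from CALLDATACOPY: reads past the return buffer fail instead of being zero-padded. A standalone sketch of that check (hypothetical helper, reusing errReturnDataOutOfBounds from this diff):

func checkReturnDataBounds(returnData []byte, dataOffset, length *big.Int) error {
	// The end offset must fit in 64 bits and lie within the buffer.
	end := new(big.Int).Add(dataOffset, length)
	if end.BitLen() > 64 || uint64(len(returnData)) < end.Uint64() {
		return errReturnDataOutOfBounds
	}
	return nil
}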
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index 03c42c561..18644989c 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -1,7 +1,6 @@
package vm
import (
- "fmt"
"math/big"
"testing"
@@ -64,197 +63,169 @@ func opBenchmark(bench *testing.B, op func(pc *uint64, evm *EVM, contract *Contr
}
}
-func precompiledBenchmark(addr, input, expected string, gas uint64, bench *testing.B) {
+func BenchmarkOpAdd64(b *testing.B) {
+ x := "ffffffff"
+ y := "fd37f3e2bba2c4f"
- contract := NewContract(AccountRef(common.HexToAddress("1337")),
- nil, new(big.Int), gas)
-
- p := PrecompiledContracts[common.HexToAddress(addr)]
- in := common.Hex2Bytes(input)
- var (
- res []byte
- err error
- )
- data := make([]byte, len(in))
- bench.ResetTimer()
- for i := 0; i < bench.N; i++ {
- contract.Gas = gas
- copy(data, in)
- res, err = RunPrecompiledContract(p, data, contract)
- }
- bench.StopTimer()
- //Check if it is correct
- if err != nil {
- bench.Error(err)
- return
- }
- if common.Bytes2Hex(res) != expected {
- bench.Error(fmt.Sprintf("Expected %v, got %v", expected, common.Bytes2Hex(res)))
- return
- }
+ opBenchmark(b, opAdd, x, y)
}
-func BenchmarkPrecompiledEcdsa(bench *testing.B) {
- var (
- addr = "01"
- inp = "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02"
- exp = "000000000000000000000000ceaccac640adf55b2028469bd36ba501f28b699d"
- gas = uint64(4000000)
- )
- precompiledBenchmark(addr, inp, exp, gas, bench)
-}
-func BenchmarkPrecompiledSha256(bench *testing.B) {
- var (
- addr = "02"
- inp = "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02"
- exp = "811c7003375852fabd0d362e40e68607a12bdabae61a7d068fe5fdd1dbbf2a5d"
- gas = uint64(4000000)
- )
- precompiledBenchmark(addr, inp, exp, gas, bench)
-}
-func BenchmarkPrecompiledRipeMD(bench *testing.B) {
- var (
- addr = "03"
- inp = "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02"
- exp = "0000000000000000000000009215b8d9882ff46f0dfde6684d78e831467f65e6"
- gas = uint64(4000000)
- )
- precompiledBenchmark(addr, inp, exp, gas, bench)
-}
-func BenchmarkPrecompiledIdentity(bench *testing.B) {
- var (
- addr = "04"
- inp = "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02"
- exp = "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02"
- gas = uint64(4000000)
- )
- precompiledBenchmark(addr, inp, exp, gas, bench)
+func BenchmarkOpAdd128(b *testing.B) {
+ x := "ffffffffffffffff"
+ y := "f5470b43c6549b016288e9a65629687"
+
+ opBenchmark(b, opAdd, x, y)
}
-func BenchmarkOpAdd(b *testing.B) {
- x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
- y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+
+func BenchmarkOpAdd256(b *testing.B) {
+ x := "0802431afcbce1fc194c9eaa417b2fb67dc75a95db0bc7ec6b1c8af11df6a1da9"
+ y := "a1f5aac137876480252e5dcac62c354ec0d42b76b0642b6181ed099849ea1d57"
opBenchmark(b, opAdd, x, y)
+}
+
+func BenchmarkOpSub64(b *testing.B) {
+ x := "51022b6317003a9d"
+ y := "a20456c62e00753a"
+ opBenchmark(b, opSub, x, y)
}
-func BenchmarkOpSub(b *testing.B) {
- x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
- y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+
+func BenchmarkOpSub128(b *testing.B) {
+ x := "4dde30faaacdc14d00327aac314e915d"
+ y := "9bbc61f5559b829a0064f558629d22ba"
opBenchmark(b, opSub, x, y)
+}
+func BenchmarkOpSub256(b *testing.B) {
+ x := "4bfcd8bb2ac462735b48a17580690283980aa2d679f091c64364594df113ea37"
+ y := "97f9b1765588c4e6b69142eb00d20507301545acf3e1238c86c8b29be227d46e"
+
+ opBenchmark(b, opSub, x, y)
}
+
func BenchmarkOpMul(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opMul, x, y)
+}
+func BenchmarkOpDiv256(b *testing.B) {
+ x := "ff3f9014f20db29ae04af2c2d265de17"
+ y := "fe7fb0d1f59dfe9492ffbf73683fd1e870eec79504c60144cc7f5fc2bad1e611"
+ opBenchmark(b, opDiv, x, y)
}
-func BenchmarkOpDiv(b *testing.B) {
- x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
- y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+func BenchmarkOpDiv128(b *testing.B) {
+ x := "fdedc7f10142ff97"
+ y := "fbdfda0e2ce356173d1993d5f70a2b11"
opBenchmark(b, opDiv, x, y)
+}
+func BenchmarkOpDiv64(b *testing.B) {
+ x := "fcb34eb3"
+ y := "f97180878e839129"
+ opBenchmark(b, opDiv, x, y)
}
+
func BenchmarkOpSdiv(b *testing.B) {
- x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
- y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
+ x := "ff3f9014f20db29ae04af2c2d265de17"
+ y := "fe7fb0d1f59dfe9492ffbf73683fd1e870eec79504c60144cc7f5fc2bad1e611"
opBenchmark(b, opSdiv, x, y)
-
}
+
func BenchmarkOpMod(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opMod, x, y)
-
}
+
func BenchmarkOpSmod(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opSmod, x, y)
-
}
+
func BenchmarkOpExp(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opExp, x, y)
-
}
+
func BenchmarkOpSignExtend(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opSignExtend, x, y)
-
}
+
func BenchmarkOpLt(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opLt, x, y)
-
}
+
func BenchmarkOpGt(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opGt, x, y)
-
}
+
func BenchmarkOpSlt(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opSlt, x, y)
-
}
+
func BenchmarkOpSgt(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opSgt, x, y)
-
}
+
func BenchmarkOpEq(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opEq, x, y)
-
}
+
func BenchmarkOpAnd(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opAnd, x, y)
-
}
+
func BenchmarkOpOr(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opOr, x, y)
-
}
+
func BenchmarkOpXor(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opXor, x, y)
-
}
+
func BenchmarkOpByte(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opByte, x, y)
-
}
func BenchmarkOpAddmod(b *testing.B) {
@@ -263,22 +234,12 @@ func BenchmarkOpAddmod(b *testing.B) {
z := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opAddmod, x, y, z)
-
}
+
func BenchmarkOpMulmod(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
z := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
opBenchmark(b, opMulmod, x, y, z)
-
}
-
-//func BenchmarkOpSha3(b *testing.B) {
-// x := "0"
-// y := "32"
-//
-// opBenchmark(b,opSha3, x, y)
-//
-//
-//}
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 545f7d650..954839f2e 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -19,12 +19,10 @@ package vm
import (
"fmt"
"sync/atomic"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
@@ -46,7 +44,7 @@ type Config struct {
// Enable recording of SHA3/keccak preimages
EnablePreimageRecording bool
// JumpTable contains the EVM instruction table. This
- // may me left uninitialised and will be set the default
+ // may be left uninitialised and will be set to the default
// table.
JumpTable [256]operation
}
@@ -61,7 +59,8 @@ type Interpreter struct {
gasTable params.GasTable
intPool *intPool
- readonly bool
+ readOnly bool // Whether to throw on stateful modifications
+ returnData []byte // Last CALL's return data for subsequent reuse
}
// NewInterpreter returns a new instance of the Interpreter.
@@ -71,6 +70,8 @@ func NewInterpreter(evm *EVM, cfg Config) *Interpreter {
// we'll set the default jump table.
if !cfg.JumpTable[STOP].valid {
switch {
+ case evm.ChainConfig().IsMetropolis(evm.BlockNumber):
+ cfg.JumpTable = metropolisInstructionSet
case evm.ChainConfig().IsHomestead(evm.BlockNumber):
cfg.JumpTable = homesteadInstructionSet
default:
@@ -87,6 +88,18 @@ func NewInterpreter(evm *EVM, cfg Config) *Interpreter {
}
func (in *Interpreter) enforceRestrictions(op OpCode, operation operation, stack *Stack) error {
+ if in.evm.chainRules.IsMetropolis {
+ if in.readOnly {
+ // If the interpreter is operating in readonly mode, make sure no
+ // state-modifying operation is performed. The 3rd stack item
+ // for a call operation is the value. Transferring value from one
+ // account to another means the state is modified and should also
+ // return with an error.
+ if operation.writes || (op == CALL && stack.Back(2).BitLen() > 0) {
+ return errWriteProtection
+ }
+ }
+ }
return nil
}
@@ -97,9 +110,14 @@ func (in *Interpreter) enforceRestrictions(op OpCode, operation operation, stack
// considered a revert-and-consume-all-gas operation. No error specific checks
// should be handled to reduce complexity and errors further down the in.
func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret []byte, err error) {
+ // Increment the call depth which is restricted to 1024
in.evm.depth++
defer func() { in.evm.depth-- }()
+ // Reset the previous call's return data. It's unimportant to preserve the old buffer
+ // as every returning call will return new data anyway.
+ in.returnData = nil
+
// Don't bother with the execution if there's no code.
if len(contract.Code) == 0 {
return nil, nil
@@ -122,19 +140,12 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
)
contract.Input = input
- // User defer pattern to check for an error and, based on the error being nil or not, use all gas and return.
defer func() {
if err != nil && in.cfg.Debug {
- // XXX For debugging
- //fmt.Printf("%04d: %8v cost = %-8d stack = %-8d ERR = %v\n", pc, op, cost, stack.len(), err)
in.cfg.Tracer.CaptureState(in.evm, pc, op, contract.Gas, cost, mem, stack, contract, in.evm.depth, err)
}
}()
- log.Debug("interpreter running contract", "hash", codehash[:])
- tstart := time.Now()
- defer log.Debug("interpreter finished running contract", "hash", codehash[:], "elapsed", time.Since(tstart))
-
// The Interpreter main run loop (contextual). This loop runs until either an
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
// the execution of one of the operations or until the done flag is set by the
@@ -190,8 +201,6 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
if in.cfg.Debug {
in.cfg.Tracer.CaptureState(in.evm, pc, op, contract.Gas, cost, mem, stack, contract, in.evm.depth, err)
}
- // XXX For debugging
- //fmt.Printf("%04d: %8v cost = %-8d stack = %-8d\n", pc, op, cost, stack.len())
// execute the operation
res, err := operation.execute(&pc, in.evm, contract, mem, stack)
@@ -209,10 +218,10 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
case !operation.jumps:
pc++
}
- // if the operation returned a value make sure that is also set
- // the last return data.
- if res != nil {
- mem.lastReturn = ret
+ // if the operation returns data (i.e. its returns flag is set), record
+ // the result as the last call's return data.
+ if operation.returns {
+ in.returnData = res
}
}
return nil, nil
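The write protection that enforceRestrictions applies under Metropolis boils down to a single predicate. A minimal sketch (hypothetical helper, using the operation fields and stack layout shown in this diff):

func violatesStaticCall(op OpCode, operation operation, stack *Stack) bool {
	// Any opcode flagged as writing state is rejected in read-only mode, as
	// is a CALL whose value argument (third stack item) is non-zero.
	return operation.writes || (op == CALL && stack.Back(2).BitLen() > 0)
}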
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 0034eacb7..2d238f7a1 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -53,13 +53,45 @@ type operation struct {
valid bool
// reverts determined whether the operation reverts state
reverts bool
+ // returns determines whether the operation sets the return data
+ returns bool
}
var (
- frontierInstructionSet = NewFrontierInstructionSet()
- homesteadInstructionSet = NewHomesteadInstructionSet()
+ frontierInstructionSet = NewFrontierInstructionSet()
+ homesteadInstructionSet = NewHomesteadInstructionSet()
+ metropolisInstructionSet = NewMetropolisInstructionSet()
)
+// NewMetropolisInstructionSet returns the frontier, homestead and
+// metropolis instructions.
+func NewMetropolisInstructionSet() [256]operation {
+ // Start with the homestead instruction set and extend it with the metropolis opcodes.
+ instructionSet := NewHomesteadInstructionSet()
+ instructionSet[STATICCALL] = operation{
+ execute: opStaticCall,
+ gasCost: gasStaticCall,
+ validateStack: makeStackFunc(6, 1),
+ memorySize: memoryStaticCall,
+ valid: true,
+ returns: true,
+ }
+ instructionSet[RETURNDATASIZE] = operation{
+ execute: opReturnDataSize,
+ gasCost: constGasFunc(GasQuickStep),
+ validateStack: makeStackFunc(0, 1),
+ valid: true,
+ }
+ instructionSet[RETURNDATACOPY] = operation{
+ execute: opReturnDataCopy,
+ gasCost: gasReturnDataCopy,
+ validateStack: makeStackFunc(3, 0),
+ memorySize: memoryReturnDataCopy,
+ valid: true,
+ }
+ return instructionSet
+}
+
// NewHomesteadInstructionSet returns the frontier and homestead
// instructions that can be executed during the homestead phase.
func NewHomesteadInstructionSet() [256]operation {
@@ -70,6 +102,7 @@ func NewHomesteadInstructionSet() [256]operation {
validateStack: makeStackFunc(6, 1),
memorySize: memoryDelegateCall,
valid: true,
+ returns: true,
}
return instructionSet
}
@@ -255,22 +288,22 @@ func NewFrontierInstructionSet() [256]operation {
valid: true,
},
CALLDATALOAD: {
- execute: opCalldataLoad,
+ execute: opCallDataLoad,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(1, 1),
valid: true,
},
CALLDATASIZE: {
- execute: opCalldataSize,
+ execute: opCallDataSize,
gasCost: constGasFunc(GasQuickStep),
validateStack: makeStackFunc(0, 1),
valid: true,
},
CALLDATACOPY: {
- execute: opCalldataCopy,
- gasCost: gasCalldataCopy,
+ execute: opCallDataCopy,
+ gasCost: gasCallDataCopy,
validateStack: makeStackFunc(3, 0),
- memorySize: memoryCalldataCopy,
+ memorySize: memoryCallDataCopy,
valid: true,
},
CODESIZE: {
@@ -810,6 +843,7 @@ func NewFrontierInstructionSet() [256]operation {
validateStack: makeStackFunc(2, 0),
memorySize: memoryLog,
valid: true,
+ writes: true,
},
LOG1: {
execute: makeLog(1),
@@ -817,6 +851,7 @@ func NewFrontierInstructionSet() [256]operation {
validateStack: makeStackFunc(3, 0),
memorySize: memoryLog,
valid: true,
+ writes: true,
},
LOG2: {
execute: makeLog(2),
@@ -824,6 +859,7 @@ func NewFrontierInstructionSet() [256]operation {
validateStack: makeStackFunc(4, 0),
memorySize: memoryLog,
valid: true,
+ writes: true,
},
LOG3: {
execute: makeLog(3),
@@ -831,6 +867,7 @@ func NewFrontierInstructionSet() [256]operation {
validateStack: makeStackFunc(5, 0),
memorySize: memoryLog,
valid: true,
+ writes: true,
},
LOG4: {
execute: makeLog(4),
@@ -838,6 +875,7 @@ func NewFrontierInstructionSet() [256]operation {
validateStack: makeStackFunc(6, 0),
memorySize: memoryLog,
valid: true,
+ writes: true,
},
CREATE: {
execute: opCreate,
@@ -846,6 +884,7 @@ func NewFrontierInstructionSet() [256]operation {
memorySize: memoryCreate,
valid: true,
writes: true,
+ returns: true,
},
CALL: {
execute: opCall,
@@ -853,6 +892,7 @@ func NewFrontierInstructionSet() [256]operation {
validateStack: makeStackFunc(7, 1),
memorySize: memoryCall,
valid: true,
+ returns: true,
},
CALLCODE: {
execute: opCallCode,
@@ -860,6 +900,7 @@ func NewFrontierInstructionSet() [256]operation {
validateStack: makeStackFunc(7, 1),
memorySize: memoryCall,
valid: true,
+ returns: true,
},
RETURN: {
execute: opReturn,
diff --git a/core/vm/logger.go b/core/vm/logger.go
index 17a9c9ec3..b73b13bd9 100644
--- a/core/vm/logger.go
+++ b/core/vm/logger.go
@@ -196,20 +196,27 @@ func (l *StructLogger) StructLogs() []StructLog {
// WriteTrace writes a formatted trace to the given writer
func WriteTrace(writer io.Writer, logs []StructLog) {
for _, log := range logs {
- fmt.Fprintf(writer, "%-10spc=%08d gas=%v cost=%v", log.Op, log.Pc, log.Gas, log.GasCost)
+ fmt.Fprintf(writer, "%-16spc=%08d gas=%v cost=%v", log.Op, log.Pc, log.Gas, log.GasCost)
if log.Err != nil {
fmt.Fprintf(writer, " ERROR: %v", log.Err)
}
- fmt.Fprintf(writer, "\n")
+ fmt.Fprintln(writer)
- for i := len(log.Stack) - 1; i >= 0; i-- {
- fmt.Fprintf(writer, "%08d %x\n", len(log.Stack)-i-1, math.PaddedBigBytes(log.Stack[i], 32))
+ if len(log.Stack) > 0 {
+ fmt.Fprintln(writer, "Stack:")
+ for i := len(log.Stack) - 1; i >= 0; i-- {
+ fmt.Fprintf(writer, "%08d %x\n", len(log.Stack)-i-1, math.PaddedBigBytes(log.Stack[i], 32))
+ }
}
-
- fmt.Fprint(writer, hex.Dump(log.Memory))
-
- for h, item := range log.Storage {
- fmt.Fprintf(writer, "%x: %x\n", h, item)
+ if len(log.Memory) > 0 {
+ fmt.Fprintln(writer, "Memory:")
+ fmt.Fprint(writer, hex.Dump(log.Memory))
+ }
+ if len(log.Storage) > 0 {
+ fmt.Fprintln(writer, "Storage:")
+ for h, item := range log.Storage {
+ fmt.Fprintf(writer, "%x: %x\n", h, item)
+ }
}
fmt.Fprintln(writer)
}
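With the grouped Stack/Memory/Storage sections above, a trace dump is easier to scan. A hedged usage sketch (assumes the pre-existing NewStructLogger constructor and the Debug/Tracer fields of vm.Config, plus caller-supplied ctx, statedb and chainConfig; error handling elided):

logger := NewStructLogger(nil)
evm := NewEVM(ctx, statedb, chainConfig, Config{Debug: true, Tracer: logger})
// ... run a call or create through evm ...
WriteTrace(os.Stdout, logger.StructLogs())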
diff --git a/core/vm/memory.go b/core/vm/memory.go
index 6dbee94ef..99a84d227 100644
--- a/core/vm/memory.go
+++ b/core/vm/memory.go
@@ -22,7 +22,6 @@ import "fmt"
type Memory struct {
store []byte
lastGasCost uint64
- lastReturn []byte
}
func NewMemory() *Memory {
diff --git a/core/vm/memory_table.go b/core/vm/memory_table.go
index 654137c70..f1b671adc 100644
--- a/core/vm/memory_table.go
+++ b/core/vm/memory_table.go
@@ -26,7 +26,11 @@ func memorySha3(stack *Stack) *big.Int {
return calcMemSize(stack.Back(0), stack.Back(1))
}
-func memoryCalldataCopy(stack *Stack) *big.Int {
+func memoryCallDataCopy(stack *Stack) *big.Int {
+ return calcMemSize(stack.Back(0), stack.Back(2))
+}
+
+func memoryReturnDataCopy(stack *Stack) *big.Int {
return calcMemSize(stack.Back(0), stack.Back(2))
}
@@ -74,6 +78,13 @@ func memoryDelegateCall(stack *Stack) *big.Int {
return math.BigMax(x, y)
}
+func memoryStaticCall(stack *Stack) *big.Int {
+ x := calcMemSize(stack.Back(4), stack.Back(5))
+ y := calcMemSize(stack.Back(2), stack.Back(3))
+
+ return math.BigMax(x, y)
+}
+
func memoryReturn(stack *Stack) *big.Int {
return calcMemSize(stack.Back(0), stack.Back(1))
}
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index d4ba7f156..be87cae18 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -82,10 +82,11 @@ const (
GASPRICE
EXTCODESIZE
EXTCODECOPY
+ RETURNDATASIZE
+ RETURNDATACOPY
)
const (
-
// 0x40 range - block operations
BLOCKHASH OpCode = 0x40 + iota
COINBASE
@@ -201,6 +202,7 @@ const (
CALLCODE
RETURN
DELEGATECALL
+ STATICCALL = 0xfa
SELFDESTRUCT = 0xff
)
@@ -238,27 +240,29 @@ var opCodeToString = map[OpCode]string{
SHA3: "SHA3",
// 0x30 range - closure state
- ADDRESS: "ADDRESS",
- BALANCE: "BALANCE",
- ORIGIN: "ORIGIN",
- CALLER: "CALLER",
- CALLVALUE: "CALLVALUE",
- CALLDATALOAD: "CALLDATALOAD",
- CALLDATASIZE: "CALLDATASIZE",
- CALLDATACOPY: "CALLDATACOPY",
- CODESIZE: "CODESIZE",
- CODECOPY: "CODECOPY",
- GASPRICE: "GASPRICE",
+ ADDRESS: "ADDRESS",
+ BALANCE: "BALANCE",
+ ORIGIN: "ORIGIN",
+ CALLER: "CALLER",
+ CALLVALUE: "CALLVALUE",
+ CALLDATALOAD: "CALLDATALOAD",
+ CALLDATASIZE: "CALLDATASIZE",
+ CALLDATACOPY: "CALLDATACOPY",
+ CODESIZE: "CODESIZE",
+ CODECOPY: "CODECOPY",
+ GASPRICE: "GASPRICE",
+ EXTCODESIZE: "EXTCODESIZE",
+ EXTCODECOPY: "EXTCODECOPY",
+ RETURNDATASIZE: "RETURNDATASIZE",
+ RETURNDATACOPY: "RETURNDATACOPY",
// 0x40 range - block operations
- BLOCKHASH: "BLOCKHASH",
- COINBASE: "COINBASE",
- TIMESTAMP: "TIMESTAMP",
- NUMBER: "NUMBER",
- DIFFICULTY: "DIFFICULTY",
- GASLIMIT: "GASLIMIT",
- EXTCODESIZE: "EXTCODESIZE",
- EXTCODECOPY: "EXTCODECOPY",
+ BLOCKHASH: "BLOCKHASH",
+ COINBASE: "COINBASE",
+ TIMESTAMP: "TIMESTAMP",
+ NUMBER: "NUMBER",
+ DIFFICULTY: "DIFFICULTY",
+ GASLIMIT: "GASLIMIT",
// 0x50 range - 'storage' and execution
POP: "POP",
@@ -355,6 +359,7 @@ var opCodeToString = map[OpCode]string{
RETURN: "RETURN",
CALLCODE: "CALLCODE",
DELEGATECALL: "DELEGATECALL",
+ STATICCALL: "STATICCALL",
SELFDESTRUCT: "SELFDESTRUCT",
PUSH: "PUSH",
@@ -372,136 +377,139 @@ func (o OpCode) String() string {
}
var stringToOp = map[string]OpCode{
- "STOP": STOP,
- "ADD": ADD,
- "MUL": MUL,
- "SUB": SUB,
- "DIV": DIV,
- "SDIV": SDIV,
- "MOD": MOD,
- "SMOD": SMOD,
- "EXP": EXP,
- "NOT": NOT,
- "LT": LT,
- "GT": GT,
- "SLT": SLT,
- "SGT": SGT,
- "EQ": EQ,
- "ISZERO": ISZERO,
- "SIGNEXTEND": SIGNEXTEND,
- "AND": AND,
- "OR": OR,
- "XOR": XOR,
- "BYTE": BYTE,
- "ADDMOD": ADDMOD,
- "MULMOD": MULMOD,
- "SHA3": SHA3,
- "ADDRESS": ADDRESS,
- "BALANCE": BALANCE,
- "ORIGIN": ORIGIN,
- "CALLER": CALLER,
- "CALLVALUE": CALLVALUE,
- "CALLDATALOAD": CALLDATALOAD,
- "CALLDATASIZE": CALLDATASIZE,
- "CALLDATACOPY": CALLDATACOPY,
- "DELEGATECALL": DELEGATECALL,
- "CODESIZE": CODESIZE,
- "CODECOPY": CODECOPY,
- "GASPRICE": GASPRICE,
- "BLOCKHASH": BLOCKHASH,
- "COINBASE": COINBASE,
- "TIMESTAMP": TIMESTAMP,
- "NUMBER": NUMBER,
- "DIFFICULTY": DIFFICULTY,
- "GASLIMIT": GASLIMIT,
- "EXTCODESIZE": EXTCODESIZE,
- "EXTCODECOPY": EXTCODECOPY,
- "POP": POP,
- "MLOAD": MLOAD,
- "MSTORE": MSTORE,
- "MSTORE8": MSTORE8,
- "SLOAD": SLOAD,
- "SSTORE": SSTORE,
- "JUMP": JUMP,
- "JUMPI": JUMPI,
- "PC": PC,
- "MSIZE": MSIZE,
- "GAS": GAS,
- "JUMPDEST": JUMPDEST,
- "PUSH1": PUSH1,
- "PUSH2": PUSH2,
- "PUSH3": PUSH3,
- "PUSH4": PUSH4,
- "PUSH5": PUSH5,
- "PUSH6": PUSH6,
- "PUSH7": PUSH7,
- "PUSH8": PUSH8,
- "PUSH9": PUSH9,
- "PUSH10": PUSH10,
- "PUSH11": PUSH11,
- "PUSH12": PUSH12,
- "PUSH13": PUSH13,
- "PUSH14": PUSH14,
- "PUSH15": PUSH15,
- "PUSH16": PUSH16,
- "PUSH17": PUSH17,
- "PUSH18": PUSH18,
- "PUSH19": PUSH19,
- "PUSH20": PUSH20,
- "PUSH21": PUSH21,
- "PUSH22": PUSH22,
- "PUSH23": PUSH23,
- "PUSH24": PUSH24,
- "PUSH25": PUSH25,
- "PUSH26": PUSH26,
- "PUSH27": PUSH27,
- "PUSH28": PUSH28,
- "PUSH29": PUSH29,
- "PUSH30": PUSH30,
- "PUSH31": PUSH31,
- "PUSH32": PUSH32,
- "DUP1": DUP1,
- "DUP2": DUP2,
- "DUP3": DUP3,
- "DUP4": DUP4,
- "DUP5": DUP5,
- "DUP6": DUP6,
- "DUP7": DUP7,
- "DUP8": DUP8,
- "DUP9": DUP9,
- "DUP10": DUP10,
- "DUP11": DUP11,
- "DUP12": DUP12,
- "DUP13": DUP13,
- "DUP14": DUP14,
- "DUP15": DUP15,
- "DUP16": DUP16,
- "SWAP1": SWAP1,
- "SWAP2": SWAP2,
- "SWAP3": SWAP3,
- "SWAP4": SWAP4,
- "SWAP5": SWAP5,
- "SWAP6": SWAP6,
- "SWAP7": SWAP7,
- "SWAP8": SWAP8,
- "SWAP9": SWAP9,
- "SWAP10": SWAP10,
- "SWAP11": SWAP11,
- "SWAP12": SWAP12,
- "SWAP13": SWAP13,
- "SWAP14": SWAP14,
- "SWAP15": SWAP15,
- "SWAP16": SWAP16,
- "LOG0": LOG0,
- "LOG1": LOG1,
- "LOG2": LOG2,
- "LOG3": LOG3,
- "LOG4": LOG4,
- "CREATE": CREATE,
- "CALL": CALL,
- "RETURN": RETURN,
- "CALLCODE": CALLCODE,
- "SELFDESTRUCT": SELFDESTRUCT,
+ "STOP": STOP,
+ "ADD": ADD,
+ "MUL": MUL,
+ "SUB": SUB,
+ "DIV": DIV,
+ "SDIV": SDIV,
+ "MOD": MOD,
+ "SMOD": SMOD,
+ "EXP": EXP,
+ "NOT": NOT,
+ "LT": LT,
+ "GT": GT,
+ "SLT": SLT,
+ "SGT": SGT,
+ "EQ": EQ,
+ "ISZERO": ISZERO,
+ "SIGNEXTEND": SIGNEXTEND,
+ "AND": AND,
+ "OR": OR,
+ "XOR": XOR,
+ "BYTE": BYTE,
+ "ADDMOD": ADDMOD,
+ "MULMOD": MULMOD,
+ "SHA3": SHA3,
+ "ADDRESS": ADDRESS,
+ "BALANCE": BALANCE,
+ "ORIGIN": ORIGIN,
+ "CALLER": CALLER,
+ "CALLVALUE": CALLVALUE,
+ "CALLDATALOAD": CALLDATALOAD,
+ "CALLDATASIZE": CALLDATASIZE,
+ "CALLDATACOPY": CALLDATACOPY,
+ "DELEGATECALL": DELEGATECALL,
+ "STATICCALL": STATICCALL,
+ "CODESIZE": CODESIZE,
+ "CODECOPY": CODECOPY,
+ "GASPRICE": GASPRICE,
+ "EXTCODESIZE": EXTCODESIZE,
+ "EXTCODECOPY": EXTCODECOPY,
+ "RETURNDATASIZE": RETURNDATASIZE,
+ "RETURNDATACOPY": RETURNDATACOPY,
+ "BLOCKHASH": BLOCKHASH,
+ "COINBASE": COINBASE,
+ "TIMESTAMP": TIMESTAMP,
+ "NUMBER": NUMBER,
+ "DIFFICULTY": DIFFICULTY,
+ "GASLIMIT": GASLIMIT,
+ "POP": POP,
+ "MLOAD": MLOAD,
+ "MSTORE": MSTORE,
+ "MSTORE8": MSTORE8,
+ "SLOAD": SLOAD,
+ "SSTORE": SSTORE,
+ "JUMP": JUMP,
+ "JUMPI": JUMPI,
+ "PC": PC,
+ "MSIZE": MSIZE,
+ "GAS": GAS,
+ "JUMPDEST": JUMPDEST,
+ "PUSH1": PUSH1,
+ "PUSH2": PUSH2,
+ "PUSH3": PUSH3,
+ "PUSH4": PUSH4,
+ "PUSH5": PUSH5,
+ "PUSH6": PUSH6,
+ "PUSH7": PUSH7,
+ "PUSH8": PUSH8,
+ "PUSH9": PUSH9,
+ "PUSH10": PUSH10,
+ "PUSH11": PUSH11,
+ "PUSH12": PUSH12,
+ "PUSH13": PUSH13,
+ "PUSH14": PUSH14,
+ "PUSH15": PUSH15,
+ "PUSH16": PUSH16,
+ "PUSH17": PUSH17,
+ "PUSH18": PUSH18,
+ "PUSH19": PUSH19,
+ "PUSH20": PUSH20,
+ "PUSH21": PUSH21,
+ "PUSH22": PUSH22,
+ "PUSH23": PUSH23,
+ "PUSH24": PUSH24,
+ "PUSH25": PUSH25,
+ "PUSH26": PUSH26,
+ "PUSH27": PUSH27,
+ "PUSH28": PUSH28,
+ "PUSH29": PUSH29,
+ "PUSH30": PUSH30,
+ "PUSH31": PUSH31,
+ "PUSH32": PUSH32,
+ "DUP1": DUP1,
+ "DUP2": DUP2,
+ "DUP3": DUP3,
+ "DUP4": DUP4,
+ "DUP5": DUP5,
+ "DUP6": DUP6,
+ "DUP7": DUP7,
+ "DUP8": DUP8,
+ "DUP9": DUP9,
+ "DUP10": DUP10,
+ "DUP11": DUP11,
+ "DUP12": DUP12,
+ "DUP13": DUP13,
+ "DUP14": DUP14,
+ "DUP15": DUP15,
+ "DUP16": DUP16,
+ "SWAP1": SWAP1,
+ "SWAP2": SWAP2,
+ "SWAP3": SWAP3,
+ "SWAP4": SWAP4,
+ "SWAP5": SWAP5,
+ "SWAP6": SWAP6,
+ "SWAP7": SWAP7,
+ "SWAP8": SWAP8,
+ "SWAP9": SWAP9,
+ "SWAP10": SWAP10,
+ "SWAP11": SWAP11,
+ "SWAP12": SWAP12,
+ "SWAP13": SWAP13,
+ "SWAP14": SWAP14,
+ "SWAP15": SWAP15,
+ "SWAP16": SWAP16,
+ "LOG0": LOG0,
+ "LOG1": LOG1,
+ "LOG2": LOG2,
+ "LOG3": LOG3,
+ "LOG4": LOG4,
+ "CREATE": CREATE,
+ "CALL": CALL,
+ "RETURN": RETURN,
+ "CALLCODE": CALLCODE,
+ "SELFDESTRUCT": SELFDESTRUCT,
}
func StringToOp(str string) OpCode {
diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go
index 9aa88e669..818da1be2 100644
--- a/core/vm/runtime/env.go
+++ b/core/vm/runtime/env.go
@@ -21,11 +21,10 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
)
-func NewEnv(cfg *Config, state *state.StateDB) *vm.EVM {
+func NewEnv(cfg *Config) *vm.EVM {
context := vm.Context{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
@@ -37,7 +36,7 @@ func NewEnv(cfg *Config, state *state.StateDB) *vm.EVM {
Time: cfg.Time,
Difficulty: cfg.Difficulty,
GasLimit: new(big.Int).SetUint64(cfg.GasLimit),
- GasPrice: new(big.Int),
+ GasPrice: cfg.GasPrice,
}
return vm.NewEVM(context, cfg.State, cfg.ChainConfig, cfg.EVMConfig)
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 44cde4f70..edbf54176 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -106,7 +106,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
}
var (
address = common.StringToAddress("contract")
- vmenv = NewEnv(cfg, cfg.State)
+ vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
)
cfg.State.CreateAccount(address)
@@ -136,7 +136,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(db))
}
var (
- vmenv = NewEnv(cfg, cfg.State)
+ vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
)
@@ -158,7 +158,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, error) {
setDefaults(cfg)
- vmenv := NewEnv(cfg, cfg.State)
+ vmenv := NewEnv(cfg)
sender := cfg.State.GetOrNewStateObject(cfg.Origin)
// Call the code with the given configuration.
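Callers of the runtime package are unaffected by this plumbing change, since the state already travels in cfg.State. A hedged usage sketch (illustrative bytecode; a nil config makes Execute fall back to its defaults):

code := common.Hex2Bytes("6060604052600a8060106000396000f360606040526008565b00")
ret, _, err := runtime.Execute(code, nil, nil)
if err != nil {
	panic(err)
}
fmt.Printf("return: %x\n", ret)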