From 38c30f8dd897987b12123083201f1124dae0ffdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 2 Apr 2019 11:47:01 +0300 Subject: light, params: update CHTs, integrate CHT for Goerli too --- light/postprocess.go | 1 + params/config.go | 33 +++++++++++++++++---------------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/light/postprocess.go b/light/postprocess.go index dd1b74a7b..24fe47bc7 100644 --- a/light/postprocess.go +++ b/light/postprocess.go @@ -109,6 +109,7 @@ var trustedCheckpoints = map[common.Hash]*params.TrustedCheckpoint{ params.MainnetGenesisHash: params.MainnetTrustedCheckpoint, params.TestnetGenesisHash: params.TestnetTrustedCheckpoint, params.RinkebyGenesisHash: params.RinkebyTrustedCheckpoint, + params.GoerliGenesisHash: params.GoerliTrustedCheckpoint, } var ( diff --git a/params/config.go b/params/config.go index 44b2ffeba..d07738a81 100644 --- a/params/config.go +++ b/params/config.go @@ -28,6 +28,7 @@ var ( MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") TestnetGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") + GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") ) var ( @@ -50,10 +51,10 @@ var ( // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. MainnetTrustedCheckpoint = &TrustedCheckpoint{ Name: "mainnet", - SectionIndex: 216, - SectionHead: common.HexToHash("0xae3e551c8d60d06fd411a8e6008e90625d3bb0cbbf664b65d5ed90b318553541"), - CHTRoot: common.HexToHash("0xeea7d2ab3545a37deecc66fc43c9556ae337c3ea1c6893e401428207bdb8e434"), - BloomRoot: common.HexToHash("0xb0d4176d160d67b99a9f963281e52bce0583a566b74b4497fe3ed24ae04004ff"), + SectionIndex: 227, + SectionHead: common.HexToHash("0xa2e0b25d72c2fc6e35a7f853cdacb193b4b4f95c606accf7f8fa8415283582c7"), + CHTRoot: common.HexToHash("0xf69bdd4053b95b61a27b106a0e86103d791edd8574950dc96aa351ab9b9f1aa0"), + BloomRoot: common.HexToHash("0xec1b454d4c6322c78ccedf76ac922a8698c3cac4d98748a84af4995b7bd3d744"), } // TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network. @@ -75,10 +76,10 @@ var ( // TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network. TestnetTrustedCheckpoint = &TrustedCheckpoint{ Name: "testnet", - SectionIndex: 148, - SectionHead: common.HexToHash("0x4d3181bedb6aa96a6f3efa866c71f7802400d0fb4a6906946c453630d850efc0"), - CHTRoot: common.HexToHash("0x25df2f9d63a5f84b2852988f0f0f7af5a7877da061c11b85c812780b5a27a5ec"), - BloomRoot: common.HexToHash("0x0584834e5222471a06c669d210e302ca602780eaaddd04634fd65471c2a91419"), + SectionIndex: 161, + SectionHead: common.HexToHash("0x5378afa734e1feafb34bcca1534c4d96952b754579b96a4afb23d5301ecececc"), + CHTRoot: common.HexToHash("0x1cf2b071e7443a62914362486b613ff30f60cea0d9c268ed8c545f876a3ee60c"), + BloomRoot: common.HexToHash("0x5ac25c84bd18a9cbe878d4609a80220f57f85037a112644532412ba0d498a31b"), } // RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network. @@ -103,10 +104,10 @@ var ( // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. 
RinkebyTrustedCheckpoint = &TrustedCheckpoint{ Name: "rinkeby", - SectionIndex: 113, - SectionHead: common.HexToHash("0xb812f3095af3af1cb2de7d7c2086ee807736a7315992c461b0986699185daf77"), - CHTRoot: common.HexToHash("0x5416d0924925eb835987ad3d1f059ecc66778c51959c8246a7a35b22ec5f3109"), - BloomRoot: common.HexToHash("0xcf74ca2c14e843b366561dab4fc64237bf6bb335119cbc97d723f3b501863470"), + SectionIndex: 125, + SectionHead: common.HexToHash("0x8a738386f6bb34add15846f8f49c4c519a2f32519096e792b9f43bcb407c831c"), + CHTRoot: common.HexToHash("0xa1e5720a9bad4dce794f129e4ac6744398197b652868011486a6f89c8ec84a75"), + BloomRoot: common.HexToHash("0xa3048fe8b7e30f77f11bc755a88478363d7d3e71c2bdfe4e8ab9e269cd804ba2"), } // GoerliChainConfig contains the chain parameters to run a node on the Görli test network. @@ -130,10 +131,10 @@ var ( // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. GoerliTrustedCheckpoint = &TrustedCheckpoint{ Name: "goerli", - SectionIndex: 0, - SectionHead: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), - CHTRoot: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), - BloomRoot: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + SectionIndex: 9, + SectionHead: common.HexToHash("0x8e223d827391eee53b07cb8ee057dbfa11c93e0b45352188c783affd7840a921"), + CHTRoot: common.HexToHash("0xe0a817ac69b36c1e437c5b0cff9e764853f5115702b5f66d451b665d6afb7e78"), + BloomRoot: common.HexToHash("0x50d672aeb655b723284969c7c1201fb6ca003c23ed144bcb9f2d1b30e2971c1b"), } // AllEthashProtocolChanges contains every protocol change (EIPs) introduced -- cgit v1.2.3 From fca5f9fd6fc8a45166ff21cbb41df9e2005dc314 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 27 Feb 2019 14:20:29 +0200 Subject: common/fdlimit: fix macos file descriptors for Go 1.12 --- common/fdlimit/fdlimit_darwin.go | 71 +++++++++++++++++++++++++++++++++++++++ common/fdlimit/fdlimit_unix.go | 2 +- common/fdlimit/fdlimit_windows.go | 1 + 3 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 common/fdlimit/fdlimit_darwin.go diff --git a/common/fdlimit/fdlimit_darwin.go b/common/fdlimit/fdlimit_darwin.go new file mode 100644 index 000000000..88dd0f56c --- /dev/null +++ b/common/fdlimit/fdlimit_darwin.go @@ -0,0 +1,71 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package fdlimit + +import "syscall" + +// hardlimit is the number of file descriptors allowed at max by the kernel. +const hardlimit = 10240 + +// Raise tries to maximize the file descriptor allowance of this process +// to the maximum hard-limit allowed by the OS. 
+// Returns the size it was set to (may differ from the desired 'max') +func Raise(max uint64) (uint64, error) { + // Get the current limit + var limit syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err + } + // Try to update the limit to the max allowance + limit.Cur = limit.Max + if limit.Cur > max { + limit.Cur = max + } + if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err + } + // MacOS can silently apply further caps, so retrieve the actually set limit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err + } + return limit.Cur, nil +} + +// Current retrieves the number of file descriptors allowed to be opened by this +// process. +func Current() (int, error) { + var limit syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err + } + return int(limit.Cur), nil +} + +// Maximum retrieves the maximum number of file descriptors this process is +// allowed to request for itself. +func Maximum() (int, error) { + // Retrieve the maximum allowed by dynamic OS limits + var limit syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { + return 0, err + } + // Cap it to OPEN_MAX (10240) because macos is a special snowflake + if limit.Max > hardlimit { + limit.Max = hardlimit + } + return int(limit.Max), nil +} diff --git a/common/fdlimit/fdlimit_unix.go b/common/fdlimit/fdlimit_unix.go index 670112751..e5a575f7a 100644 --- a/common/fdlimit/fdlimit_unix.go +++ b/common/fdlimit/fdlimit_unix.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// +build linux darwin netbsd openbsd solaris +// +build linux netbsd openbsd solaris package fdlimit diff --git a/common/fdlimit/fdlimit_windows.go b/common/fdlimit/fdlimit_windows.go index 63a44e0de..f47215366 100644 --- a/common/fdlimit/fdlimit_windows.go +++ b/common/fdlimit/fdlimit_windows.go @@ -18,6 +18,7 @@ package fdlimit import "fmt" +// hardlimit is the number of file descriptors allowed at max by the kernel. const hardlimit = 16384 // Raise tries to maximize the file descriptor allowance of this process -- cgit v1.2.3 From 80a2a35bc3aaf208b5f91a1fb1d803975d4bb01c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 5 Apr 2019 13:09:28 +0300 Subject: trie: there's no point in retrieving the metaroot --- trie/database.go | 5 +++++ trie/database_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 trie/database_test.go diff --git a/trie/database.go b/trie/database.go index 739a98add..c39af85cf 100644 --- a/trie/database.go +++ b/trie/database.go @@ -17,6 +17,7 @@ package trie import ( + "errors" "fmt" "io" "sync" @@ -391,6 +392,10 @@ func (db *Database) node(hash common.Hash, cachegen uint16) node { // Node retrieves an encoded cached trie node from memory. If it cannot be found // cached, the method queries the persistent database for the content. 
func (db *Database) Node(hash common.Hash) ([]byte, error) { + // It doens't make sense to retrieve the metaroot + if hash == (common.Hash{}) { + return nil, errors.New("not found") + } // Retrieve the node from the clean cache if available if db.cleans != nil { if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { diff --git a/trie/database_test.go b/trie/database_test.go new file mode 100644 index 000000000..65b65678b --- /dev/null +++ b/trie/database_test.go @@ -0,0 +1,33 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" +) + +// Tests that the trie database returns a missing trie node error if attempting +// to retrieve the meta root. +func TestDatabaseMetarootFetch(t *testing.T) { + db := NewDatabase(ethdb.NewMemDatabase()) + if _, err := db.Node(common.Hash{}); err == nil { + t.Fatalf("metaroot retrieval succeeded") + } +} -- cgit v1.2.3 From af401d03a395c21fdb297edb687edf8af3470cb2 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Tue, 2 Apr 2019 22:28:48 +0200 Subject: all: simplify timestamps to uint64 (#19372) * all: simplify timestamps to uint64 * tests: update definitions * clef, faucet, mobile: leftover uint64 fixups * ethash: fix tests * graphql: update schema for timestamp * ethash: remove unused variable --- cmd/clef/main.go | 20 ++++++++++---------- cmd/faucet/faucet.go | 2 +- consensus/clique/clique.go | 12 ++++++------ consensus/ethash/algorithm_test.go | 2 +- consensus/ethash/consensus.go | 21 ++++++++------------- consensus/ethash/consensus_test.go | 2 +- core/blockchain.go | 12 ++++++------ core/blockchain_insert.go | 2 +- core/chain_makers.go | 18 +++++++++--------- core/evm.go | 2 +- core/genesis.go | 2 +- core/headerchain.go | 2 +- core/types/block.go | 11 ++++------- core/types/block_test.go | 2 +- core/types/gen_header_json.go | 8 ++++---- ethstats/ethstats.go | 2 +- internal/ethapi/api.go | 2 +- light/lightchain.go | 4 ++-- miner/worker.go | 6 +++--- mobile/types.go | 4 ++-- tests/block_test_util.go | 8 ++++---- tests/difficulty_test_util.go | 10 +++++----- tests/gen_btheader.go | 10 ++++++---- tests/gen_difficultytest.go | 18 ++++++++++-------- 24 files changed, 89 insertions(+), 93 deletions(-) diff --git a/cmd/clef/main.go b/cmd/clef/main.go index 519d63b3c..effc59fee 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -56,13 +56,13 @@ const ExternalAPIVersion = "4.0.0" const InternalAPIVersion = "3.0.0" const legalWarning = ` -WARNING! +WARNING! Clef is alpha software, and not yet publically released. This software has _not_ been audited, and there are no guarantees about the workings of this software. It may contain severe flaws. 
You should not use this software -unless you agree to take full responsibility for doing so, and know what you are doing. +unless you agree to take full responsibility for doing so, and know what you are doing. -TLDR; THIS IS NOT PRODUCTION-READY SOFTWARE! +TLDR; THIS IS NOT PRODUCTION-READY SOFTWARE! ` @@ -136,7 +136,7 @@ var ( configdirFlag, }, Description: ` -The init command generates a master seed which Clef can use to store credentials and data needed for +The init command generates a master seed which Clef can use to store credentials and data needed for the rule-engine to work.`, } attestCommand = cli.Command{ @@ -150,10 +150,10 @@ the rule-engine to work.`, signerSecretFlag, }, Description: ` -The attest command stores the sha256 of the rule.js-file that you want to use for automatic processing of -incoming requests. +The attest command stores the sha256 of the rule.js-file that you want to use for automatic processing of +incoming requests. -Whenever you make an edit to the rule file, you need to use attestation to tell +Whenever you make an edit to the rule file, you need to use attestation to tell Clef that the file is 'safe' to execute.`, } @@ -168,7 +168,7 @@ Clef that the file is 'safe' to execute.`, signerSecretFlag, }, Description: ` - The setpw command stores a password for a given address (keyfile). If you enter a blank passphrase, it will + The setpw command stores a password for a given address (keyfile). If you enter a blank passphrase, it will remove any stored credential for that address (keyfile) `, } @@ -258,12 +258,12 @@ func initializeSecrets(c *cli.Context) error { } fmt.Printf("A master seed has been generated into %s\n", location) fmt.Printf(` -This is required to be able to store credentials, such as : +This is required to be able to store credentials, such as : * Passwords for keystores (used by rule engine) * Storage for javascript rules * Hash of rule-file -You should treat that file with utmost secrecy, and make a backup of it. +You should treat that file with utmost secrecy, and make a backup of it. NOTE: This file does not contain your accounts. Those need to be backed up separately! 
`) diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index a7c20db77..debfe87cf 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -579,7 +579,7 @@ func (f *faucet) loop() { go func() { for head := range update { // New chain head arrived, query the current stats and stream to clients - timestamp := time.Unix(head.Time.Int64(), 0) + timestamp := time.Unix(int64(head.Time), 0) if time.Since(timestamp) > time.Hour { log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp)) continue diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index c79c30cae..a18782474 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -279,7 +279,7 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header, number := header.Number.Uint64() // Don't waste time checking blocks from the future - if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 { + if header.Time > uint64(time.Now().Unix()) { return consensus.ErrFutureBlock } // Checkpoint blocks need to enforce zero beneficiary @@ -351,7 +351,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type if parent == nil || parent.Number.Uint64() != number-1 || parent.Hash() != header.ParentHash { return consensus.ErrUnknownAncestor } - if parent.Time.Uint64()+c.config.Period > header.Time.Uint64() { + if parent.Time+c.config.Period > header.Time { return ErrInvalidTimestamp } // Retrieve the snapshot needed to verify this header and cache it @@ -570,9 +570,9 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro if parent == nil { return consensus.ErrUnknownAncestor } - header.Time = new(big.Int).Add(parent.Time, new(big.Int).SetUint64(c.config.Period)) - if header.Time.Int64() < time.Now().Unix() { - header.Time = big.NewInt(time.Now().Unix()) + header.Time = parent.Time + c.config.Period + if header.Time < uint64(time.Now().Unix()) { + header.Time = uint64(time.Now().Unix()) } return nil } @@ -637,7 +637,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c } } // Sweet, the protocol permits us to sign the block, wait for our time - delay := time.Unix(header.Time.Int64(), 0).Sub(time.Now()) // nolint: gosimple + delay := time.Unix(int64(header.Time), 0).Sub(time.Now()) // nolint: gosimple if header.Difficulty.Cmp(diffNoTurn) == 0 { // It's not our turn explicitly to sign, delay it a bit wiggle := time.Duration(len(snap.Signers)/2+1) * wiggleTime diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go index c58479e28..cf8552f3a 100644 --- a/consensus/ethash/algorithm_test.go +++ b/consensus/ethash/algorithm_test.go @@ -716,7 +716,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) { Difficulty: big.NewInt(167925187834220), GasLimit: 4015682, GasUsed: 0, - Time: big.NewInt(1488928920), + Time: 1488928920, Extra: []byte("www.bw.com"), MixDigest: common.HexToHash("0x3e140b0784516af5e5ec6730f2fb20cca22f32be399b9e4ad77d32541f798cd0"), Nonce: types.EncodeNonce(0xf400cd0006070c49), diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 62e3f8fca..fb9a396ae 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -63,7 +63,6 @@ var ( // codebase, inherently breaking if the engine is swapped out. Please put common // error types into the consensus package. 
var ( - errLargeBlockTime = errors.New("timestamp too big") errZeroBlockTime = errors.New("timestamp equals parent's") errTooManyUncles = errors.New("too many uncles") errDuplicateUncle = errors.New("duplicate uncle") @@ -242,20 +241,16 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent * return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize) } // Verify the header's timestamp - if uncle { - if header.Time.Cmp(math.MaxBig256) > 0 { - return errLargeBlockTime - } - } else { - if header.Time.Cmp(big.NewInt(time.Now().Add(allowedFutureBlockTime).Unix())) > 0 { + if !uncle { + if header.Time > uint64(time.Now().Add(allowedFutureBlockTime).Unix()) { return consensus.ErrFutureBlock } } - if header.Time.Cmp(parent.Time) <= 0 { + if header.Time <= parent.Time { return errZeroBlockTime } // Verify the block's difficulty based in it's timestamp and parent's difficulty - expected := ethash.CalcDifficulty(chain, header.Time.Uint64(), parent) + expected := ethash.CalcDifficulty(chain, header.Time, parent) if expected.Cmp(header.Difficulty) != 0 { return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected) @@ -349,7 +344,7 @@ func makeDifficultyCalculator(bombDelay *big.Int) func(time uint64, parent *type // ) + 2^(periodCount - 2) bigTime := new(big.Int).SetUint64(time) - bigParentTime := new(big.Int).Set(parent.Time) + bigParentTime := new(big.Int).SetUint64(parent.Time) // holds intermediate values to make the algo easier to read & audit x := new(big.Int) @@ -408,7 +403,7 @@ func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int { // ) + 2^(periodCount - 2) bigTime := new(big.Int).SetUint64(time) - bigParentTime := new(big.Int).Set(parent.Time) + bigParentTime := new(big.Int).SetUint64(parent.Time) // holds intermediate values to make the algo easier to read & audit x := new(big.Int) @@ -456,7 +451,7 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int { bigParentTime := new(big.Int) bigTime.SetUint64(time) - bigParentTime.Set(parent.Time) + bigParentTime.SetUint64(parent.Time) if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 { diff.Add(parent.Difficulty, adjust) @@ -558,7 +553,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) if parent == nil { return consensus.ErrUnknownAncestor } - header.Difficulty = ethash.CalcDifficulty(chain, header.Time.Uint64(), parent) + header.Difficulty = ethash.CalcDifficulty(chain, header.Time, parent) return nil } diff --git a/consensus/ethash/consensus_test.go b/consensus/ethash/consensus_test.go index 438a99dd6..675737d9e 100644 --- a/consensus/ethash/consensus_test.go +++ b/consensus/ethash/consensus_test.go @@ -76,7 +76,7 @@ func TestCalcDifficulty(t *testing.T) { number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1)) diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{ Number: number, - Time: new(big.Int).SetUint64(test.ParentTimestamp), + Time: test.ParentTimestamp, Difficulty: test.ParentDifficulty, }) if diff.Cmp(test.CurrentDifficulty) != 0 { diff --git a/core/blockchain.go b/core/blockchain.go index 0d2d71b4d..e40fc39fa 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -267,9 +267,9 @@ func (bc *BlockChain) loadLastState() error { blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) - log.Info("Loaded most recent local header", "number", 
currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0))) - log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0))) - log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0))) + log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0))) + log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0))) + log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0))) return nil } @@ -894,7 +894,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ context := []interface{}{ "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), - "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)), + "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)), "size", common.StorageSize(bytes), } if stats.ignored > 0 { @@ -1058,8 +1058,8 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. // accepted for future processing, and returns an error if the block is too far // ahead and was not added. func (bc *BlockChain) addFutureBlock(block *types.Block) error { - max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) - if block.Time().Cmp(max) > 0 { + max := uint64(time.Now().Unix() + maxTimeFutureBlocks) + if block.Time() > max { return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max) } bc.futureBlocks.Add(block.Hash(), block) diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go index cfa32c5aa..ff668925a 100644 --- a/core/blockchain_insert.go +++ b/core/blockchain_insert.go @@ -60,7 +60,7 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), "number", end.Number(), "hash", end.Hash(), } - if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute { + if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute { context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) } context = append(context, []interface{}{"cache", cache}...) diff --git a/core/chain_makers.go b/core/chain_makers.go index 0b5a3d184..d563d85ee 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -149,12 +149,12 @@ func (b *BlockGen) PrevBlock(index int) *types.Block { // associated difficulty. It's useful to test scenarios where forking is not // tied to chain length directly. 
func (b *BlockGen) OffsetTime(seconds int64) { - b.header.Time.Add(b.header.Time, big.NewInt(seconds)) - if b.header.Time.Cmp(b.parent.Header().Time) <= 0 { + b.header.Time += uint64(seconds) + if b.header.Time <= b.parent.Header().Time { panic("block time out of range") } chainreader := &fakeChainReader{config: b.config} - b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time.Uint64(), b.parent.Header()) + b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, b.parent.Header()) } // GenerateChain creates a chain of n blocks. The first block's @@ -225,20 +225,20 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse } func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { - var time *big.Int - if parent.Time() == nil { - time = big.NewInt(10) + var time uint64 + if parent.Time() == 0 { + time = 10 } else { - time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds + time = parent.Time() + 10 // block time is fixed at 10 seconds } return &types.Header{ Root: state.IntermediateRoot(chain.Config().IsEIP158(parent.Number())), ParentHash: parent.Hash(), Coinbase: parent.Coinbase(), - Difficulty: engine.CalcDifficulty(chain, time.Uint64(), &types.Header{ + Difficulty: engine.CalcDifficulty(chain, time, &types.Header{ Number: parent.Number(), - Time: new(big.Int).Sub(time, big.NewInt(10)), + Time: time - 10, Difficulty: parent.Difficulty(), UncleHash: parent.UncleHash(), }), diff --git a/core/evm.go b/core/evm.go index d303c40a4..b654bbd47 100644 --- a/core/evm.go +++ b/core/evm.go @@ -51,7 +51,7 @@ func NewEVMContext(msg Message, header *types.Header, chain ChainContext, author Origin: msg.From(), Coinbase: beneficiary, BlockNumber: new(big.Int).Set(header.Number), - Time: new(big.Int).Set(header.Time), + Time: new(big.Int).SetUint64(header.Time), Difficulty: new(big.Int).Set(header.Difficulty), GasLimit: header.GasLimit, GasPrice: new(big.Int).Set(msg.GasPrice()), diff --git a/core/genesis.go b/core/genesis.go index cbb6eecd2..0d16c0468 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -243,7 +243,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { head := &types.Header{ Number: new(big.Int).SetUint64(g.Number), Nonce: types.EncodeNonce(g.Nonce), - Time: new(big.Int).SetUint64(g.Timestamp), + Time: g.Timestamp, ParentHash: g.ParentHash, Extra: g.ExtraData, GasLimit: g.GasLimit, diff --git a/core/headerchain.go b/core/headerchain.go index d2093113c..896afd9bb 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -286,7 +286,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), "number", last.Number, "hash", last.Hash(), } - if timestamp := time.Unix(last.Time.Int64(), 0); time.Since(timestamp) > time.Minute { + if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute { context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) 
} if stats.ignored > 0 { diff --git a/core/types/block.go b/core/types/block.go index 57905d8c7..867d77db3 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -79,7 +79,7 @@ type Header struct { Number *big.Int `json:"number" gencodec:"required"` GasLimit uint64 `json:"gasLimit" gencodec:"required"` GasUsed uint64 `json:"gasUsed" gencodec:"required"` - Time *big.Int `json:"timestamp" gencodec:"required"` + Time uint64 `json:"timestamp" gencodec:"required"` Extra []byte `json:"extraData" gencodec:"required"` MixDigest common.Hash `json:"mixHash"` Nonce BlockNonce `json:"nonce"` @@ -91,7 +91,7 @@ type headerMarshaling struct { Number *hexutil.Big GasLimit hexutil.Uint64 GasUsed hexutil.Uint64 - Time *hexutil.Big + Time hexutil.Uint64 Extra hexutil.Bytes Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON } @@ -105,7 +105,7 @@ func (h *Header) Hash() common.Hash { // Size returns the approximate memory used by all internal contents. It is used // to approximate and limit the memory consumption of various caches. func (h *Header) Size() common.StorageSize { - return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen()+h.Time.BitLen())/8) + return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8) } func rlpHash(x interface{}) (h common.Hash) { @@ -221,9 +221,6 @@ func NewBlockWithHeader(header *Header) *Block { // modifying a header variable. func CopyHeader(h *Header) *Header { cpy := *h - if cpy.Time = new(big.Int); h.Time != nil { - cpy.Time.Set(h.Time) - } if cpy.Difficulty = new(big.Int); h.Difficulty != nil { cpy.Difficulty.Set(h.Difficulty) } @@ -286,7 +283,7 @@ func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) func (b *Block) GasLimit() uint64 { return b.header.GasLimit } func (b *Block) GasUsed() uint64 { return b.header.GasUsed } func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) } -func (b *Block) Time() *big.Int { return new(big.Int).Set(b.header.Time) } +func (b *Block) Time() uint64 { return b.header.Time } func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() } func (b *Block) MixDigest() common.Hash { return b.header.MixDigest } diff --git a/core/types/block_test.go b/core/types/block_test.go index a35fbc25b..2576f2fbc 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -48,7 +48,7 @@ func TestBlockEncoding(t *testing.T) { check("Root", block.Root(), common.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017")) check("Hash", block.Hash(), common.HexToHash("0a5843ac1cb04865017cb35a57b50b07084e5fcee39b5acadade33149f4fff9e")) check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4)) - check("Time", block.Time(), big.NewInt(1426516743)) + check("Time", block.Time(), uint64(1426516743)) check("Size", block.Size(), common.StorageSize(len(blockEnc))) tx1 := NewTransaction(0, common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), big.NewInt(10), 50000, big.NewInt(10), nil) diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go index 59a1c9c43..4212b8d94 100644 --- a/core/types/gen_header_json.go +++ b/core/types/gen_header_json.go @@ -27,7 +27,7 @@ func (h Header) MarshalJSON() ([]byte, error) { Number *hexutil.Big `json:"number" gencodec:"required"` GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Time 
*hexutil.Big `json:"timestamp" gencodec:"required"` + Time hexutil.Uint64 `json:"timestamp" gencodec:"required"` Extra hexutil.Bytes `json:"extraData" gencodec:"required"` MixDigest common.Hash `json:"mixHash"` Nonce BlockNonce `json:"nonce"` @@ -45,7 +45,7 @@ func (h Header) MarshalJSON() ([]byte, error) { enc.Number = (*hexutil.Big)(h.Number) enc.GasLimit = hexutil.Uint64(h.GasLimit) enc.GasUsed = hexutil.Uint64(h.GasUsed) - enc.Time = (*hexutil.Big)(h.Time) + enc.Time = hexutil.Uint64(h.Time) enc.Extra = h.Extra enc.MixDigest = h.MixDigest enc.Nonce = h.Nonce @@ -67,7 +67,7 @@ func (h *Header) UnmarshalJSON(input []byte) error { Number *hexutil.Big `json:"number" gencodec:"required"` GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Time *hexutil.Big `json:"timestamp" gencodec:"required"` + Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"` Extra *hexutil.Bytes `json:"extraData" gencodec:"required"` MixDigest *common.Hash `json:"mixHash"` Nonce *BlockNonce `json:"nonce"` @@ -123,7 +123,7 @@ func (h *Header) UnmarshalJSON(input []byte) error { if dec.Time == nil { return errors.New("missing required field 'timestamp' for Header") } - h.Time = (*big.Int)(dec.Time) + h.Time = uint64(*dec.Time) if dec.Extra == nil { return errors.New("missing required field 'extraData' for Header") } diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index 9f3d7237e..caf232097 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -557,7 +557,7 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats { Number: header.Number, Hash: header.Hash(), ParentHash: header.ParentHash, - Timestamp: header.Time, + Timestamp: new(big.Int).SetUint64(header.Time), Miner: author, GasUsed: header.GasUsed, GasLimit: header.GasLimit, diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 73b629bd9..b732adff6 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -882,7 +882,7 @@ func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]inter "size": hexutil.Uint64(b.Size()), "gasLimit": hexutil.Uint64(head.GasLimit), "gasUsed": hexutil.Uint64(head.GasUsed), - "timestamp": (*hexutil.Big)(head.Time), + "timestamp": hexutil.Uint64(head.Time), "transactionsRoot": head.TxHash, "receiptsRoot": head.ReceiptHash, } diff --git a/light/lightchain.go b/light/lightchain.go index 8e2734c2d..977f497a7 100644 --- a/light/lightchain.go +++ b/light/lightchain.go @@ -157,7 +157,7 @@ func (self *LightChain) loadLastState() error { // Issue a status log and return header := self.hc.CurrentHeader() headerTd := self.GetTd(header.Hash(), header.Number.Uint64()) - log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0))) + log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(header.Time), 0))) return nil } @@ -488,7 +488,7 @@ func (self *LightChain) SyncCht(ctx context.Context) bool { // Ensure the chain didn't move past the latest block while retrieving it if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() { - log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash(), "age", common.PrettyAge(time.Unix(header.Time.Int64(), 0))) + log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash(), "age", 
common.PrettyAge(time.Unix(int64(header.Time), 0))) self.hc.SetCurrentHeader(header) } return true diff --git a/miner/worker.go b/miner/worker.go index 48473796b..44a9f44f7 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -823,8 +823,8 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) tstart := time.Now() parent := w.chain.CurrentBlock() - if parent.Time().Cmp(new(big.Int).SetInt64(timestamp)) >= 0 { - timestamp = parent.Time().Int64() + 1 + if parent.Time() >= uint64(timestamp) { + timestamp = int64(parent.Time() + 1) } // this will ensure we're not going off too far in the future if now := time.Now().Unix(); timestamp > now+1 { @@ -839,7 +839,7 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) Number: num.Add(num, common.Big1), GasLimit: core.CalcGasLimit(parent, w.gasFloor, w.gasCeil), Extra: w.extra, - Time: big.NewInt(timestamp), + Time: uint64(timestamp), } // Only set the coinbase if our consensus engine is running (avoid spurious block rewards) if w.isRunning() { diff --git a/mobile/types.go b/mobile/types.go index 443d07ea9..d5e8db2fa 100644 --- a/mobile/types.go +++ b/mobile/types.go @@ -109,7 +109,7 @@ func (h *Header) GetDifficulty() *BigInt { return &BigInt{h.header.Difficulty} } func (h *Header) GetNumber() int64 { return h.header.Number.Int64() } func (h *Header) GetGasLimit() int64 { return int64(h.header.GasLimit) } func (h *Header) GetGasUsed() int64 { return int64(h.header.GasUsed) } -func (h *Header) GetTime() int64 { return h.header.Time.Int64() } +func (h *Header) GetTime() int64 { return int64(h.header.Time) } func (h *Header) GetExtra() []byte { return h.header.Extra } func (h *Header) GetMixDigest() *Hash { return &Hash{h.header.MixDigest} } func (h *Header) GetNonce() *Nonce { return &Nonce{h.header.Nonce} } @@ -180,7 +180,7 @@ func (b *Block) GetDifficulty() *BigInt { return &BigInt{b.block.Difficu func (b *Block) GetNumber() int64 { return b.block.Number().Int64() } func (b *Block) GetGasLimit() int64 { return int64(b.block.GasLimit()) } func (b *Block) GetGasUsed() int64 { return int64(b.block.GasUsed()) } -func (b *Block) GetTime() int64 { return b.block.Time().Int64() } +func (b *Block) GetTime() int64 { return int64(b.block.Time()) } func (b *Block) GetExtra() []byte { return b.block.Extra() } func (b *Block) GetMixDigest() *Hash { return &Hash{b.block.MixDigest()} } func (b *Block) GetNonce() int64 { return int64(b.block.Nonce()) } diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 9fa69bf4e..3a1644497 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -82,7 +82,7 @@ type btHeader struct { Difficulty *big.Int GasLimit uint64 GasUsed uint64 - Timestamp *big.Int + Timestamp uint64 } type btHeaderMarshaling struct { @@ -91,7 +91,7 @@ type btHeaderMarshaling struct { Difficulty *math.HexOrDecimal256 GasLimit math.HexOrDecimal64 GasUsed math.HexOrDecimal64 - Timestamp *math.HexOrDecimal256 + Timestamp math.HexOrDecimal64 } func (t *BlockTest) Run() error { @@ -146,7 +146,7 @@ func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis { return &core.Genesis{ Config: config, Nonce: t.json.Genesis.Nonce.Uint64(), - Timestamp: t.json.Genesis.Timestamp.Uint64(), + Timestamp: t.json.Genesis.Timestamp, ParentHash: t.json.Genesis.ParentHash, ExtraData: t.json.Genesis.ExtraData, GasLimit: t.json.Genesis.GasLimit, @@ -248,7 +248,7 @@ func validateHeader(h *btHeader, h2 *types.Header) error { if h.GasUsed != h2.GasUsed { return fmt.Errorf("GasUsed: 
want: %d have: %d", h.GasUsed, h2.GasUsed) } - if h.Timestamp.Cmp(h2.Time) != 0 { + if h.Timestamp != h2.Time { return fmt.Errorf("Timestamp: want: %v have: %v", h.Timestamp, h2.Time) } return nil diff --git a/tests/difficulty_test_util.go b/tests/difficulty_test_util.go index 00d699cf7..fe6e90b02 100644 --- a/tests/difficulty_test_util.go +++ b/tests/difficulty_test_util.go @@ -30,18 +30,18 @@ import ( //go:generate gencodec -type DifficultyTest -field-override difficultyTestMarshaling -out gen_difficultytest.go type DifficultyTest struct { - ParentTimestamp *big.Int `json:"parentTimestamp"` + ParentTimestamp uint64 `json:"parentTimestamp"` ParentDifficulty *big.Int `json:"parentDifficulty"` UncleHash common.Hash `json:"parentUncles"` - CurrentTimestamp *big.Int `json:"currentTimestamp"` + CurrentTimestamp uint64 `json:"currentTimestamp"` CurrentBlockNumber uint64 `json:"currentBlockNumber"` CurrentDifficulty *big.Int `json:"currentDifficulty"` } type difficultyTestMarshaling struct { - ParentTimestamp *math.HexOrDecimal256 + ParentTimestamp math.HexOrDecimal64 ParentDifficulty *math.HexOrDecimal256 - CurrentTimestamp *math.HexOrDecimal256 + CurrentTimestamp math.HexOrDecimal64 CurrentDifficulty *math.HexOrDecimal256 UncleHash common.Hash CurrentBlockNumber math.HexOrDecimal64 @@ -56,7 +56,7 @@ func (test *DifficultyTest) Run(config *params.ChainConfig) error { UncleHash: test.UncleHash, } - actual := ethash.CalcDifficulty(config, test.CurrentTimestamp.Uint64(), parent) + actual := ethash.CalcDifficulty(config, test.CurrentTimestamp, parent) exp := test.CurrentDifficulty if actual.Cmp(exp) != 0 { diff --git a/tests/gen_btheader.go b/tests/gen_btheader.go index 5cfd4bd0a..f2e086a7b 100644 --- a/tests/gen_btheader.go +++ b/tests/gen_btheader.go @@ -14,6 +14,7 @@ import ( var _ = (*btHeaderMarshaling)(nil) +// MarshalJSON marshals as JSON. func (b btHeader) MarshalJSON() ([]byte, error) { type btHeader struct { Bloom types.Bloom @@ -31,7 +32,7 @@ func (b btHeader) MarshalJSON() ([]byte, error) { Difficulty *math.HexOrDecimal256 GasLimit math.HexOrDecimal64 GasUsed math.HexOrDecimal64 - Timestamp *math.HexOrDecimal256 + Timestamp math.HexOrDecimal64 } var enc btHeader enc.Bloom = b.Bloom @@ -49,10 +50,11 @@ func (b btHeader) MarshalJSON() ([]byte, error) { enc.Difficulty = (*math.HexOrDecimal256)(b.Difficulty) enc.GasLimit = math.HexOrDecimal64(b.GasLimit) enc.GasUsed = math.HexOrDecimal64(b.GasUsed) - enc.Timestamp = (*math.HexOrDecimal256)(b.Timestamp) + enc.Timestamp = math.HexOrDecimal64(b.Timestamp) return json.Marshal(&enc) } +// UnmarshalJSON unmarshals from JSON. func (b *btHeader) UnmarshalJSON(input []byte) error { type btHeader struct { Bloom *types.Bloom @@ -70,7 +72,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error { Difficulty *math.HexOrDecimal256 GasLimit *math.HexOrDecimal64 GasUsed *math.HexOrDecimal64 - Timestamp *math.HexOrDecimal256 + Timestamp *math.HexOrDecimal64 } var dec btHeader if err := json.Unmarshal(input, &dec); err != nil { @@ -122,7 +124,7 @@ func (b *btHeader) UnmarshalJSON(input []byte) error { b.GasUsed = uint64(*dec.GasUsed) } if dec.Timestamp != nil { - b.Timestamp = (*big.Int)(dec.Timestamp) + b.Timestamp = uint64(*dec.Timestamp) } return nil } diff --git a/tests/gen_difficultytest.go b/tests/gen_difficultytest.go index 88f36ce99..cd15ae31b 100644 --- a/tests/gen_difficultytest.go +++ b/tests/gen_difficultytest.go @@ -12,31 +12,33 @@ import ( var _ = (*difficultyTestMarshaling)(nil) +// MarshalJSON marshals as JSON. 
func (d DifficultyTest) MarshalJSON() ([]byte, error) { type DifficultyTest struct { - ParentTimestamp *math.HexOrDecimal256 `json:"parentTimestamp"` + ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp"` ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` UncleHash common.Hash `json:"parentUncles"` - CurrentTimestamp *math.HexOrDecimal256 `json:"currentTimestamp"` + CurrentTimestamp math.HexOrDecimal64 `json:"currentTimestamp"` CurrentBlockNumber math.HexOrDecimal64 `json:"currentBlockNumber"` CurrentDifficulty *math.HexOrDecimal256 `json:"currentDifficulty"` } var enc DifficultyTest - enc.ParentTimestamp = (*math.HexOrDecimal256)(d.ParentTimestamp) + enc.ParentTimestamp = math.HexOrDecimal64(d.ParentTimestamp) enc.ParentDifficulty = (*math.HexOrDecimal256)(d.ParentDifficulty) enc.UncleHash = d.UncleHash - enc.CurrentTimestamp = (*math.HexOrDecimal256)(d.CurrentTimestamp) + enc.CurrentTimestamp = math.HexOrDecimal64(d.CurrentTimestamp) enc.CurrentBlockNumber = math.HexOrDecimal64(d.CurrentBlockNumber) enc.CurrentDifficulty = (*math.HexOrDecimal256)(d.CurrentDifficulty) return json.Marshal(&enc) } +// UnmarshalJSON unmarshals from JSON. func (d *DifficultyTest) UnmarshalJSON(input []byte) error { type DifficultyTest struct { - ParentTimestamp *math.HexOrDecimal256 `json:"parentTimestamp"` + ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp"` ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` UncleHash *common.Hash `json:"parentUncles"` - CurrentTimestamp *math.HexOrDecimal256 `json:"currentTimestamp"` + CurrentTimestamp *math.HexOrDecimal64 `json:"currentTimestamp"` CurrentBlockNumber *math.HexOrDecimal64 `json:"currentBlockNumber"` CurrentDifficulty *math.HexOrDecimal256 `json:"currentDifficulty"` } @@ -45,7 +47,7 @@ func (d *DifficultyTest) UnmarshalJSON(input []byte) error { return err } if dec.ParentTimestamp != nil { - d.ParentTimestamp = (*big.Int)(dec.ParentTimestamp) + d.ParentTimestamp = uint64(*dec.ParentTimestamp) } if dec.ParentDifficulty != nil { d.ParentDifficulty = (*big.Int)(dec.ParentDifficulty) @@ -54,7 +56,7 @@ func (d *DifficultyTest) UnmarshalJSON(input []byte) error { d.UncleHash = *dec.UncleHash } if dec.CurrentTimestamp != nil { - d.CurrentTimestamp = (*big.Int)(dec.CurrentTimestamp) + d.CurrentTimestamp = uint64(*dec.CurrentTimestamp) } if dec.CurrentBlockNumber != nil { d.CurrentBlockNumber = uint64(*dec.CurrentBlockNumber) -- cgit v1.2.3 From 442320a8aee324a7c1059ae05de36a34556a9fa7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 8 Apr 2019 10:43:01 +0300 Subject: travis: update builders to xenial to shadow Go releases --- .travis.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index a079d9eb8..d93282078 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,7 @@ sudo: false matrix: include: - os: linux - dist: trusty + dist: xenial sudo: required go: 1.10.x script: @@ -16,7 +16,7 @@ matrix: # These are the latest Go versions. 
- os: linux - dist: trusty + dist: xenial sudo: required go: 1.11.x script: @@ -43,7 +43,7 @@ matrix: # This builder only tests code linters on latest version of Go - os: linux - dist: trusty + dist: xenial go: 1.11.x env: - lint @@ -55,7 +55,7 @@ matrix: # This builder does the Ubuntu PPA upload - if: type = push os: linux - dist: trusty + dist: xenial go: 1.11.x env: - ubuntu-ppa @@ -77,7 +77,7 @@ matrix: # This builder does the Linux Azure uploads - if: type = push os: linux - dist: trusty + dist: xenial sudo: required go: 1.11.x env: @@ -111,7 +111,7 @@ matrix: # This builder does the Linux Azure MIPS xgo uploads - if: type = push os: linux - dist: trusty + dist: xenial services: - docker go: 1.11.x @@ -139,7 +139,7 @@ matrix: # This builder does the Android Maven and Azure uploads - if: type = push os: linux - dist: trusty + dist: xenial addons: apt: packages: @@ -206,7 +206,7 @@ matrix: # This builder does the Azure archive purges to avoid accumulating junk - if: type = cron os: linux - dist: trusty + dist: xenial go: 1.11.x env: - azure-purge -- cgit v1.2.3 From f1b00cffc828105c17c0ecacb2074874b752a9a0 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Mon, 17 Dec 2018 15:23:54 +0800 Subject: core: re-omit new log event when logs rebirth --- core/blockchain.go | 27 +++++-- core/blockchain_test.go | 206 +++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 224 insertions(+), 9 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index e40fc39fa..117be8c72 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1401,11 +1401,11 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { commonBlock *types.Block deletedTxs types.Transactions deletedLogs []*types.Log + rebirthLogs []*types.Log // collectLogs collects the logs that were generated during the // processing of the block that corresponds with the given hash. - // These logs are later announced as deleted. - collectLogs = func(hash common.Hash) { - // Coalesce logs and set 'Removed'. + // These logs are later announced as deleted or reborn + collectLogs = func(hash common.Hash, removed bool) { number := bc.hc.GetBlockNumber(hash) if number == nil { return @@ -1413,9 +1413,13 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { receipts := rawdb.ReadReceipts(bc.db, hash, *number) for _, receipt := range receipts { for _, log := range receipt.Logs { - del := *log - del.Removed = true - deletedLogs = append(deletedLogs, &del) + l := *log + if removed { + l.Removed = true + deletedLogs = append(deletedLogs, &l) + } else { + rebirthLogs = append(rebirthLogs, &l) + } } } } @@ -1428,7 +1432,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { oldChain = append(oldChain, oldBlock) deletedTxs = append(deletedTxs, oldBlock.Transactions()...) - collectLogs(oldBlock.Hash()) + collectLogs(oldBlock.Hash(), true) } } else { // reduce new chain and append new chain blocks for inserting later on @@ -1452,7 +1456,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { oldChain = append(oldChain, oldBlock) newChain = append(newChain, newBlock) deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 
- collectLogs(oldBlock.Hash()) + collectLogs(oldBlock.Hash(), true) oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) if oldBlock == nil { @@ -1478,6 +1482,10 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { for i := len(newChain) - 1; i >= 0; i-- { // insert the block in the canonical way, re-writing history bc.insert(newChain[i]) + // collect reborn logs due to chain reorg(except head block) + if i != 0 { + collectLogs(newChain[i].Hash(), false) + } // write lookup entries for hash based transaction/receipt searches rawdb.WriteTxLookupEntries(bc.db, newChain[i]) addedTxs = append(addedTxs, newChain[i].Transactions()...) @@ -1495,6 +1503,9 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { if len(deletedLogs) > 0 { go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) } + if len(rebirthLogs) > 0 { + go bc.logsFeed.Send(rebirthLogs) + } if len(oldChain) > 0 { go func() { for _, block := range oldChain { diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 504ad0eaf..7c76f1fc4 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -884,7 +884,6 @@ func TestChainTxReorgs(t *testing.T) { } func TestLogReorgs(t *testing.T) { - var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -930,6 +929,211 @@ func TestLogReorgs(t *testing.T) { } } +func TestLogRebirth(t *testing.T) { + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + db = ethdb.NewMemDatabase() + // this code generates a log + code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") + gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}} + genesis = gspec.MustCommit(db) + signer = types.NewEIP155Signer(gspec.Config.ChainID) + newLogCh = make(chan bool) + ) + + // listenNewLog checks whether the received logs number is equal with expected. 
+ listenNewLog := func(sink chan []*types.Log, expect int) { + cnt := 0 + for { + select { + case logs := <-sink: + cnt += len(logs) + case <-time.NewTimer(5 * time.Second).C: + // new logs timeout + newLogCh <- false + return + } + if cnt == expect { + break + } else if cnt > expect { + // redundant logs received + newLogCh <- false + return + } + } + select { + case <-sink: + // redundant logs received + newLogCh <- false + case <-time.NewTimer(100 * time.Millisecond).C: + newLogCh <- true + } + } + + blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil) + defer blockchain.Stop() + + logsCh := make(chan []*types.Log) + blockchain.SubscribeLogsEvent(logsCh) + + rmLogsCh := make(chan RemovedLogsEvent) + blockchain.SubscribeRemovedLogsEvent(rmLogsCh) + + chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) { + if i == 1 { + tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1) + if err != nil { + t.Fatalf("failed to create tx: %v", err) + } + gen.AddTx(tx) + } + }) + + // Spawn a goroutine to receive log events + go listenNewLog(logsCh, 1) + if _, err := blockchain.InsertChain(chain); err != nil { + t.Fatalf("failed to insert chain: %v", err) + } + if !<-newLogCh { + t.Fatalf("failed to receive new log event") + } + + // Generate long reorg chain + forkChain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) { + if i == 1 { + tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1) + if err != nil { + t.Fatalf("failed to create tx: %v", err) + } + gen.AddTx(tx) + // Higher block difficulty + gen.OffsetTime(-9) + } + }) + + // Spawn a goroutine to receive log events + go listenNewLog(logsCh, 1) + if _, err := blockchain.InsertChain(forkChain); err != nil { + t.Fatalf("failed to insert forked chain: %v", err) + } + if !<-newLogCh { + t.Fatalf("failed to receive new log event") + } + // Ensure removedLog events received + select { + case ev := <-rmLogsCh: + if len(ev.Logs) == 0 { + t.Error("expected logs") + } + case <-time.NewTimer(1 * time.Second).C: + t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.") + } + + newBlocks, _ := GenerateChain(params.TestChainConfig, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}) + go listenNewLog(logsCh, 1) + if _, err := blockchain.InsertChain(newBlocks); err != nil { + t.Fatalf("failed to insert forked chain: %v", err) + } + // Rebirth logs should omit a newLogEvent + if !<-newLogCh { + t.Fatalf("failed to receive new log event") + } + // Ensure removedLog events received + select { + case ev := <-rmLogsCh: + if len(ev.Logs) == 0 { + t.Error("expected logs") + } + case <-time.NewTimer(1 * time.Second).C: + t.Fatal("Timeout. 
There is no RemovedLogsEvent has been sent.") + } +} + +func TestSideLogRebirth(t *testing.T) { + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + db = ethdb.NewMemDatabase() + // this code generates a log + code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") + gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}} + genesis = gspec.MustCommit(db) + signer = types.NewEIP155Signer(gspec.Config.ChainID) + newLogCh = make(chan bool) + ) + + // listenNewLog checks whether the received logs number is equal with expected. + listenNewLog := func(sink chan []*types.Log, expect int) { + cnt := 0 + for { + select { + case logs := <-sink: + cnt += len(logs) + case <-time.NewTimer(5 * time.Second).C: + // new logs timeout + newLogCh <- false + return + } + if cnt == expect { + break + } else if cnt > expect { + // redundant logs received + newLogCh <- false + return + } + } + select { + case <-sink: + // redundant logs received + newLogCh <- false + case <-time.NewTimer(100 * time.Millisecond).C: + newLogCh <- true + } + } + + blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil) + defer blockchain.Stop() + + logsCh := make(chan []*types.Log) + blockchain.SubscribeLogsEvent(logsCh) + + chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) { + if i == 1 { + // Higher block difficulty + gen.OffsetTime(-9) + } + }) + if _, err := blockchain.InsertChain(chain); err != nil { + t.Fatalf("failed to insert forked chain: %v", err) + } + + // Generate side chain with lower difficulty + sideChain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) { + if i == 1 { + tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1) + if err != nil { + t.Fatalf("failed to create tx: %v", err) + } + gen.AddTx(tx) + } + }) + if _, err := blockchain.InsertChain(sideChain); err != nil { + t.Fatalf("failed to insert forked chain: %v", err) + } + + // Generate a new block based on side chain + newBlocks, _ := GenerateChain(params.TestChainConfig, sideChain[len(sideChain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}) + go listenNewLog(logsCh, 1) + if _, err := blockchain.InsertChain(newBlocks); err != nil { + t.Fatalf("failed to insert forked chain: %v", err) + } + // Rebirth logs should omit a newLogEvent + if !<-newLogCh { + t.Fatalf("failed to receive new log event") + } +} + func TestReorgSideEvent(t *testing.T) { var ( db = ethdb.NewMemDatabase() -- cgit v1.2.3 From 0e63a70505e0011d8f668dba86c99071cee9790e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 4 Apr 2019 14:39:11 +0300 Subject: core: minor code polishes + rebase fixes --- core/blockchain.go | 85 ++++++++++++++++++++++++++++--------------------- core/blockchain_test.go | 10 +++--- 2 files changed, 54 insertions(+), 41 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 117be8c72..bd55acf7f 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1391,17 +1391,21 @@ func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (i return 0, nil, nil, nil } -// reorgs takes two 
blocks, an old chain and a new chain and will reconstruct the blocks and inserts them -// to be part of the new canonical chain and accumulates potential missing transactions and post an -// event about them +// reorg takes two blocks, an old chain and a new chain and will reconstruct the +// blocks and inserts them to be part of the new canonical chain and accumulates +// potential missing transactions and post an event about them. func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { var ( newChain types.Blocks oldChain types.Blocks commonBlock *types.Block - deletedTxs types.Transactions + + deletedTxs types.Transactions + addedTxs types.Transactions + deletedLogs []*types.Log rebirthLogs []*types.Log + // collectLogs collects the logs that were generated during the // processing of the block that corresponds with the given hash. // These logs are later announced as deleted or reborn @@ -1424,46 +1428,49 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { } } ) - - // first reduce whoever is higher bound + // Reduce the longer chain to the same number as the shorter one if oldBlock.NumberU64() > newBlock.NumberU64() { - // reduce old chain + // Old chain is longer, gather all transactions and logs as deleted ones for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { oldChain = append(oldChain, oldBlock) deletedTxs = append(deletedTxs, oldBlock.Transactions()...) - collectLogs(oldBlock.Hash(), true) } } else { - // reduce new chain and append new chain blocks for inserting later on + // New chain is longer, stash all blocks away for subsequent insertion for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) { newChain = append(newChain, newBlock) } } if oldBlock == nil { - return fmt.Errorf("Invalid old chain") + return fmt.Errorf("invalid old chain") } if newBlock == nil { - return fmt.Errorf("Invalid new chain") + return fmt.Errorf("invalid new chain") } - + // Both sides of the reorg are at the same number, reduce both until the common + // ancestor is found for { + // If the common ancestor was found, bail out if oldBlock.Hash() == newBlock.Hash() { commonBlock = oldBlock break } - + // Remove an old block as well as stash away a new block oldChain = append(oldChain, oldBlock) - newChain = append(newChain, newBlock) deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 
collectLogs(oldBlock.Hash(), true) - oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) + newChain = append(newChain, newBlock) + + // Step back with both chains + oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) if oldBlock == nil { - return fmt.Errorf("Invalid old chain") + return fmt.Errorf("invalid old chain") } + newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) if newBlock == nil { - return fmt.Errorf("Invalid new chain") + return fmt.Errorf("invalid new chain") } } // Ensure the user sees large reorgs @@ -1478,42 +1485,46 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash()) } // Insert the new chain, taking care of the proper incremental order - var addedTxs types.Transactions for i := len(newChain) - 1; i >= 0; i-- { - // insert the block in the canonical way, re-writing history + // Insert the block in the canonical way, re-writing history bc.insert(newChain[i]) - // collect reborn logs due to chain reorg(except head block) + + // Collect reborn logs due to chain reorg (except head block (reverse order)) if i != 0 { collectLogs(newChain[i].Hash(), false) } - // write lookup entries for hash based transaction/receipt searches + // Write lookup entries for hash based transaction/receipt searches rawdb.WriteTxLookupEntries(bc.db, newChain[i]) addedTxs = append(addedTxs, newChain[i].Transactions()...) } - // calculate the difference between deleted and added transactions - diff := types.TxDifference(deletedTxs, addedTxs) - // When transactions get deleted from the database that means the - // receipts that were created in the fork must also be deleted + // When transactions get deleted from the database, the receipts that were + // created in the fork must also be deleted batch := bc.db.NewBatch() - for _, tx := range diff { + for _, tx := range types.TxDifference(deletedTxs, addedTxs) { rawdb.DeleteTxLookupEntry(batch, tx.Hash()) } batch.Write() - if len(deletedLogs) > 0 { - go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) - } - if len(rebirthLogs) > 0 { - go bc.logsFeed.Send(rebirthLogs) - } - if len(oldChain) > 0 { - go func() { + // If any logs need to be fired, do it now. In theory we could avoid creating + // this goroutine if there are no events to fire, but realistcally that only + // ever happens if we're reorging empty blocks, which will only happen on idle + // networks where performance is not an issue either way. + // + // TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct + // event ordering? 
+ go func() { + if len(deletedLogs) > 0 { + bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) + } + if len(rebirthLogs) > 0 { + bc.logsFeed.Send(rebirthLogs) + } + if len(oldChain) > 0 { for _, block := range oldChain { bc.chainSideFeed.Send(ChainSideEvent{Block: block}) } - }() - } - + } + }() return nil } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 7c76f1fc4..e1a0f33b7 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -934,6 +934,7 @@ func TestLogRebirth(t *testing.T) { key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) db = ethdb.NewMemDatabase() + // this code generates a log code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}} @@ -1035,10 +1036,6 @@ func TestLogRebirth(t *testing.T) { if _, err := blockchain.InsertChain(newBlocks); err != nil { t.Fatalf("failed to insert forked chain: %v", err) } - // Rebirth logs should omit a newLogEvent - if !<-newLogCh { - t.Fatalf("failed to receive new log event") - } // Ensure removedLog events received select { case ev := <-rmLogsCh: @@ -1048,6 +1045,10 @@ func TestLogRebirth(t *testing.T) { case <-time.NewTimer(1 * time.Second).C: t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.") } + // Rebirth logs should omit a newLogEvent + if !<-newLogCh { + t.Fatalf("failed to receive new log event") + } } func TestSideLogRebirth(t *testing.T) { @@ -1055,6 +1056,7 @@ func TestSideLogRebirth(t *testing.T) { key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) db = ethdb.NewMemDatabase() + // this code generates a log code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}} -- cgit v1.2.3 From 8ca64548077edb527b903058db876a4ec3771349 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 8 Apr 2019 12:14:05 +0300 Subject: params: set Rinkeby Petersburg fork block (4th May, 2019) --- params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index d07738a81..254508c0a 100644 --- a/params/config.go +++ b/params/config.go @@ -94,7 +94,7 @@ var ( EIP158Block: big.NewInt(3), ByzantiumBlock: big.NewInt(1035301), ConstantinopleBlock: big.NewInt(3660663), - PetersburgBlock: big.NewInt(9999999), //TODO! Insert Rinkeby block number + PetersburgBlock: big.NewInt(4321234), Clique: &CliqueConfig{ Period: 15, Epoch: 30000, -- cgit v1.2.3 From 9d9c6b5847bfd1267c2fee60f5a5b05bdaaf4216 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Mon, 8 Apr 2019 13:35:11 +0200 Subject: p2p/discover: bump failure counter only if no nodes were provided (#19362) This resolves a minor issue where neighbors responses containing less than 16 nodes would bump the failure counter, removing the node. One situation where this can happen is a private deployment where the total number of extant nodes is less than 16. Issue found by @jsying. 
--- p2p/discover/table.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/p2p/discover/table.go b/p2p/discover/table.go index ef0c08afc..3e9353753 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -313,7 +313,7 @@ func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) { // Avoid recording failures on shutdown. reply <- nil return - } else if err != nil || len(r) == 0 { + } else if len(r) == 0 { fails++ tab.db.UpdateFindFails(n.ID(), n.IP(), fails) log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err) -- cgit v1.2.3 From e872ba7a9e8123a4010198ee8dfaada9f71fa24a Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Mon, 8 Apr 2019 13:49:52 +0200 Subject: eth, les, geth: implement cli-configurable global gas cap for RPC calls (#19401) * eth, les, geth: implement cli-configurable global gas cap for RPC calls * graphql, ethapi: place gas cap in DoCall * ethapi: reformat log message --- cmd/geth/main.go | 3 +-- cmd/geth/usage.go | 1 + cmd/utils/flags.go | 7 +++++++ eth/api_backend.go | 4 ++++ eth/config.go | 3 +++ internal/ethapi/api.go | 21 +++++++++++++++------ internal/ethapi/backend.go | 1 + les/api_backend.go | 4 ++++ 8 files changed, 36 insertions(+), 8 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index e60a27e43..458608112 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -125,8 +125,6 @@ var ( utils.VMEnableDebugFlag, utils.NetworkIdFlag, utils.ConstantinopleOverrideFlag, - utils.RPCCORSDomainFlag, - utils.RPCVirtualHostsFlag, utils.EthStatsURLFlag, utils.MetricsEnabledFlag, utils.FakePoWFlag, @@ -150,6 +148,7 @@ var ( utils.WSAllowedOriginsFlag, utils.IPCDisabledFlag, utils.IPCPathFlag, + utils.RPCGlobalGasCap, } whisperFlags = []cli.Flag{ diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 6823aa36c..259e6d30a 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -153,6 +153,7 @@ var AppHelpFlagGroups = []flagGroup{ utils.RPCListenAddrFlag, utils.RPCPortFlag, utils.RPCApiFlag, + utils.RPCGlobalGasCap, utils.WSEnabledFlag, utils.WSListenAddrFlag, utils.WSPortFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 55e84b876..052660ba1 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -411,6 +411,10 @@ var ( Name: "vmdebug", Usage: "Record information useful for VM and contract debugging", } + RPCGlobalGasCap = cli.Uint64Flag{ + Name: "rpc.gascap", + Usage: "Sets a cap on gas that can be used in eth_call/estimateGas", + } // Logging and debug settings EthStatsURLFlag = cli.StringFlag{ Name: "ethstats", @@ -1256,6 +1260,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(EVMInterpreterFlag.Name) { cfg.EVMInterpreter = ctx.GlobalString(EVMInterpreterFlag.Name) } + if ctx.GlobalIsSet(RPCGlobalGasCap.Name) { + cfg.RPCGasCap = new(big.Int).SetUint64(ctx.GlobalUint64(RPCGlobalGasCap.Name)) + } // Override any default configs for hard coded networks. 
switch { diff --git a/eth/api_backend.go b/eth/api_backend.go index a48815e0d..3efc09cc1 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -213,6 +213,10 @@ func (b *EthAPIBackend) AccountManager() *accounts.Manager { return b.eth.AccountManager() } +func (b *EthAPIBackend) RPCGasCap() *big.Int { + return b.eth.config.RPCGasCap +} + func (b *EthAPIBackend) BloomStatus() (uint64, uint64) { sections, _, _ := b.eth.bloomIndexer.Sections() return params.BloomBitsBlocks, sections diff --git a/eth/config.go b/eth/config.go index 7c041d1af..5b9db2502 100644 --- a/eth/config.go +++ b/eth/config.go @@ -135,6 +135,9 @@ type Config struct { // Constantinople block override (TODO: remove after the fork) ConstantinopleOverride *big.Int + + // RPCGasCap is the global gas cap for eth-call variants. + RPCGasCap *big.Int `toml:",omitempty"` } type configMarshaling struct { diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index b732adff6..e1ca71104 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -683,7 +683,7 @@ type CallArgs struct { Data hexutil.Bytes `json:"data"` } -func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration) ([]byte, uint64, bool, error) { +func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) { defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr) @@ -700,14 +700,18 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr } } // Set default gas & gas price if none were set - gas, gasPrice := uint64(args.Gas), args.GasPrice.ToInt() + gas := uint64(args.Gas) if gas == 0 { gas = math.MaxUint64 / 2 } + if globalGasCap != nil && globalGasCap.Uint64() < gas { + log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) + gas = globalGasCap.Uint64() + } + gasPrice := args.GasPrice.ToInt() if gasPrice.Sign() == 0 { gasPrice = new(big.Int).SetUint64(defaultGasPrice) } - // Create new call message msg := types.NewMessage(addr, args.To, 0, args.Value.ToInt(), gas, gasPrice, args.Data, false) @@ -748,7 +752,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr // Call executes the given transaction on the state for the given block number. // It doesn't make and changes in the state/blockchain and is useful to execute and retrieve values. 
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) { - result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second) + result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second, s.b.RPCGasCap()) return (hexutil.Bytes)(result), err } @@ -771,13 +775,18 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h } hi = block.GasLimit() } + gasCap := s.b.RPCGasCap() + if gasCap != nil && hi > gasCap.Uint64() { + log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap) + hi = gasCap.Uint64() + } cap = hi // Create a helper to check if a gas allowance results in an executable transaction executable := func(gas uint64) bool { args.Gas = hexutil.Uint64(gas) - _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0) + _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0, gasCap) if err != nil || failed { return false } @@ -795,7 +804,7 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h // Reject the transaction as invalid if it still fails at the highest allowance if hi == cap { if !executable(hi) { - return 0, fmt.Errorf("gas required exceeds allowance or always failing transaction") + return 0, fmt.Errorf("gas required exceeds allowance (%d) or always failing transaction", cap) } } return hexutil.Uint64(hi), nil diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index e23ee03b1..56a3daffa 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -44,6 +44,7 @@ type Backend interface { ChainDb() ethdb.Database EventMux() *event.TypeMux AccountManager() *accounts.Manager + RPCGasCap() *big.Int // global gas cap for eth_call over rpc: DoS protection // BlockChain API SetHead(number uint64) diff --git a/les/api_backend.go b/les/api_backend.go index 753139623..f03d32fed 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -187,6 +187,10 @@ func (b *LesApiBackend) AccountManager() *accounts.Manager { return b.eth.accountManager } +func (b *LesApiBackend) RPCGasCap() *big.Int { + return b.eth.config.RPCGasCap +} + func (b *LesApiBackend) BloomStatus() (uint64, uint64) { if b.eth.bloomIndexer == nil { return 0, 0 -- cgit v1.2.3 From 009d2fe2d650b1a92e28f0decbf5f7fa628779e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 8 Apr 2019 16:06:59 +0300 Subject: params, swarm: release Geth v1.8.24 (noop Swarm 0.3.12) --- params/version.go | 2 +- swarm/version/version.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/params/version.go b/params/version.go index e3815aaf0..7949d4fd1 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 8 // Minor version component of the current release - VersionPatch = 23 // Patch version component of the current release + VersionPatch = 24 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/swarm/version/version.go b/swarm/version/version.go index 820aec2c0..d41110576 100644 --- a/swarm/version/version.go +++ b/swarm/version/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 0 // Major version component of the current release VersionMinor = 3 // Minor version component of the current release - VersionPatch = 11 // Patch version component of the current release + VersionPatch = 12 // Patch 
version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) -- cgit v1.2.3
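
One closing note on the rpc.gascap change a few patches up: the flag wires a *big.Int cap from the CLI through eth.Config into the API backends, and doCall/EstimateGas clamp the caller-supplied gas to it. The helper below is a condensed sketch of that clamp, not the actual internal/ethapi code (which additionally logs a warning when it caps); the example invocation and the 25000000 value are likewise only illustrative.

    package main

    import (
            "fmt"
            "math/big"
    )

    // clampGas condenses the check the patch adds to doCall and EstimateGas:
    // if a global cap is configured and the caller asked for more gas, the
    // request is reduced to the cap.
    func clampGas(requested uint64, globalCap *big.Int) uint64 {
            if globalCap != nil && globalCap.Uint64() < requested {
                    return globalCap.Uint64()
            }
            return requested
    }

    func main() {
            fmt.Println(clampGas(1<<62, big.NewInt(25000000))) // prints 25000000
    }

A node operator would opt in with something like: geth --rpc.gascap 25000000 (the flag name comes from the diff above; the value is arbitrary). Without the flag the previous behaviour, an effectively unbounded gas allowance for eth_call and eth_estimateGas, is preserved.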