author     Péter Szilágyi <peterke@gmail.com>    2019-04-08 21:16:05 +0800
committer  GitHub <noreply@github.com>           2019-04-08 21:16:05 +0800
commit     4e13a09c5033b4cf073db6aeaaa7d159dcf07f30 (patch)
tree       0f64daac335200ce6fd45d6ee9f62dabc0fb1887 /core
parent     c942700427557e3ff6de3aaf6b916e2f056c1ec2 (diff)
parent     009d2fe2d650b1a92e28f0decbf5f7fa628779e9 (diff)
Merge pull request #19370 from karalabe/geth-1.8.24
Backport PR for the v1.8.24 maintenance release
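
The change that threads through most of these files is the switch of the block header timestamp (Header.Time) from *big.Int to uint64. A minimal sketch of how a typical call site adapts, using the sample timestamp from the block test below (illustrative only, not part of the diff):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Header.Time is now a plain uint64 instead of a *big.Int.
	var headerTime uint64 = 1426516743 // sample value from core/types/block_test.go below

	// Old pattern: time.Unix(header.Time.Int64(), 0)
	// New pattern: convert the uint64 explicitly.
	fmt.Println(time.Unix(int64(headerTime), 0).UTC())
}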
Diffstat (limited to 'core')
-rw-r--r--  core/blockchain.go              116
-rw-r--r--  core/blockchain_insert.go         2
-rw-r--r--  core/blockchain_test.go         208
-rw-r--r--  core/chain_makers.go             18
-rw-r--r--  core/evm.go                       2
-rw-r--r--  core/genesis.go                   2
-rw-r--r--  core/headerchain.go               2
-rw-r--r--  core/types/block.go              11
-rw-r--r--  core/types/block_test.go          2
-rw-r--r--  core/types/gen_header_json.go     8
10 files changed, 298 insertions, 73 deletions
diff --git a/core/blockchain.go b/core/blockchain.go
index 0d2d71b4d..bd55acf7f 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -267,9 +267,9 @@ func (bc *BlockChain) loadLastState() error {
blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
- log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)))
- log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)))
- log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)))
+ log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
+ log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
+ log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
return nil
}
@@ -894,7 +894,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
context := []interface{}{
"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
- "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
+ "number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
"size", common.StorageSize(bytes),
}
if stats.ignored > 0 {
@@ -1058,8 +1058,8 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
- max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
- if block.Time().Cmp(max) > 0 {
+ max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
+ if block.Time() > max {
return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
}
bc.futureBlocks.Add(block.Hash(), block)
@@ -1391,21 +1391,25 @@ func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (i
return 0, nil, nil, nil
}
-// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
-// to be part of the new canonical chain and accumulates potential missing transactions and post an
-// event about them
+// reorg takes two blocks, an old chain and a new chain and will reconstruct the
+// blocks and inserts them to be part of the new canonical chain and accumulates
+// potential missing transactions and post an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
var (
newChain types.Blocks
oldChain types.Blocks
commonBlock *types.Block
- deletedTxs types.Transactions
+
+ deletedTxs types.Transactions
+ addedTxs types.Transactions
+
deletedLogs []*types.Log
+ rebirthLogs []*types.Log
+
// collectLogs collects the logs that were generated during the
// processing of the block that corresponds with the given hash.
- // These logs are later announced as deleted.
- collectLogs = func(hash common.Hash) {
- // Coalesce logs and set 'Removed'.
+ // These logs are later announced as deleted or reborn
+ collectLogs = func(hash common.Hash, removed bool) {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return
@@ -1413,53 +1417,60 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
receipts := rawdb.ReadReceipts(bc.db, hash, *number)
for _, receipt := range receipts {
for _, log := range receipt.Logs {
- del := *log
- del.Removed = true
- deletedLogs = append(deletedLogs, &del)
+ l := *log
+ if removed {
+ l.Removed = true
+ deletedLogs = append(deletedLogs, &l)
+ } else {
+ rebirthLogs = append(rebirthLogs, &l)
+ }
}
}
}
)
-
- // first reduce whoever is higher bound
+ // Reduce the longer chain to the same number as the shorter one
if oldBlock.NumberU64() > newBlock.NumberU64() {
- // reduce old chain
+ // Old chain is longer, gather all transactions and logs as deleted ones
for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
oldChain = append(oldChain, oldBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
-
- collectLogs(oldBlock.Hash())
+ collectLogs(oldBlock.Hash(), true)
}
} else {
- // reduce new chain and append new chain blocks for inserting later on
+ // New chain is longer, stash all blocks away for subsequent insertion
for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
newChain = append(newChain, newBlock)
}
}
if oldBlock == nil {
- return fmt.Errorf("Invalid old chain")
+ return fmt.Errorf("invalid old chain")
}
if newBlock == nil {
- return fmt.Errorf("Invalid new chain")
+ return fmt.Errorf("invalid new chain")
}
-
+ // Both sides of the reorg are at the same number, reduce both until the common
+ // ancestor is found
for {
+ // If the common ancestor was found, bail out
if oldBlock.Hash() == newBlock.Hash() {
commonBlock = oldBlock
break
}
-
+ // Remove an old block as well as stash away a new block
oldChain = append(oldChain, oldBlock)
- newChain = append(newChain, newBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
- collectLogs(oldBlock.Hash())
+ collectLogs(oldBlock.Hash(), true)
+
+ newChain = append(newChain, newBlock)
- oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
+ // Step back with both chains
+ oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
if oldBlock == nil {
- return fmt.Errorf("Invalid old chain")
+ return fmt.Errorf("invalid old chain")
}
+ newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
if newBlock == nil {
- return fmt.Errorf("Invalid new chain")
+ return fmt.Errorf("invalid new chain")
}
}
// Ensure the user sees large reorgs
@@ -1474,35 +1485,46 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
}
// Insert the new chain, taking care of the proper incremental order
- var addedTxs types.Transactions
for i := len(newChain) - 1; i >= 0; i-- {
- // insert the block in the canonical way, re-writing history
+ // Insert the block in the canonical way, re-writing history
bc.insert(newChain[i])
- // write lookup entries for hash based transaction/receipt searches
+
+ // Collect reborn logs due to chain reorg (except head block (reverse order))
+ if i != 0 {
+ collectLogs(newChain[i].Hash(), false)
+ }
+ // Write lookup entries for hash based transaction/receipt searches
rawdb.WriteTxLookupEntries(bc.db, newChain[i])
addedTxs = append(addedTxs, newChain[i].Transactions()...)
}
- // calculate the difference between deleted and added transactions
- diff := types.TxDifference(deletedTxs, addedTxs)
- // When transactions get deleted from the database that means the
- // receipts that were created in the fork must also be deleted
+ // When transactions get deleted from the database, the receipts that were
+ // created in the fork must also be deleted
batch := bc.db.NewBatch()
- for _, tx := range diff {
+ for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
rawdb.DeleteTxLookupEntry(batch, tx.Hash())
}
batch.Write()
- if len(deletedLogs) > 0 {
- go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
- }
- if len(oldChain) > 0 {
- go func() {
+ // If any logs need to be fired, do it now. In theory we could avoid creating
+ // this goroutine if there are no events to fire, but realistically that only
+ // ever happens if we're reorging empty blocks, which will only happen on idle
+ // networks where performance is not an issue either way.
+ //
+ // TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
+ // event ordering?
+ go func() {
+ if len(deletedLogs) > 0 {
+ bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
+ }
+ if len(rebirthLogs) > 0 {
+ bc.logsFeed.Send(rebirthLogs)
+ }
+ if len(oldChain) > 0 {
for _, block := range oldChain {
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
}
- }()
- }
-
+ }
+ }()
return nil
}
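
With this change, reorg sends reborn logs on the same logs feed used for newly inserted blocks, alongside the removed-logs feed. A minimal consumer sketch (a hypothetical helper, not part of this diff; it uses the Subscribe* accessors exercised by the tests below and the upstream go-ethereum import path, which may differ in this fork):

package core

import "github.com/ethereum/go-ethereum/core/types"

// watchReorgLogs is a hypothetical helper showing how a consumer observes both
// the logs removed from the abandoned branch and the logs reborn on the new one.
func watchReorgLogs(bc *BlockChain) {
	logsCh := make(chan []*types.Log)       // new blocks and reborn logs
	rmLogsCh := make(chan RemovedLogsEvent) // logs deleted by the reorg (Removed == true)

	bc.SubscribeLogsEvent(logsCh)
	bc.SubscribeRemovedLogsEvent(rmLogsCh)

	for {
		select {
		case logs := <-logsCh:
			_ = logs // canonical-chain logs, including rebirths after a reorg
		case ev := <-rmLogsCh:
			_ = ev.Logs // logs from blocks that dropped off the canonical chain
		}
	}
}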
diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go
index cfa32c5aa..ff668925a 100644
--- a/core/blockchain_insert.go
+++ b/core/blockchain_insert.go
@@ -60,7 +60,7 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
"number", end.Number(), "hash", end.Hash(),
}
- if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
+ if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
context = append(context, []interface{}{"cache", cache}...)
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 504ad0eaf..e1a0f33b7 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -884,7 +884,6 @@ func TestChainTxReorgs(t *testing.T) {
}
func TestLogReorgs(t *testing.T) {
-
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
@@ -930,6 +929,213 @@ func TestLogReorgs(t *testing.T) {
}
}
+func TestLogRebirth(t *testing.T) {
+ var (
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ db = ethdb.NewMemDatabase()
+
+ // this code generates a log
+ code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
+ genesis = gspec.MustCommit(db)
+ signer = types.NewEIP155Signer(gspec.Config.ChainID)
+ newLogCh = make(chan bool)
+ )
+
+ // listenNewLog checks whether the number of received logs equals the expected count.
+ listenNewLog := func(sink chan []*types.Log, expect int) {
+ cnt := 0
+ for {
+ select {
+ case logs := <-sink:
+ cnt += len(logs)
+ case <-time.NewTimer(5 * time.Second).C:
+ // new logs timeout
+ newLogCh <- false
+ return
+ }
+ if cnt == expect {
+ break
+ } else if cnt > expect {
+ // redundant logs received
+ newLogCh <- false
+ return
+ }
+ }
+ select {
+ case <-sink:
+ // redundant logs received
+ newLogCh <- false
+ case <-time.NewTimer(100 * time.Millisecond).C:
+ newLogCh <- true
+ }
+ }
+
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ defer blockchain.Stop()
+
+ logsCh := make(chan []*types.Log)
+ blockchain.SubscribeLogsEvent(logsCh)
+
+ rmLogsCh := make(chan RemovedLogsEvent)
+ blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
+
+ chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
+ if i == 1 {
+ tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
+ if err != nil {
+ t.Fatalf("failed to create tx: %v", err)
+ }
+ gen.AddTx(tx)
+ }
+ })
+
+ // Spawn a goroutine to receive log events
+ go listenNewLog(logsCh, 1)
+ if _, err := blockchain.InsertChain(chain); err != nil {
+ t.Fatalf("failed to insert chain: %v", err)
+ }
+ if !<-newLogCh {
+ t.Fatalf("failed to receive new log event")
+ }
+
+ // Generate long reorg chain
+ forkChain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
+ if i == 1 {
+ tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
+ if err != nil {
+ t.Fatalf("failed to create tx: %v", err)
+ }
+ gen.AddTx(tx)
+ // Higher block difficulty
+ gen.OffsetTime(-9)
+ }
+ })
+
+ // Spawn a goroutine to receive log events
+ go listenNewLog(logsCh, 1)
+ if _, err := blockchain.InsertChain(forkChain); err != nil {
+ t.Fatalf("failed to insert forked chain: %v", err)
+ }
+ if !<-newLogCh {
+ t.Fatalf("failed to receive new log event")
+ }
+ // Ensure removedLog events received
+ select {
+ case ev := <-rmLogsCh:
+ if len(ev.Logs) == 0 {
+ t.Error("expected logs")
+ }
+ case <-time.NewTimer(1 * time.Second).C:
+ t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
+ }
+
+ newBlocks, _ := GenerateChain(params.TestChainConfig, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
+ go listenNewLog(logsCh, 1)
+ if _, err := blockchain.InsertChain(newBlocks); err != nil {
+ t.Fatalf("failed to insert forked chain: %v", err)
+ }
+ // Ensure removedLog events received
+ select {
+ case ev := <-rmLogsCh:
+ if len(ev.Logs) == 0 {
+ t.Error("expected logs")
+ }
+ case <-time.NewTimer(1 * time.Second).C:
+ t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
+ }
+ // Rebirth logs should emit a newLogEvent
+ if !<-newLogCh {
+ t.Fatalf("failed to receive new log event")
+ }
+}
+
+func TestSideLogRebirth(t *testing.T) {
+ var (
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ db = ethdb.NewMemDatabase()
+
+ // this code generates a log
+ code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
+ genesis = gspec.MustCommit(db)
+ signer = types.NewEIP155Signer(gspec.Config.ChainID)
+ newLogCh = make(chan bool)
+ )
+
+ // listenNewLog checks whether the number of received logs equals the expected count.
+ listenNewLog := func(sink chan []*types.Log, expect int) {
+ cnt := 0
+ for {
+ select {
+ case logs := <-sink:
+ cnt += len(logs)
+ case <-time.NewTimer(5 * time.Second).C:
+ // new logs timeout
+ newLogCh <- false
+ return
+ }
+ if cnt == expect {
+ break
+ } else if cnt > expect {
+ // redundant logs received
+ newLogCh <- false
+ return
+ }
+ }
+ select {
+ case <-sink:
+ // redundant logs received
+ newLogCh <- false
+ case <-time.NewTimer(100 * time.Millisecond).C:
+ newLogCh <- true
+ }
+ }
+
+ blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+ defer blockchain.Stop()
+
+ logsCh := make(chan []*types.Log)
+ blockchain.SubscribeLogsEvent(logsCh)
+
+ chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
+ if i == 1 {
+ // Higher block difficulty
+ gen.OffsetTime(-9)
+ }
+ })
+ if _, err := blockchain.InsertChain(chain); err != nil {
+ t.Fatalf("failed to insert forked chain: %v", err)
+ }
+
+ // Generate side chain with lower difficulty
+ sideChain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
+ if i == 1 {
+ tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, new(big.Int), code), signer, key1)
+ if err != nil {
+ t.Fatalf("failed to create tx: %v", err)
+ }
+ gen.AddTx(tx)
+ }
+ })
+ if _, err := blockchain.InsertChain(sideChain); err != nil {
+ t.Fatalf("failed to insert forked chain: %v", err)
+ }
+
+ // Generate a new block based on side chain
+ newBlocks, _ := GenerateChain(params.TestChainConfig, sideChain[len(sideChain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
+ go listenNewLog(logsCh, 1)
+ if _, err := blockchain.InsertChain(newBlocks); err != nil {
+ t.Fatalf("failed to insert forked chain: %v", err)
+ }
+ // Rebirth logs should emit a newLogEvent
+ if !<-newLogCh {
+ t.Fatalf("failed to receive new log event")
+ }
+}
+
func TestReorgSideEvent(t *testing.T) {
var (
db = ethdb.NewMemDatabase()
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 0b5a3d184..d563d85ee 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -149,12 +149,12 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
// associated difficulty. It's useful to test scenarios where forking is not
// tied to chain length directly.
func (b *BlockGen) OffsetTime(seconds int64) {
- b.header.Time.Add(b.header.Time, big.NewInt(seconds))
- if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
+ b.header.Time += uint64(seconds)
+ if b.header.Time <= b.parent.Header().Time {
panic("block time out of range")
}
chainreader := &fakeChainReader{config: b.config}
- b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time.Uint64(), b.parent.Header())
+ b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, b.parent.Header())
}
// GenerateChain creates a chain of n blocks. The first block's
@@ -225,20 +225,20 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
- var time *big.Int
- if parent.Time() == nil {
- time = big.NewInt(10)
+ var time uint64
+ if parent.Time() == 0 {
+ time = 10
} else {
- time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds
+ time = parent.Time() + 10 // block time is fixed at 10 seconds
}
return &types.Header{
Root: state.IntermediateRoot(chain.Config().IsEIP158(parent.Number())),
ParentHash: parent.Hash(),
Coinbase: parent.Coinbase(),
- Difficulty: engine.CalcDifficulty(chain, time.Uint64(), &types.Header{
+ Difficulty: engine.CalcDifficulty(chain, time, &types.Header{
Number: parent.Number(),
- Time: new(big.Int).Sub(time, big.NewInt(10)),
+ Time: time - 10,
Difficulty: parent.Difficulty(),
UncleHash: parent.UncleHash(),
}),
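
A side note on the OffsetTime change above: with Time now a uint64, a negative seconds offset works through unsigned wraparound, since adding uint64(-9) is the same as subtracting 9 modulo 2^64. A minimal sketch of that arithmetic (illustrative only, not part of the diff):

package main

import "fmt"

func main() {
	var blockTime uint64 = 100
	seconds := int64(-9)

	// uint64(seconds) wraps to 2^64-9; the addition overflows and nets out to -9.
	blockTime += uint64(seconds)
	fmt.Println(blockTime) // 91
}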
diff --git a/core/evm.go b/core/evm.go
index d303c40a4..b654bbd47 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -51,7 +51,7 @@ func NewEVMContext(msg Message, header *types.Header, chain ChainContext, author
Origin: msg.From(),
Coinbase: beneficiary,
BlockNumber: new(big.Int).Set(header.Number),
- Time: new(big.Int).Set(header.Time),
+ Time: new(big.Int).SetUint64(header.Time),
Difficulty: new(big.Int).Set(header.Difficulty),
GasLimit: header.GasLimit,
GasPrice: new(big.Int).Set(msg.GasPrice()),
diff --git a/core/genesis.go b/core/genesis.go
index cbb6eecd2..0d16c0468 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -243,7 +243,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
head := &types.Header{
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
- Time: new(big.Int).SetUint64(g.Timestamp),
+ Time: g.Timestamp,
ParentHash: g.ParentHash,
Extra: g.ExtraData,
GasLimit: g.GasLimit,
diff --git a/core/headerchain.go b/core/headerchain.go
index d2093113c..896afd9bb 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -286,7 +286,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
"number", last.Number, "hash", last.Hash(),
}
- if timestamp := time.Unix(last.Time.Int64(), 0); time.Since(timestamp) > time.Minute {
+ if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
if stats.ignored > 0 {
diff --git a/core/types/block.go b/core/types/block.go
index 57905d8c7..867d77db3 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -79,7 +79,7 @@ type Header struct {
Number *big.Int `json:"number" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
- Time *big.Int `json:"timestamp" gencodec:"required"`
+ Time uint64 `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
@@ -91,7 +91,7 @@ type headerMarshaling struct {
Number *hexutil.Big
GasLimit hexutil.Uint64
GasUsed hexutil.Uint64
- Time *hexutil.Big
+ Time hexutil.Uint64
Extra hexutil.Bytes
Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
}
@@ -105,7 +105,7 @@ func (h *Header) Hash() common.Hash {
// Size returns the approximate memory used by all internal contents. It is used
// to approximate and limit the memory consumption of various caches.
func (h *Header) Size() common.StorageSize {
- return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen()+h.Time.BitLen())/8)
+ return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8)
}
func rlpHash(x interface{}) (h common.Hash) {
@@ -221,9 +221,6 @@ func NewBlockWithHeader(header *Header) *Block {
// modifying a header variable.
func CopyHeader(h *Header) *Header {
cpy := *h
- if cpy.Time = new(big.Int); h.Time != nil {
- cpy.Time.Set(h.Time)
- }
if cpy.Difficulty = new(big.Int); h.Difficulty != nil {
cpy.Difficulty.Set(h.Difficulty)
}
@@ -286,7 +283,7 @@ func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number)
func (b *Block) GasLimit() uint64 { return b.header.GasLimit }
func (b *Block) GasUsed() uint64 { return b.header.GasUsed }
func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) }
-func (b *Block) Time() *big.Int { return new(big.Int).Set(b.header.Time) }
+func (b *Block) Time() uint64 { return b.header.Time }
func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() }
func (b *Block) MixDigest() common.Hash { return b.header.MixDigest }
diff --git a/core/types/block_test.go b/core/types/block_test.go
index a35fbc25b..2576f2fbc 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -48,7 +48,7 @@ func TestBlockEncoding(t *testing.T) {
check("Root", block.Root(), common.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017"))
check("Hash", block.Hash(), common.HexToHash("0a5843ac1cb04865017cb35a57b50b07084e5fcee39b5acadade33149f4fff9e"))
check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4))
- check("Time", block.Time(), big.NewInt(1426516743))
+ check("Time", block.Time(), uint64(1426516743))
check("Size", block.Size(), common.StorageSize(len(blockEnc)))
tx1 := NewTransaction(0, common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), big.NewInt(10), 50000, big.NewInt(10), nil)
diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go
index 59a1c9c43..4212b8d94 100644
--- a/core/types/gen_header_json.go
+++ b/core/types/gen_header_json.go
@@ -27,7 +27,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Time *hexutil.Big `json:"timestamp" gencodec:"required"`
+ Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest common.Hash `json:"mixHash"`
Nonce BlockNonce `json:"nonce"`
@@ -45,7 +45,7 @@ func (h Header) MarshalJSON() ([]byte, error) {
enc.Number = (*hexutil.Big)(h.Number)
enc.GasLimit = hexutil.Uint64(h.GasLimit)
enc.GasUsed = hexutil.Uint64(h.GasUsed)
- enc.Time = (*hexutil.Big)(h.Time)
+ enc.Time = hexutil.Uint64(h.Time)
enc.Extra = h.Extra
enc.MixDigest = h.MixDigest
enc.Nonce = h.Nonce
@@ -67,7 +67,7 @@ func (h *Header) UnmarshalJSON(input []byte) error {
Number *hexutil.Big `json:"number" gencodec:"required"`
GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
- Time *hexutil.Big `json:"timestamp" gencodec:"required"`
+ Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData" gencodec:"required"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *BlockNonce `json:"nonce"`
@@ -123,7 +123,7 @@ func (h *Header) UnmarshalJSON(input []byte) error {
if dec.Time == nil {
return errors.New("missing required field 'timestamp' for Header")
}
- h.Time = (*big.Int)(dec.Time)
+ h.Time = uint64(*dec.Time)
if dec.Extra == nil {
return errors.New("missing required field 'extraData' for Header")
}
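
For reference, the JSON wire format is unchanged by the move from *hexutil.Big to hexutil.Uint64: both encode the timestamp as a 0x-prefixed hex quantity. A minimal sketch (assuming the upstream hexutil import path, which may differ in this fork):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// hexutil.Uint64 marshals as a 0x-prefixed quantity string,
	// matching the output of the old *hexutil.Big field.
	b, _ := json.Marshal(struct {
		Time hexutil.Uint64 `json:"timestamp"`
	}{Time: 1426516743})
	fmt.Println(string(b)) // {"timestamp":"0x5506eb07"}
}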