From 7c7692933c21b77328a94eed714f66c276776197 Mon Sep 17 00:00:00 2001 From: Jeffrey Wilcke Date: Mon, 31 Aug 2015 17:09:50 +0200 Subject: cmd/geth, cmd/utils, core, rpc: renamed to blockchain * Renamed ChainManager to BlockChain * Checkpointing is no longer required and never really properly worked when the state was corrupted. --- cmd/geth/blocktestcmd.go | 2 +- cmd/geth/js_test.go | 2 +- cmd/geth/main.go | 2 +- cmd/utils/cmd.go | 12 +- cmd/utils/flags.go | 4 +- core/bench_test.go | 2 +- core/block_processor.go | 10 +- core/block_processor_test.go | 6 +- core/blockchain.go | 809 +++++++++++++++++++++++++++++++++++++++++ core/blockchain_test.go | 652 +++++++++++++++++++++++++++++++++ core/chain_makers.go | 4 +- core/chain_makers_test.go | 2 +- core/chain_manager.go | 849 ------------------------------------------- core/chain_manager_test.go | 652 --------------------------------- core/helper_test.go | 6 +- core/manager.go | 2 +- core/vm/common.go | 4 +- core/vm/contract.go | 4 +- core/vm/doc.go | 8 +- core/vm/environment.go | 4 +- core/vm/memory.go | 2 +- core/vm_env.go | 4 +- eth/backend.go | 22 +- eth/filters/filter.go | 17 +- eth/filters/filter_system.go | 2 +- eth/gasprice.go | 4 +- eth/handler.go | 66 ++-- eth/handler_test.go | 158 ++++---- eth/helper_test.go | 20 +- eth/protocol_test.go | 2 +- eth/sync.go | 2 +- miner/worker.go | 4 +- rpc/api/admin.go | 8 +- rpc/api/debug.go | 2 +- rpc/api/eth.go | 2 +- tests/block_test_util.go | 8 +- xeth/xeth.go | 18 +- 37 files changed, 1671 insertions(+), 1706 deletions(-) create mode 100644 core/blockchain.go create mode 100644 core/blockchain_test.go delete mode 100644 core/chain_manager.go delete mode 100644 core/chain_manager_test.go diff --git a/cmd/geth/blocktestcmd.go b/cmd/geth/blocktestcmd.go index d6195e025..e0a5becdc 100644 --- a/cmd/geth/blocktestcmd.go +++ b/cmd/geth/blocktestcmd.go @@ -118,7 +118,7 @@ func runOneBlockTest(ctx *cli.Context, test *tests.BlockTest) (*eth.Ethereum, er return ethereum, fmt.Errorf("InsertPreState: %v", err) } - cm := ethereum.ChainManager() + cm := ethereum.BlockChain() validBlocks, err := test.TryBlocksInsert(cm) if err != nil { return ethereum, fmt.Errorf("Block Test load error: %v", err) diff --git a/cmd/geth/js_test.go b/cmd/geth/js_test.go index 1f5b28e3a..2ad3d669c 100644 --- a/cmd/geth/js_test.go +++ b/cmd/geth/js_test.go @@ -196,7 +196,7 @@ func TestBlockChain(t *testing.T) { tmpfile := filepath.Join(extmp, "export.chain") tmpfileq := strconv.Quote(tmpfile) - ethereum.ChainManager().Reset() + ethereum.BlockChain().Reset() checkEvalJSON(t, repl, `admin.exportChain(`+tmpfileq+`)`, `true`) if _, err := os.Stat(tmpfile); err != nil { diff --git a/cmd/geth/main.go b/cmd/geth/main.go index a9aa9f61f..e8292fdf2 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -48,7 +48,7 @@ import ( const ( ClientIdentifier = "Geth" - Version = "1.2.0" + Version = "1.2.0-dev" VersionMajor = 1 VersionMinor = 2 VersionPatch = 0 diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index 983762db8..5e4bfc937 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -169,7 +169,7 @@ func FormatTransactionData(data string) []byte { return d } -func ImportChain(chain *core.ChainManager, fn string) error { +func ImportChain(chain *core.BlockChain, fn string) error { // Watch for Ctrl-C while the import is running. // If a signal is received, the import will stop at the next batch. 
interrupt := make(chan os.Signal, 1) @@ -244,7 +244,7 @@ func ImportChain(chain *core.ChainManager, fn string) error { return nil } -func hasAllBlocks(chain *core.ChainManager, bs []*types.Block) bool { +func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool { for _, b := range bs { if !chain.HasBlock(b.Hash()) { return false @@ -253,21 +253,21 @@ func hasAllBlocks(chain *core.ChainManager, bs []*types.Block) bool { return true } -func ExportChain(chainmgr *core.ChainManager, fn string) error { +func ExportChain(blockchain *core.BlockChain, fn string) error { glog.Infoln("Exporting blockchain to", fn) fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) if err != nil { return err } defer fh.Close() - if err := chainmgr.Export(fh); err != nil { + if err := blockchain.Export(fh); err != nil { return err } glog.Infoln("Exported blockchain to", fn) return nil } -func ExportAppendChain(chainmgr *core.ChainManager, fn string, first uint64, last uint64) error { +func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error { glog.Infoln("Exporting blockchain to", fn) // TODO verify mode perms fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm) @@ -275,7 +275,7 @@ func ExportAppendChain(chainmgr *core.ChainManager, fn string, first uint64, las return err } defer fh.Close() - if err := chainmgr.ExportN(fh, first, last); err != nil { + if err := blockchain.ExportN(fh, first, last); err != nil { return err } glog.Infoln("Exported blockchain to", fn) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index ad474f17d..dea43bc5c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -508,7 +508,7 @@ func SetupEth(ctx *cli.Context) { } // MakeChain creates a chain manager from set command line flags. -func MakeChain(ctx *cli.Context) (chain *core.ChainManager, chainDb ethdb.Database) { +func MakeChain(ctx *cli.Context) (chain *core.BlockChain, chainDb ethdb.Database) { datadir := MustDataDir(ctx) cache := ctx.GlobalInt(CacheFlag.Name) @@ -527,7 +527,7 @@ func MakeChain(ctx *cli.Context) (chain *core.ChainManager, chainDb ethdb.Databa eventMux := new(event.TypeMux) pow := ethash.New() //genesis := core.GenesisBlock(uint64(ctx.GlobalInt(GenesisNonceFlag.Name)), blockDB) - chain, err = core.NewChainManager(chainDb, pow, eventMux) + chain, err = core.NewBlockChain(chainDb, pow, eventMux) if err != nil { Fatalf("Could not start chainmanager: %v", err) } diff --git a/core/bench_test.go b/core/bench_test.go index def4f0d2a..27f3e3158 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -168,7 +168,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) { // Time the insertion of the new chain. // State and blocks are stored in the same DB. evmux := new(event.TypeMux) - chainman, _ := NewChainManager(db, FakePow{}, evmux) + chainman, _ := NewBlockChain(db, FakePow{}, evmux) chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux)) defer chainman.Stop() b.ReportAllocs() diff --git a/core/block_processor.go b/core/block_processor.go index 40e3931ba..783e15687 100644 --- a/core/block_processor.go +++ b/core/block_processor.go @@ -47,7 +47,7 @@ type BlockProcessor struct { // Mutex for locking the block processor. 
Blocks can only be handled one at a time mutex sync.Mutex // Canonical block chain - bc *ChainManager + bc *BlockChain // non-persistent key/value memory storage mem map[string]*big.Int // Proof of work used for validating @@ -70,12 +70,12 @@ type GasPool interface { SubGas(gas, price *big.Int) error } -func NewBlockProcessor(db ethdb.Database, pow pow.PoW, chainManager *ChainManager, eventMux *event.TypeMux) *BlockProcessor { +func NewBlockProcessor(db ethdb.Database, pow pow.PoW, blockchain *BlockChain, eventMux *event.TypeMux) *BlockProcessor { sm := &BlockProcessor{ chainDb: db, mem: make(map[string]*big.Int), Pow: pow, - bc: chainManager, + bc: blockchain, eventMux: eventMux, } return sm @@ -124,7 +124,7 @@ func (self *BlockProcessor) ApplyTransaction(gp GasPool, statedb *state.StateDB, return receipt, gas, err } -func (self *BlockProcessor) ChainManager() *ChainManager { +func (self *BlockProcessor) BlockChain() *BlockChain { return self.bc } @@ -347,7 +347,7 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty // GetBlockReceipts returns the receipts beloniging to the block hash func (sm *BlockProcessor) GetBlockReceipts(bhash common.Hash) types.Receipts { - if block := sm.ChainManager().GetBlock(bhash); block != nil { + if block := sm.BlockChain().GetBlock(bhash); block != nil { return GetBlockReceipts(sm.chainDb, block.Hash()) } diff --git a/core/block_processor_test.go b/core/block_processor_test.go index 5735b8d0a..ba8bd7bcd 100644 --- a/core/block_processor_test.go +++ b/core/block_processor_test.go @@ -30,16 +30,16 @@ import ( "github.com/ethereum/go-ethereum/pow/ezp" ) -func proc() (*BlockProcessor, *ChainManager) { +func proc() (*BlockProcessor, *BlockChain) { db, _ := ethdb.NewMemDatabase() var mux event.TypeMux WriteTestNetGenesisBlock(db, 0) - chainMan, err := NewChainManager(db, thePow(), &mux) + blockchain, err := NewBlockChain(db, thePow(), &mux) if err != nil { fmt.Println(err) } - return NewBlockProcessor(db, ezp.New(), chainMan, &mux), chainMan + return NewBlockProcessor(db, ezp.New(), blockchain, &mux), blockchain } func TestNumber(t *testing.T) { diff --git a/core/blockchain.go b/core/blockchain.go new file mode 100644 index 000000000..e8209f8e3 --- /dev/null +++ b/core/blockchain.go @@ -0,0 +1,809 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package core implements the Ethereum consensus protocol. 
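The call-site changes above (cmd/utils.MakeChain, bench_test.go, proc() in block_processor_test.go) are mechanical: everything that previously constructed a ChainManager now constructs a BlockChain with the same arguments. A minimal sketch of that wiring, assuming the in-memory database, FakePow and test-net genesis helpers the tests in this patch already rely on; the helper name newTestChain is illustrative only:

package core

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
)

// newTestChain mirrors proc()/newCanonical: build a BlockChain, build its
// BlockProcessor, and tie the two together before inserting any blocks.
func newTestChain() (*BlockChain, *BlockProcessor, error) {
	db, _ := ethdb.NewMemDatabase()
	mux := new(event.TypeMux)
	WriteTestNetGenesisBlock(db, 0)

	chain, err := NewBlockChain(db, FakePow{}, mux) // was NewChainManager
	if err != nil {
		return nil, nil, err
	}
	proc := NewBlockProcessor(db, FakePow{}, chain, mux)
	chain.SetProcessor(proc) // state transitions are delegated to the processor
	return chain, proc, nil
}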
+package core + +import ( + "errors" + "fmt" + "io" + "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/pow" + "github.com/ethereum/go-ethereum/rlp" + "github.com/hashicorp/golang-lru" +) + +var ( + chainlogger = logger.NewLogger("CHAIN") + jsonlogger = logger.NewJsonLogger() + + blockInsertTimer = metrics.NewTimer("chain/inserts") + + ErrNoGenesis = errors.New("Genesis not found in chain") +) + +const ( + headerCacheLimit = 512 + bodyCacheLimit = 256 + tdCacheLimit = 1024 + blockCacheLimit = 256 + maxFutureBlocks = 256 + maxTimeFutureBlocks = 30 +) + +type BlockChain struct { + chainDb ethdb.Database + processor types.BlockProcessor + eventMux *event.TypeMux + genesisBlock *types.Block + // Last known total difficulty + mu sync.RWMutex + chainmu sync.RWMutex + tsmu sync.RWMutex + + td *big.Int + currentBlock *types.Block + currentGasLimit *big.Int + + headerCache *lru.Cache // Cache for the most recent block headers + bodyCache *lru.Cache // Cache for the most recent block bodies + bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format + tdCache *lru.Cache // Cache for the most recent block total difficulties + blockCache *lru.Cache // Cache for the most recent entire blocks + futureBlocks *lru.Cache // future blocks are blocks added for later processing + + quit chan struct{} + running int32 // running must be called automically + // procInterrupt must be atomically called + procInterrupt int32 // interrupt signaler for block processing + wg sync.WaitGroup + + pow pow.PoW +} + +func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*BlockChain, error) { + headerCache, _ := lru.New(headerCacheLimit) + bodyCache, _ := lru.New(bodyCacheLimit) + bodyRLPCache, _ := lru.New(bodyCacheLimit) + tdCache, _ := lru.New(tdCacheLimit) + blockCache, _ := lru.New(blockCacheLimit) + futureBlocks, _ := lru.New(maxFutureBlocks) + + bc := &BlockChain{ + chainDb: chainDb, + eventMux: mux, + quit: make(chan struct{}), + headerCache: headerCache, + bodyCache: bodyCache, + bodyRLPCache: bodyRLPCache, + tdCache: tdCache, + blockCache: blockCache, + futureBlocks: futureBlocks, + pow: pow, + } + + bc.genesisBlock = bc.GetBlockByNumber(0) + if bc.genesisBlock == nil { + reader, err := NewDefaultGenesisReader() + if err != nil { + return nil, err + } + bc.genesisBlock, err = WriteGenesisBlock(chainDb, reader) + if err != nil { + return nil, err + } + glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block") + } + if err := bc.setLastState(); err != nil { + return nil, err + } + // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain + for hash, _ := range BadHashes { + if block := bc.GetBlock(hash); block != nil { + glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4]) + block = bc.GetBlock(block.ParentHash()) + if block == nil { + glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?") + } + bc.SetHead(block) + + glog.V(logger.Error).Infoln("Chain reorg was successfull. 
Resuming normal operation") + } + } + // Take ownership of this particular state + go bc.update() + return bc, nil +} + +func (bc *BlockChain) SetHead(head *types.Block) { + bc.mu.Lock() + defer bc.mu.Unlock() + + for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) { + DeleteBlock(bc.chainDb, block.Hash()) + } + bc.headerCache.Purge() + bc.bodyCache.Purge() + bc.bodyRLPCache.Purge() + bc.blockCache.Purge() + bc.futureBlocks.Purge() + + bc.currentBlock = head + bc.setTotalDifficulty(bc.GetTd(head.Hash())) + bc.insert(head) + bc.setLastState() +} + +func (self *BlockChain) Td() *big.Int { + self.mu.RLock() + defer self.mu.RUnlock() + + return new(big.Int).Set(self.td) +} + +func (self *BlockChain) GasLimit() *big.Int { + self.mu.RLock() + defer self.mu.RUnlock() + + return self.currentBlock.GasLimit() +} + +func (self *BlockChain) LastBlockHash() common.Hash { + self.mu.RLock() + defer self.mu.RUnlock() + + return self.currentBlock.Hash() +} + +func (self *BlockChain) CurrentBlock() *types.Block { + self.mu.RLock() + defer self.mu.RUnlock() + + return self.currentBlock +} + +func (self *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) { + self.mu.RLock() + defer self.mu.RUnlock() + + return new(big.Int).Set(self.td), self.currentBlock.Hash(), self.genesisBlock.Hash() +} + +func (self *BlockChain) SetProcessor(proc types.BlockProcessor) { + self.processor = proc +} + +func (self *BlockChain) State() *state.StateDB { + return state.New(self.CurrentBlock().Root(), self.chainDb) +} + +func (bc *BlockChain) setLastState() error { + head := GetHeadBlockHash(bc.chainDb) + if head != (common.Hash{}) { + block := bc.GetBlock(head) + if block != nil { + bc.currentBlock = block + } + } else { + bc.Reset() + } + bc.td = bc.GetTd(bc.currentBlock.Hash()) + bc.currentGasLimit = CalcGasLimit(bc.currentBlock) + + if glog.V(logger.Info) { + glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td) + } + + return nil +} + +// Reset purges the entire blockchain, restoring it to its genesis state. +func (bc *BlockChain) Reset() { + bc.ResetWithGenesisBlock(bc.genesisBlock) +} + +// ResetWithGenesisBlock purges the entire blockchain, restoring it to the +// specified genesis state. +func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) { + bc.mu.Lock() + defer bc.mu.Unlock() + + // Dump the entire block chain and purge the caches + for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) { + DeleteBlock(bc.chainDb, block.Hash()) + } + bc.headerCache.Purge() + bc.bodyCache.Purge() + bc.bodyRLPCache.Purge() + bc.blockCache.Purge() + bc.futureBlocks.Purge() + + // Prepare the genesis block and reinitialize the chain + if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil { + glog.Fatalf("failed to write genesis block TD: %v", err) + } + if err := WriteBlock(bc.chainDb, genesis); err != nil { + glog.Fatalf("failed to write genesis block: %v", err) + } + bc.genesisBlock = genesis + bc.insert(bc.genesisBlock) + bc.currentBlock = bc.genesisBlock + bc.setTotalDifficulty(genesis.Difficulty()) +} + +// Export writes the active chain to the given writer. +func (self *BlockChain) Export(w io.Writer) error { + if err := self.ExportN(w, uint64(0), self.currentBlock.NumberU64()); err != nil { + return err + } + return nil +} + +// ExportN writes a subset of the active chain to the given writer. 
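Export here (and ExportN below it) is what cmd/utils.ExportChain drives from the command line; a minimal caller sketch, with exportToFile as an illustrative helper name:

package core

import "os"

// exportToFile streams the canonical chain, genesis through current head, as
// consecutive RLP-encoded blocks -- the same flags cmd/utils.ExportChain uses.
func exportToFile(bc *BlockChain, fn string) error {
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()
	return bc.Export(fh) // equivalent to bc.ExportN(fh, 0, head number)
}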
+func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { + self.mu.RLock() + defer self.mu.RUnlock() + + if first > last { + return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) + } + + glog.V(logger.Info).Infof("exporting %d blocks...\n", last-first+1) + + for nr := first; nr <= last; nr++ { + block := self.GetBlockByNumber(nr) + if block == nil { + return fmt.Errorf("export failed on #%d: not found", nr) + } + + if err := block.EncodeRLP(w); err != nil { + return err + } + } + + return nil +} + +// insert injects a block into the current chain block chain. Note, this function +// assumes that the `mu` mutex is held! +func (bc *BlockChain) insert(block *types.Block) { + // Add the block to the canonical chain number scheme and mark as the head + if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil { + glog.Fatalf("failed to insert block number: %v", err) + } + bc.currentBlock = block +} + +// Accessors +func (bc *BlockChain) Genesis() *types.Block { + return bc.genesisBlock +} + +// HasHeader checks if a block header is present in the database or not, caching +// it if present. +func (bc *BlockChain) HasHeader(hash common.Hash) bool { + return bc.GetHeader(hash) != nil +} + +// GetHeader retrieves a block header from the database by hash, caching it if +// found. +func (self *BlockChain) GetHeader(hash common.Hash) *types.Header { + // Short circuit if the header's already in the cache, retrieve otherwise + if header, ok := self.headerCache.Get(hash); ok { + return header.(*types.Header) + } + header := GetHeader(self.chainDb, hash) + if header == nil { + return nil + } + // Cache the found header for next time and return + self.headerCache.Add(header.Hash(), header) + return header +} + +// GetHeaderByNumber retrieves a block header from the database by number, +// caching it (associated with its hash) if found. +func (self *BlockChain) GetHeaderByNumber(number uint64) *types.Header { + hash := GetCanonicalHash(self.chainDb, number) + if hash == (common.Hash{}) { + return nil + } + return self.GetHeader(hash) +} + +// GetBody retrieves a block body (transactions and uncles) from the database by +// hash, caching it if found. +func (self *BlockChain) GetBody(hash common.Hash) *types.Body { + // Short circuit if the body's already in the cache, retrieve otherwise + if cached, ok := self.bodyCache.Get(hash); ok { + body := cached.(*types.Body) + return body + } + body := GetBody(self.chainDb, hash) + if body == nil { + return nil + } + // Cache the found body for next time and return + self.bodyCache.Add(hash, body) + return body +} + +// GetBodyRLP retrieves a block body in RLP encoding from the database by hash, +// caching it if found. +func (self *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { + // Short circuit if the body's already in the cache, retrieve otherwise + if cached, ok := self.bodyRLPCache.Get(hash); ok { + return cached.(rlp.RawValue) + } + body := GetBodyRLP(self.chainDb, hash) + if len(body) == 0 { + return nil + } + // Cache the found body for next time and return + self.bodyRLPCache.Add(hash, body) + return body +} + +// GetTd retrieves a block's total difficulty in the canonical chain from the +// database by hash, caching it if found. 
+func (self *BlockChain) GetTd(hash common.Hash) *big.Int { + // Short circuit if the td's already in the cache, retrieve otherwise + if cached, ok := self.tdCache.Get(hash); ok { + return cached.(*big.Int) + } + td := GetTd(self.chainDb, hash) + if td == nil { + return nil + } + // Cache the found body for next time and return + self.tdCache.Add(hash, td) + return td +} + +// HasBlock checks if a block is fully present in the database or not, caching +// it if present. +func (bc *BlockChain) HasBlock(hash common.Hash) bool { + return bc.GetBlock(hash) != nil +} + +// GetBlock retrieves a block from the database by hash, caching it if found. +func (self *BlockChain) GetBlock(hash common.Hash) *types.Block { + // Short circuit if the block's already in the cache, retrieve otherwise + if block, ok := self.blockCache.Get(hash); ok { + return block.(*types.Block) + } + block := GetBlock(self.chainDb, hash) + if block == nil { + return nil + } + // Cache the found block for next time and return + self.blockCache.Add(block.Hash(), block) + return block +} + +// GetBlockByNumber retrieves a block from the database by number, caching it +// (associated with its hash) if found. +func (self *BlockChain) GetBlockByNumber(number uint64) *types.Block { + hash := GetCanonicalHash(self.chainDb, number) + if hash == (common.Hash{}) { + return nil + } + return self.GetBlock(hash) +} + +// GetBlockHashesFromHash retrieves a number of block hashes starting at a given +// hash, fetching towards the genesis block. +func (self *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { + // Get the origin header from which to fetch + header := self.GetHeader(hash) + if header == nil { + return nil + } + // Iterate the headers until enough is collected or the genesis reached + chain := make([]common.Hash, 0, max) + for i := uint64(0); i < max; i++ { + if header = self.GetHeader(header.ParentHash); header == nil { + break + } + chain = append(chain, header.Hash()) + if header.Number.Cmp(common.Big0) == 0 { + break + } + } + return chain +} + +// [deprecated by eth/62] +// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. +func (self *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { + for i := 0; i < n; i++ { + block := self.GetBlock(hash) + if block == nil { + break + } + blocks = append(blocks, block) + hash = block.ParentHash() + } + return +} + +func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) { + for i := 0; block != nil && i < length; i++ { + uncles = append(uncles, block.Uncles()...) + block = self.GetBlock(block.ParentHash()) + } + + return +} + +// setTotalDifficulty updates the TD of the chain manager. Note, this function +// assumes that the `mu` mutex is held! 
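The getters above share one read-through pattern: consult the LRU cache, fall back to chainDb on a miss, then populate the cache, so repeated lookups are cheap. One behaviour worth a small usage sketch: GetBlockHashesFromHash returns ancestors starting at the parent of the supplied hash, not at the hash itself (ancestorHashes below is an illustrative helper):

package core

import "github.com/ethereum/go-ethereum/common"

// ancestorHashes collects up to max ancestor hashes of the current head. The
// first element is the head's parent, because GetBlockHashesFromHash steps to
// ParentHash before recording anything.
func ancestorHashes(bc *BlockChain, max uint64) []common.Hash {
	head := bc.CurrentBlock()
	if head == nil {
		return nil
	}
	return bc.GetBlockHashesFromHash(head.Hash(), max)
}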
+func (bc *BlockChain) setTotalDifficulty(td *big.Int) { + bc.td = new(big.Int).Set(td) +} + +func (bc *BlockChain) Stop() { + if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { + return + } + close(bc.quit) + atomic.StoreInt32(&bc.procInterrupt, 1) + + bc.wg.Wait() + + glog.V(logger.Info).Infoln("Chain manager stopped") +} + +type queueEvent struct { + queue []interface{} + canonicalCount int + sideCount int + splitCount int +} + +func (self *BlockChain) procFutureBlocks() { + blocks := make([]*types.Block, self.futureBlocks.Len()) + for i, hash := range self.futureBlocks.Keys() { + block, _ := self.futureBlocks.Get(hash) + blocks[i] = block.(*types.Block) + } + if len(blocks) > 0 { + types.BlockBy(types.Number).Sort(blocks) + self.InsertChain(blocks) + } +} + +type writeStatus byte + +const ( + NonStatTy writeStatus = iota + CanonStatTy + SplitStatTy + SideStatTy +) + +// WriteBlock writes the block to the chain. +func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err error) { + self.wg.Add(1) + defer self.wg.Done() + + // Calculate the total difficulty of the block + ptd := self.GetTd(block.ParentHash()) + if ptd == nil { + return NonStatTy, ParentError(block.ParentHash()) + } + td := new(big.Int).Add(block.Difficulty(), ptd) + + self.mu.RLock() + cblock := self.currentBlock + self.mu.RUnlock() + + // Compare the TD of the last known block in the canonical chain to make sure it's greater. + // At this point it's possible that a different chain (fork) becomes the new canonical chain. + if td.Cmp(self.Td()) > 0 { + // chain fork + if block.ParentHash() != cblock.Hash() { + // during split we merge two different chains and create the new canonical chain + err := self.reorg(cblock, block) + if err != nil { + return NonStatTy, err + } + } + status = CanonStatTy + + self.mu.Lock() + self.setTotalDifficulty(td) + self.insert(block) + self.mu.Unlock() + } else { + status = SideStatTy + } + + if err := WriteTd(self.chainDb, block.Hash(), td); err != nil { + glog.Fatalf("failed to write block total difficulty: %v", err) + } + if err := WriteBlock(self.chainDb, block); err != nil { + glog.Fatalf("filed to write block contents: %v", err) + } + // Delete from future blocks + self.futureBlocks.Remove(block.Hash()) + + return +} + +// InsertChain will attempt to insert the given chain in to the canonical chain or, otherwise, create a fork. It an error is returned +// it will return the index number of the failing block as well an error describing what went wrong (for possible errors see core/errors.go). +func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { + self.wg.Add(1) + defer self.wg.Done() + + self.chainmu.Lock() + defer self.chainmu.Unlock() + + // A queued approach to delivering events. This is generally + // faster than direct delivery and requires much less mutex + // acquiring. + var ( + queue = make([]interface{}, len(chain)) + queueEvent = queueEvent{queue: queue} + stats struct{ queued, processed, ignored int } + tstart = time.Now() + + nonceChecked = make([]bool, len(chain)) + ) + + // Start the parallel nonce verifier. + nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain) + defer close(nonceAbort) + + txcount := 0 + for i, block := range chain { + if atomic.LoadInt32(&self.procInterrupt) == 1 { + glog.V(logger.Debug).Infoln("Premature abort during chain processing") + break + } + + bstart := time.Now() + // Wait for block i's nonce to be verified before processing + // its state transition. 
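The rule WriteBlock applies above is heaviest-chain selection: the incoming block's total difficulty is its parent's TD plus its own difficulty, and only a strictly higher TD than the current head makes it canonical, with a reorg when it does not extend the head directly. In TestReorgLongest below, for instance, chain1 carries difficulties 1+2+4 = 7 while chain2 carries 1+2+3+4 = 10, so chain2 ends up canonical even though it is inserted second. A condensed decision sketch, illustration only:

package core

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

// canonicalStatus condenses the branch WriteBlock takes for a single block.
func canonicalStatus(bc *BlockChain, block *types.Block) writeStatus {
	ptd := bc.GetTd(block.ParentHash())
	if ptd == nil {
		return NonStatTy // unknown parent: the block cannot be placed yet
	}
	td := new(big.Int).Add(block.Difficulty(), ptd)
	if td.Cmp(bc.Td()) <= 0 {
		return SideStatTy // not heavier than the current head: side chain only
	}
	// Heavier chain: canonical head. WriteBlock additionally reorganises when
	// the block's parent is not the current head.
	return CanonStatTy
}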
+ for !nonceChecked[i] { + r := <-nonceResults + nonceChecked[r.index] = true + if !r.valid { + block := chain[r.index] + return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()} + } + } + + if BadHashes[block.Hash()] { + err := BadHashError(block.Hash()) + blockErr(block, err) + return i, err + } + // Call in to the block processor and check for errors. It's likely that if one block fails + // all others will fail too (unless a known block is returned). + logs, receipts, err := self.processor.Process(block) + if err != nil { + if IsKnownBlockErr(err) { + stats.ignored++ + continue + } + + if err == BlockFutureErr { + // Allow up to MaxFuture second in the future blocks. If this limit + // is exceeded the chain is discarded and processed at a later time + // if given. + max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) + if block.Time().Cmp(max) == 1 { + return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max) + } + + self.futureBlocks.Add(block.Hash(), block) + stats.queued++ + continue + } + + if IsParentErr(err) && self.futureBlocks.Contains(block.ParentHash()) { + self.futureBlocks.Add(block.Hash(), block) + stats.queued++ + continue + } + + blockErr(block, err) + + go ReportBlock(block, err) + + return i, err + } + if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil { + glog.V(logger.Warn).Infoln("error writing block receipts:", err) + } + + txcount += len(block.Transactions()) + // write the block to the chain and get the status + status, err := self.WriteBlock(block) + if err != nil { + return i, err + } + switch status { + case CanonStatTy: + if glog.V(logger.Debug) { + glog.Infof("[%v] inserted block #%d (%d TXs %v G %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), block.GasUsed(), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) + } + queue[i] = ChainEvent{block, block.Hash(), logs} + queueEvent.canonicalCount++ + + // This puts transactions in a extra db for rpc + PutTransactions(self.chainDb, block, block.Transactions()) + // store the receipts + PutReceipts(self.chainDb, receipts) + case SideStatTy: + if glog.V(logger.Detail) { + glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) + } + queue[i] = ChainSideEvent{block, logs} + queueEvent.sideCount++ + case SplitStatTy: + queue[i] = ChainSplitEvent{block, logs} + queueEvent.splitCount++ + } + stats.processed++ + } + + if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) { + tend := time.Since(tstart) + start, end := chain[0], chain[len(chain)-1] + glog.Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. 
#%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, txcount, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4]) + } + + go self.eventMux.Post(queueEvent) + + return 0, nil +} + +// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them +// to be part of the new canonical chain and accumulates potential missing transactions and post an +// event about them +func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error { + self.mu.Lock() + defer self.mu.Unlock() + + var ( + newChain types.Blocks + commonBlock *types.Block + oldStart = oldBlock + newStart = newBlock + deletedTxs types.Transactions + ) + + // first reduce whoever is higher bound + if oldBlock.NumberU64() > newBlock.NumberU64() { + // reduce old chain + for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) { + deletedTxs = append(deletedTxs, oldBlock.Transactions()...) + } + } else { + // reduce new chain and append new chain blocks for inserting later on + for newBlock = newBlock; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) { + newChain = append(newChain, newBlock) + } + } + if oldBlock == nil { + return fmt.Errorf("Invalid old chain") + } + if newBlock == nil { + return fmt.Errorf("Invalid new chain") + } + + numSplit := newBlock.Number() + for { + if oldBlock.Hash() == newBlock.Hash() { + commonBlock = oldBlock + break + } + newChain = append(newChain, newBlock) + deletedTxs = append(deletedTxs, oldBlock.Transactions()...) + + oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash()) + if oldBlock == nil { + return fmt.Errorf("Invalid old chain") + } + if newBlock == nil { + return fmt.Errorf("Invalid new chain") + } + } + + if glog.V(logger.Debug) { + commonHash := commonBlock.Hash() + glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4]) + } + + var addedTxs types.Transactions + // insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly + for _, block := range newChain { + // insert the block in the canonical way, re-writing history + self.insert(block) + // write canonical receipts and transactions + PutTransactions(self.chainDb, block, block.Transactions()) + PutReceipts(self.chainDb, GetBlockReceipts(self.chainDb, block.Hash())) + + addedTxs = append(addedTxs, block.Transactions()...) 
+ } + + // calculate the difference between deleted and added transactions + diff := types.TxDifference(deletedTxs, addedTxs) + // When transactions get deleted from the database that means the + // receipts that were created in the fork must also be deleted + for _, tx := range diff { + DeleteReceipt(self.chainDb, tx.Hash()) + DeleteTransaction(self.chainDb, tx.Hash()) + } + // Must be posted in a goroutine because of the transaction pool trying + // to acquire the chain manager lock + go self.eventMux.Post(RemovedTransactionEvent{diff}) + + return nil +} + +func (self *BlockChain) update() { + events := self.eventMux.Subscribe(queueEvent{}) + futureTimer := time.Tick(5 * time.Second) +out: + for { + select { + case ev := <-events.Chan(): + switch ev := ev.(type) { + case queueEvent: + for _, event := range ev.queue { + switch event := event.(type) { + case ChainEvent: + // We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long + // and in most cases isn't even necessary. + if self.currentBlock.Hash() == event.Hash { + self.currentGasLimit = CalcGasLimit(event.Block) + self.eventMux.Post(ChainHeadEvent{event.Block}) + } + } + self.eventMux.Post(event) + } + } + case <-futureTimer: + self.procFutureBlocks() + case <-self.quit: + break out + } + } +} + +func blockErr(block *types.Block, err error) { + if glog.V(logger.Error) { + glog.Errorf("Bad block #%v (%s)\n", block.Number(), block.Hash().Hex()) + glog.Errorf(" %v", err) + } +} diff --git a/core/blockchain_test.go b/core/blockchain_test.go new file mode 100644 index 000000000..e034417ce --- /dev/null +++ b/core/blockchain_test.go @@ -0,0 +1,652 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
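The reorg above only deletes lookup data for transactions that were exclusive to the old chain: types.TxDifference(deletedTxs, addedTxs) keeps the old-chain transactions the new chain did not re-include, and those are the ones whose canonical tx entries and receipts are removed and re-announced through RemovedTransactionEvent so the pool can reinject them. A sketch of that set-difference by hash, assuming the semantics described here rather than the exact library code:

package core

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// txDifference illustrates what reorg expects from types.TxDifference: the
// old-chain transactions that were not re-included by the new chain.
func txDifference(oldChain, newChain types.Transactions) types.Transactions {
	reincluded := make(map[common.Hash]struct{}, len(newChain))
	for _, tx := range newChain {
		reincluded[tx.Hash()] = struct{}{}
	}
	var diff types.Transactions
	for _, tx := range oldChain {
		if _, ok := reincluded[tx.Hash()]; !ok {
			diff = append(diff, tx) // dropped by the reorg
		}
	}
	return diff
}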
+ +package core + +import ( + "fmt" + "math/big" + "math/rand" + "os" + "path/filepath" + "runtime" + "strconv" + "testing" + + "github.com/ethereum/ethash" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/pow" + "github.com/ethereum/go-ethereum/rlp" + "github.com/hashicorp/golang-lru" +) + +func init() { + runtime.GOMAXPROCS(runtime.NumCPU()) +} + +func thePow() pow.PoW { + pow, _ := ethash.NewForTesting() + return pow +} + +func theBlockChain(db ethdb.Database, t *testing.T) *BlockChain { + var eventMux event.TypeMux + WriteTestNetGenesisBlock(db, 0) + blockchain, err := NewBlockChain(db, thePow(), &eventMux) + if err != nil { + t.Error("failed creating chainmanager:", err) + t.FailNow() + return nil + } + blockMan := NewBlockProcessor(db, nil, blockchain, &eventMux) + blockchain.SetProcessor(blockMan) + + return blockchain +} + +// Test fork of length N starting from block i +func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) { + // switch databases to process the new chain + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + // copy old chain up to i into new db with deterministic canonical + bman2, err := newCanonical(i, db) + if err != nil { + t.Fatal("could not make new canonical in testFork", err) + } + // assert the bmans have the same block at i + bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash() + bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash() + if bi1 != bi2 { + fmt.Printf("%+v\n%+v\n\n", bi1, bi2) + t.Fatal("chains do not have the same hash at height", i) + } + bman2.bc.SetProcessor(bman2) + + // extend the fork + parent := bman2.bc.CurrentBlock() + chainB := makeChain(parent, N, db, forkSeed) + _, err = bman2.bc.InsertChain(chainB) + if err != nil { + t.Fatal("Insert chain error for fork:", err) + } + + tdpre := bman.bc.Td() + // Test the fork's blocks on the original chain + td, err := testChain(chainB, bman) + if err != nil { + t.Fatal("expected chainB not to give errors:", err) + } + // Compare difficulties + f(tdpre, td) + + // Loop over parents making sure reconstruction is done properly +} + +func printChain(bc *BlockChain) { + for i := bc.CurrentBlock().Number().Uint64(); i > 0; i-- { + b := bc.GetBlockByNumber(uint64(i)) + fmt.Printf("\t%x %v\n", b.Hash(), b.Difficulty()) + } +} + +// process blocks against a chain +func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) { + for _, block := range chainB { + _, _, err := bman.bc.processor.Process(block) + if err != nil { + if IsKnownBlockErr(err) { + continue + } + return nil, err + } + bman.bc.mu.Lock() + WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash()))) + WriteBlock(bman.bc.chainDb, block) + bman.bc.mu.Unlock() + } + return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil +} + +func loadChain(fn string, t *testing.T) (types.Blocks, error) { + fh, err := os.OpenFile(filepath.Join("..", "_data", fn), os.O_RDONLY, os.ModePerm) + if err != nil { + return nil, err + } + defer fh.Close() + + var chain types.Blocks + if err := rlp.Decode(fh, &chain); err != nil { + return nil, err + } + + return chain, nil +} + +func insertChain(done chan bool, blockchain *BlockChain, chain 
types.Blocks, t *testing.T) { + _, err := blockchain.InsertChain(chain) + if err != nil { + fmt.Println(err) + t.FailNow() + } + done <- true +} + +func TestExtendCanonical(t *testing.T) { + CanonicalLength := 5 + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + // make first chain starting from genesis + bman, err := newCanonical(CanonicalLength, db) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + f := func(td1, td2 *big.Int) { + if td2.Cmp(td1) <= 0 { + t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1) + } + } + // Start fork from current height (CanonicalLength) + testFork(t, bman, CanonicalLength, 1, f) + testFork(t, bman, CanonicalLength, 2, f) + testFork(t, bman, CanonicalLength, 5, f) + testFork(t, bman, CanonicalLength, 10, f) +} + +func TestShorterFork(t *testing.T) { + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + // make first chain starting from genesis + bman, err := newCanonical(10, db) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + f := func(td1, td2 *big.Int) { + if td2.Cmp(td1) >= 0 { + t.Error("expected chainB to have lower difficulty. Got", td2, "expected less than", td1) + } + } + // Sum of numbers must be less than 10 + // for this to be a shorter fork + testFork(t, bman, 0, 3, f) + testFork(t, bman, 0, 7, f) + testFork(t, bman, 1, 1, f) + testFork(t, bman, 1, 7, f) + testFork(t, bman, 5, 3, f) + testFork(t, bman, 5, 4, f) +} + +func TestLongerFork(t *testing.T) { + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + // make first chain starting from genesis + bman, err := newCanonical(10, db) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + f := func(td1, td2 *big.Int) { + if td2.Cmp(td1) <= 0 { + t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1) + } + } + // Sum of numbers must be greater than 10 + // for this to be a longer fork + testFork(t, bman, 0, 11, f) + testFork(t, bman, 0, 15, f) + testFork(t, bman, 1, 10, f) + testFork(t, bman, 1, 12, f) + testFork(t, bman, 5, 6, f) + testFork(t, bman, 5, 8, f) +} + +func TestEqualFork(t *testing.T) { + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + bman, err := newCanonical(10, db) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + f := func(td1, td2 *big.Int) { + if td2.Cmp(td1) != 0 { + t.Error("expected chainB to have equal difficulty. 
Got", td2, "expected ", td1) + } + } + // Sum of numbers must be equal to 10 + // for this to be an equal fork + testFork(t, bman, 0, 10, f) + testFork(t, bman, 1, 9, f) + testFork(t, bman, 2, 8, f) + testFork(t, bman, 5, 5, f) + testFork(t, bman, 6, 4, f) + testFork(t, bman, 9, 1, f) +} + +func TestBrokenChain(t *testing.T) { + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + bman, err := newCanonical(10, db) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + db2, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + bman2, err := newCanonical(10, db2) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + bman2.bc.SetProcessor(bman2) + parent := bman2.bc.CurrentBlock() + chainB := makeChain(parent, 5, db2, forkSeed) + chainB = chainB[1:] + _, err = testChain(chainB, bman) + if err == nil { + t.Error("expected broken chain to return error") + } +} + +func TestChainInsertions(t *testing.T) { + t.Skip("Skipped: outdated test files") + + db, _ := ethdb.NewMemDatabase() + + chain1, err := loadChain("valid1", t) + if err != nil { + fmt.Println(err) + t.FailNow() + } + + chain2, err := loadChain("valid2", t) + if err != nil { + fmt.Println(err) + t.FailNow() + } + + blockchain := theBlockChain(db, t) + + const max = 2 + done := make(chan bool, max) + + go insertChain(done, blockchain, chain1, t) + go insertChain(done, blockchain, chain2, t) + + for i := 0; i < max; i++ { + <-done + } + + if chain2[len(chain2)-1].Hash() != blockchain.CurrentBlock().Hash() { + t.Error("chain2 is canonical and shouldn't be") + } + + if chain1[len(chain1)-1].Hash() != blockchain.CurrentBlock().Hash() { + t.Error("chain1 isn't canonical and should be") + } +} + +func TestChainMultipleInsertions(t *testing.T) { + t.Skip("Skipped: outdated test files") + + db, _ := ethdb.NewMemDatabase() + + const max = 4 + chains := make([]types.Blocks, max) + var longest int + for i := 0; i < max; i++ { + var err error + name := "valid" + strconv.Itoa(i+1) + chains[i], err = loadChain(name, t) + if len(chains[i]) >= len(chains[longest]) { + longest = i + } + fmt.Println("loaded", name, "with a length of", len(chains[i])) + if err != nil { + fmt.Println(err) + t.FailNow() + } + } + + blockchain := theBlockChain(db, t) + + done := make(chan bool, max) + for i, chain := range chains { + // XXX the go routine would otherwise reference the same (chain[3]) variable and fail + i := i + chain := chain + go func() { + insertChain(done, blockchain, chain, t) + fmt.Println(i, "done") + }() + } + + for i := 0; i < max; i++ { + <-done + } + + if chains[longest][len(chains[longest])-1].Hash() != blockchain.CurrentBlock().Hash() { + t.Error("Invalid canonical chain") + } +} + +type bproc struct{} + +func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil } + +func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block { + var chain []*types.Block + for i, difficulty := range d { + header := &types.Header{ + Coinbase: common.Address{seed}, + Number: big.NewInt(int64(i + 1)), + Difficulty: big.NewInt(int64(difficulty)), + } + if i == 0 { + header.ParentHash = genesis.Hash() + } else { + header.ParentHash = chain[i-1].Hash() + } + block := types.NewBlockWithHeader(header) + chain = append(chain, block) + } + return chain +} + +func chm(genesis *types.Block, db ethdb.Database) *BlockChain { + var eventMux event.TypeMux + bc := &BlockChain{chainDb: db, 
genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}} + bc.headerCache, _ = lru.New(100) + bc.bodyCache, _ = lru.New(100) + bc.bodyRLPCache, _ = lru.New(100) + bc.tdCache, _ = lru.New(100) + bc.blockCache, _ = lru.New(100) + bc.futureBlocks, _ = lru.New(100) + bc.processor = bproc{} + bc.ResetWithGenesisBlock(genesis) + + return bc +} + +func TestReorgLongest(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + genesis, err := WriteTestNetGenesisBlock(db, 0) + if err != nil { + t.Error(err) + t.FailNow() + } + bc := chm(genesis, db) + + chain1 := makeChainWithDiff(genesis, []int{1, 2, 4}, 10) + chain2 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11) + + bc.InsertChain(chain1) + bc.InsertChain(chain2) + + prev := bc.CurrentBlock() + for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) { + if prev.ParentHash() != block.Hash() { + t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash()) + } + } +} + +func TestBadHashes(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + genesis, err := WriteTestNetGenesisBlock(db, 0) + if err != nil { + t.Error(err) + t.FailNow() + } + bc := chm(genesis, db) + + chain := makeChainWithDiff(genesis, []int{1, 2, 4}, 10) + BadHashes[chain[2].Header().Hash()] = true + + _, err = bc.InsertChain(chain) + if !IsBadHashError(err) { + t.Errorf("error mismatch: want: BadHashError, have: %v", err) + } +} + +func TestReorgBadHashes(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + genesis, err := WriteTestNetGenesisBlock(db, 0) + if err != nil { + t.Error(err) + t.FailNow() + } + bc := chm(genesis, db) + + chain := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11) + bc.InsertChain(chain) + + if chain[3].Header().Hash() != bc.LastBlockHash() { + t.Errorf("last block hash mismatch: want: %x, have: %x", chain[3].Header().Hash(), bc.LastBlockHash()) + } + + // NewChainManager should check BadHashes when loading it db + BadHashes[chain[3].Header().Hash()] = true + + var eventMux event.TypeMux + ncm, err := NewBlockChain(db, FakePow{}, &eventMux) + if err != nil { + t.Errorf("NewChainManager err: %s", err) + } + + // check it set head to (valid) parent of bad hash block + if chain[2].Header().Hash() != ncm.LastBlockHash() { + t.Errorf("last block hash mismatch: want: %x, have: %x", chain[2].Header().Hash(), ncm.LastBlockHash()) + } + + if chain[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 { + t.Errorf("current block gasLimit mismatch: want: %x, have: %x", chain[2].Header().GasLimit, ncm.GasLimit()) + } +} + +func TestReorgShortest(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + genesis, err := WriteTestNetGenesisBlock(db, 0) + if err != nil { + t.Error(err) + t.FailNow() + } + bc := chm(genesis, db) + + chain1 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 10) + chain2 := makeChainWithDiff(genesis, []int{1, 10}, 11) + + bc.InsertChain(chain1) + bc.InsertChain(chain2) + + prev := bc.CurrentBlock() + for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) { + if prev.ParentHash() != block.Hash() { + t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash()) + } + } +} + +func TestInsertNonceError(t *testing.T) { + for i := 1; i < 25 && !t.Failed(); i++ { + db, _ := ethdb.NewMemDatabase() + genesis, err := WriteTestNetGenesisBlock(db, 0) + if err != nil { + t.Error(err) + t.FailNow() + } + bc := chm(genesis, db) + bc.processor = 
NewBlockProcessor(db, bc.pow, bc, bc.eventMux) + blocks := makeChain(bc.currentBlock, i, db, 0) + + fail := rand.Int() % len(blocks) + failblock := blocks[fail] + bc.pow = failPow{failblock.NumberU64()} + n, err := bc.InsertChain(blocks) + + // Check that the returned error indicates the nonce failure. + if n != fail { + t.Errorf("(i=%d) wrong failed block index: got %d, want %d", i, n, fail) + } + if !IsBlockNonceErr(err) { + t.Fatalf("(i=%d) got %q, want a nonce error", i, err) + } + nerr := err.(*BlockNonceErr) + if nerr.Number.Cmp(failblock.Number()) != 0 { + t.Errorf("(i=%d) wrong block number in error, got %v, want %v", i, nerr.Number, failblock.Number()) + } + if nerr.Hash != failblock.Hash() { + t.Errorf("(i=%d) wrong block hash in error, got %v, want %v", i, nerr.Hash, failblock.Hash()) + } + + // Check that all no blocks after the failing block have been inserted. + for _, block := range blocks[fail:] { + if bc.HasBlock(block.Hash()) { + t.Errorf("(i=%d) invalid block %d present in chain", i, block.NumberU64()) + } + } + } +} + +// Tests that chain reorganizations handle transaction removals and reinsertions. +func TestChainTxReorgs(t *testing.T) { + params.MinGasLimit = big.NewInt(125000) // Minimum the gas limit may ever be. + params.GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block. + + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + addr3 = crypto.PubkeyToAddress(key3.PublicKey) + db, _ = ethdb.NewMemDatabase() + ) + genesis := WriteGenesisBlockForTesting(db, + GenesisAccount{addr1, big.NewInt(1000000)}, + GenesisAccount{addr2, big.NewInt(1000000)}, + GenesisAccount{addr3, big.NewInt(1000000)}, + ) + // Create two transactions shared between the chains: + // - postponed: transaction included at a later block in the forked chain + // - swapped: transaction included at the same block number in the forked chain + postponed, _ := types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1) + swapped, _ := types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1) + + // Create two transactions that will be dropped by the forked chain: + // - pastDrop: transaction dropped retroactively from a past block + // - freshDrop: transaction dropped exactly at the block where the reorg is detected + var pastDrop, freshDrop *types.Transaction + + // Create three transactions that will be added in the forked chain: + // - pastAdd: transaction added before the reorganiztion is detected + // - freshAdd: transaction added at the exact block the reorg is detected + // - futureAdd: transaction added after the reorg has already finished + var pastAdd, freshAdd, futureAdd *types.Transaction + + chain := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) { + switch i { + case 0: + pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2) + + gen.AddTx(pastDrop) // This transaction will be dropped in the fork from below the split point + gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork + + case 2: + freshDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, 
big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2) + + gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point + gen.AddTx(swapped) // This transaction will be swapped out at the exact height + + gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain + } + }) + // Import the chain. This runs all block validation rules. + evmux := &event.TypeMux{} + chainman, _ := NewBlockChain(db, FakePow{}, evmux) + chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux)) + if i, err := chainman.InsertChain(chain); err != nil { + t.Fatalf("failed to insert original chain[%d]: %v", i, err) + } + + // overwrite the old chain + chain = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) { + switch i { + case 0: + pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) + gen.AddTx(pastAdd) // This transaction needs to be injected during reorg + + case 2: + gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain + gen.AddTx(swapped) // This transaction was swapped from the exact current spot in the original chain + + freshAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) + gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time + + case 3: + futureAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) + gen.AddTx(futureAdd) // This transaction will be added after a full reorg + } + }) + if _, err := chainman.InsertChain(chain); err != nil { + t.Fatalf("failed to insert forked chain: %v", err) + } + + // removed tx + for i, tx := range (types.Transactions{pastDrop, freshDrop}) { + if GetTransaction(db, tx.Hash()) != nil { + t.Errorf("drop %d: tx found while shouldn't have been", i) + } + if GetReceipt(db, tx.Hash()) != nil { + t.Errorf("drop %d: receipt found while shouldn't have been", i) + } + } + // added tx + for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) { + if GetTransaction(db, tx.Hash()) == nil { + t.Errorf("add %d: expected tx to be found", i) + } + if GetReceipt(db, tx.Hash()) == nil { + t.Errorf("add %d: expected receipt to be found", i) + } + } + // shared tx + for i, tx := range (types.Transactions{postponed, swapped}) { + if GetTransaction(db, tx.Hash()) == nil { + t.Errorf("share %d: expected tx to be found", i) + } + if GetReceipt(db, tx.Hash()) == nil { + t.Errorf("share %d: expected receipt to be found", i) + } + } +} diff --git a/core/chain_makers.go b/core/chain_makers.go index 3af9b0b89..ba09b3029 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -153,7 +153,7 @@ func (b *BlockGen) OffsetTime(seconds int64) { // and their coinbase will be the zero address. // // Blocks created by GenerateChain do not contain valid proof of work -// values. Inserting them into ChainManager requires use of FakePow or +// values. Inserting them into BlockChain requires use of FakePow or // a similar non-validating proof of work implementation. 
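That comment is why every insertion test in this patch pairs GenerateChain with FakePow. A minimal sketch of the pattern, mirroring ExampleGenerateChain and TestChainTxReorgs above; insertGenerated is an illustrative helper name:

package core

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
)

// insertGenerated builds n empty blocks on top of genesis and feeds them to a
// FakePow-backed BlockChain, which accepts the otherwise-invalid nonces.
func insertGenerated(db ethdb.Database, genesis *types.Block, n int) error {
	evmux := &event.TypeMux{}
	chain, err := NewBlockChain(db, FakePow{}, evmux)
	if err != nil {
		return err
	}
	chain.SetProcessor(NewBlockProcessor(db, FakePow{}, chain, evmux))

	blocks := GenerateChain(genesis, db, n, func(i int, gen *BlockGen) {})
	_, err = chain.InsertChain(blocks)
	return err
}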
func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) []*types.Block { statedb := state.New(parent.Root(), db) @@ -205,7 +205,7 @@ func newCanonical(n int, db ethdb.Database) (*BlockProcessor, error) { evmux := &event.TypeMux{} WriteTestNetGenesisBlock(db, 0) - chainman, _ := NewChainManager(db, FakePow{}, evmux) + chainman, _ := NewBlockChain(db, FakePow{}, evmux) bman := NewBlockProcessor(db, FakePow{}, chainman, evmux) bman.bc.SetProcessor(bman) parent := bman.bc.CurrentBlock() diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index ac18e5e0b..b33af8d87 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -77,7 +77,7 @@ func ExampleGenerateChain() { // Import the chain. This runs all block validation rules. evmux := &event.TypeMux{} - chainman, _ := NewChainManager(db, FakePow{}, evmux) + chainman, _ := NewBlockChain(db, FakePow{}, evmux) chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux)) if i, err := chainman.InsertChain(chain); err != nil { fmt.Printf("insert error (block %d): %v\n", i, err) diff --git a/core/chain_manager.go b/core/chain_manager.go deleted file mode 100644 index 49f831a59..000000000 --- a/core/chain_manager.go +++ /dev/null @@ -1,849 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package core implements the Ethereum consensus protocol. 
-package core - -import ( - "errors" - "fmt" - "io" - "math/big" - "sync" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/logger/glog" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/pow" - "github.com/ethereum/go-ethereum/rlp" - "github.com/hashicorp/golang-lru" -) - -var ( - chainlogger = logger.NewLogger("CHAIN") - jsonlogger = logger.NewJsonLogger() - - blockInsertTimer = metrics.NewTimer("chain/inserts") - - ErrNoGenesis = errors.New("Genesis not found in chain") -) - -const ( - headerCacheLimit = 512 - bodyCacheLimit = 256 - tdCacheLimit = 1024 - blockCacheLimit = 256 - maxFutureBlocks = 256 - maxTimeFutureBlocks = 30 - checkpointLimit = 200 -) - -type ChainManager struct { - //eth EthManager - chainDb ethdb.Database - processor types.BlockProcessor - eventMux *event.TypeMux - genesisBlock *types.Block - // Last known total difficulty - mu sync.RWMutex - chainmu sync.RWMutex - tsmu sync.RWMutex - - checkpoint int // checkpoint counts towards the new checkpoint - td *big.Int - currentBlock *types.Block - currentGasLimit *big.Int - - headerCache *lru.Cache // Cache for the most recent block headers - bodyCache *lru.Cache // Cache for the most recent block bodies - bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format - tdCache *lru.Cache // Cache for the most recent block total difficulties - blockCache *lru.Cache // Cache for the most recent entire blocks - futureBlocks *lru.Cache // future blocks are blocks added for later processing - - quit chan struct{} - running int32 // running must be called automically - // procInterrupt must be atomically called - procInterrupt int32 // interrupt signaler for block processing - wg sync.WaitGroup - - pow pow.PoW -} - -func NewChainManager(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) { - headerCache, _ := lru.New(headerCacheLimit) - bodyCache, _ := lru.New(bodyCacheLimit) - bodyRLPCache, _ := lru.New(bodyCacheLimit) - tdCache, _ := lru.New(tdCacheLimit) - blockCache, _ := lru.New(blockCacheLimit) - futureBlocks, _ := lru.New(maxFutureBlocks) - - bc := &ChainManager{ - chainDb: chainDb, - eventMux: mux, - quit: make(chan struct{}), - headerCache: headerCache, - bodyCache: bodyCache, - bodyRLPCache: bodyRLPCache, - tdCache: tdCache, - blockCache: blockCache, - futureBlocks: futureBlocks, - pow: pow, - } - - bc.genesisBlock = bc.GetBlockByNumber(0) - if bc.genesisBlock == nil { - reader, err := NewDefaultGenesisReader() - if err != nil { - return nil, err - } - bc.genesisBlock, err = WriteGenesisBlock(chainDb, reader) - if err != nil { - return nil, err - } - glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block") - } - if err := bc.setLastState(); err != nil { - return nil, err - } - // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain - for hash, _ := range BadHashes { - if block := bc.GetBlock(hash); block != nil { - glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4]) - block = bc.GetBlock(block.ParentHash()) - if block == nil { - glog.Fatal("Unable to complete. Parent block not found. 
Corrupted DB?") - } - bc.SetHead(block) - - glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation") - } - } - // Take ownership of this particular state - go bc.update() - return bc, nil -} - -func (bc *ChainManager) SetHead(head *types.Block) { - bc.mu.Lock() - defer bc.mu.Unlock() - - for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) { - DeleteBlock(bc.chainDb, block.Hash()) - } - bc.headerCache.Purge() - bc.bodyCache.Purge() - bc.bodyRLPCache.Purge() - bc.blockCache.Purge() - bc.futureBlocks.Purge() - - bc.currentBlock = head - bc.setTotalDifficulty(bc.GetTd(head.Hash())) - bc.insert(head) - bc.setLastState() -} - -func (self *ChainManager) Td() *big.Int { - self.mu.RLock() - defer self.mu.RUnlock() - - return new(big.Int).Set(self.td) -} - -func (self *ChainManager) GasLimit() *big.Int { - self.mu.RLock() - defer self.mu.RUnlock() - - return self.currentBlock.GasLimit() -} - -func (self *ChainManager) LastBlockHash() common.Hash { - self.mu.RLock() - defer self.mu.RUnlock() - - return self.currentBlock.Hash() -} - -func (self *ChainManager) CurrentBlock() *types.Block { - self.mu.RLock() - defer self.mu.RUnlock() - - return self.currentBlock -} - -func (self *ChainManager) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) { - self.mu.RLock() - defer self.mu.RUnlock() - - return new(big.Int).Set(self.td), self.currentBlock.Hash(), self.genesisBlock.Hash() -} - -func (self *ChainManager) SetProcessor(proc types.BlockProcessor) { - self.processor = proc -} - -func (self *ChainManager) State() *state.StateDB { - return state.New(self.CurrentBlock().Root(), self.chainDb) -} - -func (bc *ChainManager) recover() bool { - data, _ := bc.chainDb.Get([]byte("checkpoint")) - if len(data) != 0 { - block := bc.GetBlock(common.BytesToHash(data)) - if block != nil { - if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil { - glog.Fatalf("failed to write database head number: %v", err) - } - if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil { - glog.Fatalf("failed to write database head hash: %v", err) - } - bc.currentBlock = block - return true - } - } - return false -} - -func (bc *ChainManager) setLastState() error { - head := GetHeadBlockHash(bc.chainDb) - if head != (common.Hash{}) { - block := bc.GetBlock(head) - if block != nil { - bc.currentBlock = block - } else { - glog.Infof("LastBlock (%x) not found. Recovering...\n", head) - if bc.recover() { - glog.Infof("Recover successful") - } else { - glog.Fatalf("Recover failed. Please report") - } - } - } else { - bc.Reset() - } - bc.td = bc.GetTd(bc.currentBlock.Hash()) - bc.currentGasLimit = CalcGasLimit(bc.currentBlock) - - if glog.V(logger.Info) { - glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td) - } - - return nil -} - -// Reset purges the entire blockchain, restoring it to its genesis state. -func (bc *ChainManager) Reset() { - bc.ResetWithGenesisBlock(bc.genesisBlock) -} - -// ResetWithGenesisBlock purges the entire blockchain, restoring it to the -// specified genesis state. 
-func (bc *ChainManager) ResetWithGenesisBlock(genesis *types.Block) { - bc.mu.Lock() - defer bc.mu.Unlock() - - // Dump the entire block chain and purge the caches - for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) { - DeleteBlock(bc.chainDb, block.Hash()) - } - bc.headerCache.Purge() - bc.bodyCache.Purge() - bc.bodyRLPCache.Purge() - bc.blockCache.Purge() - bc.futureBlocks.Purge() - - // Prepare the genesis block and reinitialize the chain - if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil { - glog.Fatalf("failed to write genesis block TD: %v", err) - } - if err := WriteBlock(bc.chainDb, genesis); err != nil { - glog.Fatalf("failed to write genesis block: %v", err) - } - bc.genesisBlock = genesis - bc.insert(bc.genesisBlock) - bc.currentBlock = bc.genesisBlock - bc.setTotalDifficulty(genesis.Difficulty()) -} - -// Export writes the active chain to the given writer. -func (self *ChainManager) Export(w io.Writer) error { - if err := self.ExportN(w, uint64(0), self.currentBlock.NumberU64()); err != nil { - return err - } - return nil -} - -// ExportN writes a subset of the active chain to the given writer. -func (self *ChainManager) ExportN(w io.Writer, first uint64, last uint64) error { - self.mu.RLock() - defer self.mu.RUnlock() - - if first > last { - return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) - } - - glog.V(logger.Info).Infof("exporting %d blocks...\n", last-first+1) - - for nr := first; nr <= last; nr++ { - block := self.GetBlockByNumber(nr) - if block == nil { - return fmt.Errorf("export failed on #%d: not found", nr) - } - - if err := block.EncodeRLP(w); err != nil { - return err - } - } - - return nil -} - -// insert injects a block into the current chain block chain. Note, this function -// assumes that the `mu` mutex is held! -func (bc *ChainManager) insert(block *types.Block) { - // Add the block to the canonical chain number scheme and mark as the head - if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil { - glog.Fatalf("failed to insert block number: %v", err) - } - if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil { - glog.Fatalf("failed to insert block number: %v", err) - } - // Add a new restore point if we reached some limit - bc.checkpoint++ - if bc.checkpoint > checkpointLimit { - if err := bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes()); err != nil { - glog.Fatalf("failed to create checkpoint: %v", err) - } - bc.checkpoint = 0 - } - // Update the internal internal state with the head block - bc.currentBlock = block -} - -// Accessors -func (bc *ChainManager) Genesis() *types.Block { - return bc.genesisBlock -} - -// HasHeader checks if a block header is present in the database or not, caching -// it if present. -func (bc *ChainManager) HasHeader(hash common.Hash) bool { - return bc.GetHeader(hash) != nil -} - -// GetHeader retrieves a block header from the database by hash, caching it if -// found. 
-func (self *ChainManager) GetHeader(hash common.Hash) *types.Header { - // Short circuit if the header's already in the cache, retrieve otherwise - if header, ok := self.headerCache.Get(hash); ok { - return header.(*types.Header) - } - header := GetHeader(self.chainDb, hash) - if header == nil { - return nil - } - // Cache the found header for next time and return - self.headerCache.Add(header.Hash(), header) - return header -} - -// GetHeaderByNumber retrieves a block header from the database by number, -// caching it (associated with its hash) if found. -func (self *ChainManager) GetHeaderByNumber(number uint64) *types.Header { - hash := GetCanonicalHash(self.chainDb, number) - if hash == (common.Hash{}) { - return nil - } - return self.GetHeader(hash) -} - -// GetBody retrieves a block body (transactions and uncles) from the database by -// hash, caching it if found. -func (self *ChainManager) GetBody(hash common.Hash) *types.Body { - // Short circuit if the body's already in the cache, retrieve otherwise - if cached, ok := self.bodyCache.Get(hash); ok { - body := cached.(*types.Body) - return body - } - body := GetBody(self.chainDb, hash) - if body == nil { - return nil - } - // Cache the found body for next time and return - self.bodyCache.Add(hash, body) - return body -} - -// GetBodyRLP retrieves a block body in RLP encoding from the database by hash, -// caching it if found. -func (self *ChainManager) GetBodyRLP(hash common.Hash) rlp.RawValue { - // Short circuit if the body's already in the cache, retrieve otherwise - if cached, ok := self.bodyRLPCache.Get(hash); ok { - return cached.(rlp.RawValue) - } - body := GetBodyRLP(self.chainDb, hash) - if len(body) == 0 { - return nil - } - // Cache the found body for next time and return - self.bodyRLPCache.Add(hash, body) - return body -} - -// GetTd retrieves a block's total difficulty in the canonical chain from the -// database by hash, caching it if found. -func (self *ChainManager) GetTd(hash common.Hash) *big.Int { - // Short circuit if the td's already in the cache, retrieve otherwise - if cached, ok := self.tdCache.Get(hash); ok { - return cached.(*big.Int) - } - td := GetTd(self.chainDb, hash) - if td == nil { - return nil - } - // Cache the found body for next time and return - self.tdCache.Add(hash, td) - return td -} - -// HasBlock checks if a block is fully present in the database or not, caching -// it if present. -func (bc *ChainManager) HasBlock(hash common.Hash) bool { - return bc.GetBlock(hash) != nil -} - -// GetBlock retrieves a block from the database by hash, caching it if found. -func (self *ChainManager) GetBlock(hash common.Hash) *types.Block { - // Short circuit if the block's already in the cache, retrieve otherwise - if block, ok := self.blockCache.Get(hash); ok { - return block.(*types.Block) - } - block := GetBlock(self.chainDb, hash) - if block == nil { - return nil - } - // Cache the found block for next time and return - self.blockCache.Add(block.Hash(), block) - return block -} - -// GetBlockByNumber retrieves a block from the database by number, caching it -// (associated with its hash) if found. -func (self *ChainManager) GetBlockByNumber(number uint64) *types.Block { - hash := GetCanonicalHash(self.chainDb, number) - if hash == (common.Hash{}) { - return nil - } - return self.GetBlock(hash) -} - -// GetBlockHashesFromHash retrieves a number of block hashes starting at a given -// hash, fetching towards the genesis block. 
-func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { - // Get the origin header from which to fetch - header := self.GetHeader(hash) - if header == nil { - return nil - } - // Iterate the headers until enough is collected or the genesis reached - chain := make([]common.Hash, 0, max) - for i := uint64(0); i < max; i++ { - if header = self.GetHeader(header.ParentHash); header == nil { - break - } - chain = append(chain, header.Hash()) - if header.Number.Cmp(common.Big0) == 0 { - break - } - } - return chain -} - -// [deprecated by eth/62] -// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. -func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { - for i := 0; i < n; i++ { - block := self.GetBlock(hash) - if block == nil { - break - } - blocks = append(blocks, block) - hash = block.ParentHash() - } - return -} - -func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) { - for i := 0; block != nil && i < length; i++ { - uncles = append(uncles, block.Uncles()...) - block = self.GetBlock(block.ParentHash()) - } - - return -} - -// setTotalDifficulty updates the TD of the chain manager. Note, this function -// assumes that the `mu` mutex is held! -func (bc *ChainManager) setTotalDifficulty(td *big.Int) { - bc.td = new(big.Int).Set(td) -} - -func (bc *ChainManager) Stop() { - if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { - return - } - close(bc.quit) - atomic.StoreInt32(&bc.procInterrupt, 1) - - bc.wg.Wait() - - glog.V(logger.Info).Infoln("Chain manager stopped") -} - -type queueEvent struct { - queue []interface{} - canonicalCount int - sideCount int - splitCount int -} - -func (self *ChainManager) procFutureBlocks() { - blocks := make([]*types.Block, self.futureBlocks.Len()) - for i, hash := range self.futureBlocks.Keys() { - block, _ := self.futureBlocks.Get(hash) - blocks[i] = block.(*types.Block) - } - if len(blocks) > 0 { - types.BlockBy(types.Number).Sort(blocks) - self.InsertChain(blocks) - } -} - -type writeStatus byte - -const ( - NonStatTy writeStatus = iota - CanonStatTy - SplitStatTy - SideStatTy -) - -// WriteBlock writes the block to the chain. -func (self *ChainManager) WriteBlock(block *types.Block) (status writeStatus, err error) { - self.wg.Add(1) - defer self.wg.Done() - - // Calculate the total difficulty of the block - ptd := self.GetTd(block.ParentHash()) - if ptd == nil { - return NonStatTy, ParentError(block.ParentHash()) - } - td := new(big.Int).Add(block.Difficulty(), ptd) - - self.mu.RLock() - cblock := self.currentBlock - self.mu.RUnlock() - - // Compare the TD of the last known block in the canonical chain to make sure it's greater. - // At this point it's possible that a different chain (fork) becomes the new canonical chain. 
- if td.Cmp(self.Td()) > 0 { - // chain fork - if block.ParentHash() != cblock.Hash() { - // during split we merge two different chains and create the new canonical chain - err := self.reorg(cblock, block) - if err != nil { - return NonStatTy, err - } - } - status = CanonStatTy - - self.mu.Lock() - self.setTotalDifficulty(td) - self.insert(block) - self.mu.Unlock() - } else { - status = SideStatTy - } - - if err := WriteTd(self.chainDb, block.Hash(), td); err != nil { - glog.Fatalf("failed to write block total difficulty: %v", err) - } - if err := WriteBlock(self.chainDb, block); err != nil { - glog.Fatalf("filed to write block contents: %v", err) - } - // Delete from future blocks - self.futureBlocks.Remove(block.Hash()) - - return -} - -// InsertChain will attempt to insert the given chain in to the canonical chain or, otherwise, create a fork. It an error is returned -// it will return the index number of the failing block as well an error describing what went wrong (for possible errors see core/errors.go). -func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { - self.wg.Add(1) - defer self.wg.Done() - - self.chainmu.Lock() - defer self.chainmu.Unlock() - - // A queued approach to delivering events. This is generally - // faster than direct delivery and requires much less mutex - // acquiring. - var ( - queue = make([]interface{}, len(chain)) - queueEvent = queueEvent{queue: queue} - stats struct{ queued, processed, ignored int } - tstart = time.Now() - - nonceChecked = make([]bool, len(chain)) - ) - - // Start the parallel nonce verifier. - nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain) - defer close(nonceAbort) - - txcount := 0 - for i, block := range chain { - if atomic.LoadInt32(&self.procInterrupt) == 1 { - glog.V(logger.Debug).Infoln("Premature abort during chain processing") - break - } - - bstart := time.Now() - // Wait for block i's nonce to be verified before processing - // its state transition. - for !nonceChecked[i] { - r := <-nonceResults - nonceChecked[r.index] = true - if !r.valid { - block := chain[r.index] - return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()} - } - } - - if BadHashes[block.Hash()] { - err := BadHashError(block.Hash()) - blockErr(block, err) - return i, err - } - // Call in to the block processor and check for errors. It's likely that if one block fails - // all others will fail too (unless a known block is returned). - logs, receipts, err := self.processor.Process(block) - if err != nil { - if IsKnownBlockErr(err) { - stats.ignored++ - continue - } - - if err == BlockFutureErr { - // Allow up to MaxFuture second in the future blocks. If this limit - // is exceeded the chain is discarded and processed at a later time - // if given. 
- max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) - if block.Time().Cmp(max) == 1 { - return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max) - } - - self.futureBlocks.Add(block.Hash(), block) - stats.queued++ - continue - } - - if IsParentErr(err) && self.futureBlocks.Contains(block.ParentHash()) { - self.futureBlocks.Add(block.Hash(), block) - stats.queued++ - continue - } - - blockErr(block, err) - - go ReportBlock(block, err) - - return i, err - } - if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil { - glog.V(logger.Warn).Infoln("error writing block receipts:", err) - } - - txcount += len(block.Transactions()) - // write the block to the chain and get the status - status, err := self.WriteBlock(block) - if err != nil { - return i, err - } - switch status { - case CanonStatTy: - if glog.V(logger.Debug) { - glog.Infof("[%v] inserted block #%d (%d TXs %v G %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), block.GasUsed(), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) - } - queue[i] = ChainEvent{block, block.Hash(), logs} - queueEvent.canonicalCount++ - - // This puts transactions in a extra db for rpc - PutTransactions(self.chainDb, block, block.Transactions()) - // store the receipts - PutReceipts(self.chainDb, receipts) - case SideStatTy: - if glog.V(logger.Detail) { - glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) - } - queue[i] = ChainSideEvent{block, logs} - queueEvent.sideCount++ - case SplitStatTy: - queue[i] = ChainSplitEvent{block, logs} - queueEvent.splitCount++ - } - stats.processed++ - } - - if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) { - tend := time.Since(tstart) - start, end := chain[0], chain[len(chain)-1] - glog.Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. #%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, txcount, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4]) - } - - go self.eventMux.Post(queueEvent) - - return 0, nil -} - -// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them -// to be part of the new canonical chain and accumulates potential missing transactions and post an -// event about them -func (self *ChainManager) reorg(oldBlock, newBlock *types.Block) error { - self.mu.Lock() - defer self.mu.Unlock() - - var ( - newChain types.Blocks - commonBlock *types.Block - oldStart = oldBlock - newStart = newBlock - deletedTxs types.Transactions - ) - - // first reduce whoever is higher bound - if oldBlock.NumberU64() > newBlock.NumberU64() { - // reduce old chain - for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) { - deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 
- } - } else { - // reduce new chain and append new chain blocks for inserting later on - for newBlock = newBlock; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) { - newChain = append(newChain, newBlock) - } - } - if oldBlock == nil { - return fmt.Errorf("Invalid old chain") - } - if newBlock == nil { - return fmt.Errorf("Invalid new chain") - } - - numSplit := newBlock.Number() - for { - if oldBlock.Hash() == newBlock.Hash() { - commonBlock = oldBlock - break - } - newChain = append(newChain, newBlock) - deletedTxs = append(deletedTxs, oldBlock.Transactions()...) - - oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash()) - if oldBlock == nil { - return fmt.Errorf("Invalid old chain") - } - if newBlock == nil { - return fmt.Errorf("Invalid new chain") - } - } - - if glog.V(logger.Debug) { - commonHash := commonBlock.Hash() - glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4]) - } - - var addedTxs types.Transactions - // insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly - for _, block := range newChain { - // insert the block in the canonical way, re-writing history - self.insert(block) - // write canonical receipts and transactions - PutTransactions(self.chainDb, block, block.Transactions()) - PutReceipts(self.chainDb, GetBlockReceipts(self.chainDb, block.Hash())) - - addedTxs = append(addedTxs, block.Transactions()...) - } - - // calculate the difference between deleted and added transactions - diff := types.TxDifference(deletedTxs, addedTxs) - // When transactions get deleted from the database that means the - // receipts that were created in the fork must also be deleted - for _, tx := range diff { - DeleteReceipt(self.chainDb, tx.Hash()) - DeleteTransaction(self.chainDb, tx.Hash()) - } - // Must be posted in a goroutine because of the transaction pool trying - // to acquire the chain manager lock - go self.eventMux.Post(RemovedTransactionEvent{diff}) - - return nil -} - -func (self *ChainManager) update() { - events := self.eventMux.Subscribe(queueEvent{}) - futureTimer := time.Tick(5 * time.Second) -out: - for { - select { - case ev := <-events.Chan(): - switch ev := ev.(type) { - case queueEvent: - for _, event := range ev.queue { - switch event := event.(type) { - case ChainEvent: - // We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long - // and in most cases isn't even necessary. - if self.currentBlock.Hash() == event.Hash { - self.currentGasLimit = CalcGasLimit(event.Block) - self.eventMux.Post(ChainHeadEvent{event.Block}) - } - } - self.eventMux.Post(event) - } - } - case <-futureTimer: - self.procFutureBlocks() - case <-self.quit: - break out - } - } -} - -func blockErr(block *types.Block, err error) { - if glog.V(logger.Error) { - glog.Errorf("Bad block #%v (%s)\n", block.Number(), block.Hash().Hex()) - glog.Errorf(" %v", err) - } -} diff --git a/core/chain_manager_test.go b/core/chain_manager_test.go deleted file mode 100644 index 40286190b..000000000 --- a/core/chain_manager_test.go +++ /dev/null @@ -1,652 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
-	"fmt"
-	"math/big"
-	"math/rand"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strconv"
-	"testing"
-
-	"github.com/ethereum/ethash"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/core/vm"
-	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/pow"
-	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/hashicorp/golang-lru"
-)
-
-func init() {
-	runtime.GOMAXPROCS(runtime.NumCPU())
-}
-
-func thePow() pow.PoW {
-	pow, _ := ethash.NewForTesting()
-	return pow
-}
-
-func theChainManager(db ethdb.Database, t *testing.T) *ChainManager {
-	var eventMux event.TypeMux
-	WriteTestNetGenesisBlock(db, 0)
-	chainMan, err := NewChainManager(db, thePow(), &eventMux)
-	if err != nil {
-		t.Error("failed creating chainmanager:", err)
-		t.FailNow()
-		return nil
-	}
-	blockMan := NewBlockProcessor(db, nil, chainMan, &eventMux)
-	chainMan.SetProcessor(blockMan)
-
-	return chainMan
-}
-
-// Test fork of length N starting from block i
-func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) {
-	// switch databases to process the new chain
-	db, err := ethdb.NewMemDatabase()
-	if err != nil {
-		t.Fatal("Failed to create db:", err)
-	}
-	// copy old chain up to i into new db with deterministic canonical
-	bman2, err := newCanonical(i, db)
-	if err != nil {
-		t.Fatal("could not make new canonical in testFork", err)
-	}
-	// assert the bmans have the same block at i
-	bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash()
-	bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash()
-	if bi1 != bi2 {
-		fmt.Printf("%+v\n%+v\n\n", bi1, bi2)
-		t.Fatal("chains do not have the same hash at height", i)
-	}
-	bman2.bc.SetProcessor(bman2)
-
-	// extend the fork
-	parent := bman2.bc.CurrentBlock()
-	chainB := makeChain(parent, N, db, forkSeed)
-	_, err = bman2.bc.InsertChain(chainB)
-	if err != nil {
-		t.Fatal("Insert chain error for fork:", err)
-	}
-
-	tdpre := bman.bc.Td()
-	// Test the fork's blocks on the original chain
-	td, err := testChain(chainB, bman)
-	if err != nil {
-		t.Fatal("expected chainB not to give errors:", err)
-	}
-	// Compare difficulties
-	f(tdpre, td)
-
-	// Loop over parents making sure reconstruction is done properly
-}
-
-func printChain(bc *ChainManager) {
-	for i := bc.CurrentBlock().Number().Uint64(); i > 0; i-- {
-		b := bc.GetBlockByNumber(uint64(i))
-		fmt.Printf("\t%x %v\n", b.Hash(), b.Difficulty())
-	}
-}
-
-// process blocks against a chain
-func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
-	for _, block := range chainB {
-		_, _, err := bman.bc.processor.Process(block)
-		if err != nil {
-			if IsKnownBlockErr(err) {
-				continue
- } - return nil, err - } - bman.bc.mu.Lock() - WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash()))) - WriteBlock(bman.bc.chainDb, block) - bman.bc.mu.Unlock() - } - return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil -} - -func loadChain(fn string, t *testing.T) (types.Blocks, error) { - fh, err := os.OpenFile(filepath.Join("..", "_data", fn), os.O_RDONLY, os.ModePerm) - if err != nil { - return nil, err - } - defer fh.Close() - - var chain types.Blocks - if err := rlp.Decode(fh, &chain); err != nil { - return nil, err - } - - return chain, nil -} - -func insertChain(done chan bool, chainMan *ChainManager, chain types.Blocks, t *testing.T) { - _, err := chainMan.InsertChain(chain) - if err != nil { - fmt.Println(err) - t.FailNow() - } - done <- true -} - -func TestExtendCanonical(t *testing.T) { - CanonicalLength := 5 - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - // make first chain starting from genesis - bman, err := newCanonical(CanonicalLength, db) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - f := func(td1, td2 *big.Int) { - if td2.Cmp(td1) <= 0 { - t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1) - } - } - // Start fork from current height (CanonicalLength) - testFork(t, bman, CanonicalLength, 1, f) - testFork(t, bman, CanonicalLength, 2, f) - testFork(t, bman, CanonicalLength, 5, f) - testFork(t, bman, CanonicalLength, 10, f) -} - -func TestShorterFork(t *testing.T) { - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - // make first chain starting from genesis - bman, err := newCanonical(10, db) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - f := func(td1, td2 *big.Int) { - if td2.Cmp(td1) >= 0 { - t.Error("expected chainB to have lower difficulty. Got", td2, "expected less than", td1) - } - } - // Sum of numbers must be less than 10 - // for this to be a shorter fork - testFork(t, bman, 0, 3, f) - testFork(t, bman, 0, 7, f) - testFork(t, bman, 1, 1, f) - testFork(t, bman, 1, 7, f) - testFork(t, bman, 5, 3, f) - testFork(t, bman, 5, 4, f) -} - -func TestLongerFork(t *testing.T) { - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - // make first chain starting from genesis - bman, err := newCanonical(10, db) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - f := func(td1, td2 *big.Int) { - if td2.Cmp(td1) <= 0 { - t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1) - } - } - // Sum of numbers must be greater than 10 - // for this to be a longer fork - testFork(t, bman, 0, 11, f) - testFork(t, bman, 0, 15, f) - testFork(t, bman, 1, 10, f) - testFork(t, bman, 1, 12, f) - testFork(t, bman, 5, 6, f) - testFork(t, bman, 5, 8, f) -} - -func TestEqualFork(t *testing.T) { - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - bman, err := newCanonical(10, db) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - f := func(td1, td2 *big.Int) { - if td2.Cmp(td1) != 0 { - t.Error("expected chainB to have equal difficulty. 
Got", td2, "expected ", td1) - } - } - // Sum of numbers must be equal to 10 - // for this to be an equal fork - testFork(t, bman, 0, 10, f) - testFork(t, bman, 1, 9, f) - testFork(t, bman, 2, 8, f) - testFork(t, bman, 5, 5, f) - testFork(t, bman, 6, 4, f) - testFork(t, bman, 9, 1, f) -} - -func TestBrokenChain(t *testing.T) { - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - bman, err := newCanonical(10, db) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - db2, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - bman2, err := newCanonical(10, db2) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - bman2.bc.SetProcessor(bman2) - parent := bman2.bc.CurrentBlock() - chainB := makeChain(parent, 5, db2, forkSeed) - chainB = chainB[1:] - _, err = testChain(chainB, bman) - if err == nil { - t.Error("expected broken chain to return error") - } -} - -func TestChainInsertions(t *testing.T) { - t.Skip("Skipped: outdated test files") - - db, _ := ethdb.NewMemDatabase() - - chain1, err := loadChain("valid1", t) - if err != nil { - fmt.Println(err) - t.FailNow() - } - - chain2, err := loadChain("valid2", t) - if err != nil { - fmt.Println(err) - t.FailNow() - } - - chainMan := theChainManager(db, t) - - const max = 2 - done := make(chan bool, max) - - go insertChain(done, chainMan, chain1, t) - go insertChain(done, chainMan, chain2, t) - - for i := 0; i < max; i++ { - <-done - } - - if chain2[len(chain2)-1].Hash() != chainMan.CurrentBlock().Hash() { - t.Error("chain2 is canonical and shouldn't be") - } - - if chain1[len(chain1)-1].Hash() != chainMan.CurrentBlock().Hash() { - t.Error("chain1 isn't canonical and should be") - } -} - -func TestChainMultipleInsertions(t *testing.T) { - t.Skip("Skipped: outdated test files") - - db, _ := ethdb.NewMemDatabase() - - const max = 4 - chains := make([]types.Blocks, max) - var longest int - for i := 0; i < max; i++ { - var err error - name := "valid" + strconv.Itoa(i+1) - chains[i], err = loadChain(name, t) - if len(chains[i]) >= len(chains[longest]) { - longest = i - } - fmt.Println("loaded", name, "with a length of", len(chains[i])) - if err != nil { - fmt.Println(err) - t.FailNow() - } - } - - chainMan := theChainManager(db, t) - - done := make(chan bool, max) - for i, chain := range chains { - // XXX the go routine would otherwise reference the same (chain[3]) variable and fail - i := i - chain := chain - go func() { - insertChain(done, chainMan, chain, t) - fmt.Println(i, "done") - }() - } - - for i := 0; i < max; i++ { - <-done - } - - if chains[longest][len(chains[longest])-1].Hash() != chainMan.CurrentBlock().Hash() { - t.Error("Invalid canonical chain") - } -} - -type bproc struct{} - -func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil } - -func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block { - var chain []*types.Block - for i, difficulty := range d { - header := &types.Header{ - Coinbase: common.Address{seed}, - Number: big.NewInt(int64(i + 1)), - Difficulty: big.NewInt(int64(difficulty)), - } - if i == 0 { - header.ParentHash = genesis.Hash() - } else { - header.ParentHash = chain[i-1].Hash() - } - block := types.NewBlockWithHeader(header) - chain = append(chain, block) - } - return chain -} - -func chm(genesis *types.Block, db ethdb.Database) *ChainManager { - var eventMux event.TypeMux - bc := &ChainManager{chainDb: db, genesisBlock: 
genesis, eventMux: &eventMux, pow: FakePow{}} - bc.headerCache, _ = lru.New(100) - bc.bodyCache, _ = lru.New(100) - bc.bodyRLPCache, _ = lru.New(100) - bc.tdCache, _ = lru.New(100) - bc.blockCache, _ = lru.New(100) - bc.futureBlocks, _ = lru.New(100) - bc.processor = bproc{} - bc.ResetWithGenesisBlock(genesis) - - return bc -} - -func TestReorgLongest(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - genesis, err := WriteTestNetGenesisBlock(db, 0) - if err != nil { - t.Error(err) - t.FailNow() - } - bc := chm(genesis, db) - - chain1 := makeChainWithDiff(genesis, []int{1, 2, 4}, 10) - chain2 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11) - - bc.InsertChain(chain1) - bc.InsertChain(chain2) - - prev := bc.CurrentBlock() - for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) { - if prev.ParentHash() != block.Hash() { - t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash()) - } - } -} - -func TestBadHashes(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - genesis, err := WriteTestNetGenesisBlock(db, 0) - if err != nil { - t.Error(err) - t.FailNow() - } - bc := chm(genesis, db) - - chain := makeChainWithDiff(genesis, []int{1, 2, 4}, 10) - BadHashes[chain[2].Header().Hash()] = true - - _, err = bc.InsertChain(chain) - if !IsBadHashError(err) { - t.Errorf("error mismatch: want: BadHashError, have: %v", err) - } -} - -func TestReorgBadHashes(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - genesis, err := WriteTestNetGenesisBlock(db, 0) - if err != nil { - t.Error(err) - t.FailNow() - } - bc := chm(genesis, db) - - chain := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11) - bc.InsertChain(chain) - - if chain[3].Header().Hash() != bc.LastBlockHash() { - t.Errorf("last block hash mismatch: want: %x, have: %x", chain[3].Header().Hash(), bc.LastBlockHash()) - } - - // NewChainManager should check BadHashes when loading it db - BadHashes[chain[3].Header().Hash()] = true - - var eventMux event.TypeMux - ncm, err := NewChainManager(db, FakePow{}, &eventMux) - if err != nil { - t.Errorf("NewChainManager err: %s", err) - } - - // check it set head to (valid) parent of bad hash block - if chain[2].Header().Hash() != ncm.LastBlockHash() { - t.Errorf("last block hash mismatch: want: %x, have: %x", chain[2].Header().Hash(), ncm.LastBlockHash()) - } - - if chain[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 { - t.Errorf("current block gasLimit mismatch: want: %x, have: %x", chain[2].Header().GasLimit, ncm.GasLimit()) - } -} - -func TestReorgShortest(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - genesis, err := WriteTestNetGenesisBlock(db, 0) - if err != nil { - t.Error(err) - t.FailNow() - } - bc := chm(genesis, db) - - chain1 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 10) - chain2 := makeChainWithDiff(genesis, []int{1, 10}, 11) - - bc.InsertChain(chain1) - bc.InsertChain(chain2) - - prev := bc.CurrentBlock() - for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) { - if prev.ParentHash() != block.Hash() { - t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash()) - } - } -} - -func TestInsertNonceError(t *testing.T) { - for i := 1; i < 25 && !t.Failed(); i++ { - db, _ := ethdb.NewMemDatabase() - genesis, err := WriteTestNetGenesisBlock(db, 0) - if err != nil { - t.Error(err) - t.FailNow() - } - bc := chm(genesis, db) - bc.processor = 
NewBlockProcessor(db, bc.pow, bc, bc.eventMux) - blocks := makeChain(bc.currentBlock, i, db, 0) - - fail := rand.Int() % len(blocks) - failblock := blocks[fail] - bc.pow = failPow{failblock.NumberU64()} - n, err := bc.InsertChain(blocks) - - // Check that the returned error indicates the nonce failure. - if n != fail { - t.Errorf("(i=%d) wrong failed block index: got %d, want %d", i, n, fail) - } - if !IsBlockNonceErr(err) { - t.Fatalf("(i=%d) got %q, want a nonce error", i, err) - } - nerr := err.(*BlockNonceErr) - if nerr.Number.Cmp(failblock.Number()) != 0 { - t.Errorf("(i=%d) wrong block number in error, got %v, want %v", i, nerr.Number, failblock.Number()) - } - if nerr.Hash != failblock.Hash() { - t.Errorf("(i=%d) wrong block hash in error, got %v, want %v", i, nerr.Hash, failblock.Hash()) - } - - // Check that all no blocks after the failing block have been inserted. - for _, block := range blocks[fail:] { - if bc.HasBlock(block.Hash()) { - t.Errorf("(i=%d) invalid block %d present in chain", i, block.NumberU64()) - } - } - } -} - -// Tests that chain reorganizations handle transaction removals and reinsertions. -func TestChainTxReorgs(t *testing.T) { - params.MinGasLimit = big.NewInt(125000) // Minimum the gas limit may ever be. - params.GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block. - - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - addr3 = crypto.PubkeyToAddress(key3.PublicKey) - db, _ = ethdb.NewMemDatabase() - ) - genesis := WriteGenesisBlockForTesting(db, - GenesisAccount{addr1, big.NewInt(1000000)}, - GenesisAccount{addr2, big.NewInt(1000000)}, - GenesisAccount{addr3, big.NewInt(1000000)}, - ) - // Create two transactions shared between the chains: - // - postponed: transaction included at a later block in the forked chain - // - swapped: transaction included at the same block number in the forked chain - postponed, _ := types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1) - swapped, _ := types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1) - - // Create two transactions that will be dropped by the forked chain: - // - pastDrop: transaction dropped retroactively from a past block - // - freshDrop: transaction dropped exactly at the block where the reorg is detected - var pastDrop, freshDrop *types.Transaction - - // Create three transactions that will be added in the forked chain: - // - pastAdd: transaction added before the reorganiztion is detected - // - freshAdd: transaction added at the exact block the reorg is detected - // - futureAdd: transaction added after the reorg has already finished - var pastAdd, freshAdd, futureAdd *types.Transaction - - chain := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) { - switch i { - case 0: - pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2) - - gen.AddTx(pastDrop) // This transaction will be dropped in the fork from below the split point - gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork - - case 2: - freshDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, 
big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2) - - gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point - gen.AddTx(swapped) // This transaction will be swapped out at the exact height - - gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain - } - }) - // Import the chain. This runs all block validation rules. - evmux := &event.TypeMux{} - chainman, _ := NewChainManager(db, FakePow{}, evmux) - chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux)) - if i, err := chainman.InsertChain(chain); err != nil { - t.Fatalf("failed to insert original chain[%d]: %v", i, err) - } - - // overwrite the old chain - chain = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) { - switch i { - case 0: - pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) - gen.AddTx(pastAdd) // This transaction needs to be injected during reorg - - case 2: - gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain - gen.AddTx(swapped) // This transaction was swapped from the exact current spot in the original chain - - freshAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) - gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time - - case 3: - futureAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) - gen.AddTx(futureAdd) // This transaction will be added after a full reorg - } - }) - if _, err := chainman.InsertChain(chain); err != nil { - t.Fatalf("failed to insert forked chain: %v", err) - } - - // removed tx - for i, tx := range (types.Transactions{pastDrop, freshDrop}) { - if GetTransaction(db, tx.Hash()) != nil { - t.Errorf("drop %d: tx found while shouldn't have been", i) - } - if GetReceipt(db, tx.Hash()) != nil { - t.Errorf("drop %d: receipt found while shouldn't have been", i) - } - } - // added tx - for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) { - if GetTransaction(db, tx.Hash()) == nil { - t.Errorf("add %d: expected tx to be found", i) - } - if GetReceipt(db, tx.Hash()) == nil { - t.Errorf("add %d: expected receipt to be found", i) - } - } - // shared tx - for i, tx := range (types.Transactions{postponed, swapped}) { - if GetTransaction(db, tx.Hash()) == nil { - t.Errorf("share %d: expected tx to be found", i) - } - if GetReceipt(db, tx.Hash()) == nil { - t.Errorf("share %d: expected receipt to be found", i) - } - } -} diff --git a/core/helper_test.go b/core/helper_test.go index 81ea6fc22..fd6a5491c 100644 --- a/core/helper_test.go +++ b/core/helper_test.go @@ -34,7 +34,7 @@ type TestManager struct { db ethdb.Database txPool *TxPool - blockChain *ChainManager + blockChain *BlockChain Blocks []*types.Block } @@ -54,7 +54,7 @@ func (s *TestManager) Peers() *list.List { return list.New() } -func (s *TestManager) ChainManager() *ChainManager { +func (s *TestManager) BlockChain() *BlockChain { return s.blockChain } @@ -89,7 +89,7 @@ func NewTestManager() *TestManager { testManager.eventMux = new(event.TypeMux) testManager.db = db // testManager.txPool = NewTxPool(testManager) - // testManager.blockChain = NewChainManager(testManager) + // testManager.blockChain = NewBlockChain(testManager) // testManager.stateManager = NewStateManager(testManager) return testManager diff --git a/core/manager.go b/core/manager.go index 0f108a6de..289c87c11 100644 
--- a/core/manager.go
+++ b/core/manager.go
@@ -26,7 +26,7 @@ import (
 type Backend interface {
 	AccountManager() *accounts.Manager
 	BlockProcessor() *BlockProcessor
-	ChainManager() *ChainManager
+	BlockChain() *BlockChain
 	TxPool() *TxPool
 	ChainDb() ethdb.Database
 	DappDb() ethdb.Database
diff --git a/core/vm/common.go b/core/vm/common.go
index b52b598ba..2d1aa9332 100644
--- a/core/vm/common.go
+++ b/core/vm/common.go
@@ -38,7 +38,7 @@ const (
 )
 
 var (
-	Pow256 = common.BigPow(2, 256) // Pew256 is 2**256
+	Pow256 = common.BigPow(2, 256) // Pow256 is 2**256
 
 	U256 = common.U256 // Shortcut to common.U256
 	S256 = common.S256 // Shortcut to common.S256
@@ -46,7 +46,7 @@ var (
 	Zero = common.Big0 // Shortcut to common.Big0
 	One  = common.Big1 // Shortcut to common.Big1
 
-	max = big.NewInt(math.MaxInt64) // Maximum 256 bit integer
+	max = big.NewInt(math.MaxInt64) // Maximum 64 bit integer
 )
 
 // NewVm returns a new VM based on the Environment
diff --git a/core/vm/contract.go b/core/vm/contract.go
index 8460cc47b..95417e747 100644
--- a/core/vm/contract.go
+++ b/core/vm/contract.go
@@ -118,8 +118,8 @@ func (self *Contract) SetCode(code []byte) {
 	self.Code = code
 }
 
-// SetCallCode sets the address of the code address and sets the code
-// of the contract according to the backing database.
+// SetCallCode sets the code of the contract and address of the backing data
+// object
 func (self *Contract) SetCallCode(addr *common.Address, code []byte) {
 	self.Code = code
 	self.CodeAddr = addr
diff --git a/core/vm/doc.go b/core/vm/doc.go
index 4deb7761d..ab87bf934 100644
--- a/core/vm/doc.go
+++ b/core/vm/doc.go
@@ -18,15 +18,15 @@
 Package vm implements the Ethereum Virtual Machine.
 
 The vm package implements two EVMs, a byte code VM and a JIT VM. The BC
-(Byte Code) VM loops over a set of bytes and executes them according to a set
-of rules defined in the Ethereum yellow paper. When the BC VM is invokes it
+(Byte Code) VM loops over a set of bytes and executes them according to the set
+of rules defined in the Ethereum yellow paper. When the BC VM is invoked it
 invokes the JIT VM in a seperate goroutine and compiles the byte code in JIT
 instructions.
 
 The JIT VM, when invoked, loops around a set of pre-defined instructions until
-it either runs of gas, causes an internel error, returns or stops. At a later
+it either runs out of gas, causes an internal error, returns or stops. At a later
 stage the JIT VM will see some additional features that will cause sets of
 instructions to be compiled down to segments. Segments are sets of instructions
-that can be ran in one go saving precious time during execution.
+that can be run in one go saving precious time during execution.
 */
 package vm
diff --git a/core/vm/environment.go b/core/vm/environment.go
index 606518fd4..f8e19baea 100644
--- a/core/vm/environment.go
+++ b/core/vm/environment.go
@@ -26,7 +26,7 @@ import (
 // it's own isolated environment.
 
 // Environment is an EVM requirement and helper which allows access to outside
-// information such like states.
+// information such as states.
type Environment interface { // The state database Db() Database @@ -50,7 +50,7 @@ type Environment interface { GasLimit() *big.Int // Determines whether it's possible to transact CanTransfer(from common.Address, balance *big.Int) bool - // Transfer from to to with amount set + // Transfers amount from one account to the other Transfer(from, to Account, amount *big.Int) error // Adds a LOG to the state AddLog(*Log) diff --git a/core/vm/memory.go b/core/vm/memory.go index 101ec75cb..d01188417 100644 --- a/core/vm/memory.go +++ b/core/vm/memory.go @@ -18,7 +18,7 @@ package vm import "fmt" -// Memory implements ethereum RAM backed by a simple byte slice +// Memory implements a simple memory model for the ethereum virtual machine. type Memory struct { store []byte } diff --git a/core/vm_env.go b/core/vm_env.go index dea280746..467e34c6b 100644 --- a/core/vm_env.go +++ b/core/vm_env.go @@ -30,13 +30,13 @@ type VMEnv struct { header *types.Header msg Message depth int - chain *ChainManager + chain *BlockChain typ vm.Type // structured logging logs []vm.StructLog } -func NewEnv(state *state.StateDB, chain *ChainManager, msg Message, header *types.Header) *VMEnv { +func NewEnv(state *state.StateDB, chain *BlockChain, msg Message, header *types.Header) *VMEnv { return &VMEnv{ chain: chain, state: state, diff --git a/eth/backend.go b/eth/backend.go index 349dfa613..a480b4931 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -217,7 +217,7 @@ type Ethereum struct { // State manager for processing new blocks and managing the over all states blockProcessor *core.BlockProcessor txPool *core.TxPool - chainManager *core.ChainManager + blockchain *core.BlockChain accountManager *accounts.Manager whisper *whisper.Whisper pow *ethash.Ethash @@ -365,7 +365,7 @@ func New(config *Config) (*Ethereum, error) { eth.pow = ethash.New() } //genesis := core.GenesisBlock(uint64(config.GenesisNonce), stateDb) - eth.chainManager, err = core.NewChainManager(chainDb, eth.pow, eth.EventMux()) + eth.blockchain, err = core.NewBlockChain(chainDb, eth.pow, eth.EventMux()) if err != nil { if err == core.ErrNoGenesis { return nil, fmt.Errorf(`Genesis block not found. 
Please supply a genesis block with the "--genesis /path/to/file" argument`) @@ -373,11 +373,11 @@ func New(config *Config) (*Ethereum, error) { return nil, err } - eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit) + eth.txPool = core.NewTxPool(eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit) - eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.chainManager, eth.EventMux()) - eth.chainManager.SetProcessor(eth.blockProcessor) - eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.chainManager, chainDb) + eth.blockProcessor = core.NewBlockProcessor(chainDb, eth.pow, eth.blockchain, eth.EventMux()) + eth.blockchain.SetProcessor(eth.blockProcessor) + eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb) eth.miner = miner.New(eth, eth.EventMux(), eth.pow) eth.miner.SetGasPrice(config.GasPrice) @@ -441,7 +441,7 @@ func (s *Ethereum) NodeInfo() *NodeInfo { DiscPort: int(node.UDP), TCPPort: int(node.TCP), ListenAddr: s.net.ListenAddr, - Td: s.ChainManager().Td().String(), + Td: s.BlockChain().Td().String(), } } @@ -478,7 +478,7 @@ func (s *Ethereum) PeersInfo() (peersinfo []*PeerInfo) { } func (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) { - s.chainManager.ResetWithGenesisBlock(gb) + s.blockchain.ResetWithGenesisBlock(gb) } func (s *Ethereum) StartMining(threads int) error { @@ -518,7 +518,7 @@ func (s *Ethereum) Miner() *miner.Miner { return s.miner } // func (s *Ethereum) Logger() logger.LogSystem { return s.logger } func (s *Ethereum) Name() string { return s.net.Name } func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManager } -func (s *Ethereum) ChainManager() *core.ChainManager { return s.chainManager } +func (s *Ethereum) BlockChain() *core.BlockChain { return s.blockchain } func (s *Ethereum) BlockProcessor() *core.BlockProcessor { return s.blockProcessor } func (s *Ethereum) TxPool() *core.TxPool { return s.txPool } func (s *Ethereum) Whisper() *whisper.Whisper { return s.whisper } @@ -581,7 +581,7 @@ func (self *Ethereum) AddPeer(nodeURL string) error { func (s *Ethereum) Stop() { s.net.Stop() - s.chainManager.Stop() + s.blockchain.Stop() s.protocolManager.Stop() s.txPool.Stop() s.eventMux.Stop() @@ -622,7 +622,7 @@ func (self *Ethereum) StartAutoDAG() { select { case <-timer: glog.V(logger.Info).Infof("checking DAG (ethash dir: %s)", ethash.DefaultDir) - currentBlock := self.ChainManager().CurrentBlock().NumberU64() + currentBlock := self.BlockChain().CurrentBlock().NumberU64() thisEpoch := currentBlock / epochLength if nextEpoch <= thisEpoch { if currentBlock%epochLength > autoDAGepochHeight { diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 0b4911629..2bcf20d0c 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -1,4 +1,4 @@ -// Copyright 2014 The go-ethereum Authors +// Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/ethdb" ) type AccountChange struct { @@ -31,7 +32,7 @@ type AccountChange struct { // Filtering interface type Filter struct { - db common.Database + db ethdb.Database earliest int64 latest int64 skip int @@ -46,7 +47,7 @@ type Filter struct { // Create a new filter which uses a bloom filter on blocks to figure out whether a particular block // is interesting or not. -func New(db common.Database) *Filter { +func New(db ethdb.Database) *Filter { return &Filter{db: db} } @@ -79,7 +80,7 @@ func (self *Filter) SetSkip(skip int) { // Run filters logs with the current parameters set func (self *Filter) Find() vm.Logs { - earliestBlock := core.GetCurrentBlock(self.db) + earliestBlock := core.GetBlock(self.db, core.GetHeadBlockHash(self.db)) var earliestBlockNo uint64 = uint64(self.earliest) if self.earliest == -1 { earliestBlockNo = earliestBlock.NumberU64() @@ -91,8 +92,12 @@ func (self *Filter) Find() vm.Logs { var ( logs vm.Logs - block = core.GetBlockByNumber(self.db, latestBlockNo) + block *types.Block ) + hash := core.GetCanonicalHash(self.db, latestBlockNo) + if hash != (common.Hash{}) { + block = core.GetBlock(self.db, hash) + } done: for i := 0; block != nil; i++ { @@ -120,7 +125,7 @@ done: logs = append(logs, self.FilterLogs(unfiltered)...) } - block = core.GetBlockByHash(self.db, block.ParentHash()) + block = core.GetBlock(self.db, block.ParentHash()) } skip := int(math.Min(float64(len(logs)), float64(self.skip))) diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 1c27c7be4..4972dcd59 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -28,7 +28,7 @@ import ( // FilterSystem manages filters that filter specific events such as // block, transaction and log events. The Filtering system can be used to listen -// for specific LOG events fires by the EVM (Ethereum Virtual Machine). +// for specific LOG events fired by the EVM (Ethereum Virtual Machine). 
type FilterSystem struct { eventMux *event.TypeMux diff --git a/eth/gasprice.go b/eth/gasprice.go index 3caad73c6..c08b96129 100644 --- a/eth/gasprice.go +++ b/eth/gasprice.go @@ -36,7 +36,7 @@ type blockPriceInfo struct { type GasPriceOracle struct { eth *Ethereum - chain *core.ChainManager + chain *core.BlockChain events event.Subscription blocks map[uint64]*blockPriceInfo firstProcessed, lastProcessed uint64 @@ -48,7 +48,7 @@ func NewGasPriceOracle(eth *Ethereum) (self *GasPriceOracle) { self = &GasPriceOracle{} self.blocks = make(map[uint64]*blockPriceInfo) self.eth = eth - self.chain = eth.chainManager + self.chain = eth.blockchain self.events = eth.EventMux().Subscribe( core.ChainEvent{}, core.ChainSplitEvent{}, diff --git a/eth/handler.go b/eth/handler.go index 52c9c4151..fc92338b4 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -60,9 +60,9 @@ func (ep extProt) GetHashes(hash common.Hash) error { return ep.getHashes(has func (ep extProt) GetBlock(hashes []common.Hash) error { return ep.getBlocks(hashes) } type ProtocolManager struct { - txpool txPool - chainman *core.ChainManager - chaindb ethdb.Database + txpool txPool + blockchain *core.BlockChain + chaindb ethdb.Database downloader *downloader.Downloader fetcher *fetcher.Fetcher @@ -87,17 +87,17 @@ type ProtocolManager struct { // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // with the ethereum network. -func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, chainman *core.ChainManager, chaindb ethdb.Database) *ProtocolManager { +func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) *ProtocolManager { // Create the protocol manager with the base fields manager := &ProtocolManager{ - eventMux: mux, - txpool: txpool, - chainman: chainman, - chaindb: chaindb, - peers: newPeerSet(), - newPeerCh: make(chan *peer, 1), - txsyncCh: make(chan *txsync), - quitSync: make(chan struct{}), + eventMux: mux, + txpool: txpool, + blockchain: blockchain, + chaindb: chaindb, + peers: newPeerSet(), + newPeerCh: make(chan *peer, 1), + txsyncCh: make(chan *txsync), + quitSync: make(chan struct{}), } // Initiate a sub-protocol for every implemented version we can handle manager.SubProtocols = make([]p2p.Protocol, len(ProtocolVersions)) @@ -116,15 +116,15 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po } } // Construct the different synchronisation mechanisms - manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.GetTd, manager.chainman.InsertChain, manager.removePeer) + manager.downloader = downloader.New(manager.eventMux, manager.blockchain.HasBlock, manager.blockchain.GetBlock, manager.blockchain.CurrentBlock, manager.blockchain.GetTd, manager.blockchain.InsertChain, manager.removePeer) validator := func(block *types.Block, parent *types.Block) error { return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false) } heighter := func() uint64 { - return manager.chainman.CurrentBlock().NumberU64() + return manager.blockchain.CurrentBlock().NumberU64() } - manager.fetcher = fetcher.New(manager.chainman.GetBlock, validator, manager.BroadcastBlock, heighter, manager.chainman.InsertChain, manager.removePeer) + manager.fetcher = fetcher.New(manager.blockchain.GetBlock, validator, manager.BroadcastBlock, heighter, 
manager.blockchain.InsertChain, manager.removePeer) return manager } @@ -187,7 +187,7 @@ func (pm *ProtocolManager) handle(p *peer) error { glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name()) // Execute the Ethereum handshake - td, head, genesis := pm.chainman.Status() + td, head, genesis := pm.blockchain.Status() if err := p.Handshake(td, head, genesis); err != nil { glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err) return err @@ -252,7 +252,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { request.Amount = uint64(downloader.MaxHashFetch) } // Retrieve the hashes from the block chain and return them - hashes := pm.chainman.GetBlockHashesFromHash(request.Hash, request.Amount) + hashes := pm.blockchain.GetBlockHashesFromHash(request.Hash, request.Amount) if len(hashes) == 0 { glog.V(logger.Debug).Infof("invalid block hash %x", request.Hash.Bytes()[:4]) } @@ -268,9 +268,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { request.Amount = uint64(downloader.MaxHashFetch) } // Calculate the last block that should be retrieved, and short circuit if unavailable - last := pm.chainman.GetBlockByNumber(request.Number + request.Amount - 1) + last := pm.blockchain.GetBlockByNumber(request.Number + request.Amount - 1) if last == nil { - last = pm.chainman.CurrentBlock() + last = pm.blockchain.CurrentBlock() request.Amount = last.NumberU64() - request.Number + 1 } if last.NumberU64() < request.Number { @@ -278,7 +278,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } // Retrieve the hashes from the last block backwards, reverse and return hashes := []common.Hash{last.Hash()} - hashes = append(hashes, pm.chainman.GetBlockHashesFromHash(last.Hash(), request.Amount-1)...) + hashes = append(hashes, pm.blockchain.GetBlockHashesFromHash(last.Hash(), request.Amount-1)...) 
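The handler hunks above only swap field and parameter types from *core.ChainManager to *core.BlockChain; the construction order on the caller side keeps its old shape. As a rough sketch, mirroring the helper_test.go hunk later in this patch, the chain handed to NewProtocolManager is now assembled as below. The helper name, the genesis account values and the returned tuple are assumptions, not part of the patch.

package chainwiring

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
)

// newTestBlockChain returns the *core.BlockChain that NewProtocolManager now
// expects in place of the old *core.ChainManager.
func newTestBlockChain() (*core.BlockChain, ethdb.Database, *event.TypeMux, error) {
	var (
		mux   = new(event.TypeMux)
		pow   = new(core.FakePow)
		db, _ = ethdb.NewMemDatabase()
	)
	// Test-style genesis with one funded account; address and balance are placeholders.
	core.WriteGenesisBlockForTesting(db, core.GenesisAccount{common.Address{1}, big.NewInt(1000000)})

	blockchain, err := core.NewBlockChain(db, pow, mux)
	if err != nil {
		return nil, nil, nil, err
	}
	// The block processor is still attached separately, exactly as before the rename.
	blockchain.SetProcessor(core.NewBlockProcessor(db, pow, blockchain, mux))
	return blockchain, db, mux, nil
}

NewProtocolManager(networkId, mux, txpool, pow, blockchain, db) then receives this value directly, as the helper_test.go hunk further down shows.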
for i := 0; i < len(hashes)/2; i++ { hashes[i], hashes[len(hashes)-1-i] = hashes[len(hashes)-1-i], hashes[i] @@ -318,7 +318,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { return errResp(ErrDecode, "msg %v: %v", msg, err) } // Retrieve the requested block, stopping if enough was found - if block := pm.chainman.GetBlock(hash); block != nil { + if block := pm.blockchain.GetBlock(hash); block != nil { blocks = append(blocks, block) bytes += block.Size() } @@ -358,9 +358,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Retrieve the next header satisfying the query var origin *types.Header if query.Origin.Hash != (common.Hash{}) { - origin = pm.chainman.GetHeader(query.Origin.Hash) + origin = pm.blockchain.GetHeader(query.Origin.Hash) } else { - origin = pm.chainman.GetHeaderByNumber(query.Origin.Number) + origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number) } if origin == nil { break @@ -373,7 +373,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { case query.Origin.Hash != (common.Hash{}) && query.Reverse: // Hash based traversal towards the genesis block for i := 0; i < int(query.Skip)+1; i++ { - if header := pm.chainman.GetHeader(query.Origin.Hash); header != nil { + if header := pm.blockchain.GetHeader(query.Origin.Hash); header != nil { query.Origin.Hash = header.ParentHash } else { unknown = true @@ -382,8 +382,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } case query.Origin.Hash != (common.Hash{}) && !query.Reverse: // Hash based traversal towards the leaf block - if header := pm.chainman.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil { - if pm.chainman.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { + if header := pm.blockchain.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil { + if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { query.Origin.Hash = header.Hash() } else { unknown = true @@ -466,7 +466,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { return errResp(ErrDecode, "msg %v: %v", msg, err) } // Retrieve the requested block body, stopping if enough was found - if data := pm.chainman.GetBodyRLP(hash); len(data) != 0 { + if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 { bodies = append(bodies, data) bytes += len(data) } @@ -562,7 +562,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Schedule all the unknown hashes for retrieval unknown := make([]announce, 0, len(announces)) for _, block := range announces { - if !pm.chainman.HasBlock(block.Hash) { + if !pm.blockchain.HasBlock(block.Hash) { unknown = append(unknown, block) } } @@ -586,7 +586,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { request.Block.ReceivedAt = msg.ReceivedAt // Mark the block's arrival for whatever reason - _, chainHead, _ := pm.chainman.Status() + _, chainHead, _ := pm.blockchain.Status() jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{ BlockHash: request.Block.Hash().Hex(), BlockNumber: request.Block.Number(), @@ -603,7 +603,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Update the peers total difficulty if needed, schedule a download if gapped if request.TD.Cmp(p.Td()) > 0 { p.SetTd(request.TD) - if request.TD.Cmp(new(big.Int).Add(pm.chainman.Td(), request.Block.Difficulty())) > 0 { + if request.TD.Cmp(new(big.Int).Add(pm.blockchain.Td(), request.Block.Difficulty())) > 0 { go pm.synchronise(p) } } @@ -645,8 +645,8 @@ func (pm *ProtocolManager) 
BroadcastBlock(block *types.Block, propagate bool) { if propagate { // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) var td *big.Int - if parent := pm.chainman.GetBlock(block.ParentHash()); parent != nil { - td = new(big.Int).Add(block.Difficulty(), pm.chainman.GetTd(block.ParentHash())) + if parent := pm.blockchain.GetBlock(block.ParentHash()); parent != nil { + td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash())) } else { glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4]) return @@ -659,7 +659,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt)) } // Otherwise if the block is indeed in out own chain, announce it - if pm.chainman.HasBlock(hash) { + if pm.blockchain.HasBlock(hash) { for _, peer := range peers { if peer.version < eth62 { peer.SendNewBlockHashes61([]common.Hash{hash}) diff --git a/eth/handler_test.go b/eth/handler_test.go index 6400d4e78..2b8c6168a 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -33,23 +33,23 @@ func testGetBlockHashes(t *testing.T, protocol int) { number int result int }{ - {common.Hash{}, 1, 0}, // Make sure non existent hashes don't return results - {pm.chainman.Genesis().Hash(), 1, 0}, // There are no hashes to retrieve up from the genesis - {pm.chainman.GetBlockByNumber(5).Hash(), 5, 5}, // All the hashes including the genesis requested - {pm.chainman.GetBlockByNumber(5).Hash(), 10, 5}, // More hashes than available till the genesis requested - {pm.chainman.GetBlockByNumber(100).Hash(), 10, 10}, // All hashes available from the middle of the chain - {pm.chainman.CurrentBlock().Hash(), 10, 10}, // All hashes available from the head of the chain - {pm.chainman.CurrentBlock().Hash(), limit, limit}, // Request the maximum allowed hash count - {pm.chainman.CurrentBlock().Hash(), limit + 1, limit}, // Request more than the maximum allowed hash count + {common.Hash{}, 1, 0}, // Make sure non existent hashes don't return results + {pm.blockchain.Genesis().Hash(), 1, 0}, // There are no hashes to retrieve up from the genesis + {pm.blockchain.GetBlockByNumber(5).Hash(), 5, 5}, // All the hashes including the genesis requested + {pm.blockchain.GetBlockByNumber(5).Hash(), 10, 5}, // More hashes than available till the genesis requested + {pm.blockchain.GetBlockByNumber(100).Hash(), 10, 10}, // All hashes available from the middle of the chain + {pm.blockchain.CurrentBlock().Hash(), 10, 10}, // All hashes available from the head of the chain + {pm.blockchain.CurrentBlock().Hash(), limit, limit}, // Request the maximum allowed hash count + {pm.blockchain.CurrentBlock().Hash(), limit + 1, limit}, // Request more than the maximum allowed hash count } // Run each of the tests and verify the results against the chain for i, tt := range tests { // Assemble the hash response we would like to receive resp := make([]common.Hash, tt.result) if len(resp) > 0 { - from := pm.chainman.GetBlock(tt.origin).NumberU64() - 1 + from := pm.blockchain.GetBlock(tt.origin).NumberU64() - 1 for j := 0; j < len(resp); j++ { - resp[j] = pm.chainman.GetBlockByNumber(uint64(int(from) - j)).Hash() + resp[j] = pm.blockchain.GetBlockByNumber(uint64(int(from) - j)).Hash() } } // Send the hash request and verify the response @@ -76,11 +76,11 @@ func testGetBlockHashesFromNumber(t *testing.T, protocol int) { number int result 
int }{ - {pm.chainman.CurrentBlock().NumberU64() + 1, 1, 0}, // Out of bounds requests should return empty - {pm.chainman.CurrentBlock().NumberU64(), 1, 1}, // Make sure the head hash can be retrieved - {pm.chainman.CurrentBlock().NumberU64() - 4, 5, 5}, // All hashes, including the head hash requested - {pm.chainman.CurrentBlock().NumberU64() - 4, 10, 5}, // More hashes requested than available till the head - {pm.chainman.CurrentBlock().NumberU64() - 100, 10, 10}, // All hashes available from the middle of the chain + {pm.blockchain.CurrentBlock().NumberU64() + 1, 1, 0}, // Out of bounds requests should return empty + {pm.blockchain.CurrentBlock().NumberU64(), 1, 1}, // Make sure the head hash can be retrieved + {pm.blockchain.CurrentBlock().NumberU64() - 4, 5, 5}, // All hashes, including the head hash requested + {pm.blockchain.CurrentBlock().NumberU64() - 4, 10, 5}, // More hashes requested than available till the head + {pm.blockchain.CurrentBlock().NumberU64() - 100, 10, 10}, // All hashes available from the middle of the chain {0, 10, 10}, // All hashes available from the root of the chain {0, limit, limit}, // Request the maximum allowed hash count {0, limit + 1, limit}, // Request more than the maximum allowed hash count @@ -91,7 +91,7 @@ func testGetBlockHashesFromNumber(t *testing.T, protocol int) { // Assemble the hash response we would like to receive resp := make([]common.Hash, tt.result) for j := 0; j < len(resp); j++ { - resp[j] = pm.chainman.GetBlockByNumber(tt.origin + uint64(j)).Hash() + resp[j] = pm.blockchain.GetBlockByNumber(tt.origin + uint64(j)).Hash() } // Send the hash request and verify the response p2p.Send(peer.app, 0x08, getBlockHashesFromNumberData{tt.origin, uint64(tt.number)}) @@ -117,22 +117,22 @@ func testGetBlocks(t *testing.T, protocol int) { available []bool // Availability of explicitly requested blocks expected int // Total number of existing blocks to expect }{ - {1, nil, nil, 1}, // A single random block should be retrievable - {10, nil, nil, 10}, // Multiple random blocks should be retrievable - {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable - {limit + 1, nil, nil, limit}, // No more that the possible block count should be returned - {0, []common.Hash{pm.chainman.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable - {0, []common.Hash{pm.chainman.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable - {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned + {1, nil, nil, 1}, // A single random block should be retrievable + {10, nil, nil, 10}, // Multiple random blocks should be retrievable + {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable + {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned + {0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable + {0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable + {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned // Existing and non-existing blocks interleaved should not cause problems {0, []common.Hash{ common.Hash{}, - pm.chainman.GetBlockByNumber(1).Hash(), + pm.blockchain.GetBlockByNumber(1).Hash(), common.Hash{}, - pm.chainman.GetBlockByNumber(10).Hash(), + pm.blockchain.GetBlockByNumber(10).Hash(), common.Hash{}, - 
pm.chainman.GetBlockByNumber(100).Hash(), + pm.blockchain.GetBlockByNumber(100).Hash(), common.Hash{}, }, []bool{false, true, false, true, false, true, false}, 3}, } @@ -144,11 +144,11 @@ func testGetBlocks(t *testing.T, protocol int) { for j := 0; j < tt.random; j++ { for { - num := rand.Int63n(int64(pm.chainman.CurrentBlock().NumberU64())) + num := rand.Int63n(int64(pm.blockchain.CurrentBlock().NumberU64())) if !seen[num] { seen[num] = true - block := pm.chainman.GetBlockByNumber(uint64(num)) + block := pm.blockchain.GetBlockByNumber(uint64(num)) hashes = append(hashes, block.Hash()) if len(blocks) < tt.expected { blocks = append(blocks, block) @@ -160,7 +160,7 @@ func testGetBlocks(t *testing.T, protocol int) { for j, hash := range tt.explicit { hashes = append(hashes, hash) if tt.available[j] && len(blocks) < tt.expected { - blocks = append(blocks, pm.chainman.GetBlock(hash)) + blocks = append(blocks, pm.blockchain.GetBlock(hash)) } } // Send the hash request and verify the response @@ -194,83 +194,83 @@ func testGetBlockHeaders(t *testing.T, protocol int) { }{ // A single random block should be retrievable by hash and number too { - &getBlockHeadersData{Origin: hashOrNumber{Hash: pm.chainman.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, - []common.Hash{pm.chainman.GetBlockByNumber(limit / 2).Hash()}, + &getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, + []common.Hash{pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, }, { &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1}, - []common.Hash{pm.chainman.GetBlockByNumber(limit / 2).Hash()}, + []common.Hash{pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, }, // Multiple headers should be retrievable in both directions { &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3}, []common.Hash{ - pm.chainman.GetBlockByNumber(limit / 2).Hash(), - pm.chainman.GetBlockByNumber(limit/2 + 1).Hash(), - pm.chainman.GetBlockByNumber(limit/2 + 2).Hash(), + pm.blockchain.GetBlockByNumber(limit / 2).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 + 1).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 + 2).Hash(), }, }, { &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, []common.Hash{ - pm.chainman.GetBlockByNumber(limit / 2).Hash(), - pm.chainman.GetBlockByNumber(limit/2 - 1).Hash(), - pm.chainman.GetBlockByNumber(limit/2 - 2).Hash(), + pm.blockchain.GetBlockByNumber(limit / 2).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 - 1).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 - 2).Hash(), }, }, // Multiple headers with skip lists should be retrievable { &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, []common.Hash{ - pm.chainman.GetBlockByNumber(limit / 2).Hash(), - pm.chainman.GetBlockByNumber(limit/2 + 4).Hash(), - pm.chainman.GetBlockByNumber(limit/2 + 8).Hash(), + pm.blockchain.GetBlockByNumber(limit / 2).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 + 4).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 + 8).Hash(), }, }, { &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ - pm.chainman.GetBlockByNumber(limit / 2).Hash(), - pm.chainman.GetBlockByNumber(limit/2 - 4).Hash(), - pm.chainman.GetBlockByNumber(limit/2 - 8).Hash(), + pm.blockchain.GetBlockByNumber(limit / 2).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 - 4).Hash(), + pm.blockchain.GetBlockByNumber(limit/2 - 8).Hash(), }, }, // The chain 
endpoints should be retrievable { &getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1}, - []common.Hash{pm.chainman.GetBlockByNumber(0).Hash()}, + []common.Hash{pm.blockchain.GetBlockByNumber(0).Hash()}, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64()}, Amount: 1}, - []common.Hash{pm.chainman.CurrentBlock().Hash()}, + &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64()}, Amount: 1}, + []common.Hash{pm.blockchain.CurrentBlock().Hash()}, }, // Ensure protocol limits are honored { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, - pm.chainman.GetBlockHashesFromHash(pm.chainman.CurrentBlock().Hash(), limit), + &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, + pm.blockchain.GetBlockHashesFromHash(pm.blockchain.CurrentBlock().Hash(), limit), }, // Check that requesting more than available is handled gracefully { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, + &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, []common.Hash{ - pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 4).Hash(), - pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64()).Hash(), + pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 4).Hash(), + pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64()).Hash(), }, }, { &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ - pm.chainman.GetBlockByNumber(4).Hash(), - pm.chainman.GetBlockByNumber(0).Hash(), + pm.blockchain.GetBlockByNumber(4).Hash(), + pm.blockchain.GetBlockByNumber(0).Hash(), }, }, // Check that requesting more than available is handled gracefully, even if mid skip { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, + &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, []common.Hash{ - pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 4).Hash(), - pm.chainman.GetBlockByNumber(pm.chainman.CurrentBlock().NumberU64() - 1).Hash(), + pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 4).Hash(), + pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 1).Hash(), }, }, { &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, []common.Hash{ - pm.chainman.GetBlockByNumber(4).Hash(), - pm.chainman.GetBlockByNumber(1).Hash(), + pm.blockchain.GetBlockByNumber(4).Hash(), + pm.blockchain.GetBlockByNumber(1).Hash(), }, }, // Check that non existing headers aren't returned @@ -278,7 +278,7 @@ func testGetBlockHeaders(t *testing.T, protocol int) { &getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1}, []common.Hash{}, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.chainman.CurrentBlock().NumberU64() + 1}, Amount: 1}, + &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() + 1}, Amount: 1}, []common.Hash{}, }, } @@ -287,7 +287,7 @@ func testGetBlockHeaders(t *testing.T, protocol int) { // Collect the headers to expect in the response headers := []*types.Header{} for _, 
hash := range tt.expect { - headers = append(headers, pm.chainman.GetBlock(hash).Header()) + headers = append(headers, pm.blockchain.GetBlock(hash).Header()) } // Send the hash request and verify the response p2p.Send(peer.app, 0x03, tt.query) @@ -315,22 +315,22 @@ func testGetBlockBodies(t *testing.T, protocol int) { available []bool // Availability of explicitly requested blocks expected int // Total number of existing blocks to expect }{ - {1, nil, nil, 1}, // A single random block should be retrievable - {10, nil, nil, 10}, // Multiple random blocks should be retrievable - {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable - {limit + 1, nil, nil, limit}, // No more that the possible block count should be returned - {0, []common.Hash{pm.chainman.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable - {0, []common.Hash{pm.chainman.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable - {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned + {1, nil, nil, 1}, // A single random block should be retrievable + {10, nil, nil, 10}, // Multiple random blocks should be retrievable + {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable + {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned + {0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable + {0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable + {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned // Existing and non-existing blocks interleaved should not cause problems {0, []common.Hash{ common.Hash{}, - pm.chainman.GetBlockByNumber(1).Hash(), + pm.blockchain.GetBlockByNumber(1).Hash(), common.Hash{}, - pm.chainman.GetBlockByNumber(10).Hash(), + pm.blockchain.GetBlockByNumber(10).Hash(), common.Hash{}, - pm.chainman.GetBlockByNumber(100).Hash(), + pm.blockchain.GetBlockByNumber(100).Hash(), common.Hash{}, }, []bool{false, true, false, true, false, true, false}, 3}, } @@ -342,11 +342,11 @@ func testGetBlockBodies(t *testing.T, protocol int) { for j := 0; j < tt.random; j++ { for { - num := rand.Int63n(int64(pm.chainman.CurrentBlock().NumberU64())) + num := rand.Int63n(int64(pm.blockchain.CurrentBlock().NumberU64())) if !seen[num] { seen[num] = true - block := pm.chainman.GetBlockByNumber(uint64(num)) + block := pm.blockchain.GetBlockByNumber(uint64(num)) hashes = append(hashes, block.Hash()) if len(bodies) < tt.expected { bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) @@ -358,7 +358,7 @@ func testGetBlockBodies(t *testing.T, protocol int) { for j, hash := range tt.explicit { hashes = append(hashes, hash) if tt.available[j] && len(bodies) < tt.expected { - block := pm.chainman.GetBlock(hash) + block := pm.blockchain.GetBlock(hash) bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) } } @@ -442,11 +442,11 @@ func testGetNodeData(t *testing.T, protocol int) { statedb.Put(hashes[i].Bytes(), data[i]) } accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr} - for i := uint64(0); i <= pm.chainman.CurrentBlock().NumberU64(); i++ { - trie := state.New(pm.chainman.GetBlockByNumber(i).Root(), statedb) + for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { + trie := 
state.New(pm.blockchain.GetBlockByNumber(i).Root(), statedb) for j, acc := range accounts { - bw := pm.chainman.State().GetBalance(acc) + bw := pm.blockchain.State().GetBalance(acc) bh := trie.GetBalance(acc) if (bw != nil && bh == nil) || (bw == nil && bh != nil) { @@ -505,8 +505,8 @@ func testGetReceipt(t *testing.T, protocol int) { // Collect the hashes to request, and the response to expect hashes := []common.Hash{} - for i := uint64(0); i <= pm.chainman.CurrentBlock().NumberU64(); i++ { - for _, tx := range pm.chainman.GetBlockByNumber(i).Transactions() { + for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { + for _, tx := range pm.blockchain.GetBlockByNumber(i).Transactions() { hashes = append(hashes, tx.Hash()) } } diff --git a/eth/helper_test.go b/eth/helper_test.go index 034751f7f..e42fa1f82 100644 --- a/eth/helper_test.go +++ b/eth/helper_test.go @@ -30,18 +30,18 @@ var ( // channels for different events. func newTestProtocolManager(blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager { var ( - evmux = new(event.TypeMux) - pow = new(core.FakePow) - db, _ = ethdb.NewMemDatabase() - genesis = core.WriteGenesisBlockForTesting(db, core.GenesisAccount{testBankAddress, testBankFunds}) - chainman, _ = core.NewChainManager(db, pow, evmux) - blockproc = core.NewBlockProcessor(db, pow, chainman, evmux) + evmux = new(event.TypeMux) + pow = new(core.FakePow) + db, _ = ethdb.NewMemDatabase() + genesis = core.WriteGenesisBlockForTesting(db, core.GenesisAccount{testBankAddress, testBankFunds}) + blockchain, _ = core.NewBlockChain(db, pow, evmux) + blockproc = core.NewBlockProcessor(db, pow, blockchain, evmux) ) - chainman.SetProcessor(blockproc) - if _, err := chainman.InsertChain(core.GenerateChain(genesis, db, blocks, generator)); err != nil { + blockchain.SetProcessor(blockproc) + if _, err := blockchain.InsertChain(core.GenerateChain(genesis, db, blocks, generator)); err != nil { panic(err) } - pm := NewProtocolManager(NetworkId, evmux, &testTxPool{added: newtx}, pow, chainman, db) + pm := NewProtocolManager(NetworkId, evmux, &testTxPool{added: newtx}, pow, blockchain, db) pm.Start() return pm } @@ -116,7 +116,7 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te } // Execute any implicitly requested handshakes and return if shake { - td, head, genesis := pm.chainman.Status() + td, head, genesis := pm.blockchain.Status() tp.handshake(nil, td, head, genesis) } return tp, errc diff --git a/eth/protocol_test.go b/eth/protocol_test.go index bc3b5acfc..523e6c1eb 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -45,7 +45,7 @@ func TestStatusMsgErrors64(t *testing.T) { testStatusMsgErrors(t, 64) } func testStatusMsgErrors(t *testing.T, protocol int) { pm := newTestProtocolManager(0, nil, nil) - td, currentBlock, genesis := pm.chainman.Status() + td, currentBlock, genesis := pm.blockchain.Status() defer pm.Stop() tests := []struct { diff --git a/eth/sync.go b/eth/sync.go index b4dea4b0f..5a2031c68 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -160,7 +160,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) { return } // Make sure the peer's TD is higher than our own. If not drop. 
- if peer.Td().Cmp(pm.chainman.Td()) <= 0 { + if peer.Td().Cmp(pm.blockchain.Td()) <= 0 { return } // Otherwise try to sync with the downloader diff --git a/miner/worker.go b/miner/worker.go index 470f1f676..8be2db93e 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -100,7 +100,7 @@ type worker struct { pow pow.PoW eth core.Backend - chain *core.ChainManager + chain *core.BlockChain proc *core.BlockProcessor chainDb ethdb.Database @@ -131,7 +131,7 @@ func newWorker(coinbase common.Address, eth core.Backend) *worker { chainDb: eth.ChainDb(), recv: make(chan *Result, resultQueueSize), gasPrice: new(big.Int), - chain: eth.ChainManager(), + chain: eth.BlockChain(), proc: eth.BlockProcessor(), possibleUncles: make(map[common.Hash]*types.Block), coinbase: coinbase, diff --git a/rpc/api/admin.go b/rpc/api/admin.go index 8af69b189..6aa04e667 100644 --- a/rpc/api/admin.go +++ b/rpc/api/admin.go @@ -151,7 +151,7 @@ func (self *adminApi) DataDir(req *shared.Request) (interface{}, error) { return self.ethereum.DataDir, nil } -func hasAllBlocks(chain *core.ChainManager, bs []*types.Block) bool { +func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool { for _, b := range bs { if !chain.HasBlock(b.Hash()) { return false @@ -193,10 +193,10 @@ func (self *adminApi) ImportChain(req *shared.Request) (interface{}, error) { break } // Import the batch. - if hasAllBlocks(self.ethereum.ChainManager(), blocks[:i]) { + if hasAllBlocks(self.ethereum.BlockChain(), blocks[:i]) { continue } - if _, err := self.ethereum.ChainManager().InsertChain(blocks[:i]); err != nil { + if _, err := self.ethereum.BlockChain().InsertChain(blocks[:i]); err != nil { return false, fmt.Errorf("invalid block %d: %v", n, err) } } @@ -214,7 +214,7 @@ func (self *adminApi) ExportChain(req *shared.Request) (interface{}, error) { return false, err } defer fh.Close() - if err := self.ethereum.ChainManager().Export(fh); err != nil { + if err := self.ethereum.BlockChain().Export(fh); err != nil { return false, err } diff --git a/rpc/api/debug.go b/rpc/api/debug.go index d325b1720..e193f7ad2 100644 --- a/rpc/api/debug.go +++ b/rpc/api/debug.go @@ -152,7 +152,7 @@ func (self *debugApi) SetHead(req *shared.Request) (interface{}, error) { return nil, fmt.Errorf("block #%d not found", args.BlockNumber) } - self.ethereum.ChainManager().SetHead(block) + self.ethereum.BlockChain().SetHead(block) return nil, nil } diff --git a/rpc/api/eth.go b/rpc/api/eth.go index 4cd5f2695..6db006a46 100644 --- a/rpc/api/eth.go +++ b/rpc/api/eth.go @@ -168,7 +168,7 @@ func (self *ethApi) IsMining(req *shared.Request) (interface{}, error) { } func (self *ethApi) IsSyncing(req *shared.Request) (interface{}, error) { - current := self.ethereum.ChainManager().CurrentBlock().NumberU64() + current := self.ethereum.BlockChain().CurrentBlock().NumberU64() origin, height := self.ethereum.Downloader().Boundaries() if current < height { diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 33577cf55..fb9ca16e6 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -181,7 +181,7 @@ func runBlockTest(test *BlockTest) error { return fmt.Errorf("InsertPreState: %v", err) } - cm := ethereum.ChainManager() + cm := ethereum.BlockChain() validBlocks, err := test.TryBlocksInsert(cm) if err != nil { return err @@ -276,7 +276,7 @@ func (t *BlockTest) InsertPreState(ethereum *eth.Ethereum) (*state.StateDB, erro expected we are expected to ignore it and continue processing and then validate the post state. 
*/ -func (t *BlockTest) TryBlocksInsert(chainManager *core.ChainManager) ([]btBlock, error) { +func (t *BlockTest) TryBlocksInsert(blockchain *core.BlockChain) ([]btBlock, error) { validBlocks := make([]btBlock, 0) // insert the test blocks, which will execute all transactions for _, b := range t.Json.Blocks { @@ -289,7 +289,7 @@ func (t *BlockTest) TryBlocksInsert(chainManager *core.ChainManager) ([]btBlock, } } // RLP decoding worked, try to insert into chain: - _, err = chainManager.InsertChain(types.Blocks{cb}) + _, err = blockchain.InsertChain(types.Blocks{cb}) if err != nil { if b.BlockHeader == nil { continue // OK - block is supposed to be invalid, continue with next block @@ -426,7 +426,7 @@ func (t *BlockTest) ValidatePostState(statedb *state.StateDB) error { return nil } -func (test *BlockTest) ValidateImportedHeaders(cm *core.ChainManager, validBlocks []btBlock) error { +func (test *BlockTest) ValidateImportedHeaders(cm *core.BlockChain, validBlocks []btBlock) error { // to get constant lookup when verifying block headers by hash (some tests have many blocks) bmap := make(map[string]btBlock, len(test.Json.Blocks)) for _, b := range validBlocks { diff --git a/xeth/xeth.go b/xeth/xeth.go index 9d366d215..da712a984 100644 --- a/xeth/xeth.go +++ b/xeth/xeth.go @@ -126,7 +126,7 @@ func New(ethereum *eth.Ethereum, frontend Frontend) *XEth { if frontend == nil { xeth.frontend = dummyFrontend{} } - xeth.state = NewState(xeth, xeth.backend.ChainManager().State()) + xeth.state = NewState(xeth, xeth.backend.BlockChain().State()) go xeth.start() @@ -214,7 +214,7 @@ func (self *XEth) AtStateNum(num int64) *XEth { if block := self.getBlockByHeight(num); block != nil { st = state.New(block.Root(), self.backend.ChainDb()) } else { - st = state.New(self.backend.ChainManager().GetBlockByNumber(0).Root(), self.backend.ChainDb()) + st = state.New(self.backend.BlockChain().GetBlockByNumber(0).Root(), self.backend.ChainDb()) } } @@ -290,19 +290,19 @@ func (self *XEth) getBlockByHeight(height int64) *types.Block { num = uint64(height) } - return self.backend.ChainManager().GetBlockByNumber(num) + return self.backend.BlockChain().GetBlockByNumber(num) } func (self *XEth) BlockByHash(strHash string) *Block { hash := common.HexToHash(strHash) - block := self.backend.ChainManager().GetBlock(hash) + block := self.backend.BlockChain().GetBlock(hash) return NewBlock(block) } func (self *XEth) EthBlockByHash(strHash string) *types.Block { hash := common.HexToHash(strHash) - block := self.backend.ChainManager().GetBlock(hash) + block := self.backend.BlockChain().GetBlock(hash) return block } @@ -356,11 +356,11 @@ func (self *XEth) EthBlockByNumber(num int64) *types.Block { } func (self *XEth) Td(hash common.Hash) *big.Int { - return self.backend.ChainManager().GetTd(hash) + return self.backend.BlockChain().GetTd(hash) } func (self *XEth) CurrentBlock() *types.Block { - return self.backend.ChainManager().CurrentBlock() + return self.backend.BlockChain().CurrentBlock() } func (self *XEth) GetBlockReceipts(bhash common.Hash) types.Receipts { @@ -372,7 +372,7 @@ func (self *XEth) GetTxReceipt(txhash common.Hash) *types.Receipt { } func (self *XEth) GasLimit() *big.Int { - return self.backend.ChainManager().GasLimit() + return self.backend.BlockChain().GasLimit() } func (self *XEth) Block(v interface{}) *Block { @@ -855,7 +855,7 @@ func (self *XEth) Call(fromStr, toStr, valueStr, gasStr, gasPriceStr, dataStr st } header := self.CurrentBlock().Header() - vmenv := core.NewEnv(statedb, self.backend.ChainManager(), 
msg, header)
+	vmenv := core.NewEnv(statedb, self.backend.BlockChain(), msg, header)
 	res, gas, err := core.ApplyMessage(vmenv, msg, from)
 	return common.ToHex(res), gas.String(), err
-- 
cgit v1.2.3
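Taken together, the xeth, rpc and tests hunks reduce to one call-site rule: wherever code used to fetch the chain through ChainManager(), it now calls BlockChain(), and the methods on the returned value (CurrentBlock, GetBlock, GetTd, InsertChain, Export and so on) keep their old signatures. A minimal sketch of the migrated pattern follows; it is not part of the patch, it assumes an *eth.Ethereum configured elsewhere, and the exportChain helper is hypothetical.

package migration

import (
	"os"

	"github.com/ethereum/go-ethereum/eth"
)

// exportChain writes the whole chain to path using the renamed accessor.
func exportChain(ethereum *eth.Ethereum, path string) error {
	chain := ethereum.BlockChain() // formerly ethereum.ChainManager()

	fh, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	// Export is unchanged; only the receiver type moved from
	// *core.ChainManager to *core.BlockChain.
	return chain.Export(fh)
}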