diff options
author | obscuren <geffobscura@gmail.com> | 2015-05-22 00:58:57 +0800 |
---|---|---|
committer | obscuren <geffobscura@gmail.com> | 2015-05-22 00:58:57 +0800 |
commit | 2c1c78a6d9e59de1d4cdeb32737d281814d690f7 (patch) | |
tree | 05471c7e1862733478b08e18bd7ed9419f7f7297 /core | |
parent | 915fc0e581c042a8d4896880d45e680003809254 (diff) | |
parent | 3ea9868b656077c38af5ea8590761c3218ce558e (diff) | |
download | dexon-2c1c78a6d9e59de1d4cdeb32737d281814d690f7.tar dexon-2c1c78a6d9e59de1d4cdeb32737d281814d690f7.tar.gz dexon-2c1c78a6d9e59de1d4cdeb32737d281814d690f7.tar.bz2 dexon-2c1c78a6d9e59de1d4cdeb32737d281814d690f7.tar.lz dexon-2c1c78a6d9e59de1d4cdeb32737d281814d690f7.tar.xz dexon-2c1c78a6d9e59de1d4cdeb32737d281814d690f7.tar.zst dexon-2c1c78a6d9e59de1d4cdeb32737d281814d690f7.zip |
Merge branch 'release/0.9.23'
Diffstat (limited to 'core')
-rw-r--r-- | core/block_cache.go | 3 | ||||
-rw-r--r-- | core/block_processor.go | 37 | ||||
-rw-r--r-- | core/block_processor_test.go | 6 | ||||
-rw-r--r-- | core/chain_makers.go | 2 | ||||
-rw-r--r-- | core/chain_manager.go | 186 | ||||
-rw-r--r-- | core/chain_manager_test.go | 17 | ||||
-rw-r--r-- | core/execution.go | 6 | ||||
-rw-r--r-- | core/vm/memory.go | 12 | ||||
-rw-r--r-- | core/vm/vm.go | 2 |
9 files changed, 202 insertions, 69 deletions
diff --git a/core/block_cache.go b/core/block_cache.go index eeef5c41d..0c747d37c 100644 --- a/core/block_cache.go +++ b/core/block_cache.go @@ -85,6 +85,9 @@ func (bc *BlockCache) Get(hash common.Hash) *types.Block { } func (bc *BlockCache) Has(hash common.Hash) bool { + bc.mu.RLock() + defer bc.mu.RUnlock() + _, ok := bc.blocks[hash] return ok } diff --git a/core/block_processor.go b/core/block_processor.go index cae618b39..3f10e5efd 100644 --- a/core/block_processor.go +++ b/core/block_processor.go @@ -24,6 +24,8 @@ const ( BlockChainVersion = 2 ) +var receiptsPre = []byte("receipts-") + type BlockProcessor struct { db common.Database extraDb common.Database @@ -189,7 +191,7 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st state := state.New(parent.Root(), sm.db) // Block validation - if err = sm.ValidateHeader(block.Header(), parent.Header()); err != nil { + if err = sm.ValidateHeader(block.Header(), parent.Header(), false); err != nil { return } @@ -263,13 +265,27 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st putTx(sm.extraDb, tx, block, uint64(i)) } + receiptsRlp := block.Receipts().RlpEncode() + sm.extraDb.Put(append(receiptsPre, block.Hash().Bytes()...), receiptsRlp) + return state.Logs(), nil } +func (self *BlockProcessor) GetBlockReceipts(bhash common.Hash) (receipts types.Receipts, err error) { + var rdata []byte + rdata, err = self.extraDb.Get(append(receiptsPre, bhash[:]...)) + + if err == nil { + err = rlp.DecodeBytes(rdata, &receipts) + } + return + +} + // Validates the current block. Returns an error if the block was invalid, // an uncle or anything that isn't on the current block chain. // Validation validates easy over difficult (dagger takes longer time = difficult) -func (sm *BlockProcessor) ValidateHeader(block, parent *types.Header) error { +func (sm *BlockProcessor) ValidateHeader(block, parent *types.Header, checkPow bool) error { if big.NewInt(int64(len(block.Extra))).Cmp(params.MaximumExtraDataSize) == 1 { return fmt.Errorf("Block extra data too long (%d)", len(block.Extra)) } @@ -300,9 +316,11 @@ func (sm *BlockProcessor) ValidateHeader(block, parent *types.Header) error { return BlockEqualTSErr //ValidationError("Block timestamp equal or less than previous block (%v - %v)", block.Time, parent.Time) } - // Verify the nonce of the block. Return an error if it's not valid - if !sm.Pow.Verify(types.NewBlockWithHeader(block)) { - return ValidationError("Block's nonce is invalid (= %x)", block.Nonce) + if checkPow { + // Verify the nonce of the block. Return an error if it's not valid + if !sm.Pow.Verify(types.NewBlockWithHeader(block)) { + return ValidationError("Block's nonce is invalid (= %x)", block.Nonce) + } } return nil @@ -351,6 +369,13 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty uncles.Add(hash) if ancestors.Has(hash) { + branch := fmt.Sprintf(" O - %x\n |\n", block.Hash()) + ancestors.Each(func(item interface{}) bool { + branch += fmt.Sprintf(" O - %x\n |\n", hash) + return true + }) + glog.Infoln(branch) + return UncleError("uncle[%d](%x) is ancestor", i, hash[:4]) } @@ -358,7 +383,7 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty return UncleError("uncle[%d](%x)'s parent unknown (%x)", i, hash[:4], uncle.ParentHash[0:4]) } - if err := sm.ValidateHeader(uncle, ancestorHeaders[uncle.ParentHash]); err != nil { + if err := sm.ValidateHeader(uncle, ancestorHeaders[uncle.ParentHash], true); err != nil { return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err)) } } diff --git a/core/block_processor_test.go b/core/block_processor_test.go index 02524a4c1..e0aa5fb4c 100644 --- a/core/block_processor_test.go +++ b/core/block_processor_test.go @@ -14,7 +14,7 @@ func proc() (*BlockProcessor, *ChainManager) { db, _ := ethdb.NewMemDatabase() var mux event.TypeMux - chainMan := NewChainManager(db, db, &mux) + chainMan := NewChainManager(db, db, thePow(), &mux) return NewBlockProcessor(db, db, ezp.New(), nil, chainMan, &mux), chainMan } @@ -24,13 +24,13 @@ func TestNumber(t *testing.T) { block1.Header().Number = big.NewInt(3) block1.Header().Time-- - err := bp.ValidateHeader(block1.Header(), chain.Genesis().Header()) + err := bp.ValidateHeader(block1.Header(), chain.Genesis().Header(), false) if err != BlockNumberErr { t.Errorf("expected block number error %v", err) } block1 = chain.NewBlock(common.Address{}) - err = bp.ValidateHeader(block1.Header(), chain.Genesis().Header()) + err = bp.ValidateHeader(block1.Header(), chain.Genesis().Header(), false) if err == BlockNumberErr { t.Errorf("didn't expect block number error") } diff --git a/core/chain_makers.go b/core/chain_makers.go index acf7b39cc..44f17cc33 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -109,7 +109,7 @@ func makeChain(bman *BlockProcessor, parent *types.Block, max int, db common.Dat // Effectively a fork factory func newChainManager(block *types.Block, eventMux *event.TypeMux, db common.Database) *ChainManager { genesis := GenesisBlock(db) - bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: eventMux} + bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: eventMux, pow: FakePow{}} bc.txState = state.ManageState(state.New(genesis.Root(), db)) bc.futureBlocks = NewBlockCache(1000) if block == nil { diff --git a/core/chain_manager.go b/core/chain_manager.go index 2e8eb927d..4fb7506e5 100644 --- a/core/chain_manager.go +++ b/core/chain_manager.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "math/big" + "runtime" "sync" "time" @@ -15,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/rlp" ) @@ -83,8 +85,9 @@ type ChainManager struct { eventMux *event.TypeMux genesisBlock *types.Block // Last known total difficulty - mu sync.RWMutex - tsmu sync.RWMutex + mu sync.RWMutex + chainmu sync.RWMutex + tsmu sync.RWMutex td *big.Int currentBlock *types.Block @@ -99,9 +102,11 @@ type ChainManager struct { quit chan struct{} wg sync.WaitGroup + + pow pow.PoW } -func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *ChainManager { +func NewChainManager(blockDb, stateDb common.Database, pow pow.PoW, mux *event.TypeMux) *ChainManager { bc := &ChainManager{ blockDb: blockDb, stateDb: stateDb, @@ -109,6 +114,7 @@ func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *Chai eventMux: mux, quit: make(chan struct{}), cache: NewBlockCache(blockCacheLimit), + pow: pow, } bc.setLastState() @@ -342,7 +348,7 @@ func (self *ChainManager) Export(w io.Writer) error { last := self.currentBlock.NumberU64() - for nr := uint64(0); nr <= last; nr++ { + for nr := uint64(1); nr <= last; nr++ { block := self.GetBlockByNumber(nr) if block == nil { return fmt.Errorf("export failed on #%d: not found", nr) @@ -406,9 +412,11 @@ func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) ( } func (self *ChainManager) GetBlock(hash common.Hash) *types.Block { - if block := self.cache.Get(hash); block != nil { - return block - } + /* + if block := self.cache.Get(hash); block != nil { + return block + } + */ data, _ := self.blockDb.Get(append(blockHashPre, hash[:]...)) if len(data) == 0 { @@ -518,6 +526,9 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { self.wg.Add(1) defer self.wg.Done() + self.chainmu.Lock() + defer self.chainmu.Unlock() + // A queued approach to delivering events. This is generally faster than direct delivery and requires much less mutex acquiring. var ( queue = make([]interface{}, len(chain)) @@ -525,10 +536,19 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { stats struct{ queued, processed, ignored int } tstart = time.Now() ) + + // check the nonce in parallel to the block processing + // this speeds catching up significantly + nonceErrCh := make(chan error) + go func() { + nonceErrCh <- verifyNonces(self.pow, chain) + }() + for i, block := range chain { if block == nil { continue } + // Setting block.Td regardless of error (known for example) prevents errors down the line // in the protocol handler block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash()))) @@ -542,7 +562,6 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { continue } - block.Td = new(big.Int) // Do not penelise on future block. We'll need a block queue eventually that will queue // future block for future use if err == BlockFutureErr { @@ -559,68 +578,67 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { continue } - h := block.Header() - - glog.V(logger.Error).Infof("INVALID block #%v (%x)\n", h.Number, h.Hash().Bytes()) - glog.V(logger.Error).Infoln(err) - glog.V(logger.Debug).Infoln(block) + blockErr(block, err) return i, err } - self.mu.Lock() - { - cblock := self.currentBlock - // Write block to database. Eventually we'll have to improve on this and throw away blocks that are - // not in the canonical chain. - self.write(block) - // Compare the TD of the last known block in the canonical chain to make sure it's greater. - // At this point it's possible that a different chain (fork) becomes the new canonical chain. - if block.Td.Cmp(self.td) > 0 { - // chain fork - if block.ParentHash() != cblock.Hash() { - // during split we merge two different chains and create the new canonical chain - self.merge(cblock, block) - - queue[i] = ChainSplitEvent{block, logs} - queueEvent.splitCount++ - } - - self.setTotalDifficulty(block.Td) - self.insert(block) + cblock := self.currentBlock + // Write block to database. Eventually we'll have to improve on this and throw away blocks that are + // not in the canonical chain. + self.write(block) + // Compare the TD of the last known block in the canonical chain to make sure it's greater. + // At this point it's possible that a different chain (fork) becomes the new canonical chain. + if block.Td.Cmp(self.td) > 0 { + // chain fork + if block.ParentHash() != cblock.Hash() { + // during split we merge two different chains and create the new canonical chain + self.merge(cblock, block) + + queue[i] = ChainSplitEvent{block, logs} + queueEvent.splitCount++ + } - jsonlogger.LogJson(&logger.EthChainNewHead{ - BlockHash: block.Hash().Hex(), - BlockNumber: block.Number(), - ChainHeadHash: cblock.Hash().Hex(), - BlockPrevHash: block.ParentHash().Hex(), - }) + self.setTotalDifficulty(block.Td) + self.insert(block) - self.setTransState(state.New(block.Root(), self.stateDb)) - self.txState.SetState(state.New(block.Root(), self.stateDb)) + jsonlogger.LogJson(&logger.EthChainNewHead{ + BlockHash: block.Hash().Hex(), + BlockNumber: block.Number(), + ChainHeadHash: cblock.Hash().Hex(), + BlockPrevHash: block.ParentHash().Hex(), + }) - queue[i] = ChainEvent{block, block.Hash(), logs} - queueEvent.canonicalCount++ + self.setTransState(state.New(block.Root(), self.stateDb)) + self.txState.SetState(state.New(block.Root(), self.stateDb)) - if glog.V(logger.Debug) { - glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...)\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4]) - } - } else { - if glog.V(logger.Detail) { - glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...)\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4]) - } + queue[i] = ChainEvent{block, block.Hash(), logs} + queueEvent.canonicalCount++ - queue[i] = ChainSideEvent{block, logs} - queueEvent.sideCount++ + if glog.V(logger.Debug) { + glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...)\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4]) + } + } else { + if glog.V(logger.Detail) { + glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...)\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4]) } - self.futureBlocks.Delete(block.Hash()) + + queue[i] = ChainSideEvent{block, logs} + queueEvent.sideCount++ } - self.mu.Unlock() + self.futureBlocks.Delete(block.Hash()) stats.processed++ } + // check and wait for the nonce error channel and + // make sure no nonce error was thrown in the process + err := <-nonceErrCh + if err != nil { + return 0, err + } + if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) { tend := time.Since(tstart) start, end := chain[0], chain[len(chain)-1] @@ -719,3 +737,63 @@ out: } } } + +func blockErr(block *types.Block, err error) { + h := block.Header() + glog.V(logger.Error).Infof("INVALID block #%v (%x)\n", h.Number, h.Hash().Bytes()) + glog.V(logger.Error).Infoln(err) + glog.V(logger.Debug).Infoln(block) +} + +// verifyNonces verifies nonces of the given blocks in parallel and returns +// an error if one of the blocks nonce verifications failed. +func verifyNonces(pow pow.PoW, blocks []*types.Block) error { + // Spawn a few workers. They listen for blocks on the in channel + // and send results on done. The workers will exit in the + // background when in is closed. + var ( + in = make(chan *types.Block) + done = make(chan error, runtime.GOMAXPROCS(0)) + ) + defer close(in) + for i := 0; i < cap(done); i++ { + go verifyNonce(pow, in, done) + } + // Feed blocks to the workers, aborting at the first invalid nonce. + var ( + running, i int + block *types.Block + sendin = in + ) + for i < len(blocks) || running > 0 { + if i == len(blocks) { + // Disable sending to in. + sendin = nil + } else { + block = blocks[i] + i++ + } + select { + case sendin <- block: + running++ + case err := <-done: + running-- + if err != nil { + return err + } + } + } + return nil +} + +// verifyNonce is a worker for the verifyNonces method. It will run until +// in is closed. +func verifyNonce(pow pow.PoW, in <-chan *types.Block, done chan<- error) { + for block := range in { + if !pow.Verify(block) { + done <- ValidationError("Block(#%v) nonce is invalid (= %x)", block.Number(), block.Nonce) + } else { + done <- nil + } + } +} diff --git a/core/chain_manager_test.go b/core/chain_manager_test.go index b5155e223..7dc7358c0 100644 --- a/core/chain_manager_test.go +++ b/core/chain_manager_test.go @@ -9,11 +9,13 @@ import ( "strconv" "testing" + "github.com/ethereum/ethash" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/rlp" ) @@ -21,6 +23,11 @@ func init() { runtime.GOMAXPROCS(runtime.NumCPU()) } +func thePow() pow.PoW { + pow, _ := ethash.NewForTesting() + return pow +} + // Test fork of length N starting from block i func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) { // switch databases to process the new chain @@ -259,7 +266,7 @@ func TestChainInsertions(t *testing.T) { } var eventMux event.TypeMux - chainMan := NewChainManager(db, db, &eventMux) + chainMan := NewChainManager(db, db, thePow(), &eventMux) txPool := NewTxPool(&eventMux, chainMan.State, func() *big.Int { return big.NewInt(100000000) }) blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux) chainMan.SetProcessor(blockMan) @@ -305,7 +312,7 @@ func TestChainMultipleInsertions(t *testing.T) { } } var eventMux event.TypeMux - chainMan := NewChainManager(db, db, &eventMux) + chainMan := NewChainManager(db, db, thePow(), &eventMux) txPool := NewTxPool(&eventMux, chainMan.State, func() *big.Int { return big.NewInt(100000000) }) blockMan := NewBlockProcessor(db, db, nil, txPool, chainMan, &eventMux) chainMan.SetProcessor(blockMan) @@ -334,7 +341,7 @@ func TestGetAncestors(t *testing.T) { db, _ := ethdb.NewMemDatabase() var eventMux event.TypeMux - chainMan := NewChainManager(db, db, &eventMux) + chainMan := NewChainManager(db, db, thePow(), &eventMux) chain, err := loadChain("valid1", t) if err != nil { fmt.Println(err) @@ -372,7 +379,7 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block func chm(genesis *types.Block, db common.Database) *ChainManager { var eventMux event.TypeMux - bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: &eventMux} + bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}} bc.cache = NewBlockCache(100) bc.futureBlocks = NewBlockCache(100) bc.processor = bproc{} @@ -383,6 +390,7 @@ func chm(genesis *types.Block, db common.Database) *ChainManager { } func TestReorgLongest(t *testing.T) { + t.Skip("skipped while cache is removed") db, _ := ethdb.NewMemDatabase() genesis := GenesisBlock(db) bc := chm(genesis, db) @@ -402,6 +410,7 @@ func TestReorgLongest(t *testing.T) { } func TestReorgShortest(t *testing.T) { + t.Skip("skipped while cache is removed") db, _ := ethdb.NewMemDatabase() genesis := GenesisBlock(db) bc := chm(genesis, db) diff --git a/core/execution.go b/core/execution.go index 9adf98032..522c90449 100644 --- a/core/execution.go +++ b/core/execution.go @@ -38,6 +38,12 @@ func (self *Execution) Create(caller vm.ContextRef) (ret []byte, err error, acco code := self.input self.input = nil ret, err = self.exec(nil, code, caller) + // Here we get an error if we run into maximum stack depth, + // See: https://github.com/ethereum/yellowpaper/pull/131 + // and YP definitions for CREATE instruction + if err != nil { + return nil, err, nil + } account = self.env.State().GetStateObject(*self.address) return } diff --git a/core/vm/memory.go b/core/vm/memory.go index b77d486eb..d20aa9591 100644 --- a/core/vm/memory.go +++ b/core/vm/memory.go @@ -49,6 +49,18 @@ func (self *Memory) Get(offset, size int64) (cpy []byte) { return } +func (self *Memory) GetPtr(offset, size int64) []byte { + if size == 0 { + return nil + } + + if len(self.store) > int(offset) { + return self.store[offset : offset+size] + } + + return nil +} + func (m *Memory) Len() int { return len(m.store) } diff --git a/core/vm/vm.go b/core/vm/vm.go index 927b67293..35fa19d03 100644 --- a/core/vm/vm.go +++ b/core/vm/vm.go @@ -695,7 +695,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) { self.Printf("resume %x (%v)", context.Address(), context.Gas) case RETURN: offset, size := stack.pop(), stack.pop() - ret := mem.Get(offset.Int64(), size.Int64()) + ret := mem.GetPtr(offset.Int64(), size.Int64()) self.Printf(" => [%v, %v] (%d) 0x%x", offset, size, len(ret), ret).Endl() |