author    Yondon Fu <yondon.fu@gmail.com>    2017-12-19 06:17:41 +0800
committer Yondon Fu <yondon.fu@gmail.com>    2017-12-19 06:17:41 +0800
commit    3857cdc267e3192697f561df0a0f827f65dfb6b5 (patch)
tree      401c52c4972a68229ea283a394a0b0a5f3cfdc8e /eth
parent    a5330fe0c569b75cb8a524f60f7e8dc06498262b (diff)
parent    fe070ab5c32702033489f1b9d1655ea1b894c29e (diff)
Merge branch 'master' into abi-offset-fixed-arrays
Diffstat (limited to 'eth')
-rw-r--r--  eth/api.go                        | 109
-rw-r--r--  eth/api_test.go                   |   5
-rw-r--r--  eth/backend.go                    |  35
-rw-r--r--  eth/bloombits.go                  |  19
-rw-r--r--  eth/config.go                     |  39
-rw-r--r--  eth/downloader/downloader.go      |  14
-rw-r--r--  eth/downloader/downloader_test.go |  61
-rw-r--r--  eth/downloader/fakepeer.go        |   2
-rw-r--r--  eth/downloader/peer.go            |   2
-rw-r--r--  eth/filters/bench_test.go         |   2
-rw-r--r--  eth/filters/filter.go             |  16
-rw-r--r--  eth/filters/filter_system_test.go |   2
-rw-r--r--  eth/gen_config.go                 |  51
-rw-r--r--  eth/helper_test.go                |   5
-rw-r--r--  eth/protocol.go                   |   2
15 files changed, 252 insertions, 112 deletions
diff --git a/eth/api.go b/eth/api.go
index e91f51bb9..c748f75de 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -452,7 +452,12 @@ func (api *PrivateDebugAPI) traceBlock(block *types.Block, logConfig *vm.LogConf
}
statedb, err := blockchain.StateAt(blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1).Root())
if err != nil {
- return false, structLogger.StructLogs(), err
+ switch err.(type) {
+ case *trie.MissingNodeError:
+ return false, structLogger.StructLogs(), fmt.Errorf("required historical state unavailable")
+ default:
+ return false, structLogger.StructLogs(), err
+ }
}
receipts, _, usedGas, err := processor.Process(block, statedb, config)
@@ -518,7 +523,12 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, txHash common.
}
msg, context, statedb, err := api.computeTxEnv(blockHash, int(txIndex))
if err != nil {
- return nil, err
+ switch err.(type) {
+ case *trie.MissingNodeError:
+ return nil, fmt.Errorf("required historical state unavailable")
+ default:
+ return nil, err
+ }
}
// Run the transaction with tracing enabled.
@@ -615,14 +625,18 @@ func (api *PrivateDebugAPI) StorageRangeAt(ctx context.Context, blockHash common
if st == nil {
return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress)
}
- return storageRangeAt(st, keyStart, maxResult), nil
+ return storageRangeAt(st, keyStart, maxResult)
}
-func storageRangeAt(st state.Trie, start []byte, maxResult int) StorageRangeResult {
+func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) {
it := trie.NewIterator(st.NodeIterator(start))
result := StorageRangeResult{Storage: storageMap{}}
for i := 0; i < maxResult && it.Next(); i++ {
- e := storageEntry{Value: common.BytesToHash(it.Value)}
+ _, content, _, err := rlp.Split(it.Value)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+ e := storageEntry{Value: common.BytesToHash(content)}
if preimage := st.GetKey(it.Key); preimage != nil {
preimage := common.BytesToHash(preimage)
e.Key = &preimage
@@ -634,5 +648,88 @@ func storageRangeAt(st state.Trie, start []byte, maxResult int) StorageRangeResu
next := common.BytesToHash(it.Key)
result.NextKey = &next
}
- return result
+ return result, nil
+}
+
+// GetModifiedAccountsByNumber returns all accounts that have changed between the
+// two blocks specified. A change is defined as a difference in nonce, balance,
+// code hash, or storage hash.
+//
+// With one parameter, returns the list of accounts modified in the specified block.
+func (api *PrivateDebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) {
+ var startBlock, endBlock *types.Block
+
+ startBlock = api.eth.blockchain.GetBlockByNumber(startNum)
+ if startBlock == nil {
+ return nil, fmt.Errorf("start block %x not found", startNum)
+ }
+
+ if endNum == nil {
+ endBlock = startBlock
+ startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
+ if startBlock == nil {
+ return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
+ }
+ } else {
+ endBlock = api.eth.blockchain.GetBlockByNumber(*endNum)
+ if endBlock == nil {
+ return nil, fmt.Errorf("end block %d not found", *endNum)
+ }
+ }
+ return api.getModifiedAccounts(startBlock, endBlock)
+}
+
+// GetModifiedAccountsByHash returns all accounts that have changed between the
+// two blocks specified. A change is defined as a difference in nonce, balance,
+// code hash, or storage hash.
+//
+// With one parameter, returns the list of accounts modified in the specified block.
+func (api *PrivateDebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) {
+ var startBlock, endBlock *types.Block
+ startBlock = api.eth.blockchain.GetBlockByHash(startHash)
+ if startBlock == nil {
+ return nil, fmt.Errorf("start block %x not found", startHash)
+ }
+
+ if endHash == nil {
+ endBlock = startBlock
+ startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
+ if startBlock == nil {
+ return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
+ }
+ } else {
+ endBlock = api.eth.blockchain.GetBlockByHash(*endHash)
+ if endBlock == nil {
+ return nil, fmt.Errorf("end block %x not found", *endHash)
+ }
+ }
+ return api.getModifiedAccounts(startBlock, endBlock)
+}
+
+func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) {
+ if startBlock.Number().Uint64() >= endBlock.Number().Uint64() {
+ return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64())
+ }
+
+ oldTrie, err := trie.NewSecure(startBlock.Root(), api.eth.chainDb, 0)
+ if err != nil {
+ return nil, err
+ }
+ newTrie, err := trie.NewSecure(endBlock.Root(), api.eth.chainDb, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
+ iter := trie.NewIterator(diff)
+
+ var dirty []common.Address
+ for iter.Next() {
+ key := newTrie.GetKey(iter.Key)
+ if key == nil {
+ return nil, fmt.Errorf("no preimage found for hash %x", iter.Key)
+ }
+ dirty = append(dirty, common.BytesToAddress(key))
+ }
+ return dirty, nil
}
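
The new debug_getModifiedAccounts* helpers walk a trie.NewDifferenceIterator over the start and end state tries and collect every account whose node differs. As a rough, self-contained illustration of that idea only (not the trie package API), the sketch below diffs two plain key/value maps; all names and values are hypothetical:

package main

import (
	"fmt"
	"sort"
)

// modifiedKeys returns every key whose value differs between prev and curr,
// including keys that exist on only one side, which is the same notion of
// "dirty" that a difference iterator yields for account hashes.
func modifiedKeys(prev, curr map[string]string) []string {
	var dirty []string
	for k, v := range curr {
		if prev[k] != v {
			dirty = append(dirty, k)
		}
	}
	for k := range prev {
		if _, ok := curr[k]; !ok {
			dirty = append(dirty, k) // deleted entries count as modified too
		}
	}
	sort.Strings(dirty)
	return dirty
}

func main() {
	oldState := map[string]string{"alice": "nonce=1", "bob": "nonce=5"}
	newState := map[string]string{"alice": "nonce=2", "carol": "nonce=0"}
	fmt.Println(modifiedKeys(oldState, newState)) // [alice bob carol]
}
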
diff --git a/eth/api_test.go b/eth/api_test.go
index 49ce38688..248bc3ab6 100644
--- a/eth/api_test.go
+++ b/eth/api_test.go
@@ -79,7 +79,10 @@ func TestStorageRangeAt(t *testing.T) {
},
}
for _, test := range tests {
- result := storageRangeAt(state.StorageTrie(addr), test.start, test.limit)
+ result, err := storageRangeAt(state.StorageTrie(addr), test.start, test.limit)
+ if err != nil {
+ t.Error(err)
+ }
if !reflect.DeepEqual(result, test.want) {
t.Fatalf("wrong result for range 0x%x.., limit %d:\ngot %s\nwant %s",
test.start, test.limit, dumper.Sdump(result), dumper.Sdump(&test.want))
diff --git a/eth/backend.go b/eth/backend.go
index 6a06bd829..c39974a2c 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -54,6 +54,7 @@ type LesServer interface {
Start(srvr *p2p.Server)
Stop()
Protocols() []p2p.Protocol
+ SetBloomBitsIndexer(bbIndexer *core.ChainIndexer)
}
// Ethereum implements the Ethereum full node service.
@@ -95,6 +96,7 @@ type Ethereum struct {
func (s *Ethereum) AddLesServer(ls LesServer) {
s.lesServer = ls
+ ls.SetBloomBitsIndexer(s.bloomIndexer)
}
// New creates a new Ethereum object (including the
@@ -123,7 +125,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
chainConfig: chainConfig,
eventMux: ctx.EventMux,
accountManager: ctx.AccountManager,
- engine: CreateConsensusEngine(ctx, config, chainConfig, chainDb),
+ engine: CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
shutdownChan: make(chan bool),
stopDbUpgrade: stopDbUpgrade,
networkId: config.NetworkId,
@@ -154,7 +156,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
eth.blockchain.SetHead(compat.RewindTo)
core.WriteChainConfig(chainDb, genesisHash, chainConfig)
}
- eth.bloomIndexer.Start(eth.blockchain.CurrentHeader(), eth.blockchain.SubscribeChainEvent)
+ eth.bloomIndexer.Start(eth.blockchain)
if config.TxPool.Journal != "" {
config.TxPool.Journal = ctx.ResolvePath(config.TxPool.Journal)
@@ -207,25 +209,31 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data
}
// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
-func CreateConsensusEngine(ctx *node.ServiceContext, config *Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
+func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
// If proof-of-authority is requested, set it up
if chainConfig.Clique != nil {
return clique.New(chainConfig.Clique, db)
}
// Otherwise assume proof-of-work
switch {
- case config.PowFake:
+ case config.PowMode == ethash.ModeFake:
log.Warn("Ethash used in fake mode")
return ethash.NewFaker()
- case config.PowTest:
+ case config.PowMode == ethash.ModeTest:
log.Warn("Ethash used in test mode")
return ethash.NewTester()
- case config.PowShared:
+ case config.PowMode == ethash.ModeShared:
log.Warn("Ethash used in shared mode")
return ethash.NewShared()
default:
- engine := ethash.New(ctx.ResolvePath(config.EthashCacheDir), config.EthashCachesInMem, config.EthashCachesOnDisk,
- config.EthashDatasetDir, config.EthashDatasetsInMem, config.EthashDatasetsOnDisk)
+ engine := ethash.New(ethash.Config{
+ CacheDir: ctx.ResolvePath(config.CacheDir),
+ CachesInMem: config.CachesInMem,
+ CachesOnDisk: config.CachesOnDisk,
+ DatasetDir: config.DatasetDir,
+ DatasetsInMem: config.DatasetsInMem,
+ DatasetsOnDisk: config.DatasetsOnDisk,
+ })
engine.SetThreads(-1) // Disable CPU mining
return engine
}
@@ -302,10 +310,17 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) {
}
if wallets := s.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
- return accounts[0].Address, nil
+ etherbase := accounts[0].Address
+
+ s.lock.Lock()
+ s.etherbase = etherbase
+ s.lock.Unlock()
+
+ log.Info("Etherbase automatically configured", "address", etherbase)
+ return etherbase, nil
}
}
- return common.Address{}, fmt.Errorf("etherbase address must be explicitly specified")
+ return common.Address{}, fmt.Errorf("etherbase must be explicitly specified")
}
// set in js console via admin interface or wrapper from cli flags
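
CreateConsensusEngine now takes the dedicated ethash.Config and selects the engine by switching on a single PowMode value instead of three booleans. A minimal self-contained sketch of that one-enum-per-mode pattern, with hypothetical Mode and Engine types standing in for the ethash and consensus ones:

package main

import "fmt"

// Mode replaces a set of mutually exclusive booleans with one enum.
type Mode int

const (
	ModeNormal Mode = iota
	ModeFake
	ModeTest
	ModeShared
)

// Engine is a stand-in for consensus.Engine.
type Engine interface{ Name() string }

type engine struct{ name string }

func (e engine) Name() string { return e.name }

// newEngine mirrors the shape of the switch in CreateConsensusEngine: every
// mode maps to exactly one constructor, and the default case builds the full
// engine from the structured config.
func newEngine(mode Mode) Engine {
	switch mode {
	case ModeFake:
		return engine{"faker"}
	case ModeTest:
		return engine{"tester"}
	case ModeShared:
		return engine{"shared"}
	default:
		return engine{"full ethash"}
	}
}

func main() {
	fmt.Println(newEngine(ModeTest).Name()) // tester
}
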
diff --git a/eth/bloombits.go b/eth/bloombits.go
index 32f6c7b31..c5597391c 100644
--- a/eth/bloombits.go
+++ b/eth/bloombits.go
@@ -58,15 +58,18 @@ func (eth *Ethereum) startBloomHandlers() {
case request := <-eth.bloomRequests:
task := <-request
-
task.Bitsets = make([][]byte, len(task.Sections))
for i, section := range task.Sections {
head := core.GetCanonicalHash(eth.chainDb, (section+1)*params.BloomBitsBlocks-1)
- blob, err := bitutil.DecompressBytes(core.GetBloomBits(eth.chainDb, task.Bit, section, head), int(params.BloomBitsBlocks)/8)
- if err != nil {
- panic(err)
+ if compVector, err := core.GetBloomBits(eth.chainDb, task.Bit, section, head); err == nil {
+ if blob, err := bitutil.DecompressBytes(compVector, int(params.BloomBitsBlocks)/8); err == nil {
+ task.Bitsets[i] = blob
+ } else {
+ task.Error = err
+ }
+ } else {
+ task.Error = err
}
- task.Bitsets[i] = blob
}
request <- task
}
@@ -111,12 +114,10 @@ func NewBloomIndexer(db ethdb.Database, size uint64) *core.ChainIndexer {
// Reset implements core.ChainIndexerBackend, starting a new bloombits index
// section.
-func (b *BloomIndexer) Reset(section uint64) {
+func (b *BloomIndexer) Reset(section uint64, lastSectionHead common.Hash) error {
gen, err := bloombits.NewGenerator(uint(b.size))
- if err != nil {
- panic(err)
- }
b.gen, b.section, b.head = gen, section, common.Hash{}
+ return err
}
// Process implements core.ChainIndexerBackend, adding a new header's bloom into
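
The bloom handler no longer panics when a bitset cannot be read or decompressed; it records the failure on the task so the requesting matcher can see it. A self-contained sketch of that record-the-error worker shape, with hypothetical task and fetch names:

package main

import (
	"errors"
	"fmt"
)

// task carries both the results and any error back to the requester.
type task struct {
	Sections []uint64
	Bitsets  [][]byte
	Error    error
}

// fetch is a stand-in for the database lookup plus decompression step.
func fetch(section uint64) ([]byte, error) {
	if section == 2 {
		return nil, errors.New("missing bloom bits")
	}
	return []byte{byte(section)}, nil
}

// handle fills in the bitsets and records the failure on the task instead of
// panicking, mirroring the new startBloomHandlers behaviour.
func handle(t *task) {
	t.Bitsets = make([][]byte, len(t.Sections))
	for i, section := range t.Sections {
		if blob, err := fetch(section); err == nil {
			t.Bitsets[i] = blob
		} else {
			t.Error = err
		}
	}
}

func main() {
	t := &task{Sections: []uint64{0, 1, 2}}
	handle(t)
	fmt.Println(t.Bitsets, t.Error)
}
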
diff --git a/eth/config.go b/eth/config.go
index 7bcfd403e..383cd6783 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -33,16 +34,18 @@ import (
// DefaultConfig contains default settings for use on the Ethereum main net.
var DefaultConfig = Config{
- SyncMode: downloader.FastSync,
- EthashCacheDir: "ethash",
- EthashCachesInMem: 2,
- EthashCachesOnDisk: 3,
- EthashDatasetsInMem: 1,
- EthashDatasetsOnDisk: 2,
- NetworkId: 1,
- LightPeers: 20,
- DatabaseCache: 128,
- GasPrice: big.NewInt(18 * params.Shannon),
+ SyncMode: downloader.FastSync,
+ Ethash: ethash.Config{
+ CacheDir: "ethash",
+ CachesInMem: 2,
+ CachesOnDisk: 3,
+ DatasetsInMem: 1,
+ DatasetsOnDisk: 2,
+ },
+ NetworkId: 1,
+ LightPeers: 20,
+ DatabaseCache: 128,
+ GasPrice: big.NewInt(18 * params.Shannon),
TxPool: core.DefaultTxPoolConfig,
GPO: gasprice.Config{
@@ -59,9 +62,9 @@ func init() {
}
}
if runtime.GOOS == "windows" {
- DefaultConfig.EthashDatasetDir = filepath.Join(home, "AppData", "Ethash")
+ DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "AppData", "Ethash")
} else {
- DefaultConfig.EthashDatasetDir = filepath.Join(home, ".ethash")
+ DefaultConfig.Ethash.DatasetDir = filepath.Join(home, ".ethash")
}
}
@@ -92,12 +95,7 @@ type Config struct {
GasPrice *big.Int
// Ethash options
- EthashCacheDir string
- EthashCachesInMem int
- EthashCachesOnDisk int
- EthashDatasetDir string
- EthashDatasetsInMem int
- EthashDatasetsOnDisk int
+ Ethash ethash.Config
// Transaction pool options
TxPool core.TxPoolConfig
@@ -109,10 +107,7 @@ type Config struct {
EnablePreimageRecording bool
// Miscellaneous options
- DocRoot string `toml:"-"`
- PowFake bool `toml:"-"`
- PowTest bool `toml:"-"`
- PowShared bool `toml:"-"`
+ DocRoot string `toml:"-"`
}
type configMarshaling struct {
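
The six flat Ethash* options collapse into one nested ethash.Config value, and the platform-specific dataset directory is now set on that sub-struct. A self-contained sketch of the same nesting with hypothetical struct names (the home-directory lookup is simplified):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// EthashConfig groups what used to be six flat fields on the node config.
type EthashConfig struct {
	CacheDir       string
	CachesInMem    int
	CachesOnDisk   int
	DatasetDir     string
	DatasetsInMem  int
	DatasetsOnDisk int
}

type Config struct {
	NetworkId uint64
	Ethash    EthashConfig
}

// DefaultConfig mirrors the nesting used in eth/config.go.
var DefaultConfig = Config{
	NetworkId: 1,
	Ethash: EthashConfig{
		CacheDir:       "ethash",
		CachesInMem:    2,
		CachesOnDisk:   3,
		DatasetsInMem:  1,
		DatasetsOnDisk: 2,
	},
}

func init() {
	home := os.Getenv("HOME") // simplified home lookup for the sketch
	if runtime.GOOS == "windows" {
		DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "AppData", "Ethash")
	} else {
		DefaultConfig.Ethash.DatasetDir = filepath.Join(home, ".ethash")
	}
}

func main() {
	fmt.Printf("%+v\n", DefaultConfig.Ethash)
}
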
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 5782d4cf5..b338129e0 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -333,7 +333,7 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
}
// synchronise will select the peer and use it for synchronising. If an empty string is given
-// it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the
+// it will use the best peer possible and synchronize if its TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
// Mock out the synchronisation if testing
@@ -708,7 +708,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
ttl := d.requestTTL()
timeout := time.After(ttl)
- go p.peer.RequestHeadersByNumber(uint64(check), 1, 0, false)
+ go p.peer.RequestHeadersByNumber(check, 1, 0, false)
// Wait until a reply arrives to this request
for arrived := false; !arrived; {
@@ -1003,8 +1003,8 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
return errCancel
case packet := <-deliveryCh:
- // If the peer was previously banned and failed to deliver it's pack
- // in a reasonable time frame, ignore it's message.
+ // If the peer was previously banned and failed to deliver its pack
+ // in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(packet.PeerId()); peer != nil {
// Deliver the received chunk of data and check chain validity
accepted, err := deliver(packet)
@@ -1205,8 +1205,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
case <-d.cancelCh:
}
}
- // If no headers were retrieved at all, the peer violated it's TD promise that it had a
- // better chain compared to ours. The only exception is if it's promised blocks were
+ // If no headers were retrieved at all, the peer violated its TD promise that it had a
+ // better chain compared to ours. The only exception is if its promised blocks were
// already imported by other means (e.g. fetcher):
//
// R <remote peer>, L <local node>: Both at block 10
@@ -1518,7 +1518,7 @@ func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, i
func (d *Downloader) qosTuner() {
for {
// Retrieve the current median RTT and integrate into the previous target RTT
- rtt := time.Duration(float64(1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
+ rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
// A new RTT cycle passed, increase our confidence in the estimated RTT
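
The qosTuner change is purely a cleanup of an exponentially weighted moving average: the new estimate is (1 - qosTuningImpact) * old + qosTuningImpact * medianRTT, stored atomically as a uint64. A self-contained sketch of that update step, with a hypothetical weight:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// qosTuningImpact is the weight given to the newest sample (hypothetical value).
const qosTuningImpact = 0.25

// updateRTT folds one median sample into the running estimate, in the same
// (1-w)*old + w*sample form used by the downloader's qosTuner loop.
func updateRTT(estimate *uint64, median time.Duration) time.Duration {
	old := time.Duration(atomic.LoadUint64(estimate))
	rtt := time.Duration((1-qosTuningImpact)*float64(old) + qosTuningImpact*float64(median))
	atomic.StoreUint64(estimate, uint64(rtt))
	return rtt
}

func main() {
	estimate := uint64(800 * time.Millisecond)
	fmt.Println(updateRTT(&estimate, 400*time.Millisecond)) // 700ms
}
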
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 58f6e9a62..7d1cc8c34 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -704,6 +704,7 @@ func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
+ t.Parallel()
tester := newTester()
defer tester.terminate()
@@ -1166,6 +1167,8 @@ func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 6
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
+ t.Parallel()
+
tester := newTester()
defer tester.terminate()
@@ -1198,6 +1201,8 @@ func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
+ t.Parallel()
+
tester := newTester()
defer tester.terminate()
@@ -1310,6 +1315,8 @@ func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDr
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
+ t.Parallel()
+
// Define the disconnection requirement for individual hash fetch errors
tests := []struct {
result error
@@ -1665,12 +1672,26 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
-func TestDeliverHeadersHang62(t *testing.T) { testDeliverHeadersHang(t, 62, FullSync) }
-func TestDeliverHeadersHang63Full(t *testing.T) { testDeliverHeadersHang(t, 63, FullSync) }
-func TestDeliverHeadersHang63Fast(t *testing.T) { testDeliverHeadersHang(t, 63, FastSync) }
-func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) }
-func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) }
-func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
+// We use data driven subtests to manage this so that it will be parallel on its own
+// and not with the other tests, avoiding intermittent failures.
+func TestDeliverHeadersHang(t *testing.T) {
+ testCases := []struct {
+ protocol int
+ syncMode SyncMode
+ }{
+ {62, FullSync},
+ {63, FullSync},
+ {63, FastSync},
+ {64, FullSync},
+ {64, FastSync},
+ {64, LightSync},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
+ testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
+ })
+ }
+}
type floodingTestPeer struct {
peer Peer
@@ -1703,7 +1724,7 @@ func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int
// Deliver the actual requested headers.
go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
// None of the extra deliveries should block.
- timeout := time.After(15 * time.Second)
+ timeout := time.After(60 * time.Second)
for i := 0; i < cap(deliveriesDone); i++ {
select {
case <-deliveriesDone:
@@ -1732,7 +1753,6 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
tester.downloader.peers.peers["peer"].peer,
tester,
}
-
if err := tester.sync("peer", nil, mode); err != nil {
t.Errorf("sync failed: %v", err)
}
@@ -1742,12 +1762,28 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
// Tests that if fast sync aborts in the critical section, it can restart a few
// times before giving up.
-func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
-func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
-func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
-func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
+// We use data driven subtests to manage this so that it will be parallel on its own
+// and not with the other tests, avoiding intermittent failures.
+func TestFastCriticalRestarts(t *testing.T) {
+ testCases := []struct {
+ protocol int
+ progress bool
+ }{
+ {63, false},
+ {64, false},
+ {63, true},
+ {64, true},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("protocol %d progress %v", tc.protocol, tc.progress), func(t *testing.T) {
+ testFastCriticalRestarts(t, tc.protocol, tc.progress)
+ })
+ }
+}
func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
+ t.Parallel()
+
tester := newTester()
defer tester.terminate()
@@ -1776,6 +1812,7 @@ func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
if i == 0 {
+ time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
if tester.downloader.fsPivotLock == nil {
time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
t.Fatalf("pivot block not locked in after critical section failure")
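
The DeliverHeadersHang and FastCriticalRestarts variants become table-driven subtests: one parent test, one t.Run per protocol/mode pair, and t.Parallel in the shared helper, so the cases run alongside each other but not alongside unrelated tests. A self-contained sketch of that layout (test and case names hypothetical):

package downloader_sketch_test

import (
	"fmt"
	"testing"
)

// TestSyncVariants shows the data-driven subtest shape used for
// TestDeliverHeadersHang and TestFastCriticalRestarts: one parent test,
// one t.Run per case, and t.Parallel inside the shared helper.
func TestSyncVariants(t *testing.T) {
	testCases := []struct {
		protocol int
		fast     bool
	}{
		{63, false},
		{63, true},
		{64, false},
		{64, true},
	}
	for _, tc := range testCases {
		tc := tc // capture for the closure (pre-Go 1.22 loop semantics)
		t.Run(fmt.Sprintf("protocol %d fast %v", tc.protocol, tc.fast), func(t *testing.T) {
			testSyncVariant(t, tc.protocol, tc.fast)
		})
	}
}

func testSyncVariant(t *testing.T, protocol int, fast bool) {
	t.Parallel()
	if protocol < 62 {
		t.Fatalf("unsupported protocol %d", protocol)
	}
}
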
diff --git a/eth/downloader/fakepeer.go b/eth/downloader/fakepeer.go
index ebdb9c334..b45acff7d 100644
--- a/eth/downloader/fakepeer.go
+++ b/eth/downloader/fakepeer.go
@@ -62,7 +62,7 @@ func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int,
number := origin.Number.Uint64()
headers = append(headers, origin)
if reverse {
- for i := 0; i < int(skip)+1; i++ {
+ for i := 0; i <= skip; i++ {
if header := p.hc.GetHeader(hash, number); header != nil {
hash = header.ParentHash
number--
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index e638744ea..a4aa86114 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -548,7 +548,7 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerC
return idle, total
}
-// medianRTT returns the median RTT of te peerset, considering only the tuning
+// medianRTT returns the median RTT of the peerset, considering only the tuning
// peers if there are more peers available.
func (ps *peerSet) medianRTT() time.Duration {
// Gather all the currently measured round trip times
diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go
index abbf4593e..0a0929bc1 100644
--- a/eth/filters/bench_test.go
+++ b/eth/filters/bench_test.go
@@ -192,7 +192,7 @@ func BenchmarkNoBloomBits(b *testing.B) {
start := time.Now()
mux := new(event.TypeMux)
backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
- filter := New(backend, 0, int64(headNum), []common.Address{common.Address{}}, nil)
+ filter := New(backend, 0, int64(headNum), []common.Address{{}}, nil)
filter.Logs(context.Background())
d := time.Since(start)
fmt.Println("Finished running filter benchmarks")
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index 026cbf95c..43d7e2a81 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -19,7 +19,6 @@ package filters
import (
"context"
"math/big"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
@@ -136,11 +135,11 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err
// Create a matcher session and request servicing from the backend
matches := make(chan uint64, 64)
- session, err := f.matcher.Start(uint64(f.begin), end, matches)
+ session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)
if err != nil {
return nil, err
}
- defer session.Close(time.Second)
+ defer session.Close()
f.backend.ServiceFilter(ctx, session)
@@ -152,9 +151,14 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err
case number, ok := <-matches:
// Abort if all matches have been fulfilled
if !ok {
- f.begin = int64(end) + 1
- return logs, nil
+ err := session.Error()
+ if err == nil {
+ f.begin = int64(end) + 1
+ }
+ return logs, err
}
+ f.begin = int64(number) + 1
+
// Retrieve the suggested block and pull any truly matching logs
header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
if header == nil || err != nil {
@@ -203,7 +207,7 @@ func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs [
}
var unfiltered []*types.Log
for _, receipt := range receipts {
- unfiltered = append(unfiltered, ([]*types.Log)(receipt.Logs)...)
+ unfiltered = append(unfiltered, receipt.Logs...)
}
logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
if len(logs) > 0 {
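
indexedLogs now passes its context into matcher.Start, closes the session without a timeout, and, once the matches channel closes, asks session.Error() whether the close was clean before advancing f.begin. A self-contained sketch of that consume-then-check shape, with a hypothetical session type in place of bloombits.MatcherSession:

package main

import (
	"errors"
	"fmt"
)

// session is a stand-in for a matcher session: it streams matches and
// remembers why the stream ended.
type session struct {
	matches chan uint64
	err     error
}

func (s *session) Error() error { return s.err }

// drain collects matches until the channel closes, then distinguishes a clean
// close from an aborted one, mirroring indexedLogs after this change.
func drain(s *session) ([]uint64, error) {
	var got []uint64
	for {
		number, ok := <-s.matches
		if !ok {
			return got, s.Error() // only trust the results if Error() is nil
		}
		got = append(got, number)
	}
}

func main() {
	s := &session{matches: make(chan uint64, 2)}
	s.matches <- 7
	s.matches <- 9
	s.err = errors.New("backend went away")
	close(s.matches)

	got, err := drain(s)
	fmt.Println(got, err)
}
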
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index bc3511f23..7da114fda 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -109,7 +109,7 @@ func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.Matc
for i, section := range task.Sections {
if rand.Int()%4 != 0 { // Handle occasional missing deliveries
head := core.GetCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
- task.Bitsets[i] = core.GetBloomBits(b.db, task.Bit, section, head)
+ task.Bitsets[i], _ = core.GetBloomBits(b.db, task.Bit, section, head)
}
}
request <- task
diff --git a/eth/gen_config.go b/eth/gen_config.go
index 4a4cd7b9c..e2d50e1f6 100644
--- a/eth/gen_config.go
+++ b/eth/gen_config.go
@@ -7,6 +7,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -36,10 +37,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
TxPool core.TxPoolConfig
GPO gasprice.Config
EnablePreimageRecording bool
- DocRoot string `toml:"-"`
- PowFake bool `toml:"-"`
- PowTest bool `toml:"-"`
- PowShared bool `toml:"-"`
+ DocRoot string `toml:"-"`
+ PowMode ethash.Mode `toml:"-"`
}
var enc Config
enc.Genesis = c.Genesis
@@ -54,19 +53,17 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.MinerThreads = c.MinerThreads
enc.ExtraData = c.ExtraData
enc.GasPrice = c.GasPrice
- enc.EthashCacheDir = c.EthashCacheDir
- enc.EthashCachesInMem = c.EthashCachesInMem
- enc.EthashCachesOnDisk = c.EthashCachesOnDisk
- enc.EthashDatasetDir = c.EthashDatasetDir
- enc.EthashDatasetsInMem = c.EthashDatasetsInMem
- enc.EthashDatasetsOnDisk = c.EthashDatasetsOnDisk
+ enc.EthashCacheDir = c.Ethash.CacheDir
+ enc.EthashCachesInMem = c.Ethash.CachesInMem
+ enc.EthashCachesOnDisk = c.Ethash.CachesOnDisk
+ enc.EthashDatasetDir = c.Ethash.DatasetDir
+ enc.EthashDatasetsInMem = c.Ethash.DatasetsInMem
+ enc.EthashDatasetsOnDisk = c.Ethash.DatasetsOnDisk
enc.TxPool = c.TxPool
enc.GPO = c.GPO
enc.EnablePreimageRecording = c.EnablePreimageRecording
enc.DocRoot = c.DocRoot
- enc.PowFake = c.PowFake
- enc.PowTest = c.PowTest
- enc.PowShared = c.PowShared
+ enc.PowMode = c.Ethash.PowMode
return &enc, nil
}
@@ -94,10 +91,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
TxPool *core.TxPoolConfig
GPO *gasprice.Config
EnablePreimageRecording *bool
- DocRoot *string `toml:"-"`
- PowFake *bool `toml:"-"`
- PowTest *bool `toml:"-"`
- PowShared *bool `toml:"-"`
+ DocRoot *string `toml:"-"`
+ PowMode *ethash.Mode `toml:"-"`
}
var dec Config
if err := unmarshal(&dec); err != nil {
@@ -140,22 +135,22 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
c.GasPrice = dec.GasPrice
}
if dec.EthashCacheDir != nil {
- c.EthashCacheDir = *dec.EthashCacheDir
+ c.Ethash.CacheDir = *dec.EthashCacheDir
}
if dec.EthashCachesInMem != nil {
- c.EthashCachesInMem = *dec.EthashCachesInMem
+ c.Ethash.CachesInMem = *dec.EthashCachesInMem
}
if dec.EthashCachesOnDisk != nil {
- c.EthashCachesOnDisk = *dec.EthashCachesOnDisk
+ c.Ethash.CachesOnDisk = *dec.EthashCachesOnDisk
}
if dec.EthashDatasetDir != nil {
- c.EthashDatasetDir = *dec.EthashDatasetDir
+ c.Ethash.DatasetDir = *dec.EthashDatasetDir
}
if dec.EthashDatasetsInMem != nil {
- c.EthashDatasetsInMem = *dec.EthashDatasetsInMem
+ c.Ethash.DatasetsInMem = *dec.EthashDatasetsInMem
}
if dec.EthashDatasetsOnDisk != nil {
- c.EthashDatasetsOnDisk = *dec.EthashDatasetsOnDisk
+ c.Ethash.DatasetsOnDisk = *dec.EthashDatasetsOnDisk
}
if dec.TxPool != nil {
c.TxPool = *dec.TxPool
@@ -169,14 +164,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.DocRoot != nil {
c.DocRoot = *dec.DocRoot
}
- if dec.PowFake != nil {
- c.PowFake = *dec.PowFake
- }
- if dec.PowTest != nil {
- c.PowTest = *dec.PowTest
- }
- if dec.PowShared != nil {
- c.PowShared = *dec.PowShared
+ if dec.PowMode != nil {
+ c.Ethash.PowMode = *dec.PowMode
}
return nil
}
diff --git a/eth/helper_test.go b/eth/helper_test.go
index b66553135..f02242b15 100644
--- a/eth/helper_test.go
+++ b/eth/helper_test.go
@@ -97,7 +97,7 @@ type testTxPool struct {
// AddRemotes appends a batch of transactions to the pool, and notifies any
// listeners if the addition channel is non nil
-func (p *testTxPool) AddRemotes(txs []*types.Transaction) error {
+func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error {
p.lock.Lock()
defer p.lock.Unlock()
@@ -105,8 +105,7 @@ func (p *testTxPool) AddRemotes(txs []*types.Transaction) error {
if p.added != nil {
p.added <- txs
}
-
- return nil
+ return make([]error, len(txs))
}
// Pending returns all the transactions known to the pool
diff --git a/eth/protocol.go b/eth/protocol.go
index 2c41376fa..cd7db57f2 100644
--- a/eth/protocol.go
+++ b/eth/protocol.go
@@ -97,7 +97,7 @@ var errorToString = map[int]string{
type txPool interface {
// AddRemotes should add the given transactions to the pool.
- AddRemotes([]*types.Transaction) error
+ AddRemotes([]*types.Transaction) []error
// Pending should return pending transactions.
// The slice should be modifiable by the caller.
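
AddRemotes now reports one error per transaction rather than a single error for the whole batch, so callers can see exactly which entries were rejected. A self-contained sketch of such a batch interface, with hypothetical pool and transaction types:

package main

import (
	"errors"
	"fmt"
)

type Transaction struct{ Nonce uint64 }

// txPool mirrors the updated interface: one error slot per submitted tx.
type txPool interface {
	AddRemotes([]*Transaction) []error
}

type pool struct{ seen map[uint64]bool }

func (p *pool) AddRemotes(txs []*Transaction) []error {
	errs := make([]error, len(txs))
	for i, tx := range txs {
		if p.seen[tx.Nonce] {
			errs[i] = errors.New("known transaction")
			continue
		}
		p.seen[tx.Nonce] = true
	}
	return errs
}

func main() {
	p := &pool{seen: map[uint64]bool{}}
	txs := []*Transaction{{Nonce: 1}, {Nonce: 1}, {Nonce: 2}}
	for i, err := range p.AddRemotes(txs) {
		fmt.Printf("tx %d: %v\n", i, err)
	}
}
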