 accounts/keystore/account_cache_test.go |  12
 cmd/utils/flags.go                      |  34
 consensus/ethash/algorithm_test.go      |   3
 consensus/ethash/consensus.go           |  10
 consensus/ethash/ethash.go              | 117
 consensus/ethash/sealer.go              |   2
 console/console_test.go                 |   5
 eth/backend.go                          |  20
 eth/config.go                           |  39
 eth/gen_config.go                       |  51
 internal/ethapi/tracer.go               |  85
 les/backend.go                          |   2
 swarm/api/manifest.go                   |  10
 swarm/api/manifest_test.go              |  78
 14 files changed, 268 insertions(+), 200 deletions(-)
diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go
index e3dc31065..fe9233c04 100644
--- a/accounts/keystore/account_cache_test.go
+++ b/accounts/keystore/account_cache_test.go
@@ -59,7 +59,7 @@ func TestWatchNewFile(t *testing.T) {
// Ensure the watcher is started before adding any files.
ks.Accounts()
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(1000 * time.Millisecond)
// Move in the files.
wantAccounts := make([]accounts.Account, len(cachetestAccounts))
@@ -349,6 +349,9 @@ func TestUpdatedKeyfileContents(t *testing.T) {
return
}
+ // needed so that modTime of `file` is different to its current value after forceCopyFile
+ time.Sleep(1000 * time.Millisecond)
+
// Now replace file contents
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
t.Fatal(err)
@@ -362,6 +365,9 @@ func TestUpdatedKeyfileContents(t *testing.T) {
return
}
+ // needed so that modTime of `file` is different to its current value after forceCopyFile
+ time.Sleep(1000 * time.Millisecond)
+
// Now replace file contents again
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
t.Fatal(err)
@@ -374,6 +380,10 @@ func TestUpdatedKeyfileContents(t *testing.T) {
t.Error(err)
return
}
+
+ // needed so that modTime of `file` is different to its current value after ioutil.WriteFile
+ time.Sleep(1000 * time.Millisecond)
+
// Now replace file contents with crap
if err := ioutil.WriteFile(file, []byte("foo"), 0644); err != nil {
t.Fatal(err)
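
The added sleeps account for coarse filesystem mtime resolution: as the new comments note, the cache only treats `file` as changed when its modTime differs, so two writes landing within the same second can go unnoticed. A minimal, self-contained sketch of the effect (hypothetical standalone program, not part of the change):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	tmp, _ := ioutil.TempFile("", "mtime")
	name := tmp.Name()
	tmp.Close()
	defer os.Remove(name)

	ioutil.WriteFile(name, []byte("one"), 0644)
	before, _ := os.Stat(name)

	// Immediately overwrite: on filesystems with one-second mtime
	// granularity both writes can report the same ModTime.
	ioutil.WriteFile(name, []byte("two"), 0644)
	after, _ := os.Stat(name)

	// Without an intervening sleep this may print true, which is why the
	// test sleeps before each rewrite of the watched keyfile.
	fmt.Println(before.ModTime().Equal(after.ModTime()))
}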
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 5c2929268..9fbad8dab 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -217,27 +217,27 @@ var (
EthashCachesInMemoryFlag = cli.IntFlag{
Name: "ethash.cachesinmem",
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
- Value: eth.DefaultConfig.EthashCachesInMem,
+ Value: eth.DefaultConfig.Ethash.CachesInMem,
}
EthashCachesOnDiskFlag = cli.IntFlag{
Name: "ethash.cachesondisk",
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
- Value: eth.DefaultConfig.EthashCachesOnDisk,
+ Value: eth.DefaultConfig.Ethash.CachesOnDisk,
}
EthashDatasetDirFlag = DirectoryFlag{
Name: "ethash.dagdir",
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
- Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
+ Value: DirectoryString{eth.DefaultConfig.Ethash.DatasetDir},
}
EthashDatasetsInMemoryFlag = cli.IntFlag{
Name: "ethash.dagsinmem",
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
- Value: eth.DefaultConfig.EthashDatasetsInMem,
+ Value: eth.DefaultConfig.Ethash.DatasetsInMem,
}
EthashDatasetsOnDiskFlag = cli.IntFlag{
Name: "ethash.dagsondisk",
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
- Value: eth.DefaultConfig.EthashDatasetsOnDisk,
+ Value: eth.DefaultConfig.Ethash.DatasetsOnDisk,
}
// Transaction pool settings
TxPoolNoLocalsFlag = cli.BoolFlag{
@@ -910,22 +910,22 @@ func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
func setEthash(ctx *cli.Context, cfg *eth.Config) {
if ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
- cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
+ cfg.Ethash.CacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
}
if ctx.GlobalIsSet(EthashDatasetDirFlag.Name) {
- cfg.EthashDatasetDir = ctx.GlobalString(EthashDatasetDirFlag.Name)
+ cfg.Ethash.DatasetDir = ctx.GlobalString(EthashDatasetDirFlag.Name)
}
if ctx.GlobalIsSet(EthashCachesInMemoryFlag.Name) {
- cfg.EthashCachesInMem = ctx.GlobalInt(EthashCachesInMemoryFlag.Name)
+ cfg.Ethash.CachesInMem = ctx.GlobalInt(EthashCachesInMemoryFlag.Name)
}
if ctx.GlobalIsSet(EthashCachesOnDiskFlag.Name) {
- cfg.EthashCachesOnDisk = ctx.GlobalInt(EthashCachesOnDiskFlag.Name)
+ cfg.Ethash.CachesOnDisk = ctx.GlobalInt(EthashCachesOnDiskFlag.Name)
}
if ctx.GlobalIsSet(EthashDatasetsInMemoryFlag.Name) {
- cfg.EthashDatasetsInMem = ctx.GlobalInt(EthashDatasetsInMemoryFlag.Name)
+ cfg.Ethash.DatasetsInMem = ctx.GlobalInt(EthashDatasetsInMemoryFlag.Name)
}
if ctx.GlobalIsSet(EthashDatasetsOnDiskFlag.Name) {
- cfg.EthashDatasetsOnDisk = ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name)
+ cfg.Ethash.DatasetsOnDisk = ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name)
}
}
@@ -1159,10 +1159,14 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
} else {
engine = ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
- engine = ethash.New(
- stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
- stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
- )
+ engine = ethash.New(ethash.Config{
+ CacheDir: stack.ResolvePath(eth.DefaultConfig.Ethash.CacheDir),
+ CachesInMem: eth.DefaultConfig.Ethash.CachesInMem,
+ CachesOnDisk: eth.DefaultConfig.Ethash.CachesOnDisk,
+ DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir),
+ DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem,
+ DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
+ })
}
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
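
With the flattened Ethash* fields gone, callers reach the same options through the nested Ethash struct that the flags above now populate. A minimal sketch (standalone program, assuming this revision of github.com/ethereum/go-ethereum; the dataset path is hypothetical):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth"
)

func main() {
	// Copy the defaults and override ethash options via the nested struct.
	cfg := eth.DefaultConfig
	cfg.Ethash.CachesInMem = 4
	cfg.Ethash.CachesOnDisk = 6
	cfg.Ethash.DatasetDir = "/tmp/ethash" // hypothetical path

	fmt.Println(cfg.Ethash.CacheDir, cfg.Ethash.CachesInMem)
}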
diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go
index 7e4307a74..7765ff9fe 100644
--- a/consensus/ethash/algorithm_test.go
+++ b/consensus/ethash/algorithm_test.go
@@ -703,8 +703,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
go func(idx int) {
defer pend.Done()
-
- ethash := New(cachedir, 0, 1, "", 0, 0)
+ ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal})
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
t.Errorf("proc %d: block verification failed: %v", idx, err)
}
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index e330b7ce5..775419e06 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -68,7 +68,7 @@ func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {
// stock Ethereum ethash engine.
func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
// If we're running a full engine faking, accept any input as valid
- if ethash.fakeFull {
+ if ethash.config.PowMode == ModeFullFake {
return nil
}
// Short circuit if the header is known, or its parent not
@@ -89,7 +89,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He
// a results channel to retrieve the async verifications.
func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
// If we're running a full engine faking, accept any input as valid
- if ethash.fakeFull || len(headers) == 0 {
+ if ethash.config.PowMode == ModeFullFake || len(headers) == 0 {
abort, results := make(chan struct{}), make(chan error, len(headers))
for i := 0; i < len(headers); i++ {
results <- nil
@@ -169,7 +169,7 @@ func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainReader, headers []
// rules of the stock Ethereum ethash engine.
func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
// If we're running a full engine faking, accept any input as valid
- if ethash.fakeFull {
+ if ethash.config.PowMode == ModeFullFake {
return nil
}
// Verify that there are at most 2 uncles included in this block
@@ -455,7 +455,7 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
// the PoW difficulty requirements.
func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
// If we're running a fake PoW, accept any seal as valid
- if ethash.fakeMode {
+ if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
time.Sleep(ethash.fakeDelay)
if ethash.fakeFail == header.Number.Uint64() {
return errInvalidPoW
@@ -480,7 +480,7 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
cache := ethash.cache(number)
size := datasetSize(number)
- if ethash.tester {
+ if ethash.config.PowMode == ModeTest {
size = 32 * 1024
}
digest, result := hashimotoLight(size, cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index dd6147072..a78b3a895 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -45,7 +45,7 @@ var (
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
// sharedEthash is a full instance that can be shared between multiple users.
- sharedEthash = New("", 3, 0, "", 1, 0)
+ sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})
// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23
@@ -320,15 +320,32 @@ func MakeDataset(block uint64, dir string) {
d.release()
}
+// Mode defines the type and amount of PoW verification an ethash engine makes.
+type Mode uint
+
+const (
+ ModeNormal Mode = iota
+ ModeShared
+ ModeTest
+ ModeFake
+ ModeFullFake
+)
+
+// Config are the configuration parameters of the ethash.
+type Config struct {
+ CacheDir string
+ CachesInMem int
+ CachesOnDisk int
+ DatasetDir string
+ DatasetsInMem int
+ DatasetsOnDisk int
+ PowMode Mode
+}
+
// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
- cachedir string // Data directory to store the verification caches
- cachesinmem int // Number of caches to keep in memory
- cachesondisk int // Number of caches to keep on disk
- dagdir string // Data directory to store full mining datasets
- dagsinmem int // Number of mining datasets to keep in memory
- dagsondisk int // Number of mining datasets to keep on disk
+ config Config
caches map[uint64]*cache // In memory caches to avoid regenerating too often
fcache *cache // Pre-generated cache for the estimated future epoch
@@ -342,10 +359,7 @@ type Ethash struct {
hashrate metrics.Meter // Meter tracking the average hashrate
// The fields below are hooks for testing
- tester bool // Flag whether to use a smaller test dataset
shared *Ethash // Shared PoW verifier to avoid cache regeneration
- fakeMode bool // Flag whether to disable PoW checking
- fakeFull bool // Flag whether to disable all consensus rules
fakeFail uint64 // Block number which fails PoW check even in fake mode
fakeDelay time.Duration // Time delay to sleep for before returning from verify
@@ -353,28 +367,23 @@ type Ethash struct {
}
// New creates a full sized ethash PoW scheme.
-func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) *Ethash {
- if cachesinmem <= 0 {
- log.Warn("One ethash cache must always be in memory", "requested", cachesinmem)
- cachesinmem = 1
+func New(config Config) *Ethash {
+ if config.CachesInMem <= 0 {
+ log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
+ config.CachesInMem = 1
}
- if cachedir != "" && cachesondisk > 0 {
- log.Info("Disk storage enabled for ethash caches", "dir", cachedir, "count", cachesondisk)
+ if config.CacheDir != "" && config.CachesOnDisk > 0 {
+ log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
}
- if dagdir != "" && dagsondisk > 0 {
- log.Info("Disk storage enabled for ethash DAGs", "dir", dagdir, "count", dagsondisk)
+ if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
+ log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
}
return &Ethash{
- cachedir: cachedir,
- cachesinmem: cachesinmem,
- cachesondisk: cachesondisk,
- dagdir: dagdir,
- dagsinmem: dagsinmem,
- dagsondisk: dagsondisk,
- caches: make(map[uint64]*cache),
- datasets: make(map[uint64]*dataset),
- update: make(chan struct{}),
- hashrate: metrics.NewMeter(),
+ config: config,
+ caches: make(map[uint64]*cache),
+ datasets: make(map[uint64]*dataset),
+ update: make(chan struct{}),
+ hashrate: metrics.NewMeter(),
}
}
@@ -382,12 +391,14 @@ func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinme
// purposes.
func NewTester() *Ethash {
return &Ethash{
- cachesinmem: 1,
- caches: make(map[uint64]*cache),
- datasets: make(map[uint64]*dataset),
- tester: true,
- update: make(chan struct{}),
- hashrate: metrics.NewMeter(),
+ config: Config{
+ CachesInMem: 1,
+ PowMode: ModeTest,
+ },
+ caches: make(map[uint64]*cache),
+ datasets: make(map[uint64]*dataset),
+ update: make(chan struct{}),
+ hashrate: metrics.NewMeter(),
}
}
@@ -395,27 +406,45 @@ func NewTester() *Ethash {
// all blocks' seal as valid, though they still have to conform to the Ethereum
// consensus rules.
func NewFaker() *Ethash {
- return &Ethash{fakeMode: true}
+ return &Ethash{
+ config: Config{
+ PowMode: ModeFake,
+ },
+ }
}
// NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the Ethereum consensus rules.
func NewFakeFailer(fail uint64) *Ethash {
- return &Ethash{fakeMode: true, fakeFail: fail}
+ return &Ethash{
+ config: Config{
+ PowMode: ModeFake,
+ },
+ fakeFail: fail,
+ }
}
// NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the Ethereum consensus rules.
func NewFakeDelayer(delay time.Duration) *Ethash {
- return &Ethash{fakeMode: true, fakeDelay: delay}
+ return &Ethash{
+ config: Config{
+ PowMode: ModeFake,
+ },
+ fakeDelay: delay,
+ }
}
// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
- return &Ethash{fakeMode: true, fakeFull: true}
+ return &Ethash{
+ config: Config{
+ PowMode: ModeFullFake,
+ },
+ }
}
// NewShared creates a full sized ethash PoW shared between all requesters running
@@ -436,7 +465,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
current, future := ethash.caches[epoch], (*cache)(nil)
if current == nil {
// No in-memory cache, evict the oldest if the cache limit was reached
- for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.cachesinmem {
+ for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.config.CachesInMem {
var evict *cache
for _, cache := range ethash.caches {
if evict == nil || evict.used.After(cache.used) {
@@ -473,7 +502,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
ethash.lock.Unlock()
// Wait for generation finish, bump the timestamp and finalize the cache
- current.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
+ current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
current.lock.Lock()
current.used = time.Now()
@@ -481,7 +510,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
// If we exhausted the future cache, now's a good time to regenerate it
if future != nil {
- go future.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
+ go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
}
return current.cache
}
@@ -498,7 +527,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
current, future := ethash.datasets[epoch], (*dataset)(nil)
if current == nil {
// No in-memory dataset, evict the oldest if the dataset limit was reached
- for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.dagsinmem {
+ for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.config.DatasetsInMem {
var evict *dataset
for _, dataset := range ethash.datasets {
if evict == nil || evict.used.After(dataset.used) {
@@ -536,7 +565,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
ethash.lock.Unlock()
// Wait for generation finish, bump the timestamp and finalize the cache
- current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
+ current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
current.lock.Lock()
current.used = time.Now()
@@ -544,7 +573,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
// If we exhausted the future dataset, now's a good time to regenerate it
if future != nil {
- go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
+ go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
}
return current.dataset
}
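
Taken together, the Config struct and Mode enum replace both the positional New arguments and the tester/fakeMode/fakeFull booleans. A minimal sketch of the constructors after this change (standalone program, assuming this revision of github.com/ethereum/go-ethereum/consensus/ethash):

package main

import "github.com/ethereum/go-ethereum/consensus/ethash"

func main() {
	// Full engine: options are named fields instead of positional arguments.
	engine := ethash.New(ethash.Config{
		CacheDir:       "ethash",
		CachesInMem:    2,
		CachesOnDisk:   3,
		DatasetsInMem:  1,
		DatasetsOnDisk: 2,
		PowMode:        ethash.ModeNormal,
	})
	_ = engine

	// The dedicated constructors remain and now just pick a PowMode:
	_ = ethash.NewTester()    // ModeTest: verification against a 32KB dataset
	_ = ethash.NewFaker()     // ModeFake: seal checks are skipped
	_ = ethash.NewFullFaker() // ModeFullFake: all consensus rules are skipped
}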
diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go
index 784e8f649..c2447e473 100644
--- a/consensus/ethash/sealer.go
+++ b/consensus/ethash/sealer.go
@@ -34,7 +34,7 @@ import (
// the block's difficulty requirements.
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
// If we're running a fake PoW, simply return a 0 nonce immediately
- if ethash.fakeMode {
+ if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
header := block.Header()
header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
return block.WithSeal(header), nil
diff --git a/console/console_test.go b/console/console_test.go
index a159b62bb..d29680785 100644
--- a/console/console_test.go
+++ b/console/console_test.go
@@ -27,6 +27,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/internal/jsre"
@@ -96,7 +97,9 @@ func newTester(t *testing.T, confOverride func(*eth.Config)) *tester {
ethConf := &eth.Config{
Genesis: core.DeveloperGenesisBlock(15, common.Address{}),
Etherbase: common.HexToAddress(testAddress),
- PowTest: true,
+ Ethash: ethash.Config{
+ PowMode: ethash.ModeTest,
+ },
}
if confOverride != nil {
confOverride(ethConf)
diff --git a/eth/backend.go b/eth/backend.go
index 1cd9e8fff..e7f0f57dd 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -125,7 +125,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
chainConfig: chainConfig,
eventMux: ctx.EventMux,
accountManager: ctx.AccountManager,
- engine: CreateConsensusEngine(ctx, config, chainConfig, chainDb),
+ engine: CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
shutdownChan: make(chan bool),
stopDbUpgrade: stopDbUpgrade,
networkId: config.NetworkId,
@@ -209,25 +209,31 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data
}
// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
-func CreateConsensusEngine(ctx *node.ServiceContext, config *Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
+func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
// If proof-of-authority is requested, set it up
if chainConfig.Clique != nil {
return clique.New(chainConfig.Clique, db)
}
// Otherwise assume proof-of-work
switch {
- case config.PowFake:
+ case config.PowMode == ethash.ModeFake:
log.Warn("Ethash used in fake mode")
return ethash.NewFaker()
- case config.PowTest:
+ case config.PowMode == ethash.ModeTest:
log.Warn("Ethash used in test mode")
return ethash.NewTester()
- case config.PowShared:
+ case config.PowMode == ethash.ModeShared:
log.Warn("Ethash used in shared mode")
return ethash.NewShared()
default:
- engine := ethash.New(ctx.ResolvePath(config.EthashCacheDir), config.EthashCachesInMem, config.EthashCachesOnDisk,
- config.EthashDatasetDir, config.EthashDatasetsInMem, config.EthashDatasetsOnDisk)
+ engine := ethash.New(ethash.Config{
+ CacheDir: ctx.ResolvePath(config.CacheDir),
+ CachesInMem: config.CachesInMem,
+ CachesOnDisk: config.CachesOnDisk,
+ DatasetDir: config.DatasetDir,
+ DatasetsInMem: config.DatasetsInMem,
+ DatasetsOnDisk: config.DatasetsOnDisk,
+ })
engine.SetThreads(-1) // Disable CPU mining
return engine
}
diff --git a/eth/config.go b/eth/config.go
index 7bcfd403e..383cd6783 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -33,16 +34,18 @@ import (
// DefaultConfig contains default settings for use on the Ethereum main net.
var DefaultConfig = Config{
- SyncMode: downloader.FastSync,
- EthashCacheDir: "ethash",
- EthashCachesInMem: 2,
- EthashCachesOnDisk: 3,
- EthashDatasetsInMem: 1,
- EthashDatasetsOnDisk: 2,
- NetworkId: 1,
- LightPeers: 20,
- DatabaseCache: 128,
- GasPrice: big.NewInt(18 * params.Shannon),
+ SyncMode: downloader.FastSync,
+ Ethash: ethash.Config{
+ CacheDir: "ethash",
+ CachesInMem: 2,
+ CachesOnDisk: 3,
+ DatasetsInMem: 1,
+ DatasetsOnDisk: 2,
+ },
+ NetworkId: 1,
+ LightPeers: 20,
+ DatabaseCache: 128,
+ GasPrice: big.NewInt(18 * params.Shannon),
TxPool: core.DefaultTxPoolConfig,
GPO: gasprice.Config{
@@ -59,9 +62,9 @@ func init() {
}
}
if runtime.GOOS == "windows" {
- DefaultConfig.EthashDatasetDir = filepath.Join(home, "AppData", "Ethash")
+ DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "AppData", "Ethash")
} else {
- DefaultConfig.EthashDatasetDir = filepath.Join(home, ".ethash")
+ DefaultConfig.Ethash.DatasetDir = filepath.Join(home, ".ethash")
}
}
@@ -92,12 +95,7 @@ type Config struct {
GasPrice *big.Int
// Ethash options
- EthashCacheDir string
- EthashCachesInMem int
- EthashCachesOnDisk int
- EthashDatasetDir string
- EthashDatasetsInMem int
- EthashDatasetsOnDisk int
+ Ethash ethash.Config
// Transaction pool options
TxPool core.TxPoolConfig
@@ -109,10 +107,7 @@ type Config struct {
EnablePreimageRecording bool
// Miscellaneous options
- DocRoot string `toml:"-"`
- PowFake bool `toml:"-"`
- PowTest bool `toml:"-"`
- PowShared bool `toml:"-"`
+ DocRoot string `toml:"-"`
}
type configMarshaling struct {
diff --git a/eth/gen_config.go b/eth/gen_config.go
index 4a4cd7b9c..e2d50e1f6 100644
--- a/eth/gen_config.go
+++ b/eth/gen_config.go
@@ -7,6 +7,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -36,10 +37,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
TxPool core.TxPoolConfig
GPO gasprice.Config
EnablePreimageRecording bool
- DocRoot string `toml:"-"`
- PowFake bool `toml:"-"`
- PowTest bool `toml:"-"`
- PowShared bool `toml:"-"`
+ DocRoot string `toml:"-"`
+ PowMode ethash.Mode `toml:"-"`
}
var enc Config
enc.Genesis = c.Genesis
@@ -54,19 +53,17 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.MinerThreads = c.MinerThreads
enc.ExtraData = c.ExtraData
enc.GasPrice = c.GasPrice
- enc.EthashCacheDir = c.EthashCacheDir
- enc.EthashCachesInMem = c.EthashCachesInMem
- enc.EthashCachesOnDisk = c.EthashCachesOnDisk
- enc.EthashDatasetDir = c.EthashDatasetDir
- enc.EthashDatasetsInMem = c.EthashDatasetsInMem
- enc.EthashDatasetsOnDisk = c.EthashDatasetsOnDisk
+ enc.EthashCacheDir = c.Ethash.CacheDir
+ enc.EthashCachesInMem = c.Ethash.CachesInMem
+ enc.EthashCachesOnDisk = c.Ethash.CachesOnDisk
+ enc.EthashDatasetDir = c.Ethash.DatasetDir
+ enc.EthashDatasetsInMem = c.Ethash.DatasetsInMem
+ enc.EthashDatasetsOnDisk = c.Ethash.DatasetsOnDisk
enc.TxPool = c.TxPool
enc.GPO = c.GPO
enc.EnablePreimageRecording = c.EnablePreimageRecording
enc.DocRoot = c.DocRoot
- enc.PowFake = c.PowFake
- enc.PowTest = c.PowTest
- enc.PowShared = c.PowShared
+ enc.PowMode = c.Ethash.PowMode
return &enc, nil
}
@@ -94,10 +91,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
TxPool *core.TxPoolConfig
GPO *gasprice.Config
EnablePreimageRecording *bool
- DocRoot *string `toml:"-"`
- PowFake *bool `toml:"-"`
- PowTest *bool `toml:"-"`
- PowShared *bool `toml:"-"`
+ DocRoot *string `toml:"-"`
+ PowMode *ethash.Mode `toml:"-"`
}
var dec Config
if err := unmarshal(&dec); err != nil {
@@ -140,22 +135,22 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
c.GasPrice = dec.GasPrice
}
if dec.EthashCacheDir != nil {
- c.EthashCacheDir = *dec.EthashCacheDir
+ c.Ethash.CacheDir = *dec.EthashCacheDir
}
if dec.EthashCachesInMem != nil {
- c.EthashCachesInMem = *dec.EthashCachesInMem
+ c.Ethash.CachesInMem = *dec.EthashCachesInMem
}
if dec.EthashCachesOnDisk != nil {
- c.EthashCachesOnDisk = *dec.EthashCachesOnDisk
+ c.Ethash.CachesOnDisk = *dec.EthashCachesOnDisk
}
if dec.EthashDatasetDir != nil {
- c.EthashDatasetDir = *dec.EthashDatasetDir
+ c.Ethash.DatasetDir = *dec.EthashDatasetDir
}
if dec.EthashDatasetsInMem != nil {
- c.EthashDatasetsInMem = *dec.EthashDatasetsInMem
+ c.Ethash.DatasetsInMem = *dec.EthashDatasetsInMem
}
if dec.EthashDatasetsOnDisk != nil {
- c.EthashDatasetsOnDisk = *dec.EthashDatasetsOnDisk
+ c.Ethash.DatasetsOnDisk = *dec.EthashDatasetsOnDisk
}
if dec.TxPool != nil {
c.TxPool = *dec.TxPool
@@ -169,14 +164,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.DocRoot != nil {
c.DocRoot = *dec.DocRoot
}
- if dec.PowFake != nil {
- c.PowFake = *dec.PowFake
- }
- if dec.PowTest != nil {
- c.PowTest = *dec.PowTest
- }
- if dec.PowShared != nil {
- c.PowShared = *dec.PowShared
+ if dec.PowMode != nil {
+ c.Ethash.PowMode = *dec.PowMode
}
return nil
}
diff --git a/internal/ethapi/tracer.go b/internal/ethapi/tracer.go
index fc742e6c4..71cafc6e9 100644
--- a/internal/ethapi/tracer.go
+++ b/internal/ethapi/tracer.go
@@ -200,19 +200,18 @@ func (c *contractWrapper) toValue(vm *otto.Otto) otto.Value {
// JavascriptTracer provides an implementation of Tracer that evaluates a
// Javascript function for each VM execution step.
type JavascriptTracer struct {
- vm *otto.Otto // Javascript VM instance
- traceobj *otto.Object // User-supplied object to call
- log map[string]interface{} // (Reusable) map for the `log` arg to `step`
- logvalue otto.Value // JS view of `log`
- memory *memoryWrapper // Wrapper around the VM memory
- memvalue otto.Value // JS view of `memory`
- stack *stackWrapper // Wrapper around the VM stack
- stackvalue otto.Value // JS view of `stack`
- db *dbWrapper // Wrapper around the VM environment
- dbvalue otto.Value // JS view of `db`
- contract *contractWrapper // Wrapper around the contract object
- contractvalue otto.Value // JS view of `contract`
- err error // Error, if one has occurred
+ vm *otto.Otto // Javascript VM instance
+ traceobj *otto.Object // User-supplied object to call
+ op *opCodeWrapper // Wrapper around the VM opcode
+ log map[string]interface{} // (Reusable) map for the `log` arg to `step`
+ logvalue otto.Value // JS view of `log`
+ memory *memoryWrapper // Wrapper around the VM memory
+ stack *stackWrapper // Wrapper around the VM stack
+ db *dbWrapper // Wrapper around the VM environment
+ dbvalue otto.Value // JS view of `db`
+ contract *contractWrapper // Wrapper around the contract object
+ err error // Error, if one has occurred
+ result interface{} // Final result to return to the user
}
// NewJavascriptTracer instantiates a new JavascriptTracer instance.
@@ -230,7 +229,6 @@ func NewJavascriptTracer(code string) (*JavascriptTracer, error) {
if err != nil {
return nil, err
}
-
// Check the required functions exist
step, err := jstracer.Get("step")
if err != nil {
@@ -247,31 +245,34 @@ func NewJavascriptTracer(code string) (*JavascriptTracer, error) {
if !result.IsFunction() {
return nil, fmt.Errorf("Trace object must expose a function result()")
}
-
// Create the persistent log object
- log := make(map[string]interface{})
+ var (
+ op = new(opCodeWrapper)
+ mem = new(memoryWrapper)
+ stack = new(stackWrapper)
+ db = new(dbWrapper)
+ contract = new(contractWrapper)
+ )
+ log := map[string]interface{}{
+ "op": op.toValue(vm),
+ "memory": mem.toValue(vm),
+ "stack": stack.toValue(vm),
+ "contract": contract.toValue(vm),
+ }
logvalue, _ := vm.ToValue(log)
- // Create persistent wrappers for memory and stack
- mem := &memoryWrapper{}
- stack := &stackWrapper{}
- db := &dbWrapper{}
- contract := &contractWrapper{}
-
return &JavascriptTracer{
- vm: vm,
- traceobj: jstracer,
- log: log,
- logvalue: logvalue,
- memory: mem,
- memvalue: mem.toValue(vm),
- stack: stack,
- stackvalue: stack.toValue(vm),
- db: db,
- dbvalue: db.toValue(vm),
- contract: contract,
- contractvalue: contract.toValue(vm),
- err: nil,
+ vm: vm,
+ traceobj: jstracer,
+ op: op,
+ log: log,
+ logvalue: logvalue,
+ memory: mem,
+ stack: stack,
+ db: db,
+ dbvalue: db.toValue(vm),
+ contract: contract,
+ err: nil,
}, nil
}
@@ -319,24 +320,22 @@ func wrapError(context string, err error) error {
// CaptureState implements the Tracer interface to trace a single step of VM execution
func (jst *JavascriptTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
if jst.err == nil {
+ jst.op.op = op
jst.memory.memory = memory
jst.stack.stack = stack
jst.db.db = env.StateDB
jst.contract.contract = contract
- ocw := &opCodeWrapper{op}
-
jst.log["pc"] = pc
- jst.log["op"] = ocw.toValue(jst.vm)
jst.log["gas"] = gas
- jst.log["gasPrice"] = cost
- jst.log["memory"] = jst.memvalue
- jst.log["stack"] = jst.stackvalue
- jst.log["contract"] = jst.contractvalue
+ jst.log["cost"] = cost
jst.log["depth"] = depth
jst.log["account"] = contract.Address()
- jst.log["err"] = err
+ delete(jst.log, "error")
+ if err != nil {
+ jst.log["error"] = err
+ }
_, err := jst.callSafely("step", jst.logvalue, jst.dbvalue)
if err != nil {
jst.err = wrapError("step", err)
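
For tracer authors, the visible change is in the per-step log object: the op, memory, stack and contract wrappers are now persistent entries, the per-step cost moved from log.gasPrice to log.cost, and log.error is only present when a fault actually occurred. A minimal sketch of a tracer body exercising those fields (the wrapper has to live inside the go-ethereum tree since internal/ethapi is not importable from outside; names are illustrative):

package ethapi // hypothetical placement inside internal/ethapi for illustration

import "fmt"

func exampleTracer() {
	code := `{
		steps: 0,
		gasUsed: 0,
		step: function(log, db) {
			this.steps++;
			this.gasUsed += log.cost;        // formerly log.gasPrice
			if (log.error !== undefined) {}  // only set when a fault occurred
		},
		result: function() { return {steps: this.steps, gasUsed: this.gasUsed}; }
	}`
	tracer, err := NewJavascriptTracer(code)
	if err != nil {
		fmt.Println("tracer setup failed:", err)
		return
	}
	_ = tracer
}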
diff --git a/les/backend.go b/les/backend.go
index 333df920e..7180b81d7 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -98,7 +98,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
peers: peers,
reqDist: newRequestDistributor(peers, quitSync),
accountManager: ctx.AccountManager,
- engine: eth.CreateConsensusEngine(ctx, config, chainConfig, chainDb),
+ engine: eth.CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
shutdownChan: make(chan bool),
networkId: config.NetworkId,
bloomRequests: make(chan chan *bloombits.Retrieval),
diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go
index 32b5f80a7..685a300fc 100644
--- a/swarm/api/manifest.go
+++ b/swarm/api/manifest.go
@@ -436,6 +436,16 @@ func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *man
if len(path) <= epl {
if entry.Path[:len(path)] == path {
if entry.ContentType == ManifestType {
+ err := self.loadSubTrie(entry, quitC)
+ if err == nil && entry.subtrie != nil {
+ subentries := entry.subtrie.entries
+ for i := 0; i < len(subentries); i++ {
+ sub := subentries[i]
+ if sub != nil && sub.Path == "" {
+ return sub, len(path)
+ }
+ }
+ }
entry.Status = http.StatusMultipleChoices
}
pos = len(path)
diff --git a/swarm/api/manifest_test.go b/swarm/api/manifest_test.go
index f048627c5..7098ca16f 100644
--- a/swarm/api/manifest_test.go
+++ b/swarm/api/manifest_test.go
@@ -21,6 +21,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "net/http"
"strings"
"testing"
@@ -39,17 +40,17 @@ func manifest(paths ...string) (manifestReader storage.LazySectionReader) {
}
}
-func testGetEntry(t *testing.T, path, match string, paths ...string) *manifestTrie {
+func testGetEntry(t *testing.T, path, match string, multiple bool, paths ...string) *manifestTrie {
quitC := make(chan bool)
trie, err := readManifest(manifest(paths...), nil, nil, quitC)
if err != nil {
t.Errorf("unexpected error making manifest: %v", err)
}
- checkEntry(t, path, match, trie)
+ checkEntry(t, path, match, multiple, trie)
return trie
}
-func checkEntry(t *testing.T, path, match string, trie *manifestTrie) {
+func checkEntry(t *testing.T, path, match string, multiple bool, trie *manifestTrie) {
entry, fullpath := trie.getEntry(path)
if match == "-" && entry != nil {
t.Errorf("expected no match for '%s', got '%s'", path, fullpath)
@@ -60,32 +61,55 @@ func checkEntry(t *testing.T, path, match string, trie *manifestTrie) {
} else if fullpath != match {
t.Errorf("incorrect entry retrieved for '%s'. expected path '%v', got '%s'", path, match, fullpath)
}
+
+ if multiple && entry.Status != http.StatusMultipleChoices {
+ t.Errorf("Expected %d Multiple Choices Status for path %s, match %s, got %d", http.StatusMultipleChoices, path, match, entry.Status)
+ } else if !multiple && entry != nil && entry.Status == http.StatusMultipleChoices {
+ t.Errorf("Were not expecting %d Multiple Choices Status for path %s, match %s, but got it", http.StatusMultipleChoices, path, match)
+ }
}
func TestGetEntry(t *testing.T) {
// file system manifest always contains regularized paths
- testGetEntry(t, "a", "a", "a")
- testGetEntry(t, "b", "-", "a")
- testGetEntry(t, "/a//", "a", "a")
+ testGetEntry(t, "a", "a", false, "a")
+ testGetEntry(t, "b", "-", false, "a")
+ testGetEntry(t, "/a//", "a", false, "a")
// fallback
- testGetEntry(t, "/a", "", "")
- testGetEntry(t, "/a/b", "a/b", "a/b")
+ testGetEntry(t, "/a", "", false, "")
+ testGetEntry(t, "/a/b", "a/b", false, "a/b")
// longest/deepest match
- testGetEntry(t, "read", "read", "readme.md", "readit.md")
- testGetEntry(t, "rf", "-", "readme.md", "readit.md")
- testGetEntry(t, "readme", "readme", "readme.md")
- testGetEntry(t, "readme", "-", "readit.md")
- testGetEntry(t, "readme.md", "readme.md", "readme.md")
- testGetEntry(t, "readme.md", "-", "readit.md")
- testGetEntry(t, "readmeAmd", "-", "readit.md")
- testGetEntry(t, "readme.mdffff", "-", "readme.md")
- testGetEntry(t, "ab", "ab", "ab/cefg", "ab/cedh", "ab/kkkkkk")
- testGetEntry(t, "ab/ce", "ab/ce", "ab/cefg", "ab/cedh", "ab/ceuuuuuuuuuu")
- testGetEntry(t, "abc", "abc", "abcd", "abczzzzef", "abc/def", "abc/e/g")
- testGetEntry(t, "a/b", "a/b", "a", "a/bc", "a/ba", "a/b/c")
- testGetEntry(t, "a/b", "a/b", "a", "a/b", "a/bb", "a/b/c")
- testGetEntry(t, "//a//b//", "a/b", "a", "a/b", "a/bb", "a/b/c")
+ testGetEntry(t, "read", "read", true, "readme.md", "readit.md")
+ testGetEntry(t, "rf", "-", false, "readme.md", "readit.md")
+ testGetEntry(t, "readme", "readme", false, "readme.md")
+ testGetEntry(t, "readme", "-", false, "readit.md")
+ testGetEntry(t, "readme.md", "readme.md", false, "readme.md")
+ testGetEntry(t, "readme.md", "-", false, "readit.md")
+ testGetEntry(t, "readmeAmd", "-", false, "readit.md")
+ testGetEntry(t, "readme.mdffff", "-", false, "readme.md")
+ testGetEntry(t, "ab", "ab", true, "ab/cefg", "ab/cedh", "ab/kkkkkk")
+ testGetEntry(t, "ab/ce", "ab/ce", true, "ab/cefg", "ab/cedh", "ab/ceuuuuuuuuuu")
+ testGetEntry(t, "abc", "abc", true, "abcd", "abczzzzef", "abc/def", "abc/e/g")
+ testGetEntry(t, "a/b", "a/b", true, "a", "a/bc", "a/ba", "a/b/c")
+ testGetEntry(t, "a/b", "a/b", false, "a", "a/b", "a/bb", "a/b/c")
+ testGetEntry(t, "//a//b//", "a/b", false, "a", "a/b", "a/bb", "a/b/c")
}
+
+func TestExactMatch(t *testing.T) {
+ quitC := make(chan bool)
+ mf := manifest("shouldBeExactMatch.css", "shouldBeExactMatch.css.map")
+ trie, err := readManifest(mf, nil, nil, quitC)
+ if err != nil {
+ t.Errorf("unexpected error making manifest: %v", err)
+ }
+ entry, _ := trie.getEntry("shouldBeExactMatch.css")
+ if entry.Path != "" {
+ t.Errorf("Expected entry to match %s, got: %s", "shouldBeExactMatch.css", entry.Path)
+ }
+ if entry.Status == http.StatusMultipleChoices {
+ t.Errorf("Got status %d, which is unexepcted", http.StatusMultipleChoices)
+ }
+}
+
func TestDeleteEntry(t *testing.T) {
}
@@ -108,15 +132,15 @@ func TestAddFileWithManifestPath(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- checkEntry(t, "ab", "ab", trie)
- checkEntry(t, "ac", "ac", trie)
+ checkEntry(t, "ab", "ab", false, trie)
+ checkEntry(t, "ac", "ac", false, trie)
// now add path "a" and check we can still get "ab" and "ac"
entry := &manifestTrieEntry{}
entry.Path = "a"
entry.Hash = "a"
trie.addEntry(entry, nil)
- checkEntry(t, "ab", "ab", trie)
- checkEntry(t, "ac", "ac", trie)
- checkEntry(t, "a", "a", trie)
+ checkEntry(t, "ab", "ab", false, trie)
+ checkEntry(t, "ac", "ac", false, trie)
+ checkEntry(t, "a", "a", false, trie)
}