 cmd/geth/chaincmd.go           |  69
 cmd/geth/main.go               |   1
 cmd/puppeth/wizard.go          |  13
 cmd/puppeth/wizard_ethstats.go |  31
 cmd/utils/flags.go             |  21
 consensus/ethash/algorithm.go  |   6
 eth/downloader/fakepeer.go     | 160
 miner/remote_agent.go          |   2
 8 files changed, 280 insertions(+), 23 deletions(-)
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 12bc1d7c6..4a9a7b11b 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -31,7 +31,9 @@ import (
     "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/eth/downloader"
     "github.com/ethereum/go-ethereum/ethdb"
+    "github.com/ethereum/go-ethereum/event"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/trie"
     "github.com/syndtr/goleveldb/leveldb/util"
@@ -71,7 +73,7 @@ It expects the genesis file as argument.`,
 The import command imports blocks from an RLP-encoded form. The form can be one file
 with several RLP-encoded blocks, or several files can be used.
 
-If only one file is used, import error will result in failure. If several files are used, 
+If only one file is used, import error will result in failure. If several files are used,
 processing will proceed even if an individual RLP-file import failure occurs.`,
     }
     exportCommand = cli.Command{
@@ -91,6 +93,23 @@ Optional second and third arguments control the first and
 last block to write. In this mode, the file will be appended
 if already existing.`,
     }
+    copydbCommand = cli.Command{
+        Action: utils.MigrateFlags(copyDb),
+        Name: "copydb",
+        Usage: "Create a local chain from a target chaindata folder",
+        ArgsUsage: "<sourceChaindataDir>",
+        Flags: []cli.Flag{
+            utils.DataDirFlag,
+            utils.CacheFlag,
+            utils.SyncModeFlag,
+            utils.FakePoWFlag,
+            utils.TestnetFlag,
+            utils.RinkebyFlag,
+        },
+        Category: "BLOCKCHAIN COMMANDS",
+        Description: `
+The first argument must be the directory containing the blockchain to download from`,
+    }
     removedbCommand = cli.Command{
         Action: utils.MigrateFlags(removeDB),
         Name: "removedb",
@@ -268,6 +287,54 @@ func exportChain(ctx *cli.Context) error {
     return nil
 }
 
+func copyDb(ctx *cli.Context) error {
+    // Ensure we have a source chain directory to copy
+    if len(ctx.Args()) != 1 {
+        utils.Fatalf("Source chaindata directory path argument missing")
+    }
+    // Initialize a new chain for the running node to sync into
+    stack := makeFullNode(ctx)
+    chain, chainDb := utils.MakeChain(ctx, stack)
+
+    syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
+    dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
+
+    // Create a source peer to satisfy downloader requests from
+    db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
+    if err != nil {
+        return err
+    }
+    hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
+    if err != nil {
+        return err
+    }
+    peer := downloader.NewFakePeer("local", db, hc, dl)
+    if err = dl.RegisterPeer("local", 63, peer); err != nil {
+        return err
+    }
+    // Synchronise with the simulated peer
+    start := time.Now()
+
+    currentHeader := hc.CurrentHeader()
+    if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
+        return err
+    }
+    for dl.Synchronising() {
+        time.Sleep(10 * time.Millisecond)
+    }
+    fmt.Printf("Database copy done in %v\n", time.Since(start))
+
+    // Compact the entire database to remove any sync overhead
+    start = time.Now()
+    fmt.Println("Compacting entire database...")
+    if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
+        utils.Fatalf("Compaction failed: %v", err)
+    }
+    fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
+
+    return nil
+}
+
 func removeDB(ctx *cli.Context) error {
     stack, _ := makeConfigNode(ctx)
 
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 8166c9ce8..88f3528f3 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -146,6 +146,7 @@ func init() {
         initCommand,
         importCommand,
         exportCommand,
+        copydbCommand,
         removedbCommand,
         dumpCommand,
         // See monitorcmd.go:
diff --git a/cmd/puppeth/wizard.go b/cmd/puppeth/wizard.go
index 3fdd639f8..5fbc11cb9 100644
--- a/cmd/puppeth/wizard.go
+++ b/cmd/puppeth/wizard.go
@@ -302,8 +302,10 @@ func (w *wizard) readJSON() string {
 }
 
 // readIPAddress reads a single line from stdin, trimming if from spaces and
-// converts it to a network IP address.
-func (w *wizard) readIPAddress() net.IP {
+// returning it if it's convertible to an IP address. The reason for keeping
+// the user input format instead of returning a Go net.IP is to match with
+// weird formats used by ethstats, which compares IPs textually, not by value.
+func (w *wizard) readIPAddress() string {
     for {
         // Read the IP address from the user
         fmt.Printf("> ")
@@ -312,14 +314,13 @@ func (w *wizard) readIPAddress() net.IP {
             log.Crit("Failed to read user input", "err", err)
         }
         if text = strings.TrimSpace(text); text == "" {
-            return nil
+            return ""
         }
         // Make sure it looks ok and return it if so
-        ip := net.ParseIP(text)
-        if ip == nil {
+        if ip := net.ParseIP(text); ip == nil {
             log.Error("Invalid IP address, please retry")
             continue
         }
-        return ip
+        return text
     }
 }
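The rewritten readIPAddress above follows the reasoning in its doc comment: net.ParseIP canonicalizes whatever the user typed, so round-tripping the input through a net.IP can yield a different spelling than the one ethstats later compares as plain text. A minimal standalone sketch of that effect (not part of the patch; the sample input is invented):

package main

import (
    "fmt"
    "net"
)

func main() {
    raw := "0:0:0:0:0:0:0:1" // hypothetical user input for the IPv6 loopback
    if ip := net.ParseIP(raw); ip != nil {
        fmt.Println(ip.String()) // prints the canonical "::1", no longer the text the user typed
    }
}

This is why the patched helper only uses net.ParseIP as a validity check and hands the untouched string back to the caller.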
diff --git a/cmd/puppeth/wizard_ethstats.go b/cmd/puppeth/wizard_ethstats.go
index 504d8fd9c..8bfa1d6e5 100644
--- a/cmd/puppeth/wizard_ethstats.go
+++ b/cmd/puppeth/wizard_ethstats.go
@@ -18,6 +18,7 @@ package main
 
 import (
     "fmt"
+    "sort"
 
     "github.com/ethereum/go-ethereum/log"
 )
@@ -64,17 +65,37 @@ func (w *wizard) deployEthstats() {
     fmt.Println()
     fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
     if w.readDefaultString("y") != "y" {
-        infos.banned = nil
-
+        // The user might want to clear the entire list, although generally probably not
+        fmt.Println()
+        fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
+        if w.readDefaultString("n") != "n" {
+            infos.banned = nil
+        }
+        // Offer the user to explicitly add/remove certain IP addresses
+        fmt.Println()
+        fmt.Println("Which additional IP addresses should be blacklisted?")
+        for {
+            if ip := w.readIPAddress(); ip != "" {
+                infos.banned = append(infos.banned, ip)
+                continue
+            }
+            break
+        }
         fmt.Println()
-        fmt.Println("Which IP addresses should be blacklisted?")
+        fmt.Println("Which IP addresses should not be blacklisted?")
         for {
-            if ip := w.readIPAddress(); ip != nil {
-                infos.banned = append(infos.banned, ip.String())
+            if ip := w.readIPAddress(); ip != "" {
+                for i, addr := range infos.banned {
+                    if ip == addr {
+                        infos.banned = append(infos.banned[:i], infos.banned[i+1:]...)
+                        break
+                    }
+                }
                 continue
             }
             break
         }
+        sort.Strings(infos.banned)
     }
     // Try to deploy the ethstats server on the host
     trusted := make([]string, 0, len(w.servers))
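The removal loop above relies on Go's standard delete-element-at-index idiom, append(s[:i], s[i+1:]...), which shifts the tail left in place; the break right after it matters because indices past i go stale once the slice shrinks. A tiny standalone illustration with invented data (not part of the patch):

package main

import "fmt"

func main() {
    banned := []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"} // hypothetical blacklist

    remove := "10.0.0.2"
    for i, addr := range banned {
        if addr == remove {
            banned = append(banned[:i], banned[i+1:]...) // shift the tail over index i
            break                                        // stop: later indices are now stale
        }
    }
    fmt.Println(banned) // [10.0.0.1 10.0.0.3]
}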
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index a62b4940b..bfef619f6 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -31,6 +31,8 @@ import (
     "github.com/ethereum/go-ethereum/accounts"
     "github.com/ethereum/go-ethereum/accounts/keystore"
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/consensus"
+    "github.com/ethereum/go-ethereum/consensus/clique"
     "github.com/ethereum/go-ethereum/consensus/ethash"
     "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/state"
@@ -1086,17 +1088,22 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
     var err error
     chainDb = MakeChainDatabase(ctx, stack)
 
-    engine := ethash.NewFaker()
-    if !ctx.GlobalBool(FakePoWFlag.Name) {
-        engine = ethash.New(
-            stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
-            stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
-        )
-    }
     config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
     if err != nil {
         Fatalf("%v", err)
     }
+    var engine consensus.Engine
+    if config.Clique != nil {
+        engine = clique.New(config.Clique, chainDb)
+    } else {
+        engine = ethash.NewFaker()
+        if !ctx.GlobalBool(FakePoWFlag.Name) {
+            engine = ethash.New(
+                stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
+                stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
+            )
+        }
+    }
     vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
     chain, err = core.NewBlockChain(chainDb, config, engine, vmcfg)
     if err != nil {
diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go
index a737bc636..76f19252f 100644
--- a/consensus/ethash/algorithm.go
+++ b/consensus/ethash/algorithm.go
@@ -102,7 +102,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
     header.Cap *= 4
     cache := *(*[]byte)(unsafe.Pointer(&header))
 
-    // Calculate the number of thoretical rows (we'll store in one buffer nonetheless)
+    // Calculate the number of theoretical rows (we'll store in one buffer nonetheless)
     size := uint64(len(cache))
     rows := int(size) / hashBytes
 
@@ -187,7 +187,7 @@ func fnvHash(mix []uint32, data []uint32) {
 // generateDatasetItem combines data from 256 pseudorandomly selected cache nodes,
 // and hashes that to compute a single dataset node.
 func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte {
-    // Calculate the number of thoretical rows (we use one buffer nonetheless)
+    // Calculate the number of theoretical rows (we use one buffer nonetheless)
     rows := uint32(len(cache) / hashWords)
 
     // Initialize the mix
@@ -287,7 +287,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 // hashimoto aggregates data from the full dataset in order to produce our final
 // value for a particular header hash and nonce.
 func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32) []uint32) ([]byte, []byte) {
-    // Calculate the number of thoretical rows (we use one buffer nonetheless)
+    // Calculate the number of theoretical rows (we use one buffer nonetheless)
     rows := uint32(size / mixBytes)
 
     // Combine header+nonce into a 64 byte seed
diff --git a/eth/downloader/fakepeer.go b/eth/downloader/fakepeer.go
new file mode 100644
index 000000000..ebdb9c334
--- /dev/null
+++ b/eth/downloader/fakepeer.go
@@ -0,0 +1,160 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+    "math/big"
+
+    "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/core"
+    "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// FakePeer is a mock downloader peer that operates on a local database instance
+// instead of being an actual live node. It's useful for testing and to implement
+// sync commands from an existing local database.
+type FakePeer struct {
+    id string
+    db ethdb.Database
+    hc *core.HeaderChain
+    dl *Downloader
+}
+
+// NewFakePeer creates a new mock downloader peer with the given data sources.
+func NewFakePeer(id string, db ethdb.Database, hc *core.HeaderChain, dl *Downloader) *FakePeer {
+    return &FakePeer{id: id, db: db, hc: hc, dl: dl}
+}
+
+// Head implements downloader.Peer, returning the current head hash and number
+// of the best known header.
+func (p *FakePeer) Head() (common.Hash, *big.Int) {
+    header := p.hc.CurrentHeader()
+    return header.Hash(), header.Number
+}
+
+// RequestHeadersByHash implements downloader.Peer, returning a batch of headers
+// defined by the origin hash and the associated query parameters.
+func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
+    var (
+        headers []*types.Header
+        unknown bool
+    )
+    for !unknown && len(headers) < amount {
+        origin := p.hc.GetHeaderByHash(hash)
+        if origin == nil {
+            break
+        }
+        number := origin.Number.Uint64()
+        headers = append(headers, origin)
+        if reverse {
+            for i := 0; i < int(skip)+1; i++ {
+                if header := p.hc.GetHeader(hash, number); header != nil {
+                    hash = header.ParentHash
+                    number--
+                } else {
+                    unknown = true
+                    break
+                }
+            }
+        } else {
+            var (
+                current = origin.Number.Uint64()
+                next = current + uint64(skip) + 1
+            )
+            if header := p.hc.GetHeaderByNumber(next); header != nil {
+                if p.hc.GetBlockHashesFromHash(header.Hash(), uint64(skip+1))[skip] == hash {
+                    hash = header.Hash()
+                } else {
+                    unknown = true
+                }
+            } else {
+                unknown = true
+            }
+        }
+    }
+    p.dl.DeliverHeaders(p.id, headers)
+    return nil
+}
+
+// RequestHeadersByNumber implements downloader.Peer, returning a batch of headers
+// defined by the origin number and the associated query parameters.
+func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error {
+    var (
+        headers []*types.Header
+        unknown bool
+    )
+    for !unknown && len(headers) < amount {
+        origin := p.hc.GetHeaderByNumber(number)
+        if origin == nil {
+            break
+        }
+        if reverse {
+            if number >= uint64(skip+1) {
+                number -= uint64(skip + 1)
+            } else {
+                unknown = true
+            }
+        } else {
+            number += uint64(skip + 1)
+        }
+        headers = append(headers, origin)
+    }
+    p.dl.DeliverHeaders(p.id, headers)
+    return nil
+}
+
+// RequestBodies implements downloader.Peer, returning a batch of block bodies
+// corresponding to the specified block hashes.
+func (p *FakePeer) RequestBodies(hashes []common.Hash) error {
+    var (
+        txs [][]*types.Transaction
+        uncles [][]*types.Header
+    )
+    for _, hash := range hashes {
+        block := core.GetBlock(p.db, hash, p.hc.GetBlockNumber(hash))
+
+        txs = append(txs, block.Transactions())
+        uncles = append(uncles, block.Uncles())
+    }
+    p.dl.DeliverBodies(p.id, txs, uncles)
+    return nil
+}
+
+// RequestReceipts implements downloader.Peer, returning a batch of transaction
+// receipts corresponding to the specified block hashes.
+func (p *FakePeer) RequestReceipts(hashes []common.Hash) error {
+    var receipts [][]*types.Receipt
+    for _, hash := range hashes {
+        receipts = append(receipts, core.GetBlockReceipts(p.db, hash, p.hc.GetBlockNumber(hash)))
+    }
+    p.dl.DeliverReceipts(p.id, receipts)
+    return nil
+}
+
+// RequestNodeData implements downloader.Peer, returning a batch of state trie
+// nodes corresponding to the specified trie hashes.
+func (p *FakePeer) RequestNodeData(hashes []common.Hash) error {
+    var data [][]byte
+    for _, hash := range hashes {
+        if entry, err := p.db.Get(hash.Bytes()); err == nil {
+            data = append(data, entry)
+        }
+    }
+    p.dl.DeliverNodeData(p.id, data)
+    return nil
+}
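The two header-request methods above mirror the eth protocol's GetBlockHeaders query: starting from an origin, return up to amount headers spaced skip blocks apart, walking towards the genesis when reverse is set. A small standalone sketch of just the number arithmetic that RequestHeadersByNumber performs (no database involved; the sample values are arbitrary):

package main

import "fmt"

// headerNumbers mimics the traversal in FakePeer.RequestHeadersByNumber:
// each returned header is skip+1 blocks away from the previous one.
func headerNumbers(origin uint64, amount, skip int, reverse bool) []uint64 {
    var numbers []uint64
    for len(numbers) < amount {
        numbers = append(numbers, origin)
        if reverse {
            if origin < uint64(skip+1) {
                break // would underflow: stop the walk, like the unknown flag above
            }
            origin -= uint64(skip + 1)
        } else {
            origin += uint64(skip + 1)
        }
    }
    return numbers
}

func main() {
    fmt.Println(headerNumbers(100, 4, 0, false)) // [100 101 102 103] - contiguous batch
    fmt.Println(headerNumbers(100, 4, 1, false)) // [100 102 104 106] - every other header
    fmt.Println(headerNumbers(100, 3, 9, true))  // [100 90 80] - spaced walk towards genesis
}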
diff --git a/miner/remote_agent.go b/miner/remote_agent.go
index aac7ce865..287e7530c 100644
--- a/miner/remote_agent.go
+++ b/miner/remote_agent.go
@@ -163,7 +163,7 @@ func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.
 }
 
 // loop monitors mining events on the work and quit channels, updating the internal
-// state of the rmeote miner until a termination is requested.
+// state of the remote miner until a termination is requested.
 //
 // Note, the reason the work and quit channels are passed as parameters is because
 // RemoteAgent.Start() constantly recreates these channels, so the loop code cannot