author | Felix Lange <fjl@users.noreply.github.com> | 2017-04-12 22:27:23 +0800
---|---|---
committer | Péter Szilágyi <peterke@gmail.com> | 2017-04-12 22:27:23 +0800
commit | 30d706c35e16305e2e3ec0eb6a6bdd6aba50d9d2 (patch) |
tree | 3ec076154049f1fa71b19fd9b7762085059ff15b |
parent | b57680b0b21036460c689aab1e82d89297738d50 (diff) |
cmd/geth: add --config file flag (#13875)
* p2p/discover, p2p/discv5: add marshaling methods to Node
* p2p/netutil: make Netlist decodable from TOML
* common/math: encode nil HexOrDecimal256 as 0x0
* cmd/geth: add --config file flag
* cmd/geth: add missing license header
* eth: prettify Config again, fix tests
* eth: use gasprice.Config instead of duplicating its fields
* eth/gasprice: hide nil default from dumpconfig output
* cmd/geth: hide genesis block in dumpconfig output
* node: make tests compile
* console: fix tests
* cmd/geth: make TOML keys look exactly like Go struct fields
* p2p: use discovery by default
  This makes the zero Config slightly more useful. It also fixes package
  node tests because Node detects reuse of the datadir through the
  NodeDatabase.
* cmd/geth: make ethstats URL settable through config file
* cmd/faucet: fix configuration
* cmd/geth: dedup attach tests
* eth: add comment for DefaultConfig
* eth: pass downloader.SyncMode in Config
  This removes the FastSync, LightSync flags in favour of a more
  general SyncMode flag.
* cmd/utils: remove jitvm flags
* cmd/utils: make mutually exclusive flag error prettier
  It now reads:
    Fatal: flags --dev, --testnet can't be used at the same time
* p2p: fix typo
* node: add DefaultConfig, use it for geth
* mobile: add missing NoDiscovery option
* cmd/utils: drop MakeNode
  This exposed a couple of places that needed to be updated to use
  node.DefaultConfig.
* node: fix typo
* eth: make fast sync the default mode
* cmd/utils: remove IPCApiFlag (unused)
* node: remove default IPC path
  Set it in the frontends instead.
* cmd/geth: add --syncmode
* cmd/utils: make --ipcdisable and --ipcpath mutually exclusive
* cmd/utils: don't enable WS, HTTP when setting addr
* cmd/utils: fix --identity
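For reference, the workflow introduced by the --config and dumpconfig additions above works roughly like this: write the current settings with `geth dumpconfig > config.toml`, edit the file, then start with `geth --config config.toml`; the file is loaded before the command-line flags are applied, so flags still take precedence. The excerpt below is only a sketch — the keys are the Go struct field names by design (eth.Config, node.Config, p2p.Config), but the concrete values shown are assumptions rather than authoritative defaults; run dumpconfig on a real build for the exact output.

```toml
# Illustrative excerpt of a geth config.toml; values are assumptions.
[Eth]
NetworkId = 1
SyncMode = "fast"

[Eth.GPO]
Blocks = 10
Percentile = 50

[Node]
IPCPath = "geth.ipc"

[Node.P2P]
MaxPeers = 25
ListenAddr = ":30303"
```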
55 files changed, 6615 insertions, 648 deletions
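The changes below also rework how embedders assemble the stack: p2p settings move into node.Config.P2P, eth.Config shrinks to overridable defaults (eth.DefaultConfig), and the protocol is selected through the new SyncMode field. The following is a minimal Go sketch of that pattern, modelled on the cmd/faucet changes in the diff below; the "demo" name, data directory, port and network id are made-up placeholders, and error handling is reduced to log.Fatalf.

```go
package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/les"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/nat"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Assemble the devp2p stack; p2p settings now live in node.Config.P2P.
	stack, err := node.New(&node.Config{
		Name:    "demo", // hypothetical client identifier
		Version: params.Version,
		DataDir: filepath.Join(os.Getenv("HOME"), ".demo"),
		P2P: p2p.Config{
			NAT:         nat.Any(),
			ListenAddr:  ":30303",
			MaxPeers:    25,
			NoDiscovery: true,
		},
	})
	if err != nil {
		log.Fatalf("can't create node: %v", err)
	}
	// Start from eth.DefaultConfig and override only what differs,
	// selecting the light client via the new SyncMode field.
	if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
		cfg := eth.DefaultConfig
		cfg.SyncMode = downloader.LightSync
		cfg.NetworkId = 3 // hypothetical network id
		return les.New(ctx, &cfg)
	}); err != nil {
		log.Fatalf("can't register light client: %v", err)
	}
	if err := stack.Start(); err != nil {
		log.Fatalf("can't start node: %v", err)
	}
	stack.Wait()
}
```

Full nodes follow the same shape but register eth.New instead; utils.RegisterEthService in the diff below does exactly that and also attaches a LES server when LightServ is set.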
diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index 232f0ff9e..fd34cdec1 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -41,11 +41,13 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethstats" "github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/nat" @@ -175,32 +177,29 @@ type faucet struct { func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network int, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) { // Assemble the raw devp2p protocol stack stack, err := node.New(&node.Config{ - Name: "geth", - Version: params.Version, - DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"), - NAT: nat.Any(), - DiscoveryV5: true, - ListenAddr: fmt.Sprintf(":%d", port), - DiscoveryV5Addr: fmt.Sprintf(":%d", port+1), - MaxPeers: 25, - BootstrapNodesV5: enodes, + Name: "geth", + Version: params.Version, + DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"), + P2P: p2p.Config{ + NAT: nat.Any(), + NoDiscovery: true, + DiscoveryV5: true, + ListenAddr: fmt.Sprintf(":%d", port), + DiscoveryV5Addr: fmt.Sprintf(":%d", port+1), + MaxPeers: 25, + BootstrapNodesV5: enodes, + }, }) if err != nil { return nil, err } // Assemble the Ethereum light client protocol if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - return les.New(ctx, ð.Config{ - LightMode: true, - NetworkId: network, - Genesis: genesis, - GasPrice: big.NewInt(20 * params.Shannon), - GpoBlocks: 10, - GpoPercentile: 50, - EthashCacheDir: "ethash", - EthashCachesInMem: 2, - EthashCachesOnDisk: 3, - }) + cfg := eth.DefaultConfig + cfg.SyncMode = downloader.LightSync + cfg.NetworkId = network + cfg.Genesis = genesis + return les.New(ctx, &cfg) }); err != nil { return nil, err } diff --git a/cmd/geth/accountcmd.go b/cmd/geth/accountcmd.go index f86be62ba..90f79a47e 100644 --- a/cmd/geth/accountcmd.go +++ b/cmd/geth/accountcmd.go @@ -179,8 +179,7 @@ nodes. ) func accountList(ctx *cli.Context) error { - stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) - + stack, _ := makeConfigNode(ctx) var index int for _, wallet := range stack.AccountManager().Wallets() { for _, account := range wallet.Accounts() { @@ -278,7 +277,7 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr // accountCreate creates a new account into the keystore defined by the CLI flags. func accountCreate(ctx *cli.Context) error { - stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) + stack, _ := makeConfigNode(ctx) password := getPassPhrase("Your new account is locked with a password. Please give a password. 
Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) @@ -296,7 +295,7 @@ func accountUpdate(ctx *cli.Context) error { if len(ctx.Args()) == 0 { utils.Fatalf("No accounts specified to update") } - stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) + stack, _ := makeConfigNode(ctx) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) account, oldPassword := unlockAccount(ctx, ks, ctx.Args().First(), 0, nil) @@ -317,7 +316,7 @@ func importWallet(ctx *cli.Context) error { utils.Fatalf("Could not read wallet file: %v", err) } - stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) + stack, _ := makeConfigNode(ctx) passphrase := getPassPhrase("", false, 0, utils.MakePasswordList(ctx)) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) @@ -338,7 +337,7 @@ func accountImport(ctx *cli.Context) error { if err != nil { utils.Fatalf("Failed to load the private key: %v", err) } - stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) + stack, _ := makeConfigNode(ctx) passphrase := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 2d121a611..66516b409 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -244,7 +244,7 @@ func exportChain(ctx *cli.Context) error { } func removeDB(ctx *cli.Context) error { - stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) + stack, _ := makeConfigNode(ctx) dbdir := stack.ResolvePath(utils.ChainDbName(ctx)) if !common.FileExist(dbdir) { fmt.Println(dbdir, "does not exist") diff --git a/cmd/geth/config.go b/cmd/geth/config.go new file mode 100644 index 000000000..86dd4bfdf --- /dev/null +++ b/cmd/geth/config.go @@ -0,0 +1,186 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. 
+ +package main + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "reflect" + "unicode" + + cli "gopkg.in/urfave/cli.v1" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/contracts/release" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" + "github.com/naoina/toml" +) + +var ( + dumpConfigCommand = cli.Command{ + Action: dumpConfig, + Name: "dumpconfig", + Usage: "Show configuration values", + ArgsUsage: "", + Category: "MISCELLANEOUS COMMANDS", + Description: `The dumpconfig command shows configuration values.`, + } + + configFileFlag = cli.StringFlag{ + Name: "config", + Usage: "TOML configuration file", + } +) + +// These settings ensure that TOML keys use the same names as Go struct fields. +var tomlSettings = toml.Config{ + NormFieldName: func(rt reflect.Type, key string) string { + return key + }, + FieldToKey: func(rt reflect.Type, field string) string { + return field + }, + MissingField: func(rt reflect.Type, field string) error { + link := "" + if unicode.IsUpper(rune(rt.Name()[0])) && rt.PkgPath() != "main" { + link = fmt.Sprintf(", see https://godoc.org/%s#%s for available fields", rt.PkgPath(), rt.Name()) + } + return fmt.Errorf("field '%s' is not defined in %s%s", field, rt.String(), link) + }, +} + +type ethstatsConfig struct { + URL string `toml:",omitempty"` +} + +type gethConfig struct { + Eth eth.Config + Node node.Config + Ethstats ethstatsConfig +} + +func loadConfig(file string, cfg *gethConfig) error { + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + + err = tomlSettings.NewDecoder(bufio.NewReader(f)).Decode(cfg) + // Add file name to errors that have a line number. + if _, ok := err.(*toml.LineError); ok { + err = errors.New(file + ", " + err.Error()) + } + return err +} + +func defaultNodeConfig() node.Config { + cfg := node.DefaultConfig + cfg.Name = clientIdentifier + cfg.Version = params.VersionWithCommit(gitCommit) + cfg.HTTPModules = append(cfg.HTTPModules, "eth") + cfg.WSModules = append(cfg.WSModules, "eth") + cfg.IPCPath = "geth.ipc" + return cfg +} + +func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { + // Load defaults. + cfg := gethConfig{ + Eth: eth.DefaultConfig, + Node: defaultNodeConfig(), + } + + // Load config file. + if file := ctx.GlobalString(configFileFlag.Name); file != "" { + if err := loadConfig(file, &cfg); err != nil { + utils.Fatalf("%v", err) + } + } + + // Apply flags. + utils.SetNodeConfig(ctx, &cfg.Node) + stack, err := node.New(&cfg.Node) + if err != nil { + utils.Fatalf("Failed to create the protocol stack: %v", err) + } + utils.SetEthConfig(ctx, stack, &cfg.Eth) + if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) { + cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name) + } + + return stack, cfg +} + +func makeFullNode(ctx *cli.Context) *node.Node { + stack, cfg := makeConfigNode(ctx) + + utils.RegisterEthService(stack, &cfg.Eth) + + // Whisper must be explicitly enabled, but is auto-enabled in --dev mode. + shhEnabled := ctx.GlobalBool(utils.WhisperEnabledFlag.Name) + shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DevModeFlag.Name) + if shhEnabled || shhAutoEnabled { + utils.RegisterShhService(stack) + } + + // Add the Ethereum Stats daemon if requested. 
+ if cfg.Ethstats.URL != "" { + utils.RegisterEthStatsService(stack, cfg.Ethstats.URL) + } + + // Add the release oracle service so it boots along with node. + if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { + config := release.Config{ + Oracle: relOracle, + Major: uint32(params.VersionMajor), + Minor: uint32(params.VersionMinor), + Patch: uint32(params.VersionPatch), + } + commit, _ := hex.DecodeString(gitCommit) + copy(config.Commit[:], commit) + return release.NewReleaseService(ctx, config) + }); err != nil { + utils.Fatalf("Failed to register the Geth release oracle service: %v", err) + } + return stack +} + +// dumpConfig is the dumpconfig command. +func dumpConfig(ctx *cli.Context) error { + _, cfg := makeConfigNode(ctx) + comment := "" + + if cfg.Eth.Genesis != nil { + cfg.Eth.Genesis = nil + comment += "# Note: this config doesn't contain the genesis block.\n\n" + } + + out, err := tomlSettings.Marshal(&cfg) + if err != nil { + return err + } + io.WriteString(os.Stdout, comment) + os.Stdout.Write(out) + return nil +} diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index fee8024a9..e5472836c 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -22,14 +22,17 @@ import ( "os" "path/filepath" "runtime" - "sort" "strconv" "strings" "testing" "time" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rpc" +) + +const ( + ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 shh:1.0 txpool:1.0 web3:1.0" + httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0" ) // Tests that a node embedded within a console can be started up properly and @@ -49,11 +52,7 @@ func TestConsoleWelcome(t *testing.T) { geth.setTemplateFunc("gover", runtime.Version) geth.setTemplateFunc("gethver", func() string { return params.Version }) geth.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) }) - geth.setTemplateFunc("apis", func() []string { - apis := append(strings.Split(rpc.DefaultIPCApis, ","), rpc.MetadataApi) - sort.Strings(apis) - return apis - }) + geth.setTemplateFunc("apis", func() string { return ipcAPIs }) // Verify the actual welcome message to the required template geth.expect(` @@ -63,7 +62,7 @@ instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}} coinbase: {{.Etherbase}} at block: 0 ({{niltime}}) datadir: {{.Datadir}} - modules:{{range apis}} {{.}}:1.0{{end}} + modules: {{apis}} > {{.InputLine "exit"}} `) @@ -89,7 +88,7 @@ func TestIPCAttachWelcome(t *testing.T) { "--etherbase", coinbase, "--shh", "--ipcpath", ipc) time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open - testAttachWelcome(t, geth, "ipc:"+ipc) + testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs) geth.interrupt() geth.expectExit() @@ -103,7 +102,7 @@ func TestHTTPAttachWelcome(t *testing.T) { "--etherbase", coinbase, "--rpc", "--rpcport", port) time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open - testAttachWelcome(t, geth, "http://localhost:"+port) + testAttachWelcome(t, geth, "http://localhost:"+port, httpAPIs) geth.interrupt() geth.expectExit() @@ -118,13 +117,13 @@ func TestWSAttachWelcome(t *testing.T) { "--etherbase", coinbase, "--ws", "--wsport", port) time.Sleep(2 * time.Second) // Simple way to wait for the RPC endpoint to open - testAttachWelcome(t, geth, "ws://localhost:"+port) + testAttachWelcome(t, geth, "ws://localhost:"+port, httpAPIs) geth.interrupt() geth.expectExit() } -func testAttachWelcome(t *testing.T, 
geth *testgeth, endpoint string) { +func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) { // Attach to a running geth note and terminate immediately attach := runGeth(t, "attach", endpoint) defer attach.expectExit() @@ -139,16 +138,7 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint string) { attach.setTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) }) attach.setTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") }) attach.setTemplateFunc("datadir", func() string { return geth.Datadir }) - attach.setTemplateFunc("apis", func() []string { - var apis []string - if strings.HasPrefix(endpoint, "ipc") { - apis = append(strings.Split(rpc.DefaultIPCApis, ","), rpc.MetadataApi) - } else { - apis = append(strings.Split(rpc.DefaultHTTPApis, ","), rpc.MetadataApi) - } - sort.Strings(apis) - return apis - }) + attach.setTemplateFunc("apis", func() string { return apis }) // Verify the actual welcome message to the required template attach.expect(` @@ -158,7 +148,7 @@ instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}} coinbase: {{etherbase}} at block: 0 ({{niltime}}){{if ipc}} datadir: {{datadir}}{{end}} - modules:{{range apis}} {{.}}:1.0{{end}} + modules: {{apis}} > {{.InputLine "exit" }} `) diff --git a/cmd/geth/dao_test.go b/cmd/geth/dao_test.go index f9ce80218..ec7802ada 100644 --- a/cmd/geth/dao_test.go +++ b/cmd/geth/dao_test.go @@ -84,27 +84,24 @@ var daoGenesisForkBlock = big.NewInt(314) // set in the database after various initialization procedures and invocations. func TestDAOForkBlockNewChain(t *testing.T) { for i, arg := range []struct { - testnet bool genesis string expectBlock *big.Int expectVote bool }{ // Test DAO Default Mainnet - {false, "", params.MainNetDAOForkBlock, true}, - // test DAO Default Testnet - {true, "", params.TestNetDAOForkBlock, true}, + {"", params.MainNetDAOForkBlock, true}, // test DAO Init Old Privnet - {false, daoOldGenesis, nil, false}, + {daoOldGenesis, nil, false}, // test DAO Default No Fork Privnet - {false, daoNoForkGenesis, daoGenesisForkBlock, false}, + {daoNoForkGenesis, daoGenesisForkBlock, false}, // test DAO Default Pro Fork Privnet - {false, daoProForkGenesis, daoGenesisForkBlock, true}, + {daoProForkGenesis, daoGenesisForkBlock, true}, } { - testDAOForkBlockNewChain(t, i, arg.testnet, arg.genesis, arg.expectBlock, arg.expectVote) + testDAOForkBlockNewChain(t, i, arg.genesis, arg.expectBlock, arg.expectVote) } } -func testDAOForkBlockNewChain(t *testing.T, test int, testnet bool, genesis string, expectBlock *big.Int, expectVote bool) { +func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBlock *big.Int, expectVote bool) { // Create a temporary data directory to use and inspect later datadir := tmpdir(t) defer os.RemoveAll(datadir) @@ -119,17 +116,11 @@ func testDAOForkBlockNewChain(t *testing.T, test int, testnet bool, genesis stri } else { // Force chain initialization args := []string{"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir} - if testnet { - args = append(args, "--testnet") - } geth := runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...) 
geth.cmd.Wait() } // Retrieve the DAO config flag from the database path := filepath.Join(datadir, "geth", "chaindata") - if testnet && genesis == "" { - path = filepath.Join(datadir, "testnet", "geth", "chaindata") - } db, err := ethdb.NewLDBDatabase(path, 0, 0) if err != nil { t.Fatalf("test %d: failed to open test database: %v", test, err) @@ -137,9 +128,6 @@ func testDAOForkBlockNewChain(t *testing.T, test int, testnet bool, genesis stri defer db.Close() genesisHash := common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") - if testnet { - genesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") - } if genesis != "" { genesisHash = daoGenesisHash } diff --git a/cmd/geth/main.go b/cmd/geth/main.go index e8aef2bb2..8e434948e 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -18,7 +18,6 @@ package main import ( - "encoding/hex" "fmt" "os" "runtime" @@ -29,17 +28,13 @@ import ( "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/console" - "github.com/ethereum/go-ethereum/contracts/release" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "gopkg.in/urfave/cli.v1" ) @@ -82,6 +77,8 @@ func init() { versionCommand, bugCommand, licenseCommand, + // See config.go + dumpConfigCommand, } app.Flags = []cli.Flag{ @@ -99,6 +96,7 @@ func init() { utils.EthashDatasetsOnDiskFlag, utils.FastSyncFlag, utils.LightModeFlag, + utils.SyncModeFlag, utils.LightServFlag, utils.LightPeersFlag, utils.LightKDFFlag, @@ -129,16 +127,12 @@ func init() { utils.WSApiFlag, utils.WSAllowedOriginsFlag, utils.IPCDisabledFlag, - utils.IPCApiFlag, utils.IPCPathFlag, utils.ExecFlag, utils.PreloadJSFlag, utils.WhisperEnabledFlag, utils.DevModeFlag, utils.TestNetFlag, - utils.VMForceJitFlag, - utils.VMJitCacheFlag, - utils.VMEnableJitFlag, utils.VMEnableDebugFlag, utils.NetworkIdFlag, utils.RPCCORSDomainFlag, @@ -150,6 +144,7 @@ func init() { utils.GpoBlocksFlag, utils.GpoPercentileFlag, utils.ExtraDataFlag, + configFileFlag, } app.Flags = append(app.Flags, debug.Flags...) @@ -189,52 +184,6 @@ func geth(ctx *cli.Context) error { return nil } -func makeFullNode(ctx *cli.Context) *node.Node { - // Create the default extradata and construct the base node - var clientInfo = struct { - Version uint - Name string - GoVersion string - Os string - }{uint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch), clientIdentifier, runtime.Version(), runtime.GOOS} - extra, err := rlp.EncodeToBytes(clientInfo) - if err != nil { - log.Warn("Failed to set canonical miner information", "err", err) - } - if uint64(len(extra)) > params.MaximumExtraDataSize { - log.Warn("Miner extra data exceed limit", "extra", hexutil.Bytes(extra), "limit", params.MaximumExtraDataSize) - extra = nil - } - stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) - utils.RegisterEthService(ctx, stack, extra) - - // Whisper must be explicitly enabled, but is auto-enabled in --dev mode. 
- shhEnabled := ctx.GlobalBool(utils.WhisperEnabledFlag.Name) - shhAutoEnabled := !ctx.GlobalIsSet(utils.WhisperEnabledFlag.Name) && ctx.GlobalIsSet(utils.DevModeFlag.Name) - if shhEnabled || shhAutoEnabled { - utils.RegisterShhService(stack) - } - // Add the Ethereum Stats daemon if requested - if url := ctx.GlobalString(utils.EthStatsURLFlag.Name); url != "" { - utils.RegisterEthStatsService(stack, url) - } - // Add the release oracle service so it boots along with node. - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - config := release.Config{ - Oracle: relOracle, - Major: uint32(params.VersionMajor), - Minor: uint32(params.VersionMinor), - Patch: uint32(params.VersionPatch), - } - commit, _ := hex.DecodeString(gitCommit) - copy(config.Commit[:], commit) - return release.NewReleaseService(ctx, config) - }); err != nil { - utils.Fatalf("Failed to register the Geth release oracle service: %v", err) - } - return stack -} - // startNode boots up the system node and all registered protocols, after which // it unlocks any requested accounts, and starts the RPC/IPC interfaces and the // miner. diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 491a4eb98..334d017d9 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -115,7 +115,6 @@ var AppHelpFlagGroups = []flagGroup{ utils.WSApiFlag, utils.WSAllowedOriginsFlag, utils.IPCDisabledFlag, - utils.IPCApiFlag, utils.IPCPathFlag, utils.RPCCORSDomainFlag, utils.JSpathFlag, @@ -158,9 +157,6 @@ var AppHelpFlagGroups = []flagGroup{ { Name: "VIRTUAL MACHINE", Flags: []cli.Flag{ - utils.VMEnableJitFlag, - utils.VMForceJitFlag, - utils.VMJitCacheFlag, utils.VMEnableDebugFlag, }, }, diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index bbd0cb1b3..833083b91 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -39,19 +39,16 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/swarm" bzzapi "github.com/ethereum/go-ethereum/swarm/api" "gopkg.in/urfave/cli.v1" ) -const ( - clientIdentifier = "swarm" - versionString = "0.2" -) +const clientIdentifier = "swarm" var ( gitCommit string // Git SHA1 commit hash of the release (set via linker flags) - app = utils.NewApp(gitCommit, "Ethereum Swarm") testbetBootNodes = []string{ "enode://ec8ae764f7cb0417bdfb009b9d0f18ab3818a3a4e8e7c67dd5f18971a93510a2e6f43cd0b69a27e439a9629457ea804104f37c85e41eed057d3faabbf7744cdf@13.74.157.139:30429", "enode://c2e1fceb3bf3be19dff71eec6cccf19f2dbf7567ee017d130240c670be8594bc9163353ca55dd8df7a4f161dd94b36d0615c17418b5a3cdcbb4e9d99dfa4de37@13.74.157.139:30430", @@ -126,13 +123,22 @@ var ( } ) +var defaultNodeConfig = node.DefaultConfig + +// This init function sets defaults so cmd/swarm can run alongside geth. func init() { - // Override flag defaults so bzzd can run alongside geth. + defaultNodeConfig.Name = clientIdentifier + defaultNodeConfig.Version = params.VersionWithCommit(gitCommit) + defaultNodeConfig.P2P.ListenAddr = ":30399" + defaultNodeConfig.IPCPath = "bzzd.ipc" + // Set flag defaults for --help display. utils.ListenPortFlag.Value = 30399 - utils.IPCPathFlag.Value = utils.DirectoryString{Value: "bzzd.ipc"} - utils.IPCApiFlag.Value = "admin, bzz, chequebook, debug, rpc, swarmfs, web3" +} - // Set up the cli app. +var app = utils.NewApp(gitCommit, "Ethereum Swarm") + +// This init function creates the cli.App. 
+func init() { app.Action = bzzd app.HideVersion = true // we have a command to print the version app.Copyright = "Copyright 2013-2016 The go-ethereum Authors" @@ -235,7 +241,6 @@ Cleans database of corrupted entries. utils.MaxPeersFlag, utils.NATFlag, utils.IPCDisabledFlag, - utils.IPCApiFlag, utils.IPCPathFlag, utils.PasswordFileFlag, // bzzd-specific flags @@ -276,7 +281,7 @@ func main() { func version(ctx *cli.Context) error { fmt.Println(strings.Title(clientIdentifier)) - fmt.Println("Version:", versionString) + fmt.Println("Version:", params.Version) if gitCommit != "" { fmt.Println("Git Commit:", gitCommit) } @@ -289,9 +294,16 @@ func version(ctx *cli.Context) error { } func bzzd(ctx *cli.Context) error { - stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) + cfg := defaultNodeConfig + utils.SetNodeConfig(ctx, &cfg) + stack, err := node.New(&cfg) + if err != nil { + utils.Fatalf("can't create node: %v", err) + } + registerBzzService(ctx, stack) utils.StartNode(stack) + go func() { sigc := make(chan os.Signal, 1) signal.Notify(sigc, syscall.SIGTERM) @@ -300,6 +312,7 @@ func bzzd(ctx *cli.Context) error { log.Info("Got sigterm, shutting swarm down...") stack.Stop() }() + networkId := ctx.GlobalUint64(SwarmNetworkIdFlag.Name) // Add bootnodes as initial peers. if ctx.GlobalIsSet(utils.BootnodesFlag.Name) { @@ -316,7 +329,6 @@ func bzzd(ctx *cli.Context) error { } func registerBzzService(ctx *cli.Context, stack *node.Node) { - prvkey := getAccount(ctx, stack) chbookaddr := common.HexToAddress(ctx.GlobalString(ChequebookAddrFlag.Name)) diff --git a/cmd/utils/customflags.go b/cmd/utils/customflags.go index 00f28f2ec..e5bf8724c 100644 --- a/cmd/utils/customflags.go +++ b/cmd/utils/customflags.go @@ -17,6 +17,7 @@ package utils import ( + "encoding" "errors" "flag" "fmt" @@ -78,6 +79,58 @@ func (self DirectoryFlag) Apply(set *flag.FlagSet) { }) } +type TextMarshaler interface { + encoding.TextMarshaler + encoding.TextUnmarshaler +} + +// textMarshalerVal turns a TextMarshaler into a flag.Value +type textMarshalerVal struct { + v TextMarshaler +} + +func (v textMarshalerVal) String() string { + if v.v == nil { + return "" + } + text, _ := v.v.MarshalText() + return string(text) +} + +func (v textMarshalerVal) Set(s string) error { + return v.v.UnmarshalText([]byte(s)) +} + +// TextMarshalerFlag wraps a TextMarshaler value. +type TextMarshalerFlag struct { + Name string + Value TextMarshaler + Usage string +} + +func (f TextMarshalerFlag) GetName() string { + return f.Name +} + +func (f TextMarshalerFlag) String() string { + return fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage) +} + +func (f TextMarshalerFlag) Apply(set *flag.FlagSet) { + eachName(f.Name, func(name string) { + set.Var(textMarshalerVal{f.Value}, f.Name, f.Usage) + }) +} + +// GlobalTextMarshaler returns the value of a TextMarshalerFlag from the global flag set. +func GlobalTextMarshaler(ctx *cli.Context, name string) TextMarshaler { + val := ctx.GlobalGeneric(name) + if val == nil { + return nil + } + return val.(textMarshalerVal).v +} + // BigFlag is a command line flag that accepts 256 bit big integers in decimal or // hexadecimal syntax. 
type BigFlag struct { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0ca407a75..1bd77139c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -23,7 +23,6 @@ import ( "io/ioutil" "math/big" "os" - "os/user" "path/filepath" "runtime" "strconv" @@ -38,6 +37,8 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethstats" "github.com/ethereum/go-ethereum/event" @@ -45,12 +46,12 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rpc" whisper "github.com/ethereum/go-ethereum/whisper/whisperv2" "gopkg.in/urfave/cli.v1" ) @@ -121,31 +122,32 @@ var ( EthashCachesInMemoryFlag = cli.IntFlag{ Name: "ethash.cachesinmem", Usage: "Number of recent ethash caches to keep in memory (16MB each)", - Value: 2, + Value: eth.DefaultConfig.EthashCachesInMem, } EthashCachesOnDiskFlag = cli.IntFlag{ Name: "ethash.cachesondisk", Usage: "Number of recent ethash caches to keep on disk (16MB each)", - Value: 3, + Value: eth.DefaultConfig.EthashCachesOnDisk, } EthashDatasetDirFlag = DirectoryFlag{ Name: "ethash.dagdir", Usage: "Directory to store the ethash mining DAGs (default = inside home folder)", + Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir}, } EthashDatasetsInMemoryFlag = cli.IntFlag{ Name: "ethash.dagsinmem", Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)", - Value: 1, + Value: eth.DefaultConfig.EthashDatasetsInMem, } EthashDatasetsOnDiskFlag = cli.IntFlag{ Name: "ethash.dagsondisk", Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)", - Value: 2, + Value: eth.DefaultConfig.EthashDatasetsOnDisk, } NetworkIdFlag = cli.IntFlag{ Name: "networkid", Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten)", - Value: eth.NetworkId, + Value: eth.DefaultConfig.NetworkId, } TestNetFlag = cli.BoolFlag{ Name: "testnet", @@ -172,6 +174,13 @@ var ( Name: "light", Usage: "Enable light client mode", } + defaultSyncMode = eth.DefaultConfig.SyncMode + SyncModeFlag = TextMarshalerFlag{ + Name: "syncmode", + Usage: `Blockchain sync mode ("fast", "full", or "light")`, + Value: &defaultSyncMode, + } + LightServFlag = cli.IntFlag{ Name: "lightserv", Usage: "Maximum percentage of time allowed for serving LES requests (0-90)", @@ -238,19 +247,6 @@ var ( Value: "", } - VMForceJitFlag = cli.BoolFlag{ - Name: "forcejit", - Usage: "Force the JIT VM to take precedence", - } - VMJitCacheFlag = cli.IntFlag{ - Name: "jitcache", - Usage: "Amount of cached JIT VM programs", - Value: 64, - } - VMEnableJitFlag = cli.BoolFlag{ - Name: "jitvm", - Usage: "Enable the JIT VM", - } VMEnableDebugFlag = cli.BoolFlag{ Name: "vmdebug", Usage: "Record information useful for VM and contract debugging", @@ -295,21 +291,15 @@ var ( RPCApiFlag = cli.StringFlag{ Name: "rpcapi", Usage: "API's offered over the HTTP-RPC interface", - Value: rpc.DefaultHTTPApis, + Value: "", } IPCDisabledFlag = cli.BoolFlag{ Name: "ipcdisable", Usage: "Disable the 
IPC-RPC server", } - IPCApiFlag = cli.StringFlag{ - Name: "ipcapi", - Usage: "APIs offered over the IPC-RPC interface", - Value: rpc.DefaultIPCApis, - } IPCPathFlag = DirectoryFlag{ Name: "ipcpath", Usage: "Filename for IPC socket/pipe within the datadir (explicit paths escape it)", - Value: DirectoryString{"geth.ipc"}, } WSEnabledFlag = cli.BoolFlag{ Name: "ws", @@ -328,7 +318,7 @@ var ( WSApiFlag = cli.StringFlag{ Name: "wsapi", Usage: "API's offered over the WS-RPC interface", - Value: rpc.DefaultHTTPApis, + Value: "", } WSAllowedOriginsFlag = cli.StringFlag{ Name: "wsorigins", @@ -412,12 +402,12 @@ var ( GpoBlocksFlag = cli.IntFlag{ Name: "gpoblocks", Usage: "Number of recent blocks to check for gas prices", - Value: 10, + Value: eth.DefaultConfig.GPO.Blocks, } GpoPercentileFlag = cli.IntFlag{ Name: "gpopercentile", Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices", - Value: 50, + Value: eth.DefaultConfig.GPO.Percentile, } ) @@ -436,88 +426,42 @@ func MakeDataDir(ctx *cli.Context) string { return "" } -// MakeEthashCacheDir returns the directory to use for storing the ethash cache -// dumps. -func MakeEthashCacheDir(ctx *cli.Context) string { - if ctx.GlobalIsSet(EthashCacheDirFlag.Name) && ctx.GlobalString(EthashCacheDirFlag.Name) == "" { - return "" - } - if !ctx.GlobalIsSet(EthashCacheDirFlag.Name) { - return "ethash" - } - return ctx.GlobalString(EthashCacheDirFlag.Name) -} - -// MakeEthashDatasetDir returns the directory to use for storing the full ethash -// dataset dumps. -func MakeEthashDatasetDir(ctx *cli.Context) string { - if !ctx.GlobalIsSet(EthashDatasetDirFlag.Name) { - home := os.Getenv("HOME") - if home == "" { - if user, err := user.Current(); err == nil { - home = user.HomeDir - } - } - if runtime.GOOS == "windows" { - return filepath.Join(home, "AppData", "Ethash") - } - return filepath.Join(home, ".ethash") - } - return ctx.GlobalString(EthashDatasetDirFlag.Name) -} - -// MakeIPCPath creates an IPC path configuration from the set command line flags, -// returning an empty string if IPC was explicitly disabled, or the set path. -func MakeIPCPath(ctx *cli.Context) string { - if ctx.GlobalBool(IPCDisabledFlag.Name) { - return "" - } - return ctx.GlobalString(IPCPathFlag.Name) -} - -// MakeNodeKey creates a node key from set command line flags, either loading it +// setNodeKey creates a node key from set command line flags, either loading it // from a file or as a specified hex value. If neither flags were provided, this // method returns nil and an emphemeral key is to be generated. -func MakeNodeKey(ctx *cli.Context) *ecdsa.PrivateKey { +func setNodeKey(ctx *cli.Context, cfg *p2p.Config) { var ( hex = ctx.GlobalString(NodeKeyHexFlag.Name) file = ctx.GlobalString(NodeKeyFileFlag.Name) - - key *ecdsa.PrivateKey - err error + key *ecdsa.PrivateKey + err error ) switch { case file != "" && hex != "": Fatalf("Options %q and %q are mutually exclusive", NodeKeyFileFlag.Name, NodeKeyHexFlag.Name) - case file != "": if key, err = crypto.LoadECDSA(file); err != nil { Fatalf("Option %q: %v", NodeKeyFileFlag.Name, err) } - + cfg.PrivateKey = key case hex != "": if key, err = crypto.HexToECDSA(hex); err != nil { Fatalf("Option %q: %v", NodeKeyHexFlag.Name, err) } + cfg.PrivateKey = key } - return key } -// makeNodeUserIdent creates the user identifier from CLI flags. -func makeNodeUserIdent(ctx *cli.Context) string { - var comps []string +// setNodeUserIdent creates the user identifier from CLI flags. 
+func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) { if identity := ctx.GlobalString(IdentityFlag.Name); len(identity) > 0 { - comps = append(comps, identity) + cfg.UserIdent = identity } - if ctx.GlobalBool(VMEnableJitFlag.Name) { - comps = append(comps, "JIT") - } - return strings.Join(comps, "/") } -// MakeBootstrapNodes creates a list of bootstrap nodes from the command line +// setBootstrapNodes creates a list of bootstrap nodes from the command line // flags, reverting to pre-configured ones if none have been specified. -func MakeBootstrapNodes(ctx *cli.Context) []*discover.Node { +func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { urls := params.MainnetBootnodes if ctx.GlobalIsSet(BootnodesFlag.Name) { urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",") @@ -525,62 +469,68 @@ func MakeBootstrapNodes(ctx *cli.Context) []*discover.Node { urls = params.TestnetBootnodes } - bootnodes := make([]*discover.Node, 0, len(urls)) + cfg.BootstrapNodes = make([]*discover.Node, 0, len(urls)) for _, url := range urls { node, err := discover.ParseNode(url) if err != nil { log.Error("Bootstrap URL invalid", "enode", url, "err", err) continue } - bootnodes = append(bootnodes, node) + cfg.BootstrapNodes = append(cfg.BootstrapNodes, node) } - return bootnodes } -// MakeBootstrapNodesV5 creates a list of bootstrap nodes from the command line +// setBootstrapNodesV5 creates a list of bootstrap nodes from the command line // flags, reverting to pre-configured ones if none have been specified. -func MakeBootstrapNodesV5(ctx *cli.Context) []*discv5.Node { +func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) { urls := params.DiscoveryV5Bootnodes if ctx.GlobalIsSet(BootnodesFlag.Name) { urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",") + } else if cfg.BootstrapNodesV5 == nil { + return // already set, don't apply defaults. } - bootnodes := make([]*discv5.Node, 0, len(urls)) + cfg.BootstrapNodesV5 = make([]*discv5.Node, 0, len(urls)) for _, url := range urls { node, err := discv5.ParseNode(url) if err != nil { log.Error("Bootstrap URL invalid", "enode", url, "err", err) continue } - bootnodes = append(bootnodes, node) + cfg.BootstrapNodesV5 = append(cfg.BootstrapNodesV5, node) } - return bootnodes } -// MakeListenAddress creates a TCP listening address string from set command +// setListenAddress creates a TCP listening address string from set command // line flags. -func MakeListenAddress(ctx *cli.Context) string { - return fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortFlag.Name)) +func setListenAddress(ctx *cli.Context, cfg *p2p.Config) { + if ctx.GlobalIsSet(ListenPortFlag.Name) { + cfg.ListenAddr = fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortFlag.Name)) + } } -// MakeDiscoveryV5Address creates a UDP listening address string from set command +// setDiscoveryV5Address creates a UDP listening address string from set command // line flags for the V5 discovery protocol. -func MakeDiscoveryV5Address(ctx *cli.Context) string { - return fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortFlag.Name)+1) +func setDiscoveryV5Address(ctx *cli.Context, cfg *p2p.Config) { + if ctx.GlobalIsSet(ListenPortFlag.Name) { + cfg.DiscoveryV5Addr = fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortFlag.Name)+1) + } } -// MakeNAT creates a port mapper from set command line flags. -func MakeNAT(ctx *cli.Context) nat.Interface { - natif, err := nat.Parse(ctx.GlobalString(NATFlag.Name)) - if err != nil { - Fatalf("Option %s: %v", NATFlag.Name, err) +// setNAT creates a port mapper from command line flags. 
+func setNAT(ctx *cli.Context, cfg *p2p.Config) { + if ctx.GlobalIsSet(NATFlag.Name) { + natif, err := nat.Parse(ctx.GlobalString(NATFlag.Name)) + if err != nil { + Fatalf("Option %s: %v", NATFlag.Name, err) + } + cfg.NAT = natif } - return natif } -// MakeRPCModules splits input separated by a comma and trims excessive white +// makeRPCModules splits input separated by a comma and trims excessive white // space from the substrings. -func MakeRPCModules(input string) []string { +func makeRPCModules(input string) []string { result := strings.Split(input, ",") for i, r := range result { result[i] = strings.TrimSpace(r) @@ -588,27 +538,63 @@ func MakeRPCModules(input string) []string { return result } -// MakeHTTPRpcHost creates the HTTP RPC listener interface string from the set +// setHTTP creates the HTTP RPC listener interface string from the set // command line flags, returning empty if the HTTP endpoint is disabled. -func MakeHTTPRpcHost(ctx *cli.Context) string { - if !ctx.GlobalBool(RPCEnabledFlag.Name) { - return "" +func setHTTP(ctx *cli.Context, cfg *node.Config) { + if ctx.GlobalBool(RPCEnabledFlag.Name) && cfg.HTTPHost == "" { + cfg.HTTPHost = "127.0.0.1" + if ctx.GlobalIsSet(RPCListenAddrFlag.Name) { + cfg.HTTPHost = ctx.GlobalString(RPCListenAddrFlag.Name) + } + } + + if ctx.GlobalIsSet(RPCPortFlag.Name) { + cfg.HTTPPort = ctx.GlobalInt(RPCPortFlag.Name) + } + if ctx.GlobalIsSet(RPCCORSDomainFlag.Name) { + cfg.HTTPCors = ctx.GlobalString(RPCCORSDomainFlag.Name) + } + if ctx.GlobalIsSet(RPCApiFlag.Name) { + cfg.HTTPModules = makeRPCModules(ctx.GlobalString(RPCApiFlag.Name)) } - return ctx.GlobalString(RPCListenAddrFlag.Name) } -// MakeWSRpcHost creates the WebSocket RPC listener interface string from the set +// setWS creates the WebSocket RPC listener interface string from the set // command line flags, returning empty if the HTTP endpoint is disabled. -func MakeWSRpcHost(ctx *cli.Context) string { - if !ctx.GlobalBool(WSEnabledFlag.Name) { - return "" +func setWS(ctx *cli.Context, cfg *node.Config) { + if ctx.GlobalBool(WSEnabledFlag.Name) && cfg.WSHost == "" { + cfg.WSHost = "127.0.0.1" + if ctx.GlobalIsSet(WSListenAddrFlag.Name) { + cfg.WSHost = ctx.GlobalString(WSListenAddrFlag.Name) + } + } + + if ctx.GlobalIsSet(WSPortFlag.Name) { + cfg.WSPort = ctx.GlobalInt(WSPortFlag.Name) + } + if ctx.GlobalIsSet(WSAllowedOriginsFlag.Name) { + cfg.WSOrigins = ctx.GlobalString(WSAllowedOriginsFlag.Name) + } + if ctx.GlobalIsSet(WSApiFlag.Name) { + cfg.WSModules = makeRPCModules(ctx.GlobalString(WSApiFlag.Name)) + } +} + +// setIPC creates an IPC path configuration from the set command line flags, +// returning an empty string if IPC was explicitly disabled, or the set path. +func setIPC(ctx *cli.Context, cfg *node.Config) { + checkExclusive(ctx, IPCDisabledFlag, IPCPathFlag) + switch { + case ctx.GlobalBool(IPCDisabledFlag.Name): + cfg.IPCPath = "" + case ctx.GlobalIsSet(IPCPathFlag.Name): + cfg.IPCPath = ctx.GlobalString(IPCPathFlag.Name) } - return ctx.GlobalString(WSListenAddrFlag.Name) } -// MakeDatabaseHandles raises out the number of allowed file handles per process +// makeDatabaseHandles raises out the number of allowed file handles per process // for Geth and returns half of the allowance to assign to the database. 
-func MakeDatabaseHandles() int { +func makeDatabaseHandles() int { if err := raiseFdLimit(2048); err != nil { Fatalf("Failed to raise file descriptor allowance: %v", err) } @@ -641,33 +627,25 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error return accs[index], nil } -// MakeEtherbase retrieves the etherbase either from the directly specified +// setEtherbase retrieves the etherbase either from the directly specified // command line flags or from the keystore if CLI indexed. -func MakeEtherbase(ks *keystore.KeyStore, ctx *cli.Context) common.Address { - accounts := ks.Accounts() - if !ctx.GlobalIsSet(EtherbaseFlag.Name) && len(accounts) == 0 { - log.Warn("No etherbase set and no accounts found as default") - return common.Address{} - } - etherbase := ctx.GlobalString(EtherbaseFlag.Name) - if etherbase == "" { - return common.Address{} - } - // If the specified etherbase is a valid address, return it - account, err := MakeAddress(ks, etherbase) - if err != nil { - Fatalf("Option %q: %v", EtherbaseFlag.Name, err) +func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) { + if ctx.GlobalIsSet(EtherbaseFlag.Name) { + account, err := MakeAddress(ks, ctx.GlobalString(EtherbaseFlag.Name)) + if err != nil { + Fatalf("Option %q: %v", EtherbaseFlag.Name, err) + } + cfg.Etherbase = account.Address + return } - return account.Address -} - -// MakeMinerExtra resolves extradata for the miner from the set command line flags -// or returns a default one composed on the client, runtime and OS metadata. -func MakeMinerExtra(extra []byte, ctx *cli.Context) []byte { - if ctx.GlobalIsSet(ExtraDataFlag.Name) { - return []byte(ctx.GlobalString(ExtraDataFlag.Name)) + accounts := ks.Accounts() + if (cfg.Etherbase == common.Address{}) { + if len(accounts) > 0 { + cfg.Etherbase = accounts[0].Address + } else { + log.Warn("No etherbase set and no accounts found as default") + } } - return extra } // MakePasswordList reads password lines from the file specified by --password. @@ -688,144 +666,217 @@ func MakePasswordList(ctx *cli.Context) []string { return lines } -// MakeNode configures a node with no services from command line flags. 
-func MakeNode(ctx *cli.Context, name, gitCommit string) *node.Node { - vsn := params.Version - if gitCommit != "" { - vsn += "-" + gitCommit[:8] +func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { + setNodeKey(ctx, cfg) + setNAT(ctx, cfg) + setListenAddress(ctx, cfg) + setDiscoveryV5Address(ctx, cfg) + setBootstrapNodes(ctx, cfg) + setBootstrapNodesV5(ctx, cfg) + + if ctx.GlobalIsSet(MaxPeersFlag.Name) { + cfg.MaxPeers = ctx.GlobalInt(MaxPeersFlag.Name) + } + if ctx.GlobalIsSet(MaxPendingPeersFlag.Name) { + cfg.MaxPendingPeers = ctx.GlobalInt(MaxPendingPeersFlag.Name) + } + if ctx.GlobalIsSet(NoDiscoverFlag.Name) || ctx.GlobalBool(LightModeFlag.Name) { + cfg.NoDiscovery = true } - // if we're running a light client or server, force enable the v5 peer discovery unless it is explicitly disabled with --nodiscover - // note that explicitly specifying --v5disc overrides --nodiscover, in which case the later only disables v4 discovery + // if we're running a light client or server, force enable the v5 peer discovery + // unless it is explicitly disabled with --nodiscover note that explicitly specifying + // --v5disc overrides --nodiscover, in which case the later only disables v4 discovery forceV5Discovery := (ctx.GlobalBool(LightModeFlag.Name) || ctx.GlobalInt(LightServFlag.Name) > 0) && !ctx.GlobalBool(NoDiscoverFlag.Name) - - config := &node.Config{ - DataDir: MakeDataDir(ctx), - KeyStoreDir: ctx.GlobalString(KeyStoreDirFlag.Name), - UseLightweightKDF: ctx.GlobalBool(LightKDFFlag.Name), - PrivateKey: MakeNodeKey(ctx), - Name: name, - Version: vsn, - UserIdent: makeNodeUserIdent(ctx), - NoDiscovery: ctx.GlobalBool(NoDiscoverFlag.Name) || ctx.GlobalBool(LightModeFlag.Name), // always disable v4 discovery in light client mode - DiscoveryV5: ctx.GlobalBool(DiscoveryV5Flag.Name) || forceV5Discovery, - DiscoveryV5Addr: MakeDiscoveryV5Address(ctx), - BootstrapNodes: MakeBootstrapNodes(ctx), - BootstrapNodesV5: MakeBootstrapNodesV5(ctx), - ListenAddr: MakeListenAddress(ctx), - NAT: MakeNAT(ctx), - MaxPeers: ctx.GlobalInt(MaxPeersFlag.Name), - MaxPendingPeers: ctx.GlobalInt(MaxPendingPeersFlag.Name), - IPCPath: MakeIPCPath(ctx), - HTTPHost: MakeHTTPRpcHost(ctx), - HTTPPort: ctx.GlobalInt(RPCPortFlag.Name), - HTTPCors: ctx.GlobalString(RPCCORSDomainFlag.Name), - HTTPModules: MakeRPCModules(ctx.GlobalString(RPCApiFlag.Name)), - WSHost: MakeWSRpcHost(ctx), - WSPort: ctx.GlobalInt(WSPortFlag.Name), - WSOrigins: ctx.GlobalString(WSAllowedOriginsFlag.Name), - WSModules: MakeRPCModules(ctx.GlobalString(WSApiFlag.Name)), - } - if ctx.GlobalBool(DevModeFlag.Name) { - if !ctx.GlobalIsSet(DataDirFlag.Name) { - config.DataDir = filepath.Join(os.TempDir(), "/ethereum_dev_mode") - } - // --dev mode does not need p2p networking. - config.MaxPeers = 0 - config.ListenAddr = ":0" + if ctx.GlobalIsSet(DiscoveryV5Flag.Name) { + cfg.DiscoveryV5 = ctx.GlobalBool(DiscoveryV5Flag.Name) + } else if forceV5Discovery { + cfg.DiscoveryV5 = true } + if netrestrict := ctx.GlobalString(NetrestrictFlag.Name); netrestrict != "" { list, err := netutil.ParseNetlist(netrestrict) if err != nil { Fatalf("Option %q: %v", NetrestrictFlag.Name, err) } - config.NetRestrict = list + cfg.NetRestrict = list } - stack, err := node.New(config) - if err != nil { - Fatalf("Failed to create the protocol stack: %v", err) + if ctx.GlobalBool(DevModeFlag.Name) { + // --dev mode can't use p2p networking. 
+ cfg.MaxPeers = 0 + cfg.ListenAddr = ":0" + cfg.NoDiscovery = true + cfg.DiscoveryV5 = false } - return stack } -// RegisterEthService configures eth.Ethereum from command line flags and adds it to the -// given node. -func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) { - // Avoid conflicting network flags - networks, netFlags := 0, []cli.BoolFlag{DevModeFlag, TestNetFlag} - for _, flag := range netFlags { - if ctx.GlobalBool(flag.Name) { - networks++ +// SetNodeConfig applies node-related command line flags to the config. +func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { + SetP2PConfig(ctx, &cfg.P2P) + setIPC(ctx, cfg) + setHTTP(ctx, cfg) + setWS(ctx, cfg) + setNodeUserIdent(ctx, cfg) + + switch { + case ctx.GlobalIsSet(DataDirFlag.Name): + cfg.DataDir = ctx.GlobalString(DataDirFlag.Name) + case ctx.GlobalBool(DevModeFlag.Name): + cfg.DataDir = filepath.Join(os.TempDir(), "ethereum_dev_mode") + case ctx.GlobalBool(TestNetFlag.Name): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet") + } + + if ctx.GlobalIsSet(KeyStoreDirFlag.Name) { + cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name) + } + if ctx.GlobalIsSet(LightKDFFlag.Name) { + cfg.UseLightweightKDF = ctx.GlobalBool(LightKDFFlag.Name) + } +} + +func setGPO(ctx *cli.Context, cfg *gasprice.Config) { + if ctx.GlobalIsSet(GpoBlocksFlag.Name) { + cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name) + } + if ctx.GlobalIsSet(GpoPercentileFlag.Name) { + cfg.Percentile = ctx.GlobalInt(GpoPercentileFlag.Name) + } +} + +func setEthash(ctx *cli.Context, cfg *eth.Config) { + if ctx.GlobalIsSet(EthashCacheDirFlag.Name) { + cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name) + } + if ctx.GlobalIsSet(EthashDatasetDirFlag.Name) { + cfg.EthashDatasetDir = ctx.GlobalString(EthashDatasetDirFlag.Name) + } + if ctx.GlobalIsSet(EthashCachesInMemoryFlag.Name) { + cfg.EthashCachesInMem = ctx.GlobalInt(EthashCachesInMemoryFlag.Name) + } + if ctx.GlobalIsSet(EthashCachesOnDiskFlag.Name) { + cfg.EthashCachesOnDisk = ctx.GlobalInt(EthashCachesOnDiskFlag.Name) + } + if ctx.GlobalIsSet(EthashDatasetsInMemoryFlag.Name) { + cfg.EthashDatasetsInMem = ctx.GlobalInt(EthashDatasetsInMemoryFlag.Name) + } + if ctx.GlobalIsSet(EthashDatasetsOnDiskFlag.Name) { + cfg.EthashDatasetsOnDisk = ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name) + } +} + +func checkExclusive(ctx *cli.Context, flags ...cli.Flag) { + set := make([]string, 0, 1) + for _, flag := range flags { + if ctx.GlobalIsSet(flag.GetName()) { + set = append(set, "--"+flag.GetName()) } } - if networks > 1 { - Fatalf("The %v flags are mutually exclusive", netFlags) + if len(set) > 1 { + Fatalf("flags %v can't be used at the same time", strings.Join(set, ", ")) } +} + +// SetEthConfig applies eth-related command line flags to the config. 
+func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { + // Avoid conflicting network flags + checkExclusive(ctx, DevModeFlag, TestNetFlag) + checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag) + ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) + setEtherbase(ctx, ks, cfg) + setGPO(ctx, &cfg.GPO) + setEthash(ctx, cfg) + + switch { + case ctx.GlobalIsSet(SyncModeFlag.Name): + cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) + case ctx.GlobalBool(FastSyncFlag.Name): + cfg.SyncMode = downloader.FastSync + case ctx.GlobalBool(LightModeFlag.Name): + cfg.SyncMode = downloader.LightSync + } + if ctx.GlobalIsSet(LightServFlag.Name) { + cfg.LightServ = ctx.GlobalInt(LightServFlag.Name) + } + if ctx.GlobalIsSet(LightPeersFlag.Name) { + cfg.LightPeers = ctx.GlobalInt(LightPeersFlag.Name) + } + if ctx.GlobalIsSet(NetworkIdFlag.Name) { + cfg.NetworkId = ctx.GlobalInt(NetworkIdFlag.Name) + } + + // Ethereum needs to know maxPeers to calculate the light server peer ratio. + // TODO(fjl): ensure Ethereum can get MaxPeers from node. + cfg.MaxPeers = ctx.GlobalInt(MaxPeersFlag.Name) + + if ctx.GlobalIsSet(CacheFlag.Name) { + cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) + } + cfg.DatabaseHandles = makeDatabaseHandles() + + if ctx.GlobalIsSet(MinerThreadsFlag.Name) { + cfg.MinerThreads = ctx.GlobalInt(MinerThreadsFlag.Name) + } + if ctx.GlobalIsSet(DocRootFlag.Name) { + cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name) + } + if ctx.GlobalIsSet(ExtraDataFlag.Name) { + cfg.ExtraData = []byte(ctx.GlobalString(ExtraDataFlag.Name)) + } + if ctx.GlobalIsSet(GasPriceFlag.Name) { + cfg.GasPrice = GlobalBig(ctx, GasPriceFlag.Name) + } + + if ctx.GlobalIsSet(SolcPathFlag.Name) { + cfg.SolcPath = ctx.GlobalString(SolcPathFlag.Name) + } + if ctx.GlobalIsSet(VMEnableDebugFlag.Name) { + // TODO(fjl): force-enable this in --dev mode + cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name) + } - ethConf := ð.Config{ - Etherbase: MakeEtherbase(ks, ctx), - FastSync: ctx.GlobalBool(FastSyncFlag.Name), - LightMode: ctx.GlobalBool(LightModeFlag.Name), - LightServ: ctx.GlobalInt(LightServFlag.Name), - LightPeers: ctx.GlobalInt(LightPeersFlag.Name), - MaxPeers: ctx.GlobalInt(MaxPeersFlag.Name), - DatabaseCache: ctx.GlobalInt(CacheFlag.Name), - DatabaseHandles: MakeDatabaseHandles(), - NetworkId: ctx.GlobalInt(NetworkIdFlag.Name), - MinerThreads: ctx.GlobalInt(MinerThreadsFlag.Name), - ExtraData: MakeMinerExtra(extra, ctx), - DocRoot: ctx.GlobalString(DocRootFlag.Name), - GasPrice: GlobalBig(ctx, GasPriceFlag.Name), - GpoBlocks: ctx.GlobalInt(GpoBlocksFlag.Name), - GpoPercentile: ctx.GlobalInt(GpoPercentileFlag.Name), - SolcPath: ctx.GlobalString(SolcPathFlag.Name), - EthashCacheDir: MakeEthashCacheDir(ctx), - EthashCachesInMem: ctx.GlobalInt(EthashCachesInMemoryFlag.Name), - EthashCachesOnDisk: ctx.GlobalInt(EthashCachesOnDiskFlag.Name), - EthashDatasetDir: MakeEthashDatasetDir(ctx), - EthashDatasetsInMem: ctx.GlobalInt(EthashDatasetsInMemoryFlag.Name), - EthashDatasetsOnDisk: ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name), - EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name), - } - - // Override any default configs in dev mode or the test net + // Override any default configs for --dev and --testnet. 
switch { case ctx.GlobalBool(TestNetFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { - ethConf.NetworkId = 3 + cfg.NetworkId = 3 } - ethConf.Genesis = core.DefaultTestnetGenesisBlock() + cfg.Genesis = core.DefaultTestnetGenesisBlock() case ctx.GlobalBool(DevModeFlag.Name): - ethConf.Genesis = core.DevGenesisBlock() + cfg.Genesis = core.DevGenesisBlock() if !ctx.GlobalIsSet(GasPriceFlag.Name) { - ethConf.GasPrice = new(big.Int) + cfg.GasPrice = new(big.Int) } - ethConf.PowTest = true + cfg.PowTest = true } - // Override any global options pertaining to the Ethereum protocol + + // TODO(fjl): move trie cache generations into config if gen := ctx.GlobalInt(TrieCacheGenFlag.Name); gen > 0 { state.MaxTrieCacheGen = uint16(gen) } +} - if ethConf.LightMode { - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - return les.New(ctx, ethConf) - }); err != nil { - Fatalf("Failed to register the Ethereum light node service: %v", err) - } +// RegisterEthService adds an Ethereum client to the stack. +func RegisterEthService(stack *node.Node, cfg *eth.Config) { + var err error + if cfg.SyncMode == downloader.LightSync { + err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { + return les.New(ctx, cfg) + }) } else { - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - fullNode, err := eth.New(ctx, ethConf) - if fullNode != nil && ethConf.LightServ > 0 { - ls, _ := les.NewLesServer(fullNode, ethConf) + err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { + fullNode, err := eth.New(ctx, cfg) + if fullNode != nil && cfg.LightServ > 0 { + ls, _ := les.NewLesServer(fullNode, cfg) fullNode.AddLesServer(ls) } return fullNode, err - }); err != nil { - Fatalf("Failed to register the Ethereum full node service: %v", err) - } + }) + } + if err != nil { + Fatalf("Failed to register the Ethereum service: %v", err) } } @@ -855,6 +906,7 @@ func RegisterEthStatsService(stack *node.Node, url string) { // SetupNetwork configures the system for either the main net or some test network. func SetupNetwork(ctx *cli.Context) { + // TODO(fjl): move target gas limit into config params.TargetGasLimit = new(big.Int).SetUint64(ctx.GlobalUint64(TargetGasLimitFlag.Name)) } @@ -870,7 +922,7 @@ func ChainDbName(ctx *cli.Context) string { func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { var ( cache = ctx.GlobalInt(CacheFlag.Name) - handles = MakeDatabaseHandles() + handles = makeDatabaseHandles() name = ChainDbName(ctx) ) diff --git a/cmd/wnode/main.go b/cmd/wnode/main.go index 7431980b5..b40352f57 100644 --- a/cmd/wnode/main.go +++ b/cmd/wnode/main.go @@ -257,7 +257,6 @@ func initialize() { Config: p2p.Config{ PrivateKey: nodeid, MaxPeers: maxPeers, - Discovery: true, Name: common.MakeName("wnode", "5.0"), Protocols: shh.Protocols(), ListenAddr: *argIP, diff --git a/common/math/big.go b/common/math/big.go index 0b67a1b50..5255a88e9 100644 --- a/common/math/big.go +++ b/common/math/big.go @@ -51,6 +51,9 @@ func (i *HexOrDecimal256) UnmarshalText(input []byte) error { // MarshalText implements encoding.TextMarshaler. 
func (i *HexOrDecimal256) MarshalText() ([]byte, error) { + if i == nil { + return []byte("0x0"), nil + } return []byte(fmt.Sprintf("%#x", (*big.Int)(i))), nil } diff --git a/console/console_test.go b/console/console_test.go index b5cff2c4d..0fc0e7051 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -91,7 +91,7 @@ func newTester(t *testing.T, confOverride func(*eth.Config)) *tester { } // Create a networkless protocol stack and start an Ethereum service within - stack, err := node.New(&node.Config{DataDir: workspace, UseLightweightKDF: true, Name: testInstance, NoDiscovery: true}) + stack, err := node.New(&node.Config{DataDir: workspace, UseLightweightKDF: true, Name: testInstance}) if err != nil { t.Fatalf("failed to create node: %v", err) } diff --git a/eth/backend.go b/eth/backend.go index 4dffa2990..7ee591f9e 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -18,14 +18,15 @@ package eth import ( + "errors" "fmt" - "math/big" - "regexp" + "runtime" "sync" "sync/atomic" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/ethash" @@ -43,55 +44,10 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" ) -var ( - datadirInUseErrnos = map[uint]bool{11: true, 32: true, 35: true} - portInUseErrRE = regexp.MustCompile("address already in use") -) - -type Config struct { - // The genesis block, which is inserted if the database is empty. - // If nil, the Ethereum main net block is used. - Genesis *core.Genesis - - NetworkId int // Network ID to use for selecting peers to connect to - - FastSync bool // Enables the state download based fast synchronisation algorithm - LightMode bool // Running in light client mode - LightServ int // Maximum percentage of time allowed for serving LES requests - LightPeers int // Maximum number of LES client peers - MaxPeers int // Maximum number of global peers - - SkipBcVersionCheck bool // e.g. 
blockchain export - DatabaseCache int - DatabaseHandles int - - DocRoot string - PowFake bool - PowTest bool - PowShared bool - ExtraData []byte - - EthashCacheDir string - EthashCachesInMem int - EthashCachesOnDisk int - EthashDatasetDir string - EthashDatasetsInMem int - EthashDatasetsOnDisk int - - Etherbase common.Address - GasPrice *big.Int - MinerThreads int - SolcPath string - - GpoBlocks int - GpoPercentile int - - EnablePreimageRecording bool -} - type LesServer interface { Start(srvr *p2p.Server) Stop() @@ -137,6 +93,13 @@ func (s *Ethereum) AddLesServer(ls LesServer) { // New creates a new Ethereum object (including the // initialisation of the common Ethereum object) func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { + if config.SyncMode == downloader.LightSync { + return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum") + } + if !config.SyncMode.IsValid() { + return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode) + } + chainDb, err := CreateDB(ctx, config, "chaindata") if err != nil { return nil, err @@ -201,25 +164,41 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { } } - if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.FastSync, config.NetworkId, maxPeers, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb); err != nil { + if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.SyncMode, config.NetworkId, maxPeers, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb); err != nil { return nil, err } eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine) eth.miner.SetGasPrice(config.GasPrice) - eth.miner.SetExtra(config.ExtraData) + eth.miner.SetExtra(makeExtraData(config.ExtraData)) eth.ApiBackend = &EthApiBackend{eth, nil} - gpoParams := gasprice.Config{ - Blocks: config.GpoBlocks, - Percentile: config.GpoPercentile, - Default: config.GasPrice, + gpoParams := config.GPO + if gpoParams.Default == nil { + gpoParams.Default = config.GasPrice } eth.ApiBackend.gpo = gasprice.NewOracle(eth.ApiBackend, gpoParams) return eth, nil } +func makeExtraData(extra []byte) []byte { + if len(extra) == 0 { + // create default extradata + extra, _ = rlp.EncodeToBytes([]interface{}{ + uint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch), + "geth", + runtime.Version(), + runtime.GOOS, + }) + } + if uint64(len(extra)) > params.MaximumExtraDataSize { + log.Warn("Miner extra data exceed limit", "extra", hexutil.Bytes(extra), "limit", params.MaximumExtraDataSize) + extra = nil + } + return extra +} + // CreateDB creates the chain database. func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Database, error) { db, err := ctx.OpenDatabase(name, config.DatabaseCache, config.DatabaseHandles) @@ -415,8 +394,3 @@ func (s *Ethereum) Stop() error { return nil } - -// This function will wait for a shutdown and resumes main thread execution -func (s *Ethereum) WaitForShutdown() { - <-s.shutdownChan -} diff --git a/eth/config.go b/eth/config.go new file mode 100644 index 000000000..9c3f8b0fb --- /dev/null +++ b/eth/config.go @@ -0,0 +1,117 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package eth + +import ( + "math/big" + "os" + "os/user" + "path/filepath" + "runtime" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/gasprice" + "github.com/ethereum/go-ethereum/params" +) + +// DefaultConfig contains default settings for use on the Ethereum main net. +var DefaultConfig = Config{ + SyncMode: downloader.FastSync, + EthashCachesInMem: 2, + EthashCachesOnDisk: 3, + EthashDatasetsInMem: 1, + EthashDatasetsOnDisk: 2, + NetworkId: 1, + LightPeers: 20, + DatabaseCache: 128, + GasPrice: big.NewInt(20 * params.Shannon), + + GPO: gasprice.Config{ + Blocks: 10, + Percentile: 50, + }, +} + +func init() { + home := os.Getenv("HOME") + if home == "" { + if user, err := user.Current(); err == nil { + home = user.HomeDir + } + } + if runtime.GOOS == "windows" { + DefaultConfig.EthashDatasetDir = filepath.Join(home, "AppData", "Ethash") + } else { + DefaultConfig.EthashDatasetDir = filepath.Join(home, ".ethash") + } +} + +//go:generate gencodec -type Config -field-override configMarshaling -formats toml -out gen_config.go + +type Config struct { + // The genesis block, which is inserted if the database is empty. + // If nil, the Ethereum main net block is used. 
+ Genesis *core.Genesis `toml:",omitempty"` + + // Protocol options + NetworkId int // Network ID to use for selecting peers to connect to + SyncMode downloader.SyncMode + + // Light client options + LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests + LightPeers int `toml:",omitempty"` // Maximum number of LES client peers + MaxPeers int `toml:"-"` // Maximum number of global peers + + // Database options + SkipBcVersionCheck bool `toml:"-"` + DatabaseHandles int `toml:"-"` + DatabaseCache int + + // Mining-related options + Etherbase common.Address `toml:",omitempty"` + MinerThreads int `toml:",omitempty"` + ExtraData []byte `toml:",omitempty"` + GasPrice *big.Int + + // Ethash options + EthashCacheDir string + EthashCachesInMem int + EthashCachesOnDisk int + EthashDatasetDir string + EthashDatasetsInMem int + EthashDatasetsOnDisk int + + // Gas Price Oracle options + GPO gasprice.Config + + // Enables tracking of SHA3 preimages in the VM + EnablePreimageRecording bool + + // Miscellaneous options + SolcPath string + DocRoot string `toml:"-"` + PowFake bool `toml:"-"` + PowTest bool `toml:"-"` + PowShared bool `toml:"-"` +} + +type configMarshaling struct { + ExtraData hexutil.Bytes +} diff --git a/eth/downloader/modes.go b/eth/downloader/modes.go index ae3c43888..8ecdf91f1 100644 --- a/eth/downloader/modes.go +++ b/eth/downloader/modes.go @@ -16,6 +16,8 @@ package downloader +import "fmt" + // SyncMode represents the synchronisation mode of the downloader. type SyncMode int @@ -25,6 +27,10 @@ const ( LightSync // Download only the headers and terminate afterwards ) +func (mode SyncMode) IsValid() bool { + return mode >= FullSync && mode <= LightSync +} + // String implements the stringer interface. func (mode SyncMode) String() string { switch mode { @@ -38,3 +44,30 @@ func (mode SyncMode) String() string { return "unknown" } } + +func (mode SyncMode) MarshalText() ([]byte, error) { + switch mode { + case FullSync: + return []byte("full"), nil + case FastSync: + return []byte("fast"), nil + case LightSync: + return []byte("light"), nil + default: + return nil, fmt.Errorf("unknown sync mode %d", mode) + } +} + +func (mode *SyncMode) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *mode = FullSync + case "fast": + *mode = FastSync + case "light": + *mode = LightSync + default: + return fmt.Errorf(`unknown sync mode %q, want "full", "fast" or "light"`, text) + } + return nil +} diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index bac048c88..05c25e644 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -33,7 +33,7 @@ var maxPrice = big.NewInt(500 * params.Shannon) type Config struct { Blocks int Percentile int - Default *big.Int + Default *big.Int `toml:",omitempty"` } // Oracle recommends gas prices based on the content of recent diff --git a/eth/gen_config.go b/eth/gen_config.go new file mode 100644 index 000000000..d34273e1c --- /dev/null +++ b/eth/gen_config.go @@ -0,0 +1,186 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. 
+ +package eth + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/gasprice" +) + +func (c Config) MarshalTOML() (interface{}, error) { + type Config struct { + Genesis *core.Genesis `toml:",omitempty"` + NetworkId int + SyncMode downloader.SyncMode + LightServ int `toml:",omitempty"` + LightPeers int `toml:",omitempty"` + MaxPeers int `toml:"-"` + SkipBcVersionCheck bool `toml:"-"` + DatabaseHandles int `toml:"-"` + DatabaseCache int + Etherbase common.Address `toml:",omitempty"` + MinerThreads int `toml:",omitempty"` + ExtraData hexutil.Bytes `toml:",omitempty"` + GasPrice *big.Int + EthashCacheDir string + EthashCachesInMem int + EthashCachesOnDisk int + EthashDatasetDir string + EthashDatasetsInMem int + EthashDatasetsOnDisk int + GPO gasprice.Config + EnablePreimageRecording bool + SolcPath string + DocRoot string `toml:"-"` + PowFake bool `toml:"-"` + PowTest bool `toml:"-"` + PowShared bool `toml:"-"` + } + var enc Config + enc.Genesis = c.Genesis + enc.NetworkId = c.NetworkId + enc.SyncMode = c.SyncMode + enc.LightServ = c.LightServ + enc.LightPeers = c.LightPeers + enc.MaxPeers = c.MaxPeers + enc.SkipBcVersionCheck = c.SkipBcVersionCheck + enc.DatabaseHandles = c.DatabaseHandles + enc.DatabaseCache = c.DatabaseCache + enc.Etherbase = c.Etherbase + enc.MinerThreads = c.MinerThreads + enc.ExtraData = c.ExtraData + enc.GasPrice = c.GasPrice + enc.EthashCacheDir = c.EthashCacheDir + enc.EthashCachesInMem = c.EthashCachesInMem + enc.EthashCachesOnDisk = c.EthashCachesOnDisk + enc.EthashDatasetDir = c.EthashDatasetDir + enc.EthashDatasetsInMem = c.EthashDatasetsInMem + enc.EthashDatasetsOnDisk = c.EthashDatasetsOnDisk + enc.GPO = c.GPO + enc.EnablePreimageRecording = c.EnablePreimageRecording + enc.SolcPath = c.SolcPath + enc.DocRoot = c.DocRoot + enc.PowFake = c.PowFake + enc.PowTest = c.PowTest + enc.PowShared = c.PowShared + return &enc, nil +} + +func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { + type Config struct { + Genesis *core.Genesis `toml:",omitempty"` + NetworkId *int + SyncMode *downloader.SyncMode + LightServ *int `toml:",omitempty"` + LightPeers *int `toml:",omitempty"` + MaxPeers *int `toml:"-"` + SkipBcVersionCheck *bool `toml:"-"` + DatabaseHandles *int `toml:"-"` + DatabaseCache *int + Etherbase *common.Address `toml:",omitempty"` + MinerThreads *int `toml:",omitempty"` + ExtraData hexutil.Bytes `toml:",omitempty"` + GasPrice *big.Int + EthashCacheDir *string + EthashCachesInMem *int + EthashCachesOnDisk *int + EthashDatasetDir *string + EthashDatasetsInMem *int + EthashDatasetsOnDisk *int + GPO *gasprice.Config + EnablePreimageRecording *bool + SolcPath *string + DocRoot *string `toml:"-"` + PowFake *bool `toml:"-"` + PowTest *bool `toml:"-"` + PowShared *bool `toml:"-"` + } + var dec Config + if err := unmarshal(&dec); err != nil { + return err + } + if dec.Genesis != nil { + c.Genesis = dec.Genesis + } + if dec.NetworkId != nil { + c.NetworkId = *dec.NetworkId + } + if dec.SyncMode != nil { + c.SyncMode = *dec.SyncMode + } + if dec.LightServ != nil { + c.LightServ = *dec.LightServ + } + if dec.LightPeers != nil { + c.LightPeers = *dec.LightPeers + } + if dec.MaxPeers != nil { + c.MaxPeers = *dec.MaxPeers + } + if dec.SkipBcVersionCheck != nil { + c.SkipBcVersionCheck = *dec.SkipBcVersionCheck + } + if dec.DatabaseHandles != nil { + 
c.DatabaseHandles = *dec.DatabaseHandles + } + if dec.DatabaseCache != nil { + c.DatabaseCache = *dec.DatabaseCache + } + if dec.Etherbase != nil { + c.Etherbase = *dec.Etherbase + } + if dec.MinerThreads != nil { + c.MinerThreads = *dec.MinerThreads + } + if dec.ExtraData != nil { + c.ExtraData = dec.ExtraData + } + if dec.GasPrice != nil { + c.GasPrice = dec.GasPrice + } + if dec.EthashCacheDir != nil { + c.EthashCacheDir = *dec.EthashCacheDir + } + if dec.EthashCachesInMem != nil { + c.EthashCachesInMem = *dec.EthashCachesInMem + } + if dec.EthashCachesOnDisk != nil { + c.EthashCachesOnDisk = *dec.EthashCachesOnDisk + } + if dec.EthashDatasetDir != nil { + c.EthashDatasetDir = *dec.EthashDatasetDir + } + if dec.EthashDatasetsInMem != nil { + c.EthashDatasetsInMem = *dec.EthashDatasetsInMem + } + if dec.EthashDatasetsOnDisk != nil { + c.EthashDatasetsOnDisk = *dec.EthashDatasetsOnDisk + } + if dec.GPO != nil { + c.GPO = *dec.GPO + } + if dec.EnablePreimageRecording != nil { + c.EnablePreimageRecording = *dec.EnablePreimageRecording + } + if dec.SolcPath != nil { + c.SolcPath = *dec.SolcPath + } + if dec.DocRoot != nil { + c.DocRoot = *dec.DocRoot + } + if dec.PowFake != nil { + c.PowFake = *dec.PowFake + } + if dec.PowTest != nil { + c.PowTest = *dec.PowTest + } + if dec.PowShared != nil { + c.PowShared = *dec.PowShared + } + return nil +} diff --git a/eth/handler.go b/eth/handler.go index 99c2c4b32..fb8a0fd57 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -96,7 +96,7 @@ type ProtocolManager struct { // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // with the ethereum network. -func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int, maxPeers int, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) { +func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId int, maxPeers int, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) { // Create the protocol manager with the base fields manager := &ProtocolManager{ networkId: networkId, @@ -113,18 +113,18 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int quitSync: make(chan struct{}), } // Figure out whether to allow fast sync or not - if fastSync && blockchain.CurrentBlock().NumberU64() > 0 { + if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 { log.Warn("Blockchain not empty, fast sync disabled") - fastSync = false + mode = downloader.FullSync } - if fastSync { + if mode == downloader.FastSync { manager.fastSync = uint32(1) } // Initiate a sub-protocol for every implemented version we can handle manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions)) for i, version := range ProtocolVersions { // Skip protocol version if incompatible with the mode of operation - if fastSync && version < eth63 { + if mode == downloader.FastSync && version < eth63 { continue } // Compatible; initialise the sub-protocol @@ -159,7 +159,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int return nil, errIncompatibleConfig } // Construct the different synchronisation mechanisms - manager.downloader = downloader.New(downloader.FullSync, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeaderByHash, + manager.downloader = 
downloader.New(mode, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeaderByHash, blockchain.GetBlockByHash, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, blockchain.GetTdByHash, blockchain.InsertHeaderChain, manager.blockchain.InsertChain, blockchain.InsertReceiptChain, blockchain.Rollback, manager.removePeer) diff --git a/eth/handler_test.go b/eth/handler_test.go index f85d730b6..413ed2bff 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -44,11 +44,11 @@ func TestProtocolCompatibility(t *testing.T) { // Define the compatibility chart tests := []struct { version uint - fastSync bool + mode downloader.SyncMode compatible bool }{ - {61, false, true}, {62, false, true}, {63, false, true}, - {61, true, false}, {62, true, false}, {63, true, true}, + {61, downloader.FullSync, true}, {62, downloader.FullSync, true}, {63, downloader.FullSync, true}, + {61, downloader.FastSync, false}, {62, downloader.FastSync, false}, {63, downloader.FastSync, true}, } // Make sure anything we screw up is restored backup := ProtocolVersions @@ -58,7 +58,7 @@ func TestProtocolCompatibility(t *testing.T) { for i, tt := range tests { ProtocolVersions = []uint{tt.version} - pm, err := newTestProtocolManager(tt.fastSync, 0, nil, nil) + pm, err := newTestProtocolManager(tt.mode, 0, nil, nil) if pm != nil { defer pm.Stop() } @@ -73,7 +73,7 @@ func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) } func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) } func testGetBlockHeaders(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxHashFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -232,7 +232,7 @@ func TestGetBlockBodies62(t *testing.T) { testGetBlockBodies(t, 62) } func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) } func testGetBlockBodies(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, false, downloader.MaxBlockFetch+15, nil, nil) + pm := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxBlockFetch+15, nil, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -339,7 +339,7 @@ func testGetNodeData(t *testing.T, protocol int) { } } // Assemble the test environment - pm := newTestProtocolManagerMust(t, false, 4, generator, nil) + pm := newTestProtocolManagerMust(t, downloader.FullSync, 4, generator, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -431,7 +431,7 @@ func testGetReceipt(t *testing.T, protocol int) { } } // Assemble the test environment - pm := newTestProtocolManagerMust(t, false, 4, generator, nil) + pm := newTestProtocolManagerMust(t, downloader.FullSync, 4, generator, nil) peer, _ := newTestPeer("peer", protocol, pm, true) defer peer.close() @@ -476,7 +476,7 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool genesis = gspec.MustCommit(db) blockchain, _ = core.NewBlockChain(db, config, pow, evmux, vm.Config{}) ) - pm, err := NewProtocolManager(config, false, NetworkId, 1000, evmux, new(testTxPool), pow, blockchain, db) + pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, 1000, evmux, new(testTxPool), pow, blockchain, db) if err != nil { t.Fatalf("failed to start test protocol manager: %v", err) } diff --git 
a/eth/helper_test.go b/eth/helper_test.go index a8c538e6c..21ac3724e 100644 --- a/eth/helper_test.go +++ b/eth/helper_test.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/p2p" @@ -48,7 +49,7 @@ var ( // newTestProtocolManager creates a new protocol manager for testing purposes, // with the given number of blocks already known, and potential notification // channels for different events. -func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) { +func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) { var ( evmux = new(event.TypeMux) engine = ethash.NewFaker() @@ -65,7 +66,7 @@ func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core panic(err) } - pm, err := NewProtocolManager(gspec.Config, fastSync, NetworkId, 1000, evmux, &testTxPool{added: newtx}, engine, blockchain, db) + pm, err := NewProtocolManager(gspec.Config, mode, DefaultConfig.NetworkId, 1000, evmux, &testTxPool{added: newtx}, engine, blockchain, db) if err != nil { return nil, err } @@ -77,8 +78,8 @@ func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core // with the given number of blocks already known, and potential notification // channels for different events. In case of an error, the constructor force- // fails the test. -func newTestProtocolManagerMust(t *testing.T, fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager { - pm, err := newTestProtocolManager(fastSync, blocks, generator, newtx) +func newTestProtocolManagerMust(t *testing.T, mode downloader.SyncMode, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) *ProtocolManager { + pm, err := newTestProtocolManager(mode, blocks, generator, newtx) if err != nil { t.Fatalf("Failed to create protocol manager: %v", err) } @@ -172,7 +173,7 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash) { msg := &statusData{ ProtocolVersion: uint32(p.version), - NetworkId: uint32(NetworkId), + NetworkId: uint32(DefaultConfig.NetworkId), TD: td, CurrentBlock: head, GenesisBlock: genesis, diff --git a/eth/protocol.go b/eth/protocol.go index 7d22b33de..40997da7a 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -41,10 +41,7 @@ var ProtocolVersions = []uint{eth63, eth62} // Number of implemented message corresponding to different protocol versions. 
var ProtocolLengths = []uint64{17, 8} -const ( - NetworkId = 1 - ProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message -) +const ProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message // eth protocol message codes const ( diff --git a/eth/protocol_test.go b/eth/protocol_test.go index 3c9a734df..74180bedd 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/rlp" ) @@ -40,7 +41,7 @@ func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) } func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) } func testStatusMsgErrors(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, false, 0, nil, nil) + pm := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil) td, currentBlock, genesis := pm.blockchain.Status() defer pm.Stop() @@ -54,7 +55,7 @@ func testStatusMsgErrors(t *testing.T, protocol int) { wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"), }, { - code: StatusMsg, data: statusData{10, NetworkId, td, currentBlock, genesis}, + code: StatusMsg, data: statusData{10, uint32(DefaultConfig.NetworkId), td, currentBlock, genesis}, wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", protocol), }, { @@ -62,7 +63,7 @@ func testStatusMsgErrors(t *testing.T, protocol int) { wantError: errResp(ErrNetworkIdMismatch, "999 (!= 1)"), }, { - code: StatusMsg, data: statusData{uint32(protocol), NetworkId, td, currentBlock, common.Hash{3}}, + code: StatusMsg, data: statusData{uint32(protocol), uint32(DefaultConfig.NetworkId), td, currentBlock, common.Hash{3}}, wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000 (!= %x)", genesis[:8]), }, } @@ -93,7 +94,7 @@ func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) } func testRecvTransactions(t *testing.T, protocol int) { txAdded := make(chan []*types.Transaction) - pm := newTestProtocolManagerMust(t, false, 0, nil, txAdded) + pm := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, txAdded) pm.acceptTxs = 1 // mark synced to accept transactions p, _ := newTestPeer("peer", protocol, pm, true) defer pm.Stop() @@ -120,7 +121,7 @@ func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) } func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) } func testSendTransactions(t *testing.T, protocol int) { - pm := newTestProtocolManagerMust(t, false, 0, nil, nil) + pm := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil) defer pm.Stop() // Fill the pool with big transactions. diff --git a/eth/sync_test.go b/eth/sync_test.go index 198ffaf27..9eaa1156f 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" ) @@ -29,12 +30,12 @@ import ( // imported into the blockchain. 
func TestFastSyncDisabling(t *testing.T) { // Create a pristine protocol manager, check that fast sync is left enabled - pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil) + pmEmpty := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil) if atomic.LoadUint32(&pmEmpty.fastSync) == 0 { t.Fatalf("fast sync disabled on pristine blockchain") } // Create a full protocol manager, check that fast sync gets disabled - pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil) + pmFull := newTestProtocolManagerMust(t, downloader.FastSync, 1024, nil, nil) if atomic.LoadUint32(&pmFull.fastSync) == 1 { t.Fatalf("fast sync not disabled on non-empty blockchain") } diff --git a/les/backend.go b/les/backend.go index 5670b77d2..184464f20 100644 --- a/les/backend.go +++ b/les/backend.go @@ -104,17 +104,17 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { } eth.txPool = light.NewTxPool(eth.chainConfig, eth.eventMux, eth.blockchain, eth.relay) - if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.LightMode, config.NetworkId, eth.eventMux, eth.engine, eth.blockchain, nil, chainDb, odr, relay); err != nil { + lightSync := config.SyncMode == downloader.LightSync + if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, lightSync, config.NetworkId, eth.eventMux, eth.engine, eth.blockchain, nil, chainDb, odr, relay); err != nil { return nil, err } relay.ps = eth.protocolManager.peers relay.reqDist = eth.protocolManager.reqDist eth.ApiBackend = &LesApiBackend{eth, nil} - gpoParams := gasprice.Config{ - Blocks: config.GpoBlocks, - Percentile: config.GpoPercentile, - Default: config.GasPrice, + gpoParams := config.GPO + if gpoParams.Default == nil { + gpoParams.Default = config.GasPrice } eth.ApiBackend.gpo = gasprice.NewOracle(eth.ApiBackend, gpoParams) return eth, nil diff --git a/mobile/geth.go b/mobile/geth.go index 86034df98..be04e4603 100644 --- a/mobile/geth.go +++ b/mobile/geth.go @@ -22,15 +22,16 @@ package geth import ( "encoding/json" "fmt" - "math/big" "path/filepath" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethstats" "github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/params" whisper "github.com/ethereum/go-ethereum/whisper/whisperv2" @@ -108,17 +109,19 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) { } // Create the empty networking stack nodeConf := &node.Config{ - Name: clientIdentifier, - Version: params.Version, - DataDir: datadir, - KeyStoreDir: filepath.Join(datadir, "keystore"), // Mobile should never use internal keystores! - NoDiscovery: true, - DiscoveryV5: true, - DiscoveryV5Addr: ":0", - BootstrapNodesV5: config.BootstrapNodes.nodes, - ListenAddr: ":0", - NAT: nat.Any(), - MaxPeers: config.MaxPeers, + Name: clientIdentifier, + Version: params.Version, + DataDir: datadir, + KeyStoreDir: filepath.Join(datadir, "keystore"), // Mobile should never use internal keystores! 
+ P2P: p2p.Config{ + NoDiscovery: true, + DiscoveryV5: true, + DiscoveryV5Addr: ":0", + BootstrapNodesV5: config.BootstrapNodes.nodes, + ListenAddr: ":0", + NAT: nat.Any(), + MaxPeers: config.MaxPeers, + }, } rawStack, err := node.New(nodeConf) if err != nil { @@ -142,20 +145,13 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) { } // Register the Ethereum protocol if requested if config.EthereumEnabled { - ethConf := ð.Config{ - Genesis: genesis, - LightMode: true, - DatabaseCache: config.EthereumDatabaseCache, - NetworkId: config.EthereumNetworkID, - GasPrice: new(big.Int).SetUint64(20 * params.Shannon), - GpoBlocks: 10, - GpoPercentile: 50, - EthashCacheDir: "ethash", - EthashCachesInMem: 2, - EthashCachesOnDisk: 3, - } + ethConf := eth.DefaultConfig + ethConf.Genesis = genesis + ethConf.SyncMode = downloader.LightSync + ethConf.NetworkId = config.EthereumNetworkID + ethConf.DatabaseCache = config.EthereumDatabaseCache if err := rawStack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - return les.New(ctx, ethConf) + return les.New(ctx, ðConf) }); err != nil { return nil, fmt.Errorf("ethereum init: %v", err) } diff --git a/node/config.go b/node/config.go index b060b05f2..7c17e707d 100644 --- a/node/config.go +++ b/node/config.go @@ -20,7 +20,6 @@ import ( "crypto/ecdsa" "fmt" "io/ioutil" - "net" "os" "path/filepath" "runtime" @@ -32,10 +31,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/discv5" - "github.com/ethereum/go-ethereum/p2p/nat" - "github.com/ethereum/go-ethereum/p2p/netutil" ) var ( @@ -53,14 +50,14 @@ type Config struct { // Name sets the instance name of the node. It must not contain the / character and is // used in the devp2p node identifier. The instance name of geth is "geth". If no // value is specified, the basename of the current executable is used. - Name string + Name string `toml:"-"` // UserIdent, if set, is used as an additional component in the devp2p node identifier. - UserIdent string + UserIdent string `toml:",omitempty"` // Version should be set to the version number of the program. It is used // in the devp2p node identifier. - Version string + Version string `toml:"-"` // DataDir is the file system folder the node should use for any data storage // requirements. The configured data directory will not be directly shared with @@ -69,6 +66,9 @@ type Config struct { // in memory. DataDir string + // Configuration of peer-to-peer networking. + P2P p2p.Config + // KeyStoreDir is the file system folder that contains private keys. The directory can // be specified as a relative path, in which case it is resolved relative to the // current directory. @@ -76,106 +76,55 @@ type Config struct { // If KeyStoreDir is empty, the default location is the "keystore" subdirectory of // DataDir. If DataDir is unspecified and KeyStoreDir is empty, an ephemeral directory // is created by New and destroyed when the node is stopped. - KeyStoreDir string + KeyStoreDir string `toml:",omitempty"` // UseLightweightKDF lowers the memory and CPU requirements of the key store // scrypt KDF at the expense of security. - UseLightweightKDF bool + UseLightweightKDF bool `toml:",omitempty"` // IPCPath is the requested location to place the IPC endpoint. 
If the path is // a simple file name, it is placed inside the data directory (or on the root // pipe path on Windows), whereas if it's a resolvable path name (absolute or // relative), then that specific path is enforced. An empty path disables IPC. - IPCPath string - - // This field should be a valid secp256k1 private key that will be used for both - // remote peer identification as well as network traffic encryption. If no key - // is configured, the preset one is loaded from the data dir, generating it if - // needed. - PrivateKey *ecdsa.PrivateKey - - // NoDiscovery specifies whether the peer discovery mechanism should be started - // or not. Disabling is usually useful for protocol debugging (manual topology). - NoDiscovery bool - - // DiscoveryV5 specifies whether the the new topic-discovery based V5 discovery - // protocol should be started or not. - DiscoveryV5 bool - - // Listener address for the V5 discovery protocol UDP traffic. - DiscoveryV5Addr string - - // Restrict communication to white listed IP networks. - // The whitelist only applies when non-nil. - NetRestrict *netutil.Netlist - - // BootstrapNodes used to establish connectivity with the rest of the network. - BootstrapNodes []*discover.Node - - // BootstrapNodesV5 used to establish connectivity with the rest of the network - // using the V5 discovery protocol. - BootstrapNodesV5 []*discv5.Node - - // Network interface address on which the node should listen for inbound peers. - ListenAddr string - - // If set to a non-nil value, the given NAT port mapper is used to make the - // listening port available to the Internet. - NAT nat.Interface - - // If Dialer is set to a non-nil value, the given Dialer is used to dial outbound - // peer connections. - Dialer *net.Dialer - - // If NoDial is true, the node will not dial any peers. - NoDial bool - - // MaxPeers is the maximum number of peers that can be connected. If this is - // set to zero, then only the configured static and trusted peers can connect. - MaxPeers int - - // MaxPendingPeers is the maximum number of peers that can be pending in the - // handshake phase, counted separately for inbound and outbound connections. - // Zero defaults to preset values. - MaxPendingPeers int + IPCPath string `toml:",omitempty"` // HTTPHost is the host interface on which to start the HTTP RPC server. If this // field is empty, no HTTP API endpoint will be started. - HTTPHost string + HTTPHost string `toml:",omitempty"` // HTTPPort is the TCP port number on which to start the HTTP RPC server. The // default zero value is/ valid and will pick a port number randomly (useful // for ephemeral nodes). - HTTPPort int + HTTPPort int `toml:",omitempty"` // HTTPCors is the Cross-Origin Resource Sharing header to send to requesting // clients. Please be aware that CORS is a browser enforced security, it's fully // useless for custom HTTP clients. - HTTPCors string + HTTPCors string `toml:",omitempty"` // HTTPModules is a list of API modules to expose via the HTTP RPC interface. // If the module list is empty, all RPC API endpoints designated public will be // exposed. - HTTPModules []string + HTTPModules []string `toml:",omitempty"` // WSHost is the host interface on which to start the websocket RPC server. If // this field is empty, no websocket API endpoint will be started. - WSHost string + WSHost string `toml:",omitempty"` // WSPort is the TCP port number on which to start the websocket RPC server. 
The // default zero value is/ valid and will pick a port number randomly (useful for // ephemeral nodes). - WSPort int + WSPort int `toml:",omitempty"` // WSOrigins is the list of domain to accept websocket requests from. Please be // aware that the server can only act upon the HTTP request the client sends and // cannot verify the validity of the request header. - WSOrigins string + WSOrigins string `toml:",omitempty"` // WSModules is a list of API modules to expose via the websocket RPC interface. // If the module list is empty, all RPC API endpoints designated public will be // exposed. - WSModules []string + WSModules []string `toml:",omitempty"` } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into @@ -326,8 +275,8 @@ func (c *Config) instanceDir() string { // data folder. If no key can be found, a new one is generated. func (c *Config) NodeKey() *ecdsa.PrivateKey { // Use any specifically configured key. - if c.PrivateKey != nil { - return c.PrivateKey + if c.P2P.PrivateKey != nil { + return c.P2P.PrivateKey } // Generate ephemeral key if no datadir is being used. if c.DataDir == "" { diff --git a/node/config_test.go b/node/config_test.go index c0eda72c2..b81d3d612 100644 --- a/node/config_test.go +++ b/node/config_test.go @@ -25,6 +25,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/p2p" ) // Tests that datadirs can be successfully created, be them manually configured @@ -109,7 +110,7 @@ func TestNodeKeyPersistency(t *testing.T) { if err != nil { t.Fatalf("failed to generate one-shot node key: %v", err) } - config := &Config{Name: "unit-test", DataDir: dir, PrivateKey: key} + config := &Config{Name: "unit-test", DataDir: dir, P2P: p2p.Config{PrivateKey: key}} config.NodeKey() if _, err := os.Stat(filepath.Join(keyfile)); err == nil { t.Fatalf("one-shot node key persisted to data directory") diff --git a/node/defaults.go b/node/defaults.go index bfe257c8e..d4e148683 100644 --- a/node/defaults.go +++ b/node/defaults.go @@ -21,16 +21,32 @@ import ( "os/user" "path/filepath" "runtime" + + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/nat" ) const ( - DefaultIPCSocket = "geth.ipc" // Default (relative) name of the IPC RPC socket - DefaultHTTPHost = "localhost" // Default host interface for the HTTP RPC server - DefaultHTTPPort = 8545 // Default TCP port for the HTTP RPC server - DefaultWSHost = "localhost" // Default host interface for the websocket RPC server - DefaultWSPort = 8546 // Default TCP port for the websocket RPC server + DefaultHTTPHost = "localhost" // Default host interface for the HTTP RPC server + DefaultHTTPPort = 8545 // Default TCP port for the HTTP RPC server + DefaultWSHost = "localhost" // Default host interface for the websocket RPC server + DefaultWSPort = 8546 // Default TCP port for the websocket RPC server ) +// DefaultConfig contains reasonable default settings. +var DefaultConfig = Config{ + DataDir: DefaultDataDir(), + HTTPPort: DefaultHTTPPort, + HTTPModules: []string{"net", "web3"}, + WSPort: DefaultWSPort, + WSModules: []string{"net", "web3"}, + P2P: p2p.Config{ + ListenAddr: ":30303", + MaxPeers: 25, + NAT: nat.Any(), + }, +} + // DefaultDataDir is the default data directory to use for the databases and other // persistence requirements. 
func DefaultDataDir() string { diff --git a/node/node.go b/node/node.go index afb676b7f..2ecff2308 100644 --- a/node/node.go +++ b/node/node.go @@ -153,24 +153,17 @@ func (n *Node) Start() error { // Initialize the p2p server. This creates the node key and // discovery databases. - n.serverConfig = p2p.Config{ - PrivateKey: n.config.NodeKey(), - Name: n.config.NodeName(), - Discovery: !n.config.NoDiscovery, - DiscoveryV5: n.config.DiscoveryV5, - DiscoveryV5Addr: n.config.DiscoveryV5Addr, - BootstrapNodes: n.config.BootstrapNodes, - BootstrapNodesV5: n.config.BootstrapNodesV5, - StaticNodes: n.config.StaticNodes(), - TrustedNodes: n.config.TrusterNodes(), - NodeDatabase: n.config.NodeDB(), - ListenAddr: n.config.ListenAddr, - NetRestrict: n.config.NetRestrict, - NAT: n.config.NAT, - Dialer: n.config.Dialer, - NoDial: n.config.NoDial, - MaxPeers: n.config.MaxPeers, - MaxPendingPeers: n.config.MaxPendingPeers, + n.serverConfig = n.config.P2P + n.serverConfig.PrivateKey = n.config.NodeKey() + n.serverConfig.Name = n.config.NodeName() + if n.serverConfig.StaticNodes == nil { + n.serverConfig.StaticNodes = n.config.StaticNodes() + } + if n.serverConfig.TrustedNodes == nil { + n.serverConfig.TrustedNodes = n.config.TrusterNodes() + } + if n.serverConfig.NodeDatabase == "" { + n.serverConfig.NodeDatabase = n.config.NodeDB() } running := &p2p.Server{Config: n.serverConfig} log.Info("Starting peer-to-peer node", "instance", n.serverConfig.Name) diff --git a/node/node_example_test.go b/node/node_example_test.go index d2872cf38..ee06f4065 100644 --- a/node/node_example_test.go +++ b/node/node_example_test.go @@ -22,7 +22,6 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/rpc" ) @@ -42,23 +41,8 @@ func (s *SampleService) Start(*p2p.Server) error { fmt.Println("Service starti func (s *SampleService) Stop() error { fmt.Println("Service stopping..."); return nil } func ExampleService() { - // Create a network node to run protocols with the default values. The below list - // is only used to display each of the configuration options. All of these could - // have been omitted if the default behavior is desired. - nodeConfig := &node.Config{ - DataDir: "", // Empty uses ephemeral storage - PrivateKey: nil, // Nil generates a node key on the fly - Name: "", // Any textual node name is allowed - NoDiscovery: false, // Can disable discovering remote nodes - BootstrapNodes: []*discover.Node{}, // List of bootstrap nodes to use - ListenAddr: ":0", // Network interface to listen on - NAT: nil, // UPnP port mapper to use for crossing firewalls - Dialer: nil, // Custom dialer to use for establishing peer connections - NoDial: false, // Can prevent this node from dialing out - MaxPeers: 0, // Number of peers to allow - MaxPendingPeers: 0, // Number of peers allowed to handshake concurrently - } - stack, err := node.New(nodeConfig) + // Create a network node to run protocols with the default values. 
+ stack, err := node.New(&node.Config{}) if err != nil { log.Fatalf("Failed to create network node: %v", err) } diff --git a/node/node_test.go b/node/node_test.go index 408d4cfcb..2880efa61 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -35,8 +35,8 @@ var ( func testNodeConfig() *Config { return &Config{ - PrivateKey: testNodeKey, - Name: "test node", + Name: "test node", + P2P: p2p.Config{PrivateKey: testNodeKey}, } } diff --git a/p2p/discover/node.go b/p2p/discover/node.go index 6a7ab814e..d9cbd9448 100644 --- a/p2p/discover/node.go +++ b/p2p/discover/node.go @@ -207,6 +207,20 @@ func MustParseNode(rawurl string) *Node { return n } +// MarshalText implements encoding.TextMarshaler. +func (n *Node) MarshalText() ([]byte, error) { + return []byte(n.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (n *Node) UnmarshalText(text []byte) error { + dec, err := ParseNode(string(text)) + if err == nil { + *n = *dec + } + return err +} + // NodeID is a unique identifier for each node. // The node identifier is a marshaled elliptic curve public key. type NodeID [NodeIDBits / 8]byte diff --git a/p2p/discv5/node.go b/p2p/discv5/node.go index c99b4da14..2db7a508f 100644 --- a/p2p/discv5/node.go +++ b/p2p/discv5/node.go @@ -215,6 +215,20 @@ func MustParseNode(rawurl string) *Node { return n } +// MarshalText implements encoding.TextMarshaler. +func (n *Node) MarshalText() ([]byte, error) { + return []byte(n.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (n *Node) UnmarshalText(text []byte) error { + dec, err := ParseNode(string(text)) + if err == nil { + *n = *dec + } + return err +} + // type nodeQueue []*Node // // // pushNew adds n to the end if it is not present. diff --git a/p2p/netutil/net.go b/p2p/netutil/net.go index 3c3715788..f6005afd2 100644 --- a/p2p/netutil/net.go +++ b/p2p/netutil/net.go @@ -84,6 +84,31 @@ func ParseNetlist(s string) (*Netlist, error) { return &l, nil } +// MarshalTOML implements toml.MarshalerRec. +func (l Netlist) MarshalTOML() interface{} { + list := make([]string, 0, len(l)) + for _, net := range l { + list = append(list, net.String()) + } + return list +} + +// UnmarshalTOML implements toml.UnmarshalerRec. +func (l *Netlist) UnmarshalTOML(fn func(interface{}) error) error { + var masks []string + if err := fn(&masks); err != nil { + return err + } + for _, mask := range masks { + _, n, err := net.ParseCIDR(mask) + if err != nil { + return err + } + *l = append(*l, *n) + } + return nil +} + // Add parses a CIDR mask and appends it to the list. It panics for invalid masks and is // intended to be used for setting up static lists. func (l *Netlist) Add(cidr string) { diff --git a/p2p/server.go b/p2p/server.go index b2b8c9762..d7909d53a 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -58,7 +58,7 @@ var errServerStopped = errors.New("server stopped") // Config holds Server options. type Config struct { // This field must be set to a valid secp256k1 private key. - PrivateKey *ecdsa.PrivateKey + PrivateKey *ecdsa.PrivateKey `toml:"-"` // MaxPeers is the maximum number of peers that can be // connected. It must be greater than zero. @@ -67,22 +67,22 @@ type Config struct { // MaxPendingPeers is the maximum number of peers that can be pending in the // handshake phase, counted separately for inbound and outbound connections. // Zero defaults to preset values. 
- MaxPendingPeers int + MaxPendingPeers int `toml:",omitempty"` - // Discovery specifies whether the peer discovery mechanism should be started - // or not. Disabling is usually useful for protocol debugging (manual topology). - Discovery bool + // NoDiscovery can be used to disable the peer discovery mechanism. + // Disabling is useful for protocol debugging (manual topology). + NoDiscovery bool // DiscoveryV5 specifies whether the the new topic-discovery based V5 discovery // protocol should be started or not. - DiscoveryV5 bool + DiscoveryV5 bool `toml:",omitempty"` // Listener address for the V5 discovery protocol UDP traffic. - DiscoveryV5Addr string + DiscoveryV5Addr string `toml:",omitempty"` // Name sets the node name of this server. // Use common.MakeName to create a name that follows existing conventions. - Name string + Name string `toml:"-"` // BootstrapNodes are used to establish connectivity // with the rest of the network. @@ -91,7 +91,7 @@ type Config struct { // BootstrapNodesV5 are used to establish connectivity // with the rest of the network using the V5 discovery // protocol. - BootstrapNodesV5 []*discv5.Node + BootstrapNodesV5 []*discv5.Node `toml:",omitempty"` // Static nodes are used as pre-configured connections which are always // maintained and re-connected on disconnects. @@ -104,16 +104,16 @@ type Config struct { // Connectivity can be restricted to certain IP networks. // If this option is set to a non-nil value, only hosts which match one of the // IP networks contained in the list are considered. - NetRestrict *netutil.Netlist + NetRestrict *netutil.Netlist `toml:",omitempty"` // NodeDatabase is the path to the database containing the previously seen // live nodes in the network. - NodeDatabase string + NodeDatabase string `toml:",omitempty"` // Protocols should contain the protocols supported // by the server. Matching protocols are launched for // each peer. - Protocols []Protocol + Protocols []Protocol `toml:"-"` // If ListenAddr is set to a non-nil address, the server // will listen for incoming connections. @@ -126,14 +126,14 @@ type Config struct { // If set to a non-nil value, the given NAT port mapper // is used to make the listening port available to the // Internet. - NAT nat.Interface + NAT nat.Interface `toml:",omitempty"` // If Dialer is set to a non-nil value, the given Dialer // is used to dial outbound peer connections. - Dialer *net.Dialer + Dialer *net.Dialer `toml:"-"` // If NoDial is true, the server will not dial any peers. - NoDial bool + NoDial bool `toml:",omitempty"` } // Server manages all peer connections. 
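With the peer-to-peer settings now carried as a nested p2p.Config inside node.Config, and discovery running unless NoDiscovery is set, an embedding program or command-line front end can start from node.DefaultConfig and override only what it needs. A minimal sketch, not part of this change; the data directory path is a made-up example:

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/node"
)

func main() {
	cfg := node.DefaultConfig         // ListenAddr ":30303", MaxPeers 25, NAT nat.Any() per node/defaults.go above
	cfg.DataDir = "/tmp/example-node" // hypothetical path, purely for illustration
	cfg.P2P.NoDiscovery = true        // discovery is on by default; opt out explicitly, e.g. for manual topologies
	stack, err := node.New(&cfg)
	if err != nil {
		log.Fatalf("failed to create node: %v", err)
	}
	if err := stack.Start(); err != nil {
		log.Fatalf("failed to start node: %v", err)
	}
	stack.Stop()
}

Leaving NoDiscovery at its false zero value keeps discovery running, which matches the !srv.NoDiscovery check in the Server.Start hunk that follows.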
@@ -370,7 +370,7 @@ func (srv *Server) Start() (err error) { srv.peerOpDone = make(chan struct{}) // node table - if srv.Discovery { + if !srv.NoDiscovery { ntab, err := discover.ListenUDP(srv.PrivateKey, srv.ListenAddr, srv.NAT, srv.NodeDatabase, srv.NetRestrict) if err != nil { return err @@ -393,7 +393,7 @@ func (srv *Server) Start() (err error) { } dynPeers := (srv.MaxPeers + 1) / 2 - if !srv.Discovery { + if srv.NoDiscovery { dynPeers = 0 } dialer := newDialState(srv.StaticNodes, srv.BootstrapNodes, srv.ntab, dynPeers, srv.NetRestrict) diff --git a/params/version.go b/params/version.go index fef360473..6a0eb3506 100644 --- a/params/version.go +++ b/params/version.go @@ -16,7 +16,9 @@ package params -import "fmt" +import ( + "fmt" +) const ( VersionMajor = 1 // Major version component of the current release @@ -33,3 +35,11 @@ var Version = func() string { } return v }() + +func VersionWithCommit(gitCommit string) string { + vsn := Version + if len(gitCommit) >= 8 { + vsn += "-" + gitCommit[:8] + } + return vsn +} diff --git a/rpc/server.go b/rpc/server.go index 8627b5592..78df37e52 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -31,9 +31,7 @@ import ( const ( notificationBufferSize = 10000 // max buffered notifications before codec is closed - MetadataApi = "rpc" - DefaultIPCApis = "admin,debug,eth,miner,net,personal,shh,txpool,web3" - DefaultHTTPApis = "eth,net,web3" + MetadataApi = "rpc" ) // CodecOption specifies which type of messages this codec supports diff --git a/vendor/github.com/naoina/go-stringutil/LICENSE b/vendor/github.com/naoina/go-stringutil/LICENSE new file mode 100644 index 000000000..0fff1c58b --- /dev/null +++ b/vendor/github.com/naoina/go-stringutil/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Naoya Inada <naoina@kuune.org> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
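The vendored naoina/go-stringutil package that follows is pulled in presumably as a dependency of the TOML support added by this change; it provides camel-case/snake-case name helpers. A quick illustration of the exported converters defined in strings.go further below (a sketch, not part of the diff):

package main

import (
	"fmt"

	stringutil "github.com/naoina/go-stringutil"
)

func main() {
	// Struct-field-name <-> key-style conversions.
	fmt.Println(stringutil.ToSnakeCase("NetworkId"))       // network_id
	fmt.Println(stringutil.ToUpperCamelCase("network_id")) // NetworkID ("ID" is in the common-initialism table)
}

ToSnakeCaseASCII and ToUpperCamelCaseASCII, also defined below, are byte-only fast paths of the same conversions for ASCII input.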
diff --git a/vendor/github.com/naoina/go-stringutil/README.md b/vendor/github.com/naoina/go-stringutil/README.md new file mode 100644 index 000000000..ecf7a5fae --- /dev/null +++ b/vendor/github.com/naoina/go-stringutil/README.md @@ -0,0 +1,13 @@ +# stringutil [![Build Status](https://travis-ci.org/naoina/go-stringutil.svg?branch=master)](https://travis-ci.org/naoina/go-stringutil) + +## Installation + + go get -u github.com/naoina/go-stringutil + +## Documentation + +See https://godoc.org/github.com/naoina/go-stringutil + +## License + +MIT diff --git a/vendor/github.com/naoina/go-stringutil/da.go b/vendor/github.com/naoina/go-stringutil/da.go new file mode 100644 index 000000000..8fe651659 --- /dev/null +++ b/vendor/github.com/naoina/go-stringutil/da.go @@ -0,0 +1,253 @@ +package stringutil + +import ( + "fmt" + "sort" + "unicode/utf8" +) + +const ( + terminationCharacter = '#' +) + +func mustDoubleArray(da *doubleArray, err error) *doubleArray { + if err != nil { + panic(err) + } + return da +} + +func (da *doubleArray) Build(keys []string) error { + records := makeRecords(keys) + if err := da.build(records, 1, 0, make(map[int]struct{})); err != nil { + return err + } + return nil +} + +type doubleArray struct { + bc []baseCheck + node []int +} + +func newDoubleArray(keys []string) (*doubleArray, error) { + da := &doubleArray{ + bc: []baseCheck{0}, + node: []int{-1}, // A start index is adjusting to 1 because 0 will be used as a mark of non-existent node. + } + if err := da.Build(keys); err != nil { + return nil, err + } + return da, nil +} + +// baseCheck contains BASE, CHECK and Extra flags. +// From the top, 22bits of BASE, 2bits of Extra flags and 8bits of CHECK. +// +// BASE (22bit) | Extra flags (2bit) | CHECK (8bit) +// |----------------------|--|--------| +// 32 10 8 0 +type baseCheck uint32 + +func (bc baseCheck) Base() int { + return int(bc >> 10) +} + +func (bc *baseCheck) SetBase(base int) { + *bc |= baseCheck(base) << 10 +} + +func (bc baseCheck) Check() byte { + return byte(bc) +} + +func (bc *baseCheck) SetCheck(check byte) { + *bc |= baseCheck(check) +} + +func (bc baseCheck) IsEmpty() bool { + return bc&0xfffffcff == 0 +} + +func (da *doubleArray) Lookup(path string) (length int) { + idx := 1 + tmpIdx := idx + for i := 0; i < len(path); i++ { + c := path[i] + tmpIdx = da.nextIndex(da.bc[tmpIdx].Base(), c) + if tmpIdx >= len(da.bc) || da.bc[tmpIdx].Check() != c { + break + } + idx = tmpIdx + } + if next := da.nextIndex(da.bc[idx].Base(), terminationCharacter); next < len(da.bc) && da.bc[next].Check() == terminationCharacter { + return da.node[da.bc[next].Base()] + } + return -1 +} + +func (da *doubleArray) LookupByBytes(path []byte) (length int) { + idx := 1 + tmpIdx := idx + for i := 0; i < len(path); i++ { + c := path[i] + tmpIdx = da.nextIndex(da.bc[tmpIdx].Base(), c) + if tmpIdx >= len(da.bc) || da.bc[tmpIdx].Check() != c { + break + } + idx = tmpIdx + } + if next := da.nextIndex(da.bc[idx].Base(), terminationCharacter); next < len(da.bc) && da.bc[next].Check() == terminationCharacter { + return da.node[da.bc[next].Base()] + } + return -1 +} + +func (da *doubleArray) build(srcs []record, idx, depth int, usedBase map[int]struct{}) error { + sort.Stable(recordSlice(srcs)) + base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase) + if err != nil { + return err + } + if leaf != nil { + da.bc[idx].SetBase(len(da.node)) + da.node = append(da.node, leaf.value) + } + for _, sib := range siblings { + da.setCheck(da.nextIndex(base, sib.c), sib.c) + } + for _, sib 
:= range siblings { + if err := da.build(srcs[sib.start:sib.end], da.nextIndex(base, sib.c), depth+1, usedBase); err != nil { + return err + } + } + return nil +} + +func (da *doubleArray) setBase(i, base int) { + da.bc[i].SetBase(base) +} + +func (da *doubleArray) setCheck(i int, check byte) { + da.bc[i].SetCheck(check) +} + +func (da *doubleArray) findEmptyIndex(start int) int { + i := start + for ; i < len(da.bc); i++ { + if da.bc[i].IsEmpty() { + break + } + } + return i +} + +// findBase returns good BASE. +func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) { + for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) { + base = da.nextIndex(idx, firstChar) + if _, used := usedBase[base]; used { + continue + } + i := 0 + for ; i < len(siblings); i++ { + next := da.nextIndex(base, siblings[i].c) + if len(da.bc) <= next { + da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...) + } + if !da.bc[next].IsEmpty() { + break + } + } + if i == len(siblings) { + break + } + } + usedBase[base] = struct{}{} + return base +} + +func (da *doubleArray) arrange(records []record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) { + siblings, leaf, err = makeSiblings(records, depth) + if err != nil { + return -1, nil, nil, err + } + if len(siblings) < 1 { + return -1, nil, leaf, nil + } + base = da.findBase(siblings, idx, usedBase) + da.setBase(idx, base) + return base, siblings, leaf, err +} + +type sibling struct { + start int + end int + c byte +} + +func (da *doubleArray) nextIndex(base int, c byte) int { + return base ^ int(c) +} + +func makeSiblings(records []record, depth int) (sib []sibling, leaf *record, err error) { + var ( + pc byte + n int + ) + for i, r := range records { + if len(r.key) <= depth { + leaf = &r + continue + } + c := r.key[depth] + switch { + case pc < c: + sib = append(sib, sibling{start: i, c: c}) + case pc == c: + continue + default: + return nil, nil, fmt.Errorf("stringutil: BUG: records hasn't been sorted") + } + if n > 0 { + sib[n-1].end = i + } + pc = c + n++ + } + if n == 0 { + return nil, leaf, nil + } + sib[n-1].end = len(records) + return sib, leaf, nil +} + +type record struct { + key string + value int +} + +func makeRecords(srcs []string) (records []record) { + termChar := string(terminationCharacter) + for _, s := range srcs { + records = append(records, record{ + key: string(s + termChar), + value: utf8.RuneCountInString(s), + }) + } + return records +} + +type recordSlice []record + +func (rs recordSlice) Len() int { + return len(rs) +} + +func (rs recordSlice) Less(i, j int) bool { + return rs[i].key < rs[j].key +} + +func (rs recordSlice) Swap(i, j int) { + rs[i], rs[j] = rs[j], rs[i] +} diff --git a/vendor/github.com/naoina/go-stringutil/strings.go b/vendor/github.com/naoina/go-stringutil/strings.go new file mode 100644 index 000000000..881ca2c8f --- /dev/null +++ b/vendor/github.com/naoina/go-stringutil/strings.go @@ -0,0 +1,320 @@ +package stringutil + +import ( + "sync" + "unicode" + "unicode/utf8" +) + +var ( + mu sync.Mutex + + // Based on https://github.com/golang/lint/blob/32a87160691b3c96046c0c678fe57c5bef761456/lint.go#L702 + commonInitialismMap = map[string]struct{}{ + "API": struct{}{}, + "ASCII": struct{}{}, + "CPU": struct{}{}, + "CSRF": struct{}{}, + "CSS": struct{}{}, + "DNS": struct{}{}, + "EOF": struct{}{}, + "GUID": struct{}{}, + "HTML": struct{}{}, + "HTTP": struct{}{}, + "HTTPS": struct{}{}, + "ID": 
struct{}{}, + "IP": struct{}{}, + "JSON": struct{}{}, + "LHS": struct{}{}, + "QPS": struct{}{}, + "RAM": struct{}{}, + "RHS": struct{}{}, + "RPC": struct{}{}, + "SLA": struct{}{}, + "SMTP": struct{}{}, + "SQL": struct{}{}, + "SSH": struct{}{}, + "TCP": struct{}{}, + "TLS": struct{}{}, + "TTL": struct{}{}, + "UDP": struct{}{}, + "UI": struct{}{}, + "UID": struct{}{}, + "UUID": struct{}{}, + "URI": struct{}{}, + "URL": struct{}{}, + "UTF8": struct{}{}, + "VM": struct{}{}, + "XML": struct{}{}, + "XSRF": struct{}{}, + "XSS": struct{}{}, + } + commonInitialisms = keys(commonInitialismMap) + commonInitialism = mustDoubleArray(newDoubleArray(commonInitialisms)) + longestLen = longestLength(commonInitialisms) + shortestLen = shortestLength(commonInitialisms, longestLen) +) + +// ToUpperCamelCase returns a copy of the string s with all Unicode letters mapped to their camel case. +// It will convert to upper case previous letter of '_' and first letter, and remove letter of '_'. +func ToUpperCamelCase(s string) string { + if s == "" { + return "" + } + upper := true + start := 0 + result := make([]byte, 0, len(s)) + var runeBuf [utf8.UTFMax]byte + var initialism []byte + for _, c := range s { + if c == '_' { + upper = true + candidate := string(result[start:]) + initialism = initialism[:0] + for _, r := range candidate { + if r < utf8.RuneSelf { + initialism = append(initialism, toUpperASCII(byte(r))) + } else { + n := utf8.EncodeRune(runeBuf[:], unicode.ToUpper(r)) + initialism = append(initialism, runeBuf[:n]...) + } + } + if length := commonInitialism.LookupByBytes(initialism); length > 0 { + result = append(result[:start], initialism...) + } + start = len(result) + continue + } + if upper { + if c < utf8.RuneSelf { + result = append(result, toUpperASCII(byte(c))) + } else { + n := utf8.EncodeRune(runeBuf[:], unicode.ToUpper(c)) + result = append(result, runeBuf[:n]...) + } + upper = false + continue + } + if c < utf8.RuneSelf { + result = append(result, byte(c)) + } else { + n := utf8.EncodeRune(runeBuf[:], c) + result = append(result, runeBuf[:n]...) + } + } + candidate := string(result[start:]) + initialism = initialism[:0] + for _, r := range candidate { + if r < utf8.RuneSelf { + initialism = append(initialism, toUpperASCII(byte(r))) + } else { + n := utf8.EncodeRune(runeBuf[:], unicode.ToUpper(r)) + initialism = append(initialism, runeBuf[:n]...) + } + } + if length := commonInitialism.LookupByBytes(initialism); length > 0 { + result = append(result[:start], initialism...) + } + return string(result) +} + +// ToUpperCamelCaseASCII is similar to ToUpperCamelCase, but optimized for +// only the ASCII characters. +// ToUpperCamelCaseASCII is faster than ToUpperCamelCase, but doesn't work if +// contains non-ASCII characters. +func ToUpperCamelCaseASCII(s string) string { + if s == "" { + return "" + } + upper := true + start := 0 + result := make([]byte, 0, len(s)) + var initialism []byte + for i := 0; i < len(s); i++ { + c := s[i] + if c == '_' { + upper = true + candidate := result[start:] + initialism = initialism[:0] + for _, b := range candidate { + initialism = append(initialism, toUpperASCII(b)) + } + if length := commonInitialism.LookupByBytes(initialism); length > 0 { + result = append(result[:start], initialism...) 
+ } + start = len(result) + continue + } + if upper { + result = append(result, toUpperASCII(c)) + upper = false + continue + } + result = append(result, c) + } + candidate := result[start:] + initialism = initialism[:0] + for _, b := range candidate { + initialism = append(initialism, toUpperASCII(b)) + } + if length := commonInitialism.LookupByBytes(initialism); length > 0 { + result = append(result[:start], initialism...) + } + return string(result) +} + +// ToSnakeCase returns a copy of the string s with all Unicode letters mapped to their snake case. +// It will insert letter of '_' at position of previous letter of uppercase and all +// letters convert to lower case. +// ToSnakeCase does not insert '_' letter into a common initialism word like ID, URL and so on. +func ToSnakeCase(s string) string { + if s == "" { + return "" + } + result := make([]byte, 0, len(s)) + var runeBuf [utf8.UTFMax]byte + var j, skipCount int + for i, c := range s { + if i < skipCount { + continue + } + if unicode.IsUpper(c) { + if i != 0 { + result = append(result, '_') + } + next := nextIndex(j, len(s)) + if length := commonInitialism.Lookup(s[j:next]); length > 0 { + for _, r := range s[j : j+length] { + if r < utf8.RuneSelf { + result = append(result, toLowerASCII(byte(r))) + } else { + n := utf8.EncodeRune(runeBuf[:], unicode.ToLower(r)) + result = append(result, runeBuf[:n]...) + } + } + j += length - 1 + skipCount = i + length + continue + } + } + if c < utf8.RuneSelf { + result = append(result, toLowerASCII(byte(c))) + } else { + n := utf8.EncodeRune(runeBuf[:], unicode.ToLower(c)) + result = append(result, runeBuf[:n]...) + } + j++ + } + return string(result) +} + +// ToSnakeCaseASCII is similar to ToSnakeCase, but optimized for only the ASCII +// characters. +// ToSnakeCaseASCII is faster than ToSnakeCase, but doesn't work correctly if +// contains non-ASCII characters. +func ToSnakeCaseASCII(s string) string { + if s == "" { + return "" + } + result := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + c := s[i] + if isUpperASCII(c) { + if i != 0 { + result = append(result, '_') + } + if k := i + shortestLen - 1; k < len(s) && isUpperASCII(s[k]) { + if length := commonInitialism.Lookup(s[i:nextIndex(i, len(s))]); length > 0 { + for j, buf := 0, s[i:i+length]; j < len(buf); j++ { + result = append(result, toLowerASCII(buf[j])) + } + i += length - 1 + continue + } + } + } + result = append(result, toLowerASCII(c)) + } + return string(result) +} + +// AddCommonInitialism adds ss to list of common initialisms. +func AddCommonInitialism(ss ...string) { + mu.Lock() + defer mu.Unlock() + for _, s := range ss { + commonInitialismMap[s] = struct{}{} + } + commonInitialisms = keys(commonInitialismMap) + commonInitialism = mustDoubleArray(newDoubleArray(commonInitialisms)) + longestLen = longestLength(commonInitialisms) + shortestLen = shortestLength(commonInitialisms, longestLen) +} + +// DelCommonInitialism deletes ss from list of common initialisms. 
+func DelCommonInitialism(ss ...string) { + mu.Lock() + defer mu.Unlock() + for _, s := range ss { + delete(commonInitialismMap, s) + } + commonInitialisms = keys(commonInitialismMap) + commonInitialism = mustDoubleArray(newDoubleArray(commonInitialisms)) + longestLen = longestLength(commonInitialisms) + shortestLen = shortestLength(commonInitialisms, longestLen) +} + +func isUpperASCII(c byte) bool { + return 'A' <= c && c <= 'Z' +} + +func isLowerASCII(c byte) bool { + return 'a' <= c && c <= 'z' +} + +func toUpperASCII(c byte) byte { + if isLowerASCII(c) { + return c - ('a' - 'A') + } + return c +} + +func toLowerASCII(c byte) byte { + if isUpperASCII(c) { + return c + 'a' - 'A' + } + return c +} + +func nextIndex(i, maxlen int) int { + if n := i + longestLen; n < maxlen { + return n + } + return maxlen +} + +func keys(m map[string]struct{}) []string { + result := make([]string, 0, len(m)) + for k := range m { + result = append(result, k) + } + return result +} + +func shortestLength(strs []string, shortest int) int { + for _, s := range strs { + if candidate := utf8.RuneCountInString(s); candidate < shortest { + shortest = candidate + } + } + return shortest +} + +func longestLength(strs []string) (longest int) { + for _, s := range strs { + if candidate := utf8.RuneCountInString(s); candidate > longest { + longest = candidate + } + } + return longest +} diff --git a/vendor/github.com/naoina/toml/LICENSE b/vendor/github.com/naoina/toml/LICENSE new file mode 100644 index 000000000..e65039ad8 --- /dev/null +++ b/vendor/github.com/naoina/toml/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Naoya Inada <naoina@kuune.org> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/naoina/toml/README.md b/vendor/github.com/naoina/toml/README.md new file mode 100644 index 000000000..1c0143348 --- /dev/null +++ b/vendor/github.com/naoina/toml/README.md @@ -0,0 +1,392 @@ +# TOML parser and encoder library for Golang [![Build Status](https://travis-ci.org/naoina/toml.png?branch=master)](https://travis-ci.org/naoina/toml) + +[TOML](https://github.com/toml-lang/toml) parser and encoder library for [Golang](http://golang.org/). + +This library is compatible with TOML version [v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md). + +## Installation + + go get -u github.com/naoina/toml + +## Usage + +The following TOML save as `example.toml`. + +```toml +# This is a TOML document. Boom. 
+
+title = "TOML Example"
+
+[owner]
+name = "Lance Uppercut"
+dob = 1979-05-27T07:32:00-08:00 # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ]
+
+# Line breaks are OK when inside arrays
+hosts = [
+  "alpha",
+  "omega"
+]
+```
+
+The above TOML is mapped to the `tomlConfig` struct below using `toml.NewDecoder(...).Decode` (or `toml.Unmarshal`).
+
+```go
+package main
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"time"
+
+	"github.com/naoina/toml"
+)
+
+type tomlConfig struct {
+	Title string
+	Owner struct {
+		Name string
+		Dob  time.Time
+	}
+	Database struct {
+		Server        string
+		Ports         []int
+		ConnectionMax uint
+		Enabled       bool
+	}
+	Servers map[string]ServerInfo
+	Clients struct {
+		Data  [][]interface{}
+		Hosts []string
+	}
+}
+
+type ServerInfo struct {
+	IP net.IP
+	DC string
+}
+
+func main() {
+	f, err := os.Open("example.toml")
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+	var config tomlConfig
+	if err := toml.NewDecoder(f).Decode(&config); err != nil {
+		panic(err)
+	}
+
+	// then to use the unmarshaled config...
+	fmt.Println("IP of server 'alpha':", config.Servers["alpha"].IP)
+}
+```
+
+## Mappings
+
+A TOML key and value are mapped to the corresponding struct field.
+The struct fields used for mapping must be exported.
+
+The key mapping rules are as follows:
+
+#### Exact matching
+
+```toml
+timeout_seconds = 256
+```
+
+```go
+type Config struct {
+	Timeout_seconds int
+}
+```
+
+#### Camelcase matching
+
+```toml
+server_name = "srv1"
+```
+
+```go
+type Config struct {
+	ServerName string
+}
+```
+
+#### Uppercase matching
+
+```toml
+ip = "10.0.0.1"
+```
+
+```go
+type Config struct {
+	IP string
+}
+```
+
+See the following examples for the value mappings.
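+
+For illustration, the three key-matching rules can be combined in a single struct. The sketch below is not part of the upstream file; it assumes the default configuration and uses only `toml.Unmarshal` from this package.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/naoina/toml"
+)
+
+// Config exercises exact, camelcase and uppercase key matching.
+type Config struct {
+	Timeout_seconds int    // exact matching of "timeout_seconds"
+	ServerName      string // camelcase matching of "server_name"
+	IP              string // uppercase (initialism) matching of "ip"
+}
+
+func main() {
+	data := []byte(`
+timeout_seconds = 256
+server_name = "srv1"
+ip = "10.0.0.1"
+`)
+	var c Config
+	if err := toml.Unmarshal(data, &c); err != nil {
+		panic(err)
+	}
+	fmt.Println(c.Timeout_seconds, c.ServerName, c.IP) // 256 srv1 10.0.0.1
+}
+```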
+
+### String
+
+```toml
+val = "string"
+```
+
+```go
+type Config struct {
+	Val string
+}
+```
+
+### Integer
+
+```toml
+val = 100
+```
+
+```go
+type Config struct {
+	Val int
+}
+```
+
+The following types can be used:
+
+* int8 (from `-128` to `127`)
+* int16 (from `-32768` to `32767`)
+* int32 (from `-2147483648` to `2147483647`)
+* int64 (from `-9223372036854775808` to `9223372036854775807`)
+* int (same as `int32` on 32bit environment, or `int64` on 64bit environment)
+* uint8 (from `0` to `255`)
+* uint16 (from `0` to `65535`)
+* uint32 (from `0` to `4294967295`)
+* uint64 (from `0` to `18446744073709551615`)
+* uint (same as `uint32` on 32bit environment, or `uint64` on 64bit environment)
+
+### Float
+
+```toml
+val = 3.1415
+```
+
+```go
+type Config struct {
+	Val float32
+}
+```
+
+The following types can be used:
+
+* float32
+* float64
+
+### Boolean
+
+```toml
+val = true
+```
+
+```go
+type Config struct {
+	Val bool
+}
+```
+
+### Datetime
+
+```toml
+val = 2014-09-28T21:27:39Z
+```
+
+```go
+type Config struct {
+	Val time.Time
+}
+```
+
+### Array
+
+```toml
+val = ["a", "b", "c"]
+```
+
+```go
+type Config struct {
+	Val []string
+}
+```
+
+The following examples can also all be mapped:
+
+```toml
+val1 = [1, 2, 3]
+val2 = [["a", "b"], ["c", "d"]]
+val3 = [[1, 2, 3], ["a", "b", "c"]]
+val4 = [[1, 2, 3], [["a", "b"], [true, false]]]
+```
+
+```go
+type Config struct {
+	Val1 []int
+	Val2 [][]string
+	Val3 [][]interface{}
+	Val4 [][]interface{}
+}
+```
+
+### Table
+
+```toml
+[server]
+type = "app"
+
+  [server.development]
+  ip = "10.0.0.1"
+
+  [server.production]
+  ip = "10.0.0.2"
+```
+
+```go
+type Config struct {
+	Server map[string]Server
+}
+
+type Server struct {
+	IP string
+}
+```
+
+You can also use the following struct instead of a map of structs.
+
+```go
+type Config struct {
+	Server struct {
+		Development Server
+		Production  Server
+	}
+}
+
+type Server struct {
+	IP string
+}
+```
+
+### Array of Tables
+
+```toml
+[[fruit]]
+  name = "apple"
+
+  [fruit.physical]
+    color = "red"
+    shape = "round"
+
+  [[fruit.variety]]
+    name = "red delicious"
+
+  [[fruit.variety]]
+    name = "granny smith"
+
+[[fruit]]
+  name = "banana"
+
+  [[fruit.variety]]
+    name = "plantain"
+```
+
+```go
+type Config struct {
+	Fruit []struct {
+		Name     string
+		Physical struct {
+			Color string
+			Shape string
+		}
+		Variety []struct {
+			Name string
+		}
+	}
+}
+```
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Package toml supports `encoding.TextUnmarshaler` (and `encoding.TextMarshaler`). You can
+use it to apply custom marshaling rules for certain types. The `UnmarshalText` method is
+called with the value text found in the TOML input. TOML strings are passed unquoted.
+
+```toml
+duration = "10s"
+```
+
+```go
+import "time"
+
+type Duration time.Duration
+
+// UnmarshalText implements encoding.TextUnmarshaler
+func (d *Duration) UnmarshalText(data []byte) error {
+	duration, err := time.ParseDuration(string(data))
+	if err == nil {
+		*d = Duration(duration)
+	}
+	return err
+}
+
+// MarshalText implements encoding.TextMarshaler
+func (d Duration) MarshalText() ([]byte, error) {
+	return []byte(time.Duration(d).String()), nil
+}
+
+type ConfigWithDuration struct {
+	Duration Duration
+}
+```
+
+### Using the `toml.UnmarshalerRec` interface
+
+You can also override marshaling rules specifically for TOML using the `UnmarshalerRec`
+and `MarshalerRec` interfaces. These are useful if you want to control how structs or
+arrays are handled.
You can apply additional validation or set unexported struct fields. + +Note: `encoding.TextUnmarshaler` and `encoding.TextMarshaler` should be preferred for +simple (scalar) values because they're also compatible with other formats like JSON or +YAML. + +[See the UnmarshalerRec example](https://godoc.org/github.com/naoina/toml/#example_UnmarshalerRec). + +### Using the `toml.Unmarshaler` interface + +If you want to deal with raw TOML syntax, use the `Unmarshaler` and `Marshaler` +interfaces. Their input and output is raw TOML syntax. As such, these interfaces are +useful if you want to handle TOML at the syntax level. + +[See the Unmarshaler example](https://godoc.org/github.com/naoina/toml/#example_Unmarshaler). + +## API documentation + +See [Godoc](http://godoc.org/github.com/naoina/toml). + +## License + +MIT diff --git a/vendor/github.com/naoina/toml/ast/ast.go b/vendor/github.com/naoina/toml/ast/ast.go new file mode 100644 index 000000000..4868e2e1a --- /dev/null +++ b/vendor/github.com/naoina/toml/ast/ast.go @@ -0,0 +1,192 @@ +package ast + +import ( + "strconv" + "strings" + "time" +) + +type Position struct { + Begin int + End int +} + +type Value interface { + Pos() int + End() int + Source() string +} + +type String struct { + Position Position + Value string + Data []rune +} + +func (s *String) Pos() int { + return s.Position.Begin +} + +func (s *String) End() int { + return s.Position.End +} + +func (s *String) Source() string { + return string(s.Data) +} + +type Integer struct { + Position Position + Value string + Data []rune +} + +func (i *Integer) Pos() int { + return i.Position.Begin +} + +func (i *Integer) End() int { + return i.Position.End +} + +func (i *Integer) Source() string { + return string(i.Data) +} + +func (i *Integer) Int() (int64, error) { + return strconv.ParseInt(i.Value, 10, 64) +} + +type Float struct { + Position Position + Value string + Data []rune +} + +func (f *Float) Pos() int { + return f.Position.Begin +} + +func (f *Float) End() int { + return f.Position.End +} + +func (f *Float) Source() string { + return string(f.Data) +} + +func (f *Float) Float() (float64, error) { + return strconv.ParseFloat(f.Value, 64) +} + +type Boolean struct { + Position Position + Value string + Data []rune +} + +func (b *Boolean) Pos() int { + return b.Position.Begin +} + +func (b *Boolean) End() int { + return b.Position.End +} + +func (b *Boolean) Source() string { + return string(b.Data) +} + +func (b *Boolean) Boolean() (bool, error) { + return strconv.ParseBool(b.Value) +} + +type Datetime struct { + Position Position + Value string + Data []rune +} + +func (d *Datetime) Pos() int { + return d.Position.Begin +} + +func (d *Datetime) End() int { + return d.Position.End +} + +func (d *Datetime) Source() string { + return string(d.Data) +} + +func (d *Datetime) Time() (time.Time, error) { + switch { + case !strings.Contains(d.Value, ":"): + return time.Parse("2006-01-02", d.Value) + case !strings.Contains(d.Value, "-"): + return time.Parse("15:04:05.999999999", d.Value) + default: + return time.Parse(time.RFC3339Nano, d.Value) + } +} + +type Array struct { + Position Position + Value []Value + Data []rune +} + +func (a *Array) Pos() int { + return a.Position.Begin +} + +func (a *Array) End() int { + return a.Position.End +} + +func (a *Array) Source() string { + return string(a.Data) +} + +type TableType uint8 + +const ( + TableTypeNormal TableType = iota + TableTypeArray +) + +var tableTypes = [...]string{ + "normal", + "array", +} + +func (t TableType) String() 
string { + return tableTypes[t] +} + +type Table struct { + Position Position + Line int + Name string + Fields map[string]interface{} + Type TableType + Data []rune +} + +func (t *Table) Pos() int { + return t.Position.Begin +} + +func (t *Table) End() int { + return t.Position.End +} + +func (t *Table) Source() string { + return string(t.Data) +} + +type KeyValue struct { + Key string + Value Value + Line int +} diff --git a/vendor/github.com/naoina/toml/config.go b/vendor/github.com/naoina/toml/config.go new file mode 100644 index 000000000..06bb9493b --- /dev/null +++ b/vendor/github.com/naoina/toml/config.go @@ -0,0 +1,86 @@ +package toml + +import ( + "fmt" + "io" + "reflect" + "strings" + + stringutil "github.com/naoina/go-stringutil" + "github.com/naoina/toml/ast" +) + +// Config contains options for encoding and decoding. +type Config struct { + // NormFieldName is used to match TOML keys to struct fields. The function runs for + // both input keys and struct field names and should return a string that makes the + // two match. You must set this field to use the decoder. + // + // Example: The function in the default config removes _ and lowercases all keys. This + // allows a key called 'api_key' to match the struct field 'APIKey' because both are + // normalized to 'apikey'. + // + // Note that NormFieldName is not used for fields which define a TOML + // key through the struct tag. + NormFieldName func(typ reflect.Type, keyOrField string) string + + // FieldToKey determines the TOML key of a struct field when encoding. + // You must set this field to use the encoder. + // + // Note that FieldToKey is not used for fields which define a TOML + // key through the struct tag. + FieldToKey func(typ reflect.Type, field string) string + + // MissingField, if non-nil, is called when the decoder encounters a key for which no + // matching struct field exists. The default behavior is to return an error. + MissingField func(typ reflect.Type, key string) error +} + +// DefaultConfig contains the default options for encoding and decoding. +// Snake case (i.e. 'foo_bar') is used for key names. +var DefaultConfig = Config{ + NormFieldName: defaultNormFieldName, + FieldToKey: snakeCase, +} + +func defaultNormFieldName(typ reflect.Type, s string) string { + return strings.Replace(strings.ToLower(s), "_", "", -1) +} + +func snakeCase(typ reflect.Type, s string) string { + return stringutil.ToSnakeCase(s) +} + +func defaultMissingField(typ reflect.Type, key string) error { + return fmt.Errorf("field corresponding to `%s' is not defined in %v", key, typ) +} + +// NewEncoder returns a new Encoder that writes to w. +// It is shorthand for DefaultConfig.NewEncoder(w). +func NewEncoder(w io.Writer) *Encoder { + return DefaultConfig.NewEncoder(w) +} + +// Marshal returns the TOML encoding of v. +// It is shorthand for DefaultConfig.Marshal(v). +func Marshal(v interface{}) ([]byte, error) { + return DefaultConfig.Marshal(v) +} + +// Unmarshal parses the TOML data and stores the result in the value pointed to by v. +// It is shorthand for DefaultConfig.Unmarshal(data, v). +func Unmarshal(data []byte, v interface{}) error { + return DefaultConfig.Unmarshal(data, v) +} + +// UnmarshalTable applies the contents of an ast.Table to the value pointed at by v. +// It is shorthand for DefaultConfig.UnmarshalTable(t, v). +func UnmarshalTable(t *ast.Table, v interface{}) error { + return DefaultConfig.UnmarshalTable(t, v) +} + +// NewDecoder returns a new Decoder that reads from r. 
+// It is shorthand for DefaultConfig.NewDecoder(r). +func NewDecoder(r io.Reader) *Decoder { + return DefaultConfig.NewDecoder(r) +} diff --git a/vendor/github.com/naoina/toml/decode.go b/vendor/github.com/naoina/toml/decode.go new file mode 100644 index 000000000..b3c169eb1 --- /dev/null +++ b/vendor/github.com/naoina/toml/decode.go @@ -0,0 +1,478 @@ +// Package toml encodes and decodes the TOML configuration format using reflection. +// +// This library is compatible with TOML version v0.4.0. +package toml + +import ( + "encoding" + "fmt" + "io" + "io/ioutil" + "reflect" + "strconv" + "strings" + "time" + + "github.com/naoina/toml/ast" +) + +const ( + tableSeparator = '.' +) + +var ( + escapeReplacer = strings.NewReplacer( + "\b", "\\n", + "\f", "\\f", + "\n", "\\n", + "\r", "\\r", + "\t", "\\t", + ) + underscoreReplacer = strings.NewReplacer( + "_", "", + ) +) + +var timeType = reflect.TypeOf(time.Time{}) + +// Unmarshal parses the TOML data and stores the result in the value pointed to by v. +// +// Unmarshal will mapped to v that according to following rules: +// +// TOML strings to string +// TOML integers to any int type +// TOML floats to float32 or float64 +// TOML booleans to bool +// TOML datetimes to time.Time +// TOML arrays to any type of slice +// TOML tables to struct or map +// TOML array tables to slice of struct or map +func (cfg *Config) Unmarshal(data []byte, v interface{}) error { + table, err := Parse(data) + if err != nil { + return err + } + if err := cfg.UnmarshalTable(table, v); err != nil { + return err + } + return nil +} + +// A Decoder reads and decodes TOML from an input stream. +type Decoder struct { + r io.Reader + cfg *Config +} + +// NewDecoder returns a new Decoder that reads from r. +// Note that it reads all from r before parsing it. +func (cfg *Config) NewDecoder(r io.Reader) *Decoder { + return &Decoder{r, cfg} +} + +// Decode parses the TOML data from its input and stores it in the value pointed to by v. +// See the documentation for Unmarshal for details about the conversion of TOML into a Go value. +func (d *Decoder) Decode(v interface{}) error { + b, err := ioutil.ReadAll(d.r) + if err != nil { + return err + } + return d.cfg.Unmarshal(b, v) +} + +// UnmarshalerRec may be implemented by types to customize their behavior when being +// unmarshaled from TOML. You can use it to implement custom validation or to set +// unexported fields. +// +// UnmarshalTOML receives a function that can be called to unmarshal the original TOML +// value into a field or variable. It is safe to call the function more than once if +// necessary. +type UnmarshalerRec interface { + UnmarshalTOML(fn func(interface{}) error) error +} + +// Unmarshaler can be used to capture and process raw TOML source of a table or value. +// UnmarshalTOML must copy the input if it wishes to retain it after returning. +// +// Note: this interface is retained for backwards compatibility. You probably want +// to implement encoding.TextUnmarshaler or UnmarshalerRec instead. +type Unmarshaler interface { + UnmarshalTOML(input []byte) error +} + +// UnmarshalTable applies the contents of an ast.Table to the value pointed at by v. 
+// +// UnmarshalTable will mapped to v that according to following rules: +// +// TOML strings to string +// TOML integers to any int type +// TOML floats to float32 or float64 +// TOML booleans to bool +// TOML datetimes to time.Time +// TOML arrays to any type of slice +// TOML tables to struct or map +// TOML array tables to slice of struct or map +func (cfg *Config) UnmarshalTable(t *ast.Table, v interface{}) error { + rv := reflect.ValueOf(v) + toplevelMap := rv.Kind() == reflect.Map + if (!toplevelMap && rv.Kind() != reflect.Ptr) || rv.IsNil() { + return &invalidUnmarshalError{reflect.TypeOf(v)} + } + return unmarshalTable(cfg, rv, t, toplevelMap) +} + +// used for UnmarshalerRec. +func unmarshalTableOrValue(cfg *Config, rv reflect.Value, av interface{}) error { + if (rv.Kind() != reflect.Ptr && rv.Kind() != reflect.Map) || rv.IsNil() { + return &invalidUnmarshalError{rv.Type()} + } + rv = indirect(rv) + + switch av.(type) { + case *ast.KeyValue, *ast.Table, []*ast.Table: + if err := unmarshalField(cfg, rv, av); err != nil { + return lineError(fieldLineNumber(av), err) + } + return nil + case ast.Value: + return setValue(cfg, rv, av.(ast.Value)) + default: + panic(fmt.Sprintf("BUG: unhandled AST node type %T", av)) + } +} + +// unmarshalTable unmarshals the fields of a table into a struct or map. +// +// toplevelMap is true when rv is an (unadressable) map given to UnmarshalTable. In this +// (special) case, the map is used as-is instead of creating a new map. +func unmarshalTable(cfg *Config, rv reflect.Value, t *ast.Table, toplevelMap bool) error { + rv = indirect(rv) + if err, ok := setUnmarshaler(cfg, rv, t); ok { + return lineError(t.Line, err) + } + switch { + case rv.Kind() == reflect.Struct: + fc := makeFieldCache(cfg, rv.Type()) + for key, fieldAst := range t.Fields { + fv, fieldName, err := fc.findField(cfg, rv, key) + if err != nil { + return lineError(fieldLineNumber(fieldAst), err) + } + if fv.IsValid() { + if err := unmarshalField(cfg, fv, fieldAst); err != nil { + return lineErrorField(fieldLineNumber(fieldAst), rv.Type().String()+"."+fieldName, err) + } + } + } + case rv.Kind() == reflect.Map || isEface(rv): + m := rv + if !toplevelMap { + if rv.Kind() == reflect.Interface { + m = reflect.ValueOf(make(map[string]interface{})) + } else { + m = reflect.MakeMap(rv.Type()) + } + } + elemtyp := m.Type().Elem() + for key, fieldAst := range t.Fields { + kv, err := unmarshalMapKey(m.Type().Key(), key) + if err != nil { + return lineError(fieldLineNumber(fieldAst), err) + } + fv := reflect.New(elemtyp).Elem() + if err := unmarshalField(cfg, fv, fieldAst); err != nil { + return lineError(fieldLineNumber(fieldAst), err) + } + m.SetMapIndex(kv, fv) + } + if !toplevelMap { + rv.Set(m) + } + default: + return lineError(t.Line, &unmarshalTypeError{"table", "struct or map", rv.Type()}) + } + return nil +} + +func fieldLineNumber(fieldAst interface{}) int { + switch av := fieldAst.(type) { + case *ast.KeyValue: + return av.Line + case *ast.Table: + return av.Line + case []*ast.Table: + return av[0].Line + default: + panic(fmt.Sprintf("BUG: unhandled node type %T", fieldAst)) + } +} + +func unmarshalField(cfg *Config, rv reflect.Value, fieldAst interface{}) error { + switch av := fieldAst.(type) { + case *ast.KeyValue: + return setValue(cfg, rv, av.Value) + case *ast.Table: + return unmarshalTable(cfg, rv, av, false) + case []*ast.Table: + rv = indirect(rv) + if err, ok := setUnmarshaler(cfg, rv, fieldAst); ok { + return err + } + var slice reflect.Value + switch { + case rv.Kind() == 
reflect.Slice: + slice = reflect.MakeSlice(rv.Type(), len(av), len(av)) + case isEface(rv): + slice = reflect.ValueOf(make([]interface{}, len(av))) + default: + return &unmarshalTypeError{"array table", "slice", rv.Type()} + } + for i, tbl := range av { + vv := reflect.New(slice.Type().Elem()).Elem() + if err := unmarshalTable(cfg, vv, tbl, false); err != nil { + return err + } + slice.Index(i).Set(vv) + } + rv.Set(slice) + default: + panic(fmt.Sprintf("BUG: unhandled AST node type %T", av)) + } + return nil +} + +func unmarshalMapKey(typ reflect.Type, key string) (reflect.Value, error) { + rv := reflect.New(typ).Elem() + if u, ok := rv.Addr().Interface().(encoding.TextUnmarshaler); ok { + return rv, u.UnmarshalText([]byte(key)) + } + switch typ.Kind() { + case reflect.String: + rv.SetString(key) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i, err := strconv.ParseInt(key, 10, int(typ.Size()*8)) + if err != nil { + return rv, convertNumError(typ.Kind(), err) + } + rv.SetInt(i) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + i, err := strconv.ParseUint(key, 10, int(typ.Size()*8)) + if err != nil { + return rv, convertNumError(typ.Kind(), err) + } + rv.SetUint(i) + default: + return rv, fmt.Errorf("invalid map key type %s", typ) + } + return rv, nil +} + +func setValue(cfg *Config, lhs reflect.Value, val ast.Value) error { + lhs = indirect(lhs) + if err, ok := setUnmarshaler(cfg, lhs, val); ok { + return err + } + if err, ok := setTextUnmarshaler(lhs, val); ok { + return err + } + switch v := val.(type) { + case *ast.Integer: + return setInt(lhs, v) + case *ast.Float: + return setFloat(lhs, v) + case *ast.String: + return setString(lhs, v) + case *ast.Boolean: + return setBoolean(lhs, v) + case *ast.Datetime: + return setDatetime(lhs, v) + case *ast.Array: + return setArray(cfg, lhs, v) + default: + panic(fmt.Sprintf("BUG: unhandled node type %T", v)) + } +} + +func indirect(rv reflect.Value) reflect.Value { + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + return rv +} + +func setUnmarshaler(cfg *Config, lhs reflect.Value, av interface{}) (error, bool) { + if lhs.CanAddr() { + if u, ok := lhs.Addr().Interface().(UnmarshalerRec); ok { + err := u.UnmarshalTOML(func(v interface{}) error { + return unmarshalTableOrValue(cfg, reflect.ValueOf(v), av) + }) + return err, true + } + if u, ok := lhs.Addr().Interface().(Unmarshaler); ok { + return u.UnmarshalTOML(unmarshalerSource(av)), true + } + } + return nil, false +} + +func unmarshalerSource(av interface{}) []byte { + var source []byte + switch av := av.(type) { + case []*ast.Table: + for i, tab := range av { + source = append(source, tab.Source()...) 
+ if i != len(av)-1 { + source = append(source, '\n') + } + } + case ast.Value: + source = []byte(av.Source()) + default: + panic(fmt.Sprintf("BUG: unhandled node type %T", av)) + } + return source +} + +func setTextUnmarshaler(lhs reflect.Value, val ast.Value) (error, bool) { + if !lhs.CanAddr() { + return nil, false + } + u, ok := lhs.Addr().Interface().(encoding.TextUnmarshaler) + if !ok || lhs.Type() == timeType { + return nil, false + } + var data string + switch val := val.(type) { + case *ast.Array: + return &unmarshalTypeError{"array", "", lhs.Type()}, true + case *ast.String: + data = val.Value + default: + data = val.Source() + } + return u.UnmarshalText([]byte(data)), true +} + +func setInt(fv reflect.Value, v *ast.Integer) error { + k := fv.Kind() + switch { + case k >= reflect.Int && k <= reflect.Int64: + i, err := strconv.ParseInt(v.Value, 10, int(fv.Type().Size()*8)) + if err != nil { + return convertNumError(fv.Kind(), err) + } + fv.SetInt(i) + case k >= reflect.Uint && k <= reflect.Uintptr: + i, err := strconv.ParseUint(v.Value, 10, int(fv.Type().Size()*8)) + if err != nil { + return convertNumError(fv.Kind(), err) + } + fv.SetUint(i) + case isEface(fv): + i, err := strconv.ParseInt(v.Value, 10, 64) + if err != nil { + return convertNumError(reflect.Int64, err) + } + fv.Set(reflect.ValueOf(i)) + default: + return &unmarshalTypeError{"integer", "", fv.Type()} + } + return nil +} + +func setFloat(fv reflect.Value, v *ast.Float) error { + f, err := v.Float() + if err != nil { + return err + } + switch { + case fv.Kind() == reflect.Float32 || fv.Kind() == reflect.Float64: + if fv.OverflowFloat(f) { + return &overflowError{fv.Kind(), v.Value} + } + fv.SetFloat(f) + case isEface(fv): + fv.Set(reflect.ValueOf(f)) + default: + return &unmarshalTypeError{"float", "", fv.Type()} + } + return nil +} + +func setString(fv reflect.Value, v *ast.String) error { + switch { + case fv.Kind() == reflect.String: + fv.SetString(v.Value) + case isEface(fv): + fv.Set(reflect.ValueOf(v.Value)) + default: + return &unmarshalTypeError{"string", "", fv.Type()} + } + return nil +} + +func setBoolean(fv reflect.Value, v *ast.Boolean) error { + b, _ := v.Boolean() + switch { + case fv.Kind() == reflect.Bool: + fv.SetBool(b) + case isEface(fv): + fv.Set(reflect.ValueOf(b)) + default: + return &unmarshalTypeError{"boolean", "", fv.Type()} + } + return nil +} + +func setDatetime(rv reflect.Value, v *ast.Datetime) error { + t, err := v.Time() + if err != nil { + return err + } + if !timeType.AssignableTo(rv.Type()) { + return &unmarshalTypeError{"datetime", "", rv.Type()} + } + rv.Set(reflect.ValueOf(t)) + return nil +} + +func setArray(cfg *Config, rv reflect.Value, v *ast.Array) error { + var slicetyp reflect.Type + switch { + case rv.Kind() == reflect.Slice: + slicetyp = rv.Type() + case isEface(rv): + slicetyp = reflect.SliceOf(rv.Type()) + default: + return &unmarshalTypeError{"array", "slice", rv.Type()} + } + + if len(v.Value) == 0 { + // Ensure defined slices are always set to a non-nil value. 
+ rv.Set(reflect.MakeSlice(slicetyp, 0, 0)) + return nil + } + + tomltyp := reflect.TypeOf(v.Value[0]) + slice := reflect.MakeSlice(slicetyp, len(v.Value), len(v.Value)) + typ := slicetyp.Elem() + for i, vv := range v.Value { + if i > 0 && tomltyp != reflect.TypeOf(vv) { + return errArrayMultiType + } + tmp := reflect.New(typ).Elem() + if err := setValue(cfg, tmp, vv); err != nil { + return err + } + slice.Index(i).Set(tmp) + } + rv.Set(slice) + return nil +} + +func isEface(rv reflect.Value) bool { + return rv.Kind() == reflect.Interface && rv.Type().NumMethod() == 0 +} diff --git a/vendor/github.com/naoina/toml/encode.go b/vendor/github.com/naoina/toml/encode.go new file mode 100644 index 000000000..ae6bfd575 --- /dev/null +++ b/vendor/github.com/naoina/toml/encode.go @@ -0,0 +1,398 @@ +package toml + +import ( + "bytes" + "encoding" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "time" + + "github.com/naoina/toml/ast" +) + +const ( + tagOmitempty = "omitempty" + tagSkip = "-" +) + +// Marshal returns the TOML encoding of v. +// +// Struct values encode as TOML. Each exported struct field becomes a field of +// the TOML structure unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// +// The "toml" key in the struct field's tag value is the key name, followed by +// an optional comma and options. Examples: +// +// // Field is ignored by this package. +// Field int `toml:"-"` +// +// // Field appears in TOML as key "myName". +// Field int `toml:"myName"` +// +// // Field appears in TOML as key "myName" and the field is omitted from the +// // result of encoding if its value is empty. +// Field int `toml:"myName,omitempty"` +// +// // Field appears in TOML as key "field", but the field is skipped if +// // empty. Note the leading comma. +// Field int `toml:",omitempty"` +func (cfg *Config) Marshal(v interface{}) ([]byte, error) { + buf := new(bytes.Buffer) + err := cfg.NewEncoder(buf).Encode(v) + return buf.Bytes(), err +} + +// A Encoder writes TOML to an output stream. +type Encoder struct { + w io.Writer + cfg *Config +} + +// NewEncoder returns a new Encoder that writes to w. +func (cfg *Config) NewEncoder(w io.Writer) *Encoder { + return &Encoder{w, cfg} +} + +// Encode writes the TOML of v to the stream. +// See the documentation for Marshal for details about the conversion of Go values to TOML. +func (e *Encoder) Encode(v interface{}) error { + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return &marshalNilError{rv.Type()} + } + rv = rv.Elem() + } + buf := &tableBuf{typ: ast.TableTypeNormal} + var err error + switch rv.Kind() { + case reflect.Struct: + err = buf.structFields(e.cfg, rv) + case reflect.Map: + err = buf.mapFields(e.cfg, rv) + default: + err = &marshalTableError{rv.Type()} + } + if err != nil { + return err + } + return buf.writeTo(e.w, "") +} + +// Marshaler can be implemented to override the encoding of TOML values. The returned text +// must be a simple TOML value (i.e. not a table) and is inserted into marshaler output. +// +// This interface exists for backwards-compatibility reasons. You probably want to +// implement encoding.TextMarshaler or MarshalerRec instead. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// MarshalerRec can be implemented to override the TOML encoding of a type. +// The returned value is marshaled in place of the receiver. 
+type MarshalerRec interface { + MarshalTOML() (interface{}, error) +} + +type tableBuf struct { + name string // already escaped / quoted + body []byte + children []*tableBuf + typ ast.TableType + arrayDepth int +} + +func (b *tableBuf) writeTo(w io.Writer, prefix string) error { + key := b.name // TODO: escape dots + if prefix != "" { + key = prefix + "." + key + } + + if b.name != "" { + head := "[" + key + "]" + if b.typ == ast.TableTypeArray { + head = "[" + head + "]" + } + head += "\n" + if _, err := io.WriteString(w, head); err != nil { + return err + } + } + if _, err := w.Write(b.body); err != nil { + return err + } + + for i, child := range b.children { + if len(b.body) > 0 || i > 0 { + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + } + if err := child.writeTo(w, key); err != nil { + return err + } + } + return nil +} + +func (b *tableBuf) newChild(name string) *tableBuf { + child := &tableBuf{name: quoteName(name), typ: ast.TableTypeNormal} + if b.arrayDepth > 0 { + child.typ = ast.TableTypeArray + } + return child +} + +func (b *tableBuf) addChild(child *tableBuf) { + // Empty table elision: we can avoid writing a table that doesn't have any keys on its + // own. Array tables can't be elided because they define array elements (which would + // be missing if elided). + if len(child.body) == 0 && child.typ == ast.TableTypeNormal { + for _, gchild := range child.children { + gchild.name = child.name + "." + gchild.name + b.addChild(gchild) + } + return + } + b.children = append(b.children, child) +} + +func (b *tableBuf) structFields(cfg *Config, rv reflect.Value) error { + rt := rv.Type() + for i := 0; i < rv.NumField(); i++ { + ft := rt.Field(i) + if ft.PkgPath != "" && !ft.Anonymous { // not exported + continue + } + name, rest := extractTag(ft.Tag.Get(fieldTagName)) + if name == tagSkip { + continue + } + fv := rv.Field(i) + if rest == tagOmitempty && isEmptyValue(fv) { + continue + } + if name == "" { + name = cfg.FieldToKey(rt, ft.Name) + } + if err := b.field(cfg, name, fv); err != nil { + return err + } + } + return nil +} + +type mapKeyList []struct { + key string + value reflect.Value +} + +func (l mapKeyList) Len() int { return len(l) } +func (l mapKeyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l mapKeyList) Less(i, j int) bool { return l[i].key < l[j].key } + +func (b *tableBuf) mapFields(cfg *Config, rv reflect.Value) error { + keys := rv.MapKeys() + keylist := make(mapKeyList, len(keys)) + for i, key := range keys { + var err error + keylist[i].key, err = encodeMapKey(key) + if err != nil { + return err + } + keylist[i].value = rv.MapIndex(key) + } + sort.Sort(keylist) + + for _, kv := range keylist { + if err := b.field(cfg, kv.key, kv.value); err != nil { + return err + } + } + return nil +} + +func (b *tableBuf) field(cfg *Config, name string, rv reflect.Value) error { + off := len(b.body) + b.body = append(b.body, quoteName(name)...) + b.body = append(b.body, " = "...) 
+ isTable, err := b.value(cfg, rv, name) + if isTable { + b.body = b.body[:off] // rub out "key =" + } else { + b.body = append(b.body, '\n') + } + return err +} + +func (b *tableBuf) value(cfg *Config, rv reflect.Value, name string) (bool, error) { + isMarshaler, isTable, err := b.marshaler(cfg, rv, name) + if isMarshaler { + return isTable, err + } + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + b.body = strconv.AppendInt(b.body, rv.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + b.body = strconv.AppendUint(b.body, rv.Uint(), 10) + case reflect.Float32, reflect.Float64: + b.body = strconv.AppendFloat(b.body, rv.Float(), 'e', -1, 64) + case reflect.Bool: + b.body = strconv.AppendBool(b.body, rv.Bool()) + case reflect.String: + b.body = strconv.AppendQuote(b.body, rv.String()) + case reflect.Ptr, reflect.Interface: + if rv.IsNil() { + return false, &marshalNilError{rv.Type()} + } + return b.value(cfg, rv.Elem(), name) + case reflect.Slice, reflect.Array: + rvlen := rv.Len() + if rvlen == 0 { + b.body = append(b.body, '[', ']') + return false, nil + } + + b.arrayDepth++ + wroteElem := false + b.body = append(b.body, '[') + for i := 0; i < rvlen; i++ { + isTable, err := b.value(cfg, rv.Index(i), name) + if err != nil { + return isTable, err + } + wroteElem = wroteElem || !isTable + if wroteElem { + if i < rvlen-1 { + b.body = append(b.body, ',', ' ') + } else { + b.body = append(b.body, ']') + } + } + } + if !wroteElem { + b.body = b.body[:len(b.body)-1] // rub out '[' + } + b.arrayDepth-- + return !wroteElem, nil + case reflect.Struct: + child := b.newChild(name) + err := child.structFields(cfg, rv) + b.addChild(child) + return true, err + case reflect.Map: + child := b.newChild(name) + err := child.mapFields(cfg, rv) + b.addChild(child) + return true, err + default: + return false, fmt.Errorf("toml: marshal: unsupported type %v", rv.Kind()) + } + return false, nil +} + +func (b *tableBuf) marshaler(cfg *Config, rv reflect.Value, name string) (handled, isTable bool, err error) { + switch t := rv.Interface().(type) { + case encoding.TextMarshaler: + enc, err := t.MarshalText() + if err != nil { + return true, false, err + } + b.body = encodeTextMarshaler(b.body, string(enc)) + return true, false, nil + case MarshalerRec: + newval, err := t.MarshalTOML() + if err != nil { + return true, false, err + } + isTable, err = b.value(cfg, reflect.ValueOf(newval), name) + return true, isTable, err + case Marshaler: + enc, err := t.MarshalTOML() + if err != nil { + return true, false, err + } + b.body = append(b.body, enc...) + return true, false, nil + } + return false, false, nil +} + +func encodeTextMarshaler(buf []byte, v string) []byte { + // Emit the value without quotes if possible. + if v == "true" || v == "false" { + return append(buf, v...) + } else if _, err := time.Parse(time.RFC3339Nano, v); err == nil { + return append(buf, v...) + } else if _, err := strconv.ParseInt(v, 10, 64); err == nil { + return append(buf, v...) + } else if _, err := strconv.ParseUint(v, 10, 64); err == nil { + return append(buf, v...) + } else if _, err := strconv.ParseFloat(v, 64); err == nil { + return append(buf, v...) 
+ } + return strconv.AppendQuote(buf, v) +} + +func encodeMapKey(rv reflect.Value) (string, error) { + if rv.Kind() == reflect.String { + return rv.String(), nil + } + if tm, ok := rv.Interface().(encoding.TextMarshaler); ok { + b, err := tm.MarshalText() + return string(b), err + } + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(rv.Int(), 10), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(rv.Uint(), 10), nil + } + return "", fmt.Errorf("toml: invalid map key type %v", rv.Type()) +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array: + // encoding/json treats all arrays with non-zero length as non-empty. We check the + // array content here because zero-length arrays are almost never used. + len := v.Len() + for i := 0; i < len; i++ { + if !isEmptyValue(v.Index(i)) { + return false + } + } + return true + case reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func quoteName(s string) string { + if len(s) == 0 { + return strconv.Quote(s) + } + for _, r := range s { + if r >= '0' && r <= '9' || r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' || r == '-' || r == '_' { + continue + } + return strconv.Quote(s) + } + return s +} diff --git a/vendor/github.com/naoina/toml/error.go b/vendor/github.com/naoina/toml/error.go new file mode 100644 index 000000000..cb73b5e0a --- /dev/null +++ b/vendor/github.com/naoina/toml/error.go @@ -0,0 +1,107 @@ +package toml + +import ( + "errors" + "fmt" + "reflect" + "strconv" +) + +var ( + errArrayMultiType = errors.New("array can't contain multiple types") +) + +// LineError is returned by Unmarshal, UnmarshalTable and Parse +// if the error is local to a line. 
+type LineError struct { + Line int + StructField string + Err error +} + +func (err *LineError) Error() string { + field := "" + if err.StructField != "" { + field = "(" + err.StructField + ") " + } + return fmt.Sprintf("line %d: %s%v", err.Line, field, err.Err) +} + +func lineError(line int, err error) error { + if err == nil { + return nil + } + if _, ok := err.(*LineError); ok { + return err + } + return &LineError{Line: line, Err: err} +} + +func lineErrorField(line int, field string, err error) error { + if lerr, ok := err.(*LineError); ok { + return lerr + } else if err != nil { + err = &LineError{Line: line, StructField: field, Err: err} + } + return err +} + +type overflowError struct { + kind reflect.Kind + v string +} + +func (err *overflowError) Error() string { + return fmt.Sprintf("value %s is out of range for %v", err.v, err.kind) +} + +func convertNumError(kind reflect.Kind, err error) error { + if numerr, ok := err.(*strconv.NumError); ok && numerr.Err == strconv.ErrRange { + return &overflowError{kind, numerr.Num} + } + return err +} + +type invalidUnmarshalError struct { + typ reflect.Type +} + +func (err *invalidUnmarshalError) Error() string { + if err.typ == nil { + return "toml: Unmarshal(nil)" + } + if err.typ.Kind() != reflect.Ptr { + return "toml: Unmarshal(non-pointer " + err.typ.String() + ")" + } + return "toml: Unmarshal(nil " + err.typ.String() + ")" +} + +type unmarshalTypeError struct { + what string + want string + typ reflect.Type +} + +func (err *unmarshalTypeError) Error() string { + msg := fmt.Sprintf("cannot unmarshal TOML %s into %s", err.what, err.typ) + if err.want != "" { + msg += " (need " + err.want + ")" + } + return msg +} + +type marshalNilError struct { + typ reflect.Type +} + +func (err *marshalNilError) Error() string { + return fmt.Sprintf("toml: cannot marshal nil %s", err.typ) +} + +type marshalTableError struct { + typ reflect.Type +} + +func (err *marshalTableError) Error() string { + return fmt.Sprintf("toml: cannot marshal %s as table, want struct or map type", err.typ) +} diff --git a/vendor/github.com/naoina/toml/parse.go b/vendor/github.com/naoina/toml/parse.go new file mode 100644 index 000000000..e6f95001e --- /dev/null +++ b/vendor/github.com/naoina/toml/parse.go @@ -0,0 +1,376 @@ +package toml + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/naoina/toml/ast" +) + +// The parser is generated by github.com/pointlander/peg. To regenerate it, do: +// +// go get -u github.com/pointlander/peg +// go generate . + +//go:generate peg -switch -inline parse.peg + +var errParse = errors.New("invalid TOML syntax") + +// Parse returns an AST representation of TOML. +// The toplevel is represented by a table. 
+func Parse(data []byte) (*ast.Table, error) { + d := &parseState{p: &tomlParser{Buffer: string(data)}} + d.init() + + if err := d.parse(); err != nil { + return nil, err + } + + return d.p.toml.table, nil +} + +type parseState struct { + p *tomlParser +} + +func (d *parseState) init() { + d.p.Init() + d.p.toml.init(d.p.buffer) +} + +func (d *parseState) parse() error { + if err := d.p.Parse(); err != nil { + if err, ok := err.(*parseError); ok { + return lineError(err.Line(), errParse) + } + return err + } + return d.execute() +} + +func (d *parseState) execute() (err error) { + defer func() { + if e := recover(); e != nil { + lerr, ok := e.(*LineError) + if !ok { + panic(e) + } + err = lerr + } + }() + d.p.Execute() + return nil +} + +func (e *parseError) Line() int { + tokens := []token32{e.max} + positions, p := make([]int, 2*len(tokens)), 0 + for _, token := range tokens { + positions[p], p = int(token.begin), p+1 + positions[p], p = int(token.end), p+1 + } + for _, t := range translatePositions(e.p.buffer, positions) { + if e.p.line < t.line { + e.p.line = t.line + } + } + return e.p.line +} + +type stack struct { + key string + table *ast.Table +} + +type array struct { + parent *array + child *array + current *ast.Array + line int +} + +type toml struct { + table *ast.Table + line int + currentTable *ast.Table + s string + key string + val ast.Value + arr *array + stack []*stack + skip bool +} + +func (p *toml) init(data []rune) { + p.line = 1 + p.table = p.newTable(ast.TableTypeNormal, "") + p.table.Position.End = len(data) - 1 + p.table.Data = data[:len(data)-1] // truncate the end_symbol added by PEG parse generator. + p.currentTable = p.table +} + +func (p *toml) Error(err error) { + panic(lineError(p.line, err)) +} + +func (p *tomlParser) SetTime(begin, end int) { + p.val = &ast.Datetime{ + Position: ast.Position{Begin: begin, End: end}, + Data: p.buffer[begin:end], + Value: string(p.buffer[begin:end]), + } +} + +func (p *tomlParser) SetFloat64(begin, end int) { + p.val = &ast.Float{ + Position: ast.Position{Begin: begin, End: end}, + Data: p.buffer[begin:end], + Value: underscoreReplacer.Replace(string(p.buffer[begin:end])), + } +} + +func (p *tomlParser) SetInt64(begin, end int) { + p.val = &ast.Integer{ + Position: ast.Position{Begin: begin, End: end}, + Data: p.buffer[begin:end], + Value: underscoreReplacer.Replace(string(p.buffer[begin:end])), + } +} + +func (p *tomlParser) SetString(begin, end int) { + p.val = &ast.String{ + Position: ast.Position{Begin: begin, End: end}, + Data: p.buffer[begin:end], + Value: p.s, + } + p.s = "" +} + +func (p *tomlParser) SetBool(begin, end int) { + p.val = &ast.Boolean{ + Position: ast.Position{Begin: begin, End: end}, + Data: p.buffer[begin:end], + Value: string(p.buffer[begin:end]), + } +} + +func (p *tomlParser) StartArray() { + if p.arr == nil { + p.arr = &array{line: p.line, current: &ast.Array{}} + return + } + p.arr.child = &array{parent: p.arr, line: p.line, current: &ast.Array{}} + p.arr = p.arr.child +} + +func (p *tomlParser) AddArrayVal() { + if p.arr.current == nil { + p.arr.current = &ast.Array{} + } + p.arr.current.Value = append(p.arr.current.Value, p.val) +} + +func (p *tomlParser) SetArray(begin, end int) { + p.arr.current.Position = ast.Position{Begin: begin, End: end} + p.arr.current.Data = p.buffer[begin:end] + p.val = p.arr.current + p.arr = p.arr.parent +} + +func (p *toml) SetTable(buf []rune, begin, end int) { + p.setTable(p.table, buf, begin, end) +} + +func (p *toml) setTable(parent *ast.Table, buf []rune, 
begin, end int) { + name := string(buf[begin:end]) + names := splitTableKey(name) + parent, err := p.lookupTable(parent, names[:len(names)-1]) + if err != nil { + p.Error(err) + } + last := names[len(names)-1] + tbl := p.newTable(ast.TableTypeNormal, last) + switch v := parent.Fields[last].(type) { + case nil: + parent.Fields[last] = tbl + case []*ast.Table: + p.Error(fmt.Errorf("table `%s' is in conflict with array table in line %d", name, v[0].Line)) + case *ast.Table: + if (v.Position == ast.Position{}) { + // This table was created as an implicit parent. + // Replace it with the real defined table. + tbl.Fields = v.Fields + parent.Fields[last] = tbl + } else { + p.Error(fmt.Errorf("table `%s' is in conflict with table in line %d", name, v.Line)) + } + case *ast.KeyValue: + p.Error(fmt.Errorf("table `%s' is in conflict with line %d", name, v.Line)) + default: + p.Error(fmt.Errorf("BUG: table `%s' is in conflict but it's unknown type `%T'", last, v)) + } + p.currentTable = tbl +} + +func (p *toml) newTable(typ ast.TableType, name string) *ast.Table { + return &ast.Table{ + Line: p.line, + Name: name, + Type: typ, + Fields: make(map[string]interface{}), + } +} + +func (p *tomlParser) SetTableString(begin, end int) { + p.currentTable.Data = p.buffer[begin:end] + p.currentTable.Position.Begin = begin + p.currentTable.Position.End = end +} + +func (p *toml) SetArrayTable(buf []rune, begin, end int) { + p.setArrayTable(p.table, buf, begin, end) +} + +func (p *toml) setArrayTable(parent *ast.Table, buf []rune, begin, end int) { + name := string(buf[begin:end]) + names := splitTableKey(name) + parent, err := p.lookupTable(parent, names[:len(names)-1]) + if err != nil { + p.Error(err) + } + last := names[len(names)-1] + tbl := p.newTable(ast.TableTypeArray, last) + switch v := parent.Fields[last].(type) { + case nil: + parent.Fields[last] = []*ast.Table{tbl} + case []*ast.Table: + parent.Fields[last] = append(v, tbl) + case *ast.Table: + p.Error(fmt.Errorf("array table `%s' is in conflict with table in line %d", name, v.Line)) + case *ast.KeyValue: + p.Error(fmt.Errorf("array table `%s' is in conflict with line %d", name, v.Line)) + default: + p.Error(fmt.Errorf("BUG: array table `%s' is in conflict but it's unknown type `%T'", name, v)) + } + p.currentTable = tbl +} + +func (p *toml) StartInlineTable() { + p.skip = false + p.stack = append(p.stack, &stack{p.key, p.currentTable}) + buf := []rune(p.key) + if p.arr == nil { + p.setTable(p.currentTable, buf, 0, len(buf)) + } else { + p.setArrayTable(p.currentTable, buf, 0, len(buf)) + } +} + +func (p *toml) EndInlineTable() { + st := p.stack[len(p.stack)-1] + p.key, p.currentTable = st.key, st.table + p.stack[len(p.stack)-1] = nil + p.stack = p.stack[:len(p.stack)-1] + p.skip = true +} + +func (p *toml) AddLineCount(i int) { + p.line += i +} + +func (p *toml) SetKey(buf []rune, begin, end int) { + p.key = string(buf[begin:end]) +} + +func (p *toml) AddKeyValue() { + if p.skip { + p.skip = false + return + } + if val, exists := p.currentTable.Fields[p.key]; exists { + switch v := val.(type) { + case *ast.Table: + p.Error(fmt.Errorf("key `%s' is in conflict with table in line %d", p.key, v.Line)) + case *ast.KeyValue: + p.Error(fmt.Errorf("key `%s' is in conflict with line %xd", p.key, v.Line)) + default: + p.Error(fmt.Errorf("BUG: key `%s' is in conflict but it's unknown type `%T'", p.key, v)) + } + } + p.currentTable.Fields[p.key] = &ast.KeyValue{Key: p.key, Value: p.val, Line: p.line} +} + +func (p *toml) SetBasicString(buf []rune, begin, end int) 
{ + p.s = p.unquote(string(buf[begin:end])) +} + +func (p *toml) SetMultilineString() { + p.s = p.unquote(`"` + escapeReplacer.Replace(strings.TrimLeft(p.s, "\r\n")) + `"`) +} + +func (p *toml) AddMultilineBasicBody(buf []rune, begin, end int) { + p.s += string(buf[begin:end]) +} + +func (p *toml) SetLiteralString(buf []rune, begin, end int) { + p.s = string(buf[begin:end]) +} + +func (p *toml) SetMultilineLiteralString(buf []rune, begin, end int) { + p.s = strings.TrimLeft(string(buf[begin:end]), "\r\n") +} + +func (p *toml) unquote(s string) string { + s, err := strconv.Unquote(s) + if err != nil { + p.Error(err) + } + return s +} + +func (p *toml) lookupTable(t *ast.Table, keys []string) (*ast.Table, error) { + for _, s := range keys { + val, exists := t.Fields[s] + if !exists { + tbl := p.newTable(ast.TableTypeNormal, s) + t.Fields[s] = tbl + t = tbl + continue + } + switch v := val.(type) { + case *ast.Table: + t = v + case []*ast.Table: + t = v[len(v)-1] + case *ast.KeyValue: + return nil, fmt.Errorf("key `%s' is in conflict with line %d", s, v.Line) + default: + return nil, fmt.Errorf("BUG: key `%s' is in conflict but it's unknown type `%T'", s, v) + } + } + return t, nil +} + +func splitTableKey(tk string) []string { + key := make([]byte, 0, 1) + keys := make([]string, 0, 1) + inQuote := false + for i := 0; i < len(tk); i++ { + k := tk[i] + switch { + case k == tableSeparator && !inQuote: + keys = append(keys, string(key)) + key = key[:0] // reuse buffer. + case k == '"': + inQuote = !inQuote + case (k == ' ' || k == '\t') && !inQuote: + // skip. + default: + key = append(key, k) + } + } + keys = append(keys, string(key)) + return keys +} diff --git a/vendor/github.com/naoina/toml/parse.peg b/vendor/github.com/naoina/toml/parse.peg new file mode 100644 index 000000000..da31dae30 --- /dev/null +++ b/vendor/github.com/naoina/toml/parse.peg @@ -0,0 +1,145 @@ +package toml + +type tomlParser Peg { + toml +} + +TOML <- Expression (newline Expression)* newline? !. { _ = buffer } + +Expression <- ( + <ws table ws comment? (wsnl keyval ws comment?)*> { p.SetTableString(begin, end) } + / ws keyval ws comment? + / ws comment? + / ws +) + +newline <- <[\r\n]+> { p.AddLineCount(end - begin) } + +ws <- [ \t]* +wsnl <- ( + [ \t] + / <[\r\n]> { p.AddLineCount(end - begin) } +)* + +comment <- '#' <[\t -\0x10FFFF]*> + +keyval <- key ws '=' ws val { p.AddKeyValue() } + +key <- bareKey / quotedKey + +bareKey <- <[0-9A-Za-z\-_]+> { p.SetKey(p.buffer, begin, end) } + +quotedKey <- '"' <basicChar+> '"' { p.SetKey(p.buffer, begin-1, end+1) } + +val <- ( + <datetime> { p.SetTime(begin, end) } + / <float> { p.SetFloat64(begin, end) } + / <integer> { p.SetInt64(begin, end) } + / <string> { p.SetString(begin, end) } + / <boolean> { p.SetBool(begin, end) } + / <array> { p.SetArray(begin, end) } + / inlineTable +) + +table <- stdTable / arrayTable + +stdTable <- '[' ws <tableKey> ws ']' { p.SetTable(p.buffer, begin, end) } + +arrayTable <- '[[' ws <tableKey> ws ']]' { p.SetArrayTable(p.buffer, begin, end) } + +inlineTable <- ( + '{' { p.StartInlineTable() } + ws inlineTableKeyValues ws + '}' { p.EndInlineTable() } +) + +inlineTableKeyValues <- (keyval inlineTableValSep?)* + +tableKey <- key (tableKeySep key)* + +tableKeySep <- ws '.' ws + +inlineTableValSep <- ws ',' ws + +integer <- [\-+]? int +int <- [1-9] (digit / '_' digit)+ / digit + +float <- integer (frac exp? / frac? exp) +frac <- '.' digit (digit / '_' digit)* +exp <- [eE] [\-+]? 
digit (digit / '_' digit)* + +string <- ( + mlLiteralString + / literalString + / mlBasicString + / basicString +) + +basicString <- <'"' basicChar* '"'> { p.SetBasicString(p.buffer, begin, end) } + +basicChar <- basicUnescaped / escaped +escaped <- escape ([btnfr"/\\] / 'u' hexQuad / 'U' hexQuad hexQuad) + +basicUnescaped <- [ -!#-\[\]-\0x10FFFF] + +escape <- '\\' + +mlBasicString <- '"""' mlBasicBody '"""' { p.SetMultilineString() } + +mlBasicBody <- ( + <basicChar / newline> { p.AddMultilineBasicBody(p.buffer, begin, end) } + / escape newline wsnl +)* + +literalString <- "'" <literalChar*> "'" { p.SetLiteralString(p.buffer, begin, end) } + +literalChar <- [\t -&(-\0x10FFFF] + +mlLiteralString <- "'''" <mlLiteralBody> "'''" { p.SetMultilineLiteralString(p.buffer, begin, end) } + +mlLiteralBody <- (!"'''" (mlLiteralChar / newline))* + +mlLiteralChar <- [\t -\0x10FFFF] + +hexdigit <- [0-9A-Fa-f] +hexQuad <- hexdigit hexdigit hexdigit hexdigit + +boolean <- 'true' / 'false' + +dateFullYear <- digitQuad +dateMonth <- digitDual +dateMDay <- digitDual +timeHour <- digitDual +timeMinute <- digitDual +timeSecond <- digitDual +timeSecfrac <- '.' digit+ +timeNumoffset <- [\-+] timeHour ':' timeMinute +timeOffset <- 'Z' / timeNumoffset +partialTime <- timeHour ':' timeMinute ':' timeSecond timeSecfrac? +fullDate <- dateFullYear '-' dateMonth '-' dateMDay +fullTime <- partialTime timeOffset +datetime <- (fullDate ('T' fullTime)?) / partialTime + +digit <- [0-9] +digitDual <- digit digit +digitQuad <- digitDual digitDual + +array <- ( + '[' { p.StartArray() } + wsnl arrayValues? wsnl + ']' +) + +arrayValues <- ( + val { p.AddArrayVal() } + ( + wsnl comment? + wsnl arraySep + wsnl comment? + wsnl val { p.AddArrayVal() } + )* + wsnl arraySep? + wsnl comment? +) + +arraySep <- ',' diff --git a/vendor/github.com/naoina/toml/parse.peg.go b/vendor/github.com/naoina/toml/parse.peg.go new file mode 100644 index 000000000..d7de73b19 --- /dev/null +++ b/vendor/github.com/naoina/toml/parse.peg.go @@ -0,0 +1,2556 @@ +package toml + +import ( + "fmt" + "math" + "sort" + "strconv" +) + +const endSymbol rune = 1114112 + +/* The rule types inferred from the grammar are below. 
*/ +type pegRule uint8 + +const ( + ruleUnknown pegRule = iota + ruleTOML + ruleExpression + rulenewline + rulews + rulewsnl + rulecomment + rulekeyval + rulekey + rulebareKey + rulequotedKey + ruleval + ruletable + rulestdTable + rulearrayTable + ruleinlineTable + ruleinlineTableKeyValues + ruletableKey + ruletableKeySep + ruleinlineTableValSep + ruleinteger + ruleint + rulefloat + rulefrac + ruleexp + rulestring + rulebasicString + rulebasicChar + ruleescaped + rulebasicUnescaped + ruleescape + rulemlBasicString + rulemlBasicBody + ruleliteralString + ruleliteralChar + rulemlLiteralString + rulemlLiteralBody + rulemlLiteralChar + rulehexdigit + rulehexQuad + ruleboolean + ruledateFullYear + ruledateMonth + ruledateMDay + ruletimeHour + ruletimeMinute + ruletimeSecond + ruletimeSecfrac + ruletimeNumoffset + ruletimeOffset + rulepartialTime + rulefullDate + rulefullTime + ruledatetime + ruledigit + ruledigitDual + ruledigitQuad + rulearray + rulearrayValues + rulearraySep + ruleAction0 + rulePegText + ruleAction1 + ruleAction2 + ruleAction3 + ruleAction4 + ruleAction5 + ruleAction6 + ruleAction7 + ruleAction8 + ruleAction9 + ruleAction10 + ruleAction11 + ruleAction12 + ruleAction13 + ruleAction14 + ruleAction15 + ruleAction16 + ruleAction17 + ruleAction18 + ruleAction19 + ruleAction20 + ruleAction21 + ruleAction22 + ruleAction23 + ruleAction24 +) + +var rul3s = [...]string{ + "Unknown", + "TOML", + "Expression", + "newline", + "ws", + "wsnl", + "comment", + "keyval", + "key", + "bareKey", + "quotedKey", + "val", + "table", + "stdTable", + "arrayTable", + "inlineTable", + "inlineTableKeyValues", + "tableKey", + "tableKeySep", + "inlineTableValSep", + "integer", + "int", + "float", + "frac", + "exp", + "string", + "basicString", + "basicChar", + "escaped", + "basicUnescaped", + "escape", + "mlBasicString", + "mlBasicBody", + "literalString", + "literalChar", + "mlLiteralString", + "mlLiteralBody", + "mlLiteralChar", + "hexdigit", + "hexQuad", + "boolean", + "dateFullYear", + "dateMonth", + "dateMDay", + "timeHour", + "timeMinute", + "timeSecond", + "timeSecfrac", + "timeNumoffset", + "timeOffset", + "partialTime", + "fullDate", + "fullTime", + "datetime", + "digit", + "digitDual", + "digitQuad", + "array", + "arrayValues", + "arraySep", + "Action0", + "PegText", + "Action1", + "Action2", + "Action3", + "Action4", + "Action5", + "Action6", + "Action7", + "Action8", + "Action9", + "Action10", + "Action11", + "Action12", + "Action13", + "Action14", + "Action15", + "Action16", + "Action17", + "Action18", + "Action19", + "Action20", + "Action21", + "Action22", + "Action23", + "Action24", +} + +type token32 struct { + pegRule + begin, end uint32 +} + +func (t *token32) String() string { + return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v", rul3s[t.pegRule], t.begin, t.end) +} + +type node32 struct { + token32 + up, next *node32 +} + +func (node *node32) print(pretty bool, buffer string) { + var print func(node *node32, depth int) + print = func(node *node32, depth int) { + for node != nil { + for c := 0; c < depth; c++ { + fmt.Printf(" ") + } + rule := rul3s[node.pegRule] + quote := strconv.Quote(string(([]rune(buffer)[node.begin:node.end]))) + if !pretty { + fmt.Printf("%v %v\n", rule, quote) + } else { + fmt.Printf("\x1B[34m%v\x1B[m %v\n", rule, quote) + } + if node.up != nil { + print(node.up, depth+1) + } + node = node.next + } + } + print(node, 0) +} + +func (node *node32) Print(buffer string) { + node.print(false, buffer) +} + +func (node *node32) PrettyPrint(buffer string) { + node.print(true, 
buffer) +} + +type tokens32 struct { + tree []token32 +} + +func (t *tokens32) Trim(length uint32) { + t.tree = t.tree[:length] +} + +func (t *tokens32) Print() { + for _, token := range t.tree { + fmt.Println(token.String()) + } +} + +func (t *tokens32) AST() *node32 { + type element struct { + node *node32 + down *element + } + tokens := t.Tokens() + var stack *element + for _, token := range tokens { + if token.begin == token.end { + continue + } + node := &node32{token32: token} + for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end { + stack.node.next = node.up + node.up = stack.node + stack = stack.down + } + stack = &element{node: node, down: stack} + } + if stack != nil { + return stack.node + } + return nil +} + +func (t *tokens32) PrintSyntaxTree(buffer string) { + t.AST().Print(buffer) +} + +func (t *tokens32) PrettyPrintSyntaxTree(buffer string) { + t.AST().PrettyPrint(buffer) +} + +func (t *tokens32) Add(rule pegRule, begin, end, index uint32) { + if tree := t.tree; int(index) >= len(tree) { + expanded := make([]token32, 2*len(tree)) + copy(expanded, tree) + t.tree = expanded + } + t.tree[index] = token32{ + pegRule: rule, + begin: begin, + end: end, + } +} + +func (t *tokens32) Tokens() []token32 { + return t.tree +} + +type tomlParser struct { + toml + + Buffer string + buffer []rune + rules [86]func() bool + parse func(rule ...int) error + reset func() + Pretty bool + tokens32 +} + +func (p *tomlParser) Parse(rule ...int) error { + return p.parse(rule...) +} + +func (p *tomlParser) Reset() { + p.reset() +} + +type textPosition struct { + line, symbol int +} + +type textPositionMap map[int]textPosition + +func translatePositions(buffer []rune, positions []int) textPositionMap { + length, translations, j, line, symbol := len(positions), make(textPositionMap, len(positions)), 0, 1, 0 + sort.Ints(positions) + +search: + for i, c := range buffer { + if c == '\n' { + line, symbol = line+1, 0 + } else { + symbol++ + } + if i == positions[j] { + translations[positions[j]] = textPosition{line, symbol} + for j++; j < length; j++ { + if i != positions[j] { + continue search + } + } + break search + } + } + + return translations +} + +type parseError struct { + p *tomlParser + max token32 +} + +func (e *parseError) Error() string { + tokens, error := []token32{e.max}, "\n" + positions, p := make([]int, 2*len(tokens)), 0 + for _, token := range tokens { + positions[p], p = int(token.begin), p+1 + positions[p], p = int(token.end), p+1 + } + translations := translatePositions(e.p.buffer, positions) + format := "parse error near %v (line %v symbol %v - line %v symbol %v):\n%v\n" + if e.p.Pretty { + format = "parse error near \x1B[34m%v\x1B[m (line %v symbol %v - line %v symbol %v):\n%v\n" + } + for _, token := range tokens { + begin, end := int(token.begin), int(token.end) + error += fmt.Sprintf(format, + rul3s[token.pegRule], + translations[begin].line, translations[begin].symbol, + translations[end].line, translations[end].symbol, + strconv.Quote(string(e.p.buffer[begin:end]))) + } + + return error +} + +func (p *tomlParser) PrintSyntaxTree() { + if p.Pretty { + p.tokens32.PrettyPrintSyntaxTree(p.Buffer) + } else { + p.tokens32.PrintSyntaxTree(p.Buffer) + } +} + +func (p *tomlParser) Execute() { + buffer, _buffer, text, begin, end := p.Buffer, p.buffer, "", 0, 0 + for _, token := range p.Tokens() { + switch token.pegRule { + + case rulePegText: + begin, end = int(token.begin), int(token.end) + text = string(_buffer[begin:end]) + + case ruleAction0: + _ = 
buffer + case ruleAction1: + p.SetTableString(begin, end) + case ruleAction2: + p.AddLineCount(end - begin) + case ruleAction3: + p.AddLineCount(end - begin) + case ruleAction4: + p.AddKeyValue() + case ruleAction5: + p.SetKey(p.buffer, begin, end) + case ruleAction6: + p.SetKey(p.buffer, begin-1, end+1) + case ruleAction7: + p.SetTime(begin, end) + case ruleAction8: + p.SetFloat64(begin, end) + case ruleAction9: + p.SetInt64(begin, end) + case ruleAction10: + p.SetString(begin, end) + case ruleAction11: + p.SetBool(begin, end) + case ruleAction12: + p.SetArray(begin, end) + case ruleAction13: + p.SetTable(p.buffer, begin, end) + case ruleAction14: + p.SetArrayTable(p.buffer, begin, end) + case ruleAction15: + p.StartInlineTable() + case ruleAction16: + p.EndInlineTable() + case ruleAction17: + p.SetBasicString(p.buffer, begin, end) + case ruleAction18: + p.SetMultilineString() + case ruleAction19: + p.AddMultilineBasicBody(p.buffer, begin, end) + case ruleAction20: + p.SetLiteralString(p.buffer, begin, end) + case ruleAction21: + p.SetMultilineLiteralString(p.buffer, begin, end) + case ruleAction22: + p.StartArray() + case ruleAction23: + p.AddArrayVal() + case ruleAction24: + p.AddArrayVal() + + } + } + _, _, _, _, _ = buffer, _buffer, text, begin, end +} + +func (p *tomlParser) Init() { + var ( + max token32 + position, tokenIndex uint32 + buffer []rune + ) + p.reset = func() { + max = token32{} + position, tokenIndex = 0, 0 + + p.buffer = []rune(p.Buffer) + if len(p.buffer) == 0 || p.buffer[len(p.buffer)-1] != endSymbol { + p.buffer = append(p.buffer, endSymbol) + } + buffer = p.buffer + } + p.reset() + + _rules := p.rules + tree := tokens32{tree: make([]token32, math.MaxInt16)} + p.parse = func(rule ...int) error { + r := 1 + if len(rule) > 0 { + r = rule[0] + } + matches := p.rules[r]() + p.tokens32 = tree + if matches { + p.Trim(tokenIndex) + return nil + } + return &parseError{p, max} + } + + add := func(rule pegRule, begin uint32) { + tree.Add(rule, begin, position, tokenIndex) + tokenIndex++ + if begin != position && position > max.end { + max = token32{rule, begin, position} + } + } + + matchDot := func() bool { + if buffer[position] != endSymbol { + position++ + return true + } + return false + } + + /*matchChar := func(c byte) bool { + if buffer[position] == c { + position++ + return true + } + return false + }*/ + + /*matchRange := func(lower byte, upper byte) bool { + if c := buffer[position]; c >= lower && c <= upper { + position++ + return true + } + return false + }*/ + + _rules = [...]func() bool{ + nil, + /* 0 TOML <- <(Expression (newline Expression)* newline? !. Action0)> */ + func() bool { + position0, tokenIndex0 := position, tokenIndex + { + position1 := position + if !_rules[ruleExpression]() { + goto l0 + } + l2: + { + position3, tokenIndex3 := position, tokenIndex + if !_rules[rulenewline]() { + goto l3 + } + if !_rules[ruleExpression]() { + goto l3 + } + goto l2 + l3: + position, tokenIndex = position3, tokenIndex3 + } + { + position4, tokenIndex4 := position, tokenIndex + if !_rules[rulenewline]() { + goto l4 + } + goto l5 + l4: + position, tokenIndex = position4, tokenIndex4 + } + l5: + { + position6, tokenIndex6 := position, tokenIndex + if !matchDot() { + goto l6 + } + goto l0 + l6: + position, tokenIndex = position6, tokenIndex6 + } + { + add(ruleAction0, position) + } + add(ruleTOML, position1) + } + return true + l0: + position, tokenIndex = position0, tokenIndex0 + return false + }, + /* 1 Expression <- <((<(ws table ws comment? 
(wsnl keyval ws comment?)*)> Action1) / (ws keyval ws comment?) / (ws comment?) / ws)> */ + func() bool { + position8, tokenIndex8 := position, tokenIndex + { + position9 := position + { + position10, tokenIndex10 := position, tokenIndex + { + position12 := position + if !_rules[rulews]() { + goto l11 + } + { + position13 := position + { + position14, tokenIndex14 := position, tokenIndex + { + position16 := position + if buffer[position] != rune('[') { + goto l15 + } + position++ + if !_rules[rulews]() { + goto l15 + } + { + position17 := position + if !_rules[ruletableKey]() { + goto l15 + } + add(rulePegText, position17) + } + if !_rules[rulews]() { + goto l15 + } + if buffer[position] != rune(']') { + goto l15 + } + position++ + { + add(ruleAction13, position) + } + add(rulestdTable, position16) + } + goto l14 + l15: + position, tokenIndex = position14, tokenIndex14 + { + position19 := position + if buffer[position] != rune('[') { + goto l11 + } + position++ + if buffer[position] != rune('[') { + goto l11 + } + position++ + if !_rules[rulews]() { + goto l11 + } + { + position20 := position + if !_rules[ruletableKey]() { + goto l11 + } + add(rulePegText, position20) + } + if !_rules[rulews]() { + goto l11 + } + if buffer[position] != rune(']') { + goto l11 + } + position++ + if buffer[position] != rune(']') { + goto l11 + } + position++ + { + add(ruleAction14, position) + } + add(rulearrayTable, position19) + } + } + l14: + add(ruletable, position13) + } + if !_rules[rulews]() { + goto l11 + } + { + position22, tokenIndex22 := position, tokenIndex + if !_rules[rulecomment]() { + goto l22 + } + goto l23 + l22: + position, tokenIndex = position22, tokenIndex22 + } + l23: + l24: + { + position25, tokenIndex25 := position, tokenIndex + if !_rules[rulewsnl]() { + goto l25 + } + if !_rules[rulekeyval]() { + goto l25 + } + if !_rules[rulews]() { + goto l25 + } + { + position26, tokenIndex26 := position, tokenIndex + if !_rules[rulecomment]() { + goto l26 + } + goto l27 + l26: + position, tokenIndex = position26, tokenIndex26 + } + l27: + goto l24 + l25: + position, tokenIndex = position25, tokenIndex25 + } + add(rulePegText, position12) + } + { + add(ruleAction1, position) + } + goto l10 + l11: + position, tokenIndex = position10, tokenIndex10 + if !_rules[rulews]() { + goto l29 + } + if !_rules[rulekeyval]() { + goto l29 + } + if !_rules[rulews]() { + goto l29 + } + { + position30, tokenIndex30 := position, tokenIndex + if !_rules[rulecomment]() { + goto l30 + } + goto l31 + l30: + position, tokenIndex = position30, tokenIndex30 + } + l31: + goto l10 + l29: + position, tokenIndex = position10, tokenIndex10 + if !_rules[rulews]() { + goto l32 + } + { + position33, tokenIndex33 := position, tokenIndex + if !_rules[rulecomment]() { + goto l33 + } + goto l34 + l33: + position, tokenIndex = position33, tokenIndex33 + } + l34: + goto l10 + l32: + position, tokenIndex = position10, tokenIndex10 + if !_rules[rulews]() { + goto l8 + } + } + l10: + add(ruleExpression, position9) + } + return true + l8: + position, tokenIndex = position8, tokenIndex8 + return false + }, + /* 2 newline <- <(<('\r' / '\n')+> Action2)> */ + func() bool { + position35, tokenIndex35 := position, tokenIndex + { + position36 := position + { + position37 := position + { + position40, tokenIndex40 := position, tokenIndex + if buffer[position] != rune('\r') { + goto l41 + } + position++ + goto l40 + l41: + position, tokenIndex = position40, tokenIndex40 + if buffer[position] != rune('\n') { + goto l35 + } + position++ + } + l40: + 
l38: + { + position39, tokenIndex39 := position, tokenIndex + { + position42, tokenIndex42 := position, tokenIndex + if buffer[position] != rune('\r') { + goto l43 + } + position++ + goto l42 + l43: + position, tokenIndex = position42, tokenIndex42 + if buffer[position] != rune('\n') { + goto l39 + } + position++ + } + l42: + goto l38 + l39: + position, tokenIndex = position39, tokenIndex39 + } + add(rulePegText, position37) + } + { + add(ruleAction2, position) + } + add(rulenewline, position36) + } + return true + l35: + position, tokenIndex = position35, tokenIndex35 + return false + }, + /* 3 ws <- <(' ' / '\t')*> */ + func() bool { + { + position46 := position + l47: + { + position48, tokenIndex48 := position, tokenIndex + { + position49, tokenIndex49 := position, tokenIndex + if buffer[position] != rune(' ') { + goto l50 + } + position++ + goto l49 + l50: + position, tokenIndex = position49, tokenIndex49 + if buffer[position] != rune('\t') { + goto l48 + } + position++ + } + l49: + goto l47 + l48: + position, tokenIndex = position48, tokenIndex48 + } + add(rulews, position46) + } + return true + }, + /* 4 wsnl <- <((&('\t') '\t') | (&(' ') ' ') | (&('\n' | '\r') (<('\r' / '\n')> Action3)))*> */ + func() bool { + { + position52 := position + l53: + { + position54, tokenIndex54 := position, tokenIndex + { + switch buffer[position] { + case '\t': + if buffer[position] != rune('\t') { + goto l54 + } + position++ + break + case ' ': + if buffer[position] != rune(' ') { + goto l54 + } + position++ + break + default: + { + position56 := position + { + position57, tokenIndex57 := position, tokenIndex + if buffer[position] != rune('\r') { + goto l58 + } + position++ + goto l57 + l58: + position, tokenIndex = position57, tokenIndex57 + if buffer[position] != rune('\n') { + goto l54 + } + position++ + } + l57: + add(rulePegText, position56) + } + { + add(ruleAction3, position) + } + break + } + } + + goto l53 + l54: + position, tokenIndex = position54, tokenIndex54 + } + add(rulewsnl, position52) + } + return true + }, + /* 5 comment <- <('#' <('\t' / [ -\U0010ffff])*>)> */ + func() bool { + position60, tokenIndex60 := position, tokenIndex + { + position61 := position + if buffer[position] != rune('#') { + goto l60 + } + position++ + { + position62 := position + l63: + { + position64, tokenIndex64 := position, tokenIndex + { + position65, tokenIndex65 := position, tokenIndex + if buffer[position] != rune('\t') { + goto l66 + } + position++ + goto l65 + l66: + position, tokenIndex = position65, tokenIndex65 + if c := buffer[position]; c < rune(' ') || c > rune('\U0010ffff') { + goto l64 + } + position++ + } + l65: + goto l63 + l64: + position, tokenIndex = position64, tokenIndex64 + } + add(rulePegText, position62) + } + add(rulecomment, position61) + } + return true + l60: + position, tokenIndex = position60, tokenIndex60 + return false + }, + /* 6 keyval <- <(key ws '=' ws val Action4)> */ + func() bool { + position67, tokenIndex67 := position, tokenIndex + { + position68 := position + if !_rules[rulekey]() { + goto l67 + } + if !_rules[rulews]() { + goto l67 + } + if buffer[position] != rune('=') { + goto l67 + } + position++ + if !_rules[rulews]() { + goto l67 + } + if !_rules[ruleval]() { + goto l67 + } + { + add(ruleAction4, position) + } + add(rulekeyval, position68) + } + return true + l67: + position, tokenIndex = position67, tokenIndex67 + return false + }, + /* 7 key <- <(bareKey / quotedKey)> */ + func() bool { + position70, tokenIndex70 := position, tokenIndex + { + position71 := 
position + { + position72, tokenIndex72 := position, tokenIndex + { + position74 := position + { + position75 := position + { + switch buffer[position] { + case '_': + if buffer[position] != rune('_') { + goto l73 + } + position++ + break + case '-': + if buffer[position] != rune('-') { + goto l73 + } + position++ + break + case 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z': + if c := buffer[position]; c < rune('a') || c > rune('z') { + goto l73 + } + position++ + break + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l73 + } + position++ + break + default: + if c := buffer[position]; c < rune('A') || c > rune('Z') { + goto l73 + } + position++ + break + } + } + + l76: + { + position77, tokenIndex77 := position, tokenIndex + { + switch buffer[position] { + case '_': + if buffer[position] != rune('_') { + goto l77 + } + position++ + break + case '-': + if buffer[position] != rune('-') { + goto l77 + } + position++ + break + case 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z': + if c := buffer[position]; c < rune('a') || c > rune('z') { + goto l77 + } + position++ + break + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l77 + } + position++ + break + default: + if c := buffer[position]; c < rune('A') || c > rune('Z') { + goto l77 + } + position++ + break + } + } + + goto l76 + l77: + position, tokenIndex = position77, tokenIndex77 + } + add(rulePegText, position75) + } + { + add(ruleAction5, position) + } + add(rulebareKey, position74) + } + goto l72 + l73: + position, tokenIndex = position72, tokenIndex72 + { + position81 := position + if buffer[position] != rune('"') { + goto l70 + } + position++ + { + position82 := position + if !_rules[rulebasicChar]() { + goto l70 + } + l83: + { + position84, tokenIndex84 := position, tokenIndex + if !_rules[rulebasicChar]() { + goto l84 + } + goto l83 + l84: + position, tokenIndex = position84, tokenIndex84 + } + add(rulePegText, position82) + } + if buffer[position] != rune('"') { + goto l70 + } + position++ + { + add(ruleAction6, position) + } + add(rulequotedKey, position81) + } + } + l72: + add(rulekey, position71) + } + return true + l70: + position, tokenIndex = position70, tokenIndex70 + return false + }, + /* 8 bareKey <- <(<((&('_') '_') | (&('-') '-') | (&('a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z') [a-z]) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') [0-9]) | (&('A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z') [A-Z]))+> Action5)> */ + nil, + /* 9 quotedKey <- <('"' <basicChar+> '"' Action6)> */ + nil, + /* 10 val <- <((<datetime> Action7) / (<float> Action8) / ((&('{') inlineTable) | (&('[') (<array> Action12)) | (&('f' | 't') (<boolean> Action11)) | (&('"' | '\'') (<string> Action10)) | (&('+' | '-' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') (<integer> Action9))))> */ + func() bool { + position88, tokenIndex88 := position, tokenIndex + { + position89 := position + { + position90, tokenIndex90 := position, tokenIndex + { + position92 := position + { + position93 := position + { + 
position94, tokenIndex94 := position, tokenIndex + { + position96 := position + { + position97 := position + { + position98 := position + if !_rules[ruledigitDual]() { + goto l95 + } + if !_rules[ruledigitDual]() { + goto l95 + } + add(ruledigitQuad, position98) + } + add(ruledateFullYear, position97) + } + if buffer[position] != rune('-') { + goto l95 + } + position++ + { + position99 := position + if !_rules[ruledigitDual]() { + goto l95 + } + add(ruledateMonth, position99) + } + if buffer[position] != rune('-') { + goto l95 + } + position++ + { + position100 := position + if !_rules[ruledigitDual]() { + goto l95 + } + add(ruledateMDay, position100) + } + add(rulefullDate, position96) + } + { + position101, tokenIndex101 := position, tokenIndex + if buffer[position] != rune('T') { + goto l101 + } + position++ + { + position103 := position + if !_rules[rulepartialTime]() { + goto l101 + } + { + position104 := position + { + position105, tokenIndex105 := position, tokenIndex + if buffer[position] != rune('Z') { + goto l106 + } + position++ + goto l105 + l106: + position, tokenIndex = position105, tokenIndex105 + { + position107 := position + { + position108, tokenIndex108 := position, tokenIndex + if buffer[position] != rune('-') { + goto l109 + } + position++ + goto l108 + l109: + position, tokenIndex = position108, tokenIndex108 + if buffer[position] != rune('+') { + goto l101 + } + position++ + } + l108: + if !_rules[ruletimeHour]() { + goto l101 + } + if buffer[position] != rune(':') { + goto l101 + } + position++ + if !_rules[ruletimeMinute]() { + goto l101 + } + add(ruletimeNumoffset, position107) + } + } + l105: + add(ruletimeOffset, position104) + } + add(rulefullTime, position103) + } + goto l102 + l101: + position, tokenIndex = position101, tokenIndex101 + } + l102: + goto l94 + l95: + position, tokenIndex = position94, tokenIndex94 + if !_rules[rulepartialTime]() { + goto l91 + } + } + l94: + add(ruledatetime, position93) + } + add(rulePegText, position92) + } + { + add(ruleAction7, position) + } + goto l90 + l91: + position, tokenIndex = position90, tokenIndex90 + { + position112 := position + { + position113 := position + if !_rules[ruleinteger]() { + goto l111 + } + { + position114, tokenIndex114 := position, tokenIndex + if !_rules[rulefrac]() { + goto l115 + } + { + position116, tokenIndex116 := position, tokenIndex + if !_rules[ruleexp]() { + goto l116 + } + goto l117 + l116: + position, tokenIndex = position116, tokenIndex116 + } + l117: + goto l114 + l115: + position, tokenIndex = position114, tokenIndex114 + { + position118, tokenIndex118 := position, tokenIndex + if !_rules[rulefrac]() { + goto l118 + } + goto l119 + l118: + position, tokenIndex = position118, tokenIndex118 + } + l119: + if !_rules[ruleexp]() { + goto l111 + } + } + l114: + add(rulefloat, position113) + } + add(rulePegText, position112) + } + { + add(ruleAction8, position) + } + goto l90 + l111: + position, tokenIndex = position90, tokenIndex90 + { + switch buffer[position] { + case '{': + { + position122 := position + if buffer[position] != rune('{') { + goto l88 + } + position++ + { + add(ruleAction15, position) + } + if !_rules[rulews]() { + goto l88 + } + { + position124 := position + l125: + { + position126, tokenIndex126 := position, tokenIndex + if !_rules[rulekeyval]() { + goto l126 + } + { + position127, tokenIndex127 := position, tokenIndex + { + position129 := position + if !_rules[rulews]() { + goto l127 + } + if buffer[position] != rune(',') { + goto l127 + } + position++ + if 
!_rules[rulews]() { + goto l127 + } + add(ruleinlineTableValSep, position129) + } + goto l128 + l127: + position, tokenIndex = position127, tokenIndex127 + } + l128: + goto l125 + l126: + position, tokenIndex = position126, tokenIndex126 + } + add(ruleinlineTableKeyValues, position124) + } + if !_rules[rulews]() { + goto l88 + } + if buffer[position] != rune('}') { + goto l88 + } + position++ + { + add(ruleAction16, position) + } + add(ruleinlineTable, position122) + } + break + case '[': + { + position131 := position + { + position132 := position + if buffer[position] != rune('[') { + goto l88 + } + position++ + { + add(ruleAction22, position) + } + if !_rules[rulewsnl]() { + goto l88 + } + { + position134, tokenIndex134 := position, tokenIndex + { + position136 := position + if !_rules[ruleval]() { + goto l134 + } + { + add(ruleAction23, position) + } + l138: + { + position139, tokenIndex139 := position, tokenIndex + if !_rules[rulewsnl]() { + goto l139 + } + { + position140, tokenIndex140 := position, tokenIndex + if !_rules[rulecomment]() { + goto l140 + } + goto l141 + l140: + position, tokenIndex = position140, tokenIndex140 + } + l141: + if !_rules[rulewsnl]() { + goto l139 + } + if !_rules[rulearraySep]() { + goto l139 + } + if !_rules[rulewsnl]() { + goto l139 + } + { + position142, tokenIndex142 := position, tokenIndex + if !_rules[rulecomment]() { + goto l142 + } + goto l143 + l142: + position, tokenIndex = position142, tokenIndex142 + } + l143: + if !_rules[rulewsnl]() { + goto l139 + } + if !_rules[ruleval]() { + goto l139 + } + { + add(ruleAction24, position) + } + goto l138 + l139: + position, tokenIndex = position139, tokenIndex139 + } + if !_rules[rulewsnl]() { + goto l134 + } + { + position145, tokenIndex145 := position, tokenIndex + if !_rules[rulearraySep]() { + goto l145 + } + goto l146 + l145: + position, tokenIndex = position145, tokenIndex145 + } + l146: + if !_rules[rulewsnl]() { + goto l134 + } + { + position147, tokenIndex147 := position, tokenIndex + if !_rules[rulecomment]() { + goto l147 + } + goto l148 + l147: + position, tokenIndex = position147, tokenIndex147 + } + l148: + add(rulearrayValues, position136) + } + goto l135 + l134: + position, tokenIndex = position134, tokenIndex134 + } + l135: + if !_rules[rulewsnl]() { + goto l88 + } + if buffer[position] != rune(']') { + goto l88 + } + position++ + add(rulearray, position132) + } + add(rulePegText, position131) + } + { + add(ruleAction12, position) + } + break + case 'f', 't': + { + position150 := position + { + position151 := position + { + position152, tokenIndex152 := position, tokenIndex + if buffer[position] != rune('t') { + goto l153 + } + position++ + if buffer[position] != rune('r') { + goto l153 + } + position++ + if buffer[position] != rune('u') { + goto l153 + } + position++ + if buffer[position] != rune('e') { + goto l153 + } + position++ + goto l152 + l153: + position, tokenIndex = position152, tokenIndex152 + if buffer[position] != rune('f') { + goto l88 + } + position++ + if buffer[position] != rune('a') { + goto l88 + } + position++ + if buffer[position] != rune('l') { + goto l88 + } + position++ + if buffer[position] != rune('s') { + goto l88 + } + position++ + if buffer[position] != rune('e') { + goto l88 + } + position++ + } + l152: + add(ruleboolean, position151) + } + add(rulePegText, position150) + } + { + add(ruleAction11, position) + } + break + case '"', '\'': + { + position155 := position + { + position156 := position + { + position157, tokenIndex157 := position, tokenIndex + { + 
position159 := position + if buffer[position] != rune('\'') { + goto l158 + } + position++ + if buffer[position] != rune('\'') { + goto l158 + } + position++ + if buffer[position] != rune('\'') { + goto l158 + } + position++ + { + position160 := position + { + position161 := position + l162: + { + position163, tokenIndex163 := position, tokenIndex + { + position164, tokenIndex164 := position, tokenIndex + if buffer[position] != rune('\'') { + goto l164 + } + position++ + if buffer[position] != rune('\'') { + goto l164 + } + position++ + if buffer[position] != rune('\'') { + goto l164 + } + position++ + goto l163 + l164: + position, tokenIndex = position164, tokenIndex164 + } + { + position165, tokenIndex165 := position, tokenIndex + { + position167 := position + { + position168, tokenIndex168 := position, tokenIndex + if buffer[position] != rune('\t') { + goto l169 + } + position++ + goto l168 + l169: + position, tokenIndex = position168, tokenIndex168 + if c := buffer[position]; c < rune(' ') || c > rune('\U0010ffff') { + goto l166 + } + position++ + } + l168: + add(rulemlLiteralChar, position167) + } + goto l165 + l166: + position, tokenIndex = position165, tokenIndex165 + if !_rules[rulenewline]() { + goto l163 + } + } + l165: + goto l162 + l163: + position, tokenIndex = position163, tokenIndex163 + } + add(rulemlLiteralBody, position161) + } + add(rulePegText, position160) + } + if buffer[position] != rune('\'') { + goto l158 + } + position++ + if buffer[position] != rune('\'') { + goto l158 + } + position++ + if buffer[position] != rune('\'') { + goto l158 + } + position++ + { + add(ruleAction21, position) + } + add(rulemlLiteralString, position159) + } + goto l157 + l158: + position, tokenIndex = position157, tokenIndex157 + { + position172 := position + if buffer[position] != rune('\'') { + goto l171 + } + position++ + { + position173 := position + l174: + { + position175, tokenIndex175 := position, tokenIndex + { + position176 := position + { + switch buffer[position] { + case '\t': + if buffer[position] != rune('\t') { + goto l175 + } + position++ + break + case ' ', '!', '"', '#', '$', '%', '&': + if c := buffer[position]; c < rune(' ') || c > rune('&') { + goto l175 + } + position++ + break + default: + if c := buffer[position]; c < rune('(') || c > rune('\U0010ffff') { + goto l175 + } + position++ + break + } + } + + add(ruleliteralChar, position176) + } + goto l174 + l175: + position, tokenIndex = position175, tokenIndex175 + } + add(rulePegText, position173) + } + if buffer[position] != rune('\'') { + goto l171 + } + position++ + { + add(ruleAction20, position) + } + add(ruleliteralString, position172) + } + goto l157 + l171: + position, tokenIndex = position157, tokenIndex157 + { + position180 := position + if buffer[position] != rune('"') { + goto l179 + } + position++ + if buffer[position] != rune('"') { + goto l179 + } + position++ + if buffer[position] != rune('"') { + goto l179 + } + position++ + { + position181 := position + l182: + { + position183, tokenIndex183 := position, tokenIndex + { + position184, tokenIndex184 := position, tokenIndex + { + position186 := position + { + position187, tokenIndex187 := position, tokenIndex + if !_rules[rulebasicChar]() { + goto l188 + } + goto l187 + l188: + position, tokenIndex = position187, tokenIndex187 + if !_rules[rulenewline]() { + goto l185 + } + } + l187: + add(rulePegText, position186) + } + { + add(ruleAction19, position) + } + goto l184 + l185: + position, tokenIndex = position184, tokenIndex184 + if 
!_rules[ruleescape]() { + goto l183 + } + if !_rules[rulenewline]() { + goto l183 + } + if !_rules[rulewsnl]() { + goto l183 + } + } + l184: + goto l182 + l183: + position, tokenIndex = position183, tokenIndex183 + } + add(rulemlBasicBody, position181) + } + if buffer[position] != rune('"') { + goto l179 + } + position++ + if buffer[position] != rune('"') { + goto l179 + } + position++ + if buffer[position] != rune('"') { + goto l179 + } + position++ + { + add(ruleAction18, position) + } + add(rulemlBasicString, position180) + } + goto l157 + l179: + position, tokenIndex = position157, tokenIndex157 + { + position191 := position + { + position192 := position + if buffer[position] != rune('"') { + goto l88 + } + position++ + l193: + { + position194, tokenIndex194 := position, tokenIndex + if !_rules[rulebasicChar]() { + goto l194 + } + goto l193 + l194: + position, tokenIndex = position194, tokenIndex194 + } + if buffer[position] != rune('"') { + goto l88 + } + position++ + add(rulePegText, position192) + } + { + add(ruleAction17, position) + } + add(rulebasicString, position191) + } + } + l157: + add(rulestring, position156) + } + add(rulePegText, position155) + } + { + add(ruleAction10, position) + } + break + default: + { + position197 := position + if !_rules[ruleinteger]() { + goto l88 + } + add(rulePegText, position197) + } + { + add(ruleAction9, position) + } + break + } + } + + } + l90: + add(ruleval, position89) + } + return true + l88: + position, tokenIndex = position88, tokenIndex88 + return false + }, + /* 11 table <- <(stdTable / arrayTable)> */ + nil, + /* 12 stdTable <- <('[' ws <tableKey> ws ']' Action13)> */ + nil, + /* 13 arrayTable <- <('[' '[' ws <tableKey> ws (']' ']') Action14)> */ + nil, + /* 14 inlineTable <- <('{' Action15 ws inlineTableKeyValues ws '}' Action16)> */ + nil, + /* 15 inlineTableKeyValues <- <(keyval inlineTableValSep?)*> */ + nil, + /* 16 tableKey <- <(key (tableKeySep key)*)> */ + func() bool { + position204, tokenIndex204 := position, tokenIndex + { + position205 := position + if !_rules[rulekey]() { + goto l204 + } + l206: + { + position207, tokenIndex207 := position, tokenIndex + { + position208 := position + if !_rules[rulews]() { + goto l207 + } + if buffer[position] != rune('.') { + goto l207 + } + position++ + if !_rules[rulews]() { + goto l207 + } + add(ruletableKeySep, position208) + } + if !_rules[rulekey]() { + goto l207 + } + goto l206 + l207: + position, tokenIndex = position207, tokenIndex207 + } + add(ruletableKey, position205) + } + return true + l204: + position, tokenIndex = position204, tokenIndex204 + return false + }, + /* 17 tableKeySep <- <(ws '.' ws)> */ + nil, + /* 18 inlineTableValSep <- <(ws ',' ws)> */ + nil, + /* 19 integer <- <(('-' / '+')? 
int)> */ + func() bool { + position211, tokenIndex211 := position, tokenIndex + { + position212 := position + { + position213, tokenIndex213 := position, tokenIndex + { + position215, tokenIndex215 := position, tokenIndex + if buffer[position] != rune('-') { + goto l216 + } + position++ + goto l215 + l216: + position, tokenIndex = position215, tokenIndex215 + if buffer[position] != rune('+') { + goto l213 + } + position++ + } + l215: + goto l214 + l213: + position, tokenIndex = position213, tokenIndex213 + } + l214: + { + position217 := position + { + position218, tokenIndex218 := position, tokenIndex + if c := buffer[position]; c < rune('1') || c > rune('9') { + goto l219 + } + position++ + { + position222, tokenIndex222 := position, tokenIndex + if !_rules[ruledigit]() { + goto l223 + } + goto l222 + l223: + position, tokenIndex = position222, tokenIndex222 + if buffer[position] != rune('_') { + goto l219 + } + position++ + if !_rules[ruledigit]() { + goto l219 + } + } + l222: + l220: + { + position221, tokenIndex221 := position, tokenIndex + { + position224, tokenIndex224 := position, tokenIndex + if !_rules[ruledigit]() { + goto l225 + } + goto l224 + l225: + position, tokenIndex = position224, tokenIndex224 + if buffer[position] != rune('_') { + goto l221 + } + position++ + if !_rules[ruledigit]() { + goto l221 + } + } + l224: + goto l220 + l221: + position, tokenIndex = position221, tokenIndex221 + } + goto l218 + l219: + position, tokenIndex = position218, tokenIndex218 + if !_rules[ruledigit]() { + goto l211 + } + } + l218: + add(ruleint, position217) + } + add(ruleinteger, position212) + } + return true + l211: + position, tokenIndex = position211, tokenIndex211 + return false + }, + /* 20 int <- <(([1-9] (digit / ('_' digit))+) / digit)> */ + nil, + /* 21 float <- <(integer ((frac exp?) / (frac? exp)))> */ + nil, + /* 22 frac <- <('.' digit (digit / ('_' digit))*)> */ + func() bool { + position228, tokenIndex228 := position, tokenIndex + { + position229 := position + if buffer[position] != rune('.') { + goto l228 + } + position++ + if !_rules[ruledigit]() { + goto l228 + } + l230: + { + position231, tokenIndex231 := position, tokenIndex + { + position232, tokenIndex232 := position, tokenIndex + if !_rules[ruledigit]() { + goto l233 + } + goto l232 + l233: + position, tokenIndex = position232, tokenIndex232 + if buffer[position] != rune('_') { + goto l231 + } + position++ + if !_rules[ruledigit]() { + goto l231 + } + } + l232: + goto l230 + l231: + position, tokenIndex = position231, tokenIndex231 + } + add(rulefrac, position229) + } + return true + l228: + position, tokenIndex = position228, tokenIndex228 + return false + }, + /* 23 exp <- <(('e' / 'E') ('-' / '+')? 
digit (digit / ('_' digit))*)> */ + func() bool { + position234, tokenIndex234 := position, tokenIndex + { + position235 := position + { + position236, tokenIndex236 := position, tokenIndex + if buffer[position] != rune('e') { + goto l237 + } + position++ + goto l236 + l237: + position, tokenIndex = position236, tokenIndex236 + if buffer[position] != rune('E') { + goto l234 + } + position++ + } + l236: + { + position238, tokenIndex238 := position, tokenIndex + { + position240, tokenIndex240 := position, tokenIndex + if buffer[position] != rune('-') { + goto l241 + } + position++ + goto l240 + l241: + position, tokenIndex = position240, tokenIndex240 + if buffer[position] != rune('+') { + goto l238 + } + position++ + } + l240: + goto l239 + l238: + position, tokenIndex = position238, tokenIndex238 + } + l239: + if !_rules[ruledigit]() { + goto l234 + } + l242: + { + position243, tokenIndex243 := position, tokenIndex + { + position244, tokenIndex244 := position, tokenIndex + if !_rules[ruledigit]() { + goto l245 + } + goto l244 + l245: + position, tokenIndex = position244, tokenIndex244 + if buffer[position] != rune('_') { + goto l243 + } + position++ + if !_rules[ruledigit]() { + goto l243 + } + } + l244: + goto l242 + l243: + position, tokenIndex = position243, tokenIndex243 + } + add(ruleexp, position235) + } + return true + l234: + position, tokenIndex = position234, tokenIndex234 + return false + }, + /* 24 string <- <(mlLiteralString / literalString / mlBasicString / basicString)> */ + nil, + /* 25 basicString <- <(<('"' basicChar* '"')> Action17)> */ + nil, + /* 26 basicChar <- <(basicUnescaped / escaped)> */ + func() bool { + position248, tokenIndex248 := position, tokenIndex + { + position249 := position + { + position250, tokenIndex250 := position, tokenIndex + { + position252 := position + { + switch buffer[position] { + case ' ', '!': + if c := buffer[position]; c < rune(' ') || c > rune('!') { + goto l251 + } + position++ + break + case '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[': + if c := buffer[position]; c < rune('#') || c > rune('[') { + goto l251 + } + position++ + break + default: + if c := buffer[position]; c < rune(']') || c > rune('\U0010ffff') { + goto l251 + } + position++ + break + } + } + + add(rulebasicUnescaped, position252) + } + goto l250 + l251: + position, tokenIndex = position250, tokenIndex250 + { + position254 := position + if !_rules[ruleescape]() { + goto l248 + } + { + switch buffer[position] { + case 'U': + if buffer[position] != rune('U') { + goto l248 + } + position++ + if !_rules[rulehexQuad]() { + goto l248 + } + if !_rules[rulehexQuad]() { + goto l248 + } + break + case 'u': + if buffer[position] != rune('u') { + goto l248 + } + position++ + if !_rules[rulehexQuad]() { + goto l248 + } + break + case '\\': + if buffer[position] != rune('\\') { + goto l248 + } + position++ + break + case '/': + if buffer[position] != rune('/') { + goto l248 + } + position++ + break + case '"': + if buffer[position] != rune('"') { + goto l248 + } + position++ + break + case 'r': + if buffer[position] != rune('r') { + goto l248 + } + position++ + break + case 'f': + if buffer[position] != rune('f') { + goto l248 + } + position++ + break + case 'n': + if buffer[position] != rune('n') { + goto l248 + } + position++ + break + case 
't': + if buffer[position] != rune('t') { + goto l248 + } + position++ + break + default: + if buffer[position] != rune('b') { + goto l248 + } + position++ + break + } + } + + add(ruleescaped, position254) + } + } + l250: + add(rulebasicChar, position249) + } + return true + l248: + position, tokenIndex = position248, tokenIndex248 + return false + }, + /* 27 escaped <- <(escape ((&('U') ('U' hexQuad hexQuad)) | (&('u') ('u' hexQuad)) | (&('\\') '\\') | (&('/') '/') | (&('"') '"') | (&('r') 'r') | (&('f') 'f') | (&('n') 'n') | (&('t') 't') | (&('b') 'b')))> */ + nil, + /* 28 basicUnescaped <- <((&(' ' | '!') [ -!]) | (&('#' | '$' | '%' | '&' | '\'' | '(' | ')' | '*' | '+' | ',' | '-' | '.' | '/' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | ':' | ';' | '<' | '=' | '>' | '?' | '@' | 'A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z' | '[') [#-[]) | (&(']' | '^' | '_' | '`' | 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z' | '{' | '|' | '}' | '~' | '\u007f' | '\u0080' | '\u0081' | '\u0082' | '\u0083' | '\u0084' | '\u0085' | '\u0086' | '\u0087' | '\u0088' | '\u0089' | '\u008a' | '\u008b' | '\u008c' | '\u008d' | '\u008e' | '\u008f' | '\u0090' | '\u0091' | '\u0092' | '\u0093' | '\u0094' | '\u0095' | '\u0096' | '\u0097' | '\u0098' | '\u0099' | '\u009a' | '\u009b' | '\u009c' | '\u009d' | '\u009e' | '\u009f' | '\u00a0' | '¡' | '¢' | '£' | '¤' | '¥' | '¦' | '§' | '¨' | '©' | 'ª' | '«' | '¬' | '\u00ad' | '®' | '¯' | '°' | '±' | '²' | '³' | '´' | 'µ' | '¶' | '·' | '¸' | '¹' | 'º' | '»' | '¼' | '½' | '¾' | '¿' | 'À' | 'Á' | 'Â' | 'Ã' | 'Ä' | 'Å' | 'Æ' | 'Ç' | 'È' | 'É' | 'Ê' | 'Ë' | 'Ì' | 'Í' | 'Î' | 'Ï' | 'Ð' | 'Ñ' | 'Ò' | 'Ó' | 'Ô' | 'Õ' | 'Ö' | '×' | 'Ø' | 'Ù' | 'Ú' | 'Û' | 'Ü' | 'Ý' | 'Þ' | 'ß' | 'à' | 'á' | 'â' | 'ã' | 'ä' | 'å' | 'æ' | 'ç' | 'è' | 'é' | 'ê' | 'ë' | 'ì' | 'í' | 'î' | 'ï' | 'ð' | 'ñ' | 'ò' | 'ó' | 'ô' | 'õ' | 'ö' | '÷' | 'ø' | 'ù' | 'ú' | 'û' | 'ü' | 'ý' | 'þ' | 'ÿ') []-\U0010ffff]))> */ + nil, + /* 29 escape <- <'\\'> */ + func() bool { + position258, tokenIndex258 := position, tokenIndex + { + position259 := position + if buffer[position] != rune('\\') { + goto l258 + } + position++ + add(ruleescape, position259) + } + return true + l258: + position, tokenIndex = position258, tokenIndex258 + return false + }, + /* 30 mlBasicString <- <('"' '"' '"' mlBasicBody ('"' '"' '"') Action18)> */ + nil, + /* 31 mlBasicBody <- <((<(basicChar / newline)> Action19) / (escape newline wsnl))*> */ + nil, + /* 32 literalString <- <('\'' <literalChar*> '\'' Action20)> */ + nil, + /* 33 literalChar <- <((&('\t') '\t') | (&(' ' | '!' | '"' | '#' | '$' | '%' | '&') [ -&]) | (&('(' | ')' | '*' | '+' | ',' | '-' | '.' | '/' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | ':' | ';' | '<' | '=' | '>' | '?' 
| '@' | 'A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z' | '[' | '\\' | ']' | '^' | '_' | '`' | 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z' | '{' | '|' | '}' | '~' | '\u007f' | '\u0080' | '\u0081' | '\u0082' | '\u0083' | '\u0084' | '\u0085' | '\u0086' | '\u0087' | '\u0088' | '\u0089' | '\u008a' | '\u008b' | '\u008c' | '\u008d' | '\u008e' | '\u008f' | '\u0090' | '\u0091' | '\u0092' | '\u0093' | '\u0094' | '\u0095' | '\u0096' | '\u0097' | '\u0098' | '\u0099' | '\u009a' | '\u009b' | '\u009c' | '\u009d' | '\u009e' | '\u009f' | '\u00a0' | '¡' | '¢' | '£' | '¤' | '¥' | '¦' | '§' | '¨' | '©' | 'ª' | '«' | '¬' | '\u00ad' | '®' | '¯' | '°' | '±' | '²' | '³' | '´' | 'µ' | '¶' | '·' | '¸' | '¹' | 'º' | '»' | '¼' | '½' | '¾' | '¿' | 'À' | 'Á' | 'Â' | 'Ã' | 'Ä' | 'Å' | 'Æ' | 'Ç' | 'È' | 'É' | 'Ê' | 'Ë' | 'Ì' | 'Í' | 'Î' | 'Ï' | 'Ð' | 'Ñ' | 'Ò' | 'Ó' | 'Ô' | 'Õ' | 'Ö' | '×' | 'Ø' | 'Ù' | 'Ú' | 'Û' | 'Ü' | 'Ý' | 'Þ' | 'ß' | 'à' | 'á' | 'â' | 'ã' | 'ä' | 'å' | 'æ' | 'ç' | 'è' | 'é' | 'ê' | 'ë' | 'ì' | 'í' | 'î' | 'ï' | 'ð' | 'ñ' | 'ò' | 'ó' | 'ô' | 'õ' | 'ö' | '÷' | 'ø' | 'ù' | 'ú' | 'û' | 'ü' | 'ý' | 'þ' | 'ÿ') [(-\U0010ffff]))> */ + nil, + /* 34 mlLiteralString <- <('\'' '\'' '\'' <mlLiteralBody> ('\'' '\'' '\'') Action21)> */ + nil, + /* 35 mlLiteralBody <- <(!('\'' '\'' '\'') (mlLiteralChar / newline))*> */ + nil, + /* 36 mlLiteralChar <- <('\t' / [ -\U0010ffff])> */ + nil, + /* 37 hexdigit <- <((&('a' | 'b' | 'c' | 'd' | 'e' | 'f') [a-f]) | (&('A' | 'B' | 'C' | 'D' | 'E' | 'F') [A-F]) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') [0-9]))> */ + func() bool { + position267, tokenIndex267 := position, tokenIndex + { + position268 := position + { + switch buffer[position] { + case 'a', 'b', 'c', 'd', 'e', 'f': + if c := buffer[position]; c < rune('a') || c > rune('f') { + goto l267 + } + position++ + break + case 'A', 'B', 'C', 'D', 'E', 'F': + if c := buffer[position]; c < rune('A') || c > rune('F') { + goto l267 + } + position++ + break + default: + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l267 + } + position++ + break + } + } + + add(rulehexdigit, position268) + } + return true + l267: + position, tokenIndex = position267, tokenIndex267 + return false + }, + /* 38 hexQuad <- <(hexdigit hexdigit hexdigit hexdigit)> */ + func() bool { + position270, tokenIndex270 := position, tokenIndex + { + position271 := position + if !_rules[rulehexdigit]() { + goto l270 + } + if !_rules[rulehexdigit]() { + goto l270 + } + if !_rules[rulehexdigit]() { + goto l270 + } + if !_rules[rulehexdigit]() { + goto l270 + } + add(rulehexQuad, position271) + } + return true + l270: + position, tokenIndex = position270, tokenIndex270 + return false + }, + /* 39 boolean <- <(('t' 'r' 'u' 'e') / ('f' 'a' 'l' 's' 'e'))> */ + nil, + /* 40 dateFullYear <- <digitQuad> */ + nil, + /* 41 dateMonth <- <digitDual> */ + nil, + /* 42 dateMDay <- <digitDual> */ + nil, + /* 43 timeHour <- <digitDual> */ + func() bool { + position276, tokenIndex276 := position, tokenIndex + { + position277 := position + if !_rules[ruledigitDual]() { + goto l276 + } + add(ruletimeHour, position277) + } + return true + l276: + position, tokenIndex = position276, tokenIndex276 + return false + }, + /* 44 timeMinute <- <digitDual> */ + func() bool { + position278, tokenIndex278 := position, tokenIndex + { + 
position279 := position + if !_rules[ruledigitDual]() { + goto l278 + } + add(ruletimeMinute, position279) + } + return true + l278: + position, tokenIndex = position278, tokenIndex278 + return false + }, + /* 45 timeSecond <- <digitDual> */ + nil, + /* 46 timeSecfrac <- <('.' digit+)> */ + nil, + /* 47 timeNumoffset <- <(('-' / '+') timeHour ':' timeMinute)> */ + nil, + /* 48 timeOffset <- <('Z' / timeNumoffset)> */ + nil, + /* 49 partialTime <- <(timeHour ':' timeMinute ':' timeSecond timeSecfrac?)> */ + func() bool { + position284, tokenIndex284 := position, tokenIndex + { + position285 := position + if !_rules[ruletimeHour]() { + goto l284 + } + if buffer[position] != rune(':') { + goto l284 + } + position++ + if !_rules[ruletimeMinute]() { + goto l284 + } + if buffer[position] != rune(':') { + goto l284 + } + position++ + { + position286 := position + if !_rules[ruledigitDual]() { + goto l284 + } + add(ruletimeSecond, position286) + } + { + position287, tokenIndex287 := position, tokenIndex + { + position289 := position + if buffer[position] != rune('.') { + goto l287 + } + position++ + if !_rules[ruledigit]() { + goto l287 + } + l290: + { + position291, tokenIndex291 := position, tokenIndex + if !_rules[ruledigit]() { + goto l291 + } + goto l290 + l291: + position, tokenIndex = position291, tokenIndex291 + } + add(ruletimeSecfrac, position289) + } + goto l288 + l287: + position, tokenIndex = position287, tokenIndex287 + } + l288: + add(rulepartialTime, position285) + } + return true + l284: + position, tokenIndex = position284, tokenIndex284 + return false + }, + /* 50 fullDate <- <(dateFullYear '-' dateMonth '-' dateMDay)> */ + nil, + /* 51 fullTime <- <(partialTime timeOffset)> */ + nil, + /* 52 datetime <- <((fullDate ('T' fullTime)?) / partialTime)> */ + nil, + /* 53 digit <- <[0-9]> */ + func() bool { + position295, tokenIndex295 := position, tokenIndex + { + position296 := position + if c := buffer[position]; c < rune('0') || c > rune('9') { + goto l295 + } + position++ + add(ruledigit, position296) + } + return true + l295: + position, tokenIndex = position295, tokenIndex295 + return false + }, + /* 54 digitDual <- <(digit digit)> */ + func() bool { + position297, tokenIndex297 := position, tokenIndex + { + position298 := position + if !_rules[ruledigit]() { + goto l297 + } + if !_rules[ruledigit]() { + goto l297 + } + add(ruledigitDual, position298) + } + return true + l297: + position, tokenIndex = position297, tokenIndex297 + return false + }, + /* 55 digitQuad <- <(digitDual digitDual)> */ + nil, + /* 56 array <- <('[' Action22 wsnl arrayValues? wsnl ']')> */ + nil, + /* 57 arrayValues <- <(val Action23 (wsnl comment? wsnl arraySep wsnl comment? wsnl val Action24)* wsnl arraySep? 
wsnl comment?)> */ + nil, + /* 58 arraySep <- <','> */ + func() bool { + position302, tokenIndex302 := position, tokenIndex + { + position303 := position + if buffer[position] != rune(',') { + goto l302 + } + position++ + add(rulearraySep, position303) + } + return true + l302: + position, tokenIndex = position302, tokenIndex302 + return false + }, + /* 60 Action0 <- <{ _ = buffer }> */ + nil, + nil, + /* 62 Action1 <- <{ p.SetTableString(begin, end) }> */ + nil, + /* 63 Action2 <- <{ p.AddLineCount(end - begin) }> */ + nil, + /* 64 Action3 <- <{ p.AddLineCount(end - begin) }> */ + nil, + /* 65 Action4 <- <{ p.AddKeyValue() }> */ + nil, + /* 66 Action5 <- <{ p.SetKey(p.buffer, begin, end) }> */ + nil, + /* 67 Action6 <- <{ p.SetKey(p.buffer, begin-1, end+1) }> */ + nil, + /* 68 Action7 <- <{ p.SetTime(begin, end) }> */ + nil, + /* 69 Action8 <- <{ p.SetFloat64(begin, end) }> */ + nil, + /* 70 Action9 <- <{ p.SetInt64(begin, end) }> */ + nil, + /* 71 Action10 <- <{ p.SetString(begin, end) }> */ + nil, + /* 72 Action11 <- <{ p.SetBool(begin, end) }> */ + nil, + /* 73 Action12 <- <{ p.SetArray(begin, end) }> */ + nil, + /* 74 Action13 <- <{ p.SetTable(p.buffer, begin, end) }> */ + nil, + /* 75 Action14 <- <{ p.SetArrayTable(p.buffer, begin, end) }> */ + nil, + /* 76 Action15 <- <{ p.StartInlineTable() }> */ + nil, + /* 77 Action16 <- <{ p.EndInlineTable() }> */ + nil, + /* 78 Action17 <- <{ p.SetBasicString(p.buffer, begin, end) }> */ + nil, + /* 79 Action18 <- <{ p.SetMultilineString() }> */ + nil, + /* 80 Action19 <- <{ p.AddMultilineBasicBody(p.buffer, begin, end) }> */ + nil, + /* 81 Action20 <- <{ p.SetLiteralString(p.buffer, begin, end) }> */ + nil, + /* 82 Action21 <- <{ p.SetMultilineLiteralString(p.buffer, begin, end) }> */ + nil, + /* 83 Action22 <- <{ p.StartArray() }> */ + nil, + /* 84 Action23 <- <{ p.AddArrayVal() }> */ + nil, + /* 85 Action24 <- <{ p.AddArrayVal() }> */ + nil, + } + p.rules = _rules +} diff --git a/vendor/github.com/naoina/toml/util.go b/vendor/github.com/naoina/toml/util.go new file mode 100644 index 000000000..f882f4e5f --- /dev/null +++ b/vendor/github.com/naoina/toml/util.go @@ -0,0 +1,65 @@ +package toml + +import ( + "fmt" + "reflect" + "strings" +) + +const fieldTagName = "toml" + +// fieldCache maps normalized field names to their position in a struct. 
+type fieldCache struct { + named map[string]fieldInfo // fields with an explicit name in tag + auto map[string]fieldInfo // fields with auto-assigned normalized names +} + +type fieldInfo struct { + index []int + name string + ignored bool +} + +func makeFieldCache(cfg *Config, rt reflect.Type) fieldCache { + named, auto := make(map[string]fieldInfo), make(map[string]fieldInfo) + for i := 0; i < rt.NumField(); i++ { + ft := rt.Field(i) + // skip unexported fields + if ft.PkgPath != "" && !ft.Anonymous { + continue + } + col, _ := extractTag(ft.Tag.Get(fieldTagName)) + info := fieldInfo{index: ft.Index, name: ft.Name, ignored: col == "-"} + if col == "" || col == "-" { + auto[cfg.NormFieldName(rt, ft.Name)] = info + } else { + named[col] = info + } + } + return fieldCache{named, auto} +} + +func (fc fieldCache) findField(cfg *Config, rv reflect.Value, name string) (reflect.Value, string, error) { + info, found := fc.named[name] + if !found { + info, found = fc.auto[cfg.NormFieldName(rv.Type(), name)] + } + if !found { + if cfg.MissingField == nil { + return reflect.Value{}, "", fmt.Errorf("field corresponding to `%s' is not defined in %v", name, rv.Type()) + } else { + return reflect.Value{}, "", cfg.MissingField(rv.Type(), name) + } + } else if info.ignored { + return reflect.Value{}, "", fmt.Errorf("field corresponding to `%s' in %v cannot be set through TOML", name, rv.Type()) + } + return rv.FieldByIndex(info.index), info.name, nil +} + +func extractTag(tag string) (col, rest string) { + tags := strings.SplitN(tag, ",", 2) + if len(tags) == 2 { + return strings.TrimSpace(tags[0]), strings.TrimSpace(tags[1]) + } + return strings.TrimSpace(tags[0]), "" +} diff --git a/vendor/vendor.json b/vendor/vendor.json index ab2bd9428..d79c80a67 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -202,6 +202,18 @@ "revisionTime": "2015-03-14T17:03:34Z" }, { + "checksumSHA1": "2gmvVTDCks8cPhpmyDlvm0sbrXE=", + "path": "github.com/naoina/toml", + "revision": "ac014c6b6502388d89a85552b7208b8da7cfe104", + "revisionTime": "2017-04-10T21:57:17Z" + }, + { + "checksumSHA1": "xZBlSMT5o/A+EDOro6KbfHZwSNc=", + "path": "github.com/naoina/toml/ast", + "revision": "eb52202f758b98ac5b1a8eb26f36455205d688f0", + "revisionTime": "2017-04-03T15:03:10Z" + }, + { "checksumSHA1": "R1h9XHH3dTmLq7yKL9/uW0xFwfs=", "path": "github.com/nsf/termbox-go", "revision": "3540b76b9c77679aeffd0a47e00243fb0ce47133", |
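For orientation, the table-name handling in the vendored parse.go above (setTable, setArrayTable, lookupTable) relies on splitTableKey to break a dotted table name into its parts: it splits on the separator outside double quotes, keeps dots inside quoted segments, and drops unquoted spaces and tabs. The standalone sketch below re-derives that behaviour for illustration only, assuming the package's tableSeparator constant is '.' as the grammar's tableKeySep rule indicates; it is not the vendored function itself.

    package main

    import "fmt"

    // splitKey mirrors the behaviour of splitTableKey in the vendored parse.go
    // (re-derived here for illustration): split on '.' outside double quotes,
    // keep dots inside quoted segments, and skip unquoted spaces and tabs.
    func splitKey(tk string) []string {
    	key := make([]byte, 0, 1)
    	keys := make([]string, 0, 1)
    	inQuote := false
    	for i := 0; i < len(tk); i++ {
    		switch k := tk[i]; {
    		case k == '.' && !inQuote:
    			keys = append(keys, string(key))
    			key = key[:0] // reuse the buffer for the next part
    		case k == '"':
    			inQuote = !inQuote
    		case (k == ' ' || k == '\t') && !inQuote:
    			// unquoted whitespace around the separator is ignored
    		default:
    			key = append(key, k)
    		}
    	}
    	return append(keys, string(key))
    }

    func main() {
    	fmt.Println(splitKey(`dog . "tater.man"`)) // prints [dog tater.man]
    	fmt.Println(splitKey(`a.b.c`))             // prints [a b c]
    }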
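The generated parse.peg.go above follows peg's usual two-phase design: Parse() matches the grammar starting from the TOML rule and records a token tree, and Execute() later replays the recorded actions (SetTable, AddKeyValue and so on) against the embedded toml builder. The minimal sketch below would have to live inside the toml package, since tomlParser is unexported, and is not part of the vendored code; it exercises only the matching phase on a small document that touches several grammar rules, because the builder state needed by Execute() is set up by the package's own entry points, which are outside the files shown in this diff.

    package toml

    import "fmt"

    // sampleDoc exercises several grammar rules: a standard table, an array
    // table, an inline table, an underscored integer, a datetime and a
    // multiline basic string.
    const sampleDoc = `
    [server]
    listen = ":30303"
    max_peers = 1_000
    started = 1979-05-27T07:32:00Z
    limits = { cpu = 2, mem = 4 }
    note = """
    multi
    line"""

    [[server.peers]]
    name = "a"
    `

    // exerciseGrammar is a hypothetical in-package helper, not part of the
    // vendored code. It runs only the matching phase and dumps the token tree;
    // Execute() would then replay the recorded actions against the toml builder.
    func exerciseGrammar(src string) error {
    	p := &tomlParser{Buffer: src}
    	p.Init() // allocate the rule table and reset position/token state
    	if err := p.Parse(); err != nil {
    		// err is a *parseError; its Error() maps token offsets to
    		// line/symbol positions via translatePositions.
    		return err
    	}
    	p.PrintSyntaxTree() // dump the recorded rule/token tree for inspection
    	fmt.Println("matched OK")
    	return nil
    }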
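util.go above is the decoder's field-resolution layer: makeFieldCache indexes struct fields by their `toml` tag (with "-" marking a field that may not be set from TOML), or, when untagged, by the name produced by Config.NormFieldName, and findField falls back to Config.MissingField for unknown keys. The sketch below shows how a caller might wire those hooks up. Only the hook signatures and tag semantics are taken from util.go; the ServerConfig type is invented for the example, and the NewDecoder/Decode entry point is assumed, since it is defined outside the files shown in this diff.

    package main

    import (
    	"fmt"
    	"reflect"
    	"strings"

    	"github.com/naoina/toml"
    )

    // ServerConfig is a made-up type covering the field classes handled by the
    // vendored util.go: an explicitly tagged field, an untagged field matched
    // via NormFieldName, an unexported field (skipped), and an ignored field.
    type ServerConfig struct {
    	ListenAddr string `toml:"listen_addr"`
    	MaxPeers   int
    	secret     string // unexported: makeFieldCache skips it
    	Runtime    string `toml:"-"` // tagged "-": setting it from TOML is an error
    }

    func main() {
    	cfg := toml.Config{
    		// Hook signatures follow util.go: NormFieldName maps a TOML key or a
    		// Go field name to a common form; MissingField customises the error
    		// returned for keys with no matching field.
    		NormFieldName: func(rt reflect.Type, name string) string {
    			return strings.ToLower(strings.Replace(name, "_", "", -1))
    		},
    		MissingField: func(rt reflect.Type, key string) error {
    			return fmt.Errorf("unknown key %q for %v", key, rt)
    		},
    	}
    	src := "listen_addr = \":30303\"\nMaxPeers = 25\n"
    	var sc ServerConfig
    	// NewDecoder/Decode is assumed to exist on Config; it is not part of the
    	// files shown in this diff.
    	if err := cfg.NewDecoder(strings.NewReader(src)).Decode(&sc); err != nil {
    		fmt.Println("decode failed:", err)
    		return
    	}
    	fmt.Printf("%+v\n", sc) // ListenAddr and MaxPeers populated, Runtime left empty
    }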