author     gary rong <garyrong0905@gmail.com>        2019-05-14 22:07:44 +0800
committer  Péter Szilágyi <peterke@gmail.com>        2019-05-16 15:39:34 +0800
commit     37d280da411eb649ce22ab69827ac5aacd46534b (patch)
tree       8d19d2071c812575a14cea54a2de9efd2dd33157 /cmd
parent     42c746d6f405deb0c49d868dcc6e0afe279e19ab (diff)
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forcibly
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
Diffstat (limited to 'cmd')
-rw-r--r--  cmd/geth/chaincmd.go    | 214
-rw-r--r--  cmd/geth/main.go        |   2
-rw-r--r--  cmd/geth/os_unix.go     |  51
-rw-r--r--  cmd/geth/os_windows.go  |  43
-rw-r--r--  cmd/utils/cmd.go        |   2
-rw-r--r--  cmd/utils/flags.go      |   2
6 files changed, 299 insertions, 15 deletions
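
Editor's note: the "check ancient store path forcibly" item in the commit message refers to the freezer-path resolution rule that the hunks below apply in both removeDB and migrateAncient. A minimal Go sketch of that rule, for orientation only (the helper name resolveFreezerPath is hypothetical; in the diff the relative-path case goes through config.Node.ResolvePath, and filepath is the standard path/filepath package):

// resolveFreezerPath mirrors the resolution rule applied below in both
// removeDB and migrateAncient: an empty value defaults to
// <chaindata>/ancient, a relative value is resolved against the node's
// data directory, and an absolute value is kept as-is.
// (Hypothetical helper; "resolve" stands in for config.Node.ResolvePath.)
func resolveFreezerPath(freezer, chaindata string, resolve func(string) string) string {
	switch {
	case freezer == "":
		return filepath.Join(chaindata, "ancient")
	case !filepath.IsAbs(freezer):
		return resolve(freezer)
	default:
		return freezer
	}
}
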
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 809f5cf4a..70164f82b 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -18,8 +18,12 @@ package main
import (
"encoding/json"
+ "errors"
"fmt"
+ "io"
+ "io/ioutil"
"os"
+ "path/filepath"
"runtime"
"strconv"
"sync/atomic"
@@ -167,6 +171,37 @@ Remove blockchain and state databases`,
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
}
+ migrateAncientCommand = cli.Command{
+ Action: utils.MigrateFlags(migrateAncient),
+ Name: "migrate-ancient",
+ Usage: "Migrate the ancient database forcibly",
+ ArgsUsage: " ",
+ Flags: []cli.Flag{
+ utils.DataDirFlag,
+ utils.AncientFlag,
+ utils.CacheFlag,
+ utils.TestnetFlag,
+ utils.RinkebyFlag,
+ utils.GoerliFlag,
+ },
+ Category: "BLOCKCHAIN COMMANDS",
+ }
+ inspectCommand = cli.Command{
+ Action: utils.MigrateFlags(inspect),
+ Name: "inspect",
+ Usage: "Inspect the storage size for each type of data in the database",
+ ArgsUsage: " ",
+ Flags: []cli.Flag{
+ utils.DataDirFlag,
+ utils.AncientFlag,
+ utils.CacheFlag,
+ utils.TestnetFlag,
+ utils.RinkebyFlag,
+ utils.GoerliFlag,
+ utils.SyncModeFlag,
+ },
+ Category: "BLOCKCHAIN COMMANDS",
+ }
)
// initGenesis will initialise the given JSON format genesis file and writes it as
@@ -423,29 +458,48 @@ func copyDb(ctx *cli.Context) error {
}
func removeDB(ctx *cli.Context) error {
- stack, _ := makeConfigNode(ctx)
+ stack, config := makeConfigNode(ctx)
- for _, name := range []string{"chaindata", "lightchaindata"} {
+ for i, name := range []string{"chaindata", "lightchaindata"} {
// Ensure the database exists in the first place
logger := log.New("database", name)
+ var (
+ dbdirs []string
+ freezer string
+ )
dbdir := stack.ResolvePath(name)
if !common.FileExist(dbdir) {
logger.Info("Database doesn't exist, skipping", "path", dbdir)
continue
}
- // Confirm removal and execute
- fmt.Println(dbdir)
- confirm, err := console.Stdin.PromptConfirm("Remove this database?")
- switch {
- case err != nil:
- utils.Fatalf("%v", err)
- case !confirm:
- logger.Warn("Database deletion aborted")
- default:
- start := time.Now()
- os.RemoveAll(dbdir)
- logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
+ dbdirs = append(dbdirs, dbdir)
+ if i == 0 {
+ freezer = config.Eth.DatabaseFreezer
+ switch {
+ case freezer == "":
+ freezer = filepath.Join(dbdir, "ancient")
+ case !filepath.IsAbs(freezer):
+ freezer = config.Node.ResolvePath(freezer)
+ }
+ if common.FileExist(freezer) {
+ dbdirs = append(dbdirs, freezer)
+ }
+ }
+ for i := len(dbdirs) - 1; i >= 0; i-- {
+ // Confirm removal and execute
+ fmt.Println(dbdirs[i])
+ confirm, err := console.Stdin.PromptConfirm("Remove this database?")
+ switch {
+ case err != nil:
+ utils.Fatalf("%v", err)
+ case !confirm:
+ logger.Warn("Database deletion aborted")
+ default:
+ start := time.Now()
+ os.RemoveAll(dbdirs[i])
+ logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
+ }
}
}
return nil
@@ -479,8 +533,140 @@ func dump(ctx *cli.Context) error {
return nil
}
+func migrateAncient(ctx *cli.Context) error {
+ node, config := makeConfigNode(ctx)
+ defer node.Close()
+
+ dbdir := config.Node.ResolvePath("chaindata")
+ kvdb, err := rawdb.NewLevelDBDatabase(dbdir, 128, 1024, "")
+ if err != nil {
+ return err
+ }
+ defer kvdb.Close()
+
+ freezer := config.Eth.DatabaseFreezer
+ switch {
+ case freezer == "":
+ freezer = filepath.Join(dbdir, "ancient")
+ case !filepath.IsAbs(freezer):
+ freezer = config.Node.ResolvePath(freezer)
+ }
+ stored := rawdb.ReadAncientPath(kvdb)
+ if stored != freezer && stored != "" {
+ confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Are you sure you want to migrate the ancient database from %s to %s?", stored, freezer))
+ switch {
+ case err != nil:
+ utils.Fatalf("%v", err)
+ case !confirm:
+ log.Warn("Ancient database migration aborted")
+ default:
+ if err := rename(stored, freezer); err != nil {
+ // Renaming a file can fail if the source and destination
+ // are on different file systems.
+ if err := moveAncient(stored, freezer); err != nil {
+ utils.Fatalf("Failed to migrate ancient database: %v", err)
+ }
+ }
+ rawdb.WriteAncientPath(kvdb, freezer)
+ log.Info("Ancient database successfully migrated")
+ }
+ }
+ return nil
+}
+
+func inspect(ctx *cli.Context) error {
+ node, _ := makeConfigNode(ctx)
+ defer node.Close()
+
+ _, chainDb := utils.MakeChain(ctx, node)
+ defer chainDb.Close()
+
+ return rawdb.InspectDatabase(chainDb)
+}
+
// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
_, err := strconv.Atoi(x)
return err != nil
}
+
+// copyFileSynced copies data from the source file to the destination
+// and forcibly syncs the destination file.
+func copyFileSynced(src string, dest string, info os.FileInfo) error {
+ srcf, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcf.Close()
+
+ destf, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, info.Mode().Perm())
+ if err != nil {
+ return err
+ }
+ // The maximum size of an ancient file is 2GB, so a 4MB buffer is suitable here.
+ buff := make([]byte, 4*1024*1024)
+ for {
+ rn, err := srcf.Read(buff)
+ if err != nil && err != io.EOF {
+ return err
+ }
+ if rn == 0 {
+ break
+ }
+ if wn, err := destf.Write(buff[:rn]); err != nil || wn != rn {
+ return err
+ }
+ }
+ if err1 := destf.Sync(); err == nil {
+ err = err1
+ }
+ if err1 := destf.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+// copyDirSynced recursively copies the files under the source dir
+// to dest and forcibly syncs the destination dir.
+func copyDirSynced(src string, dest string, info os.FileInfo) error {
+ if err := os.MkdirAll(dest, os.ModePerm); err != nil {
+ return err
+ }
+ defer os.Chmod(dest, info.Mode())
+
+ objects, err := ioutil.ReadDir(src)
+ if err != nil {
+ return err
+ }
+ for _, obj := range objects {
+ // All files in the ancient database should be flat files.
+ if !obj.Mode().IsRegular() {
+ continue
+ }
+ subsrc, subdest := filepath.Join(src, obj.Name()), filepath.Join(dest, obj.Name())
+ if err := copyFileSynced(subsrc, subdest, obj); err != nil {
+ return err
+ }
+ }
+ return syncDir(dest)
+}
+
+// moveAncient migrates the ancient database from source to destination.
+func moveAncient(src string, dest string) error {
+ srcinfo, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcinfo.IsDir() {
+ return errors.New("ancient directory expected")
+ }
+ if destinfo, err := os.Lstat(dest); !os.IsNotExist(err) {
+ if destinfo.Mode()&os.ModeSymlink != 0 {
+ return errors.New("symbolic link datadir is not supported")
+ }
+ }
+ if err := copyDirSynced(src, dest, srcinfo); err != nil {
+ return err
+ }
+ return os.RemoveAll(src)
+}
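
Editor's note: a condensed sketch of the fallback flow that migrateAncient implements above. The helper name migrateFreezer and the ethdb.Database parameter type are assumptions; rename, moveAncient and rawdb.WriteAncientPath are the helpers shown in this diff.

// migrateFreezer tries an atomic rename first; if that fails (for example
// when source and destination sit on different file systems), it falls
// back to the synced recursive copy, then records the new path in the
// key-value store.
func migrateFreezer(db ethdb.Database, stored, freezer string) error {
	if err := rename(stored, freezer); err != nil {
		if err := moveAncient(stored, freezer); err != nil {
			return err
		}
	}
	rawdb.WriteAncientPath(db, freezer)
	return nil
}
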
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index dc63f2302..afa39bf93 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -204,6 +204,8 @@ func init() {
copydbCommand,
removedbCommand,
dumpCommand,
+ migrateAncientCommand,
+ inspectCommand,
// See accountcmd.go:
accountCommand,
walletCommand,
diff --git a/cmd/geth/os_unix.go b/cmd/geth/os_unix.go
new file mode 100644
index 000000000..6722ec9cb
--- /dev/null
+++ b/cmd/geth/os_unix.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license.
+//
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package main
+
+import (
+ "os"
+ "syscall"
+)
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func isErrInvalid(err error) bool {
+ if err == os.ErrInvalid {
+ return true
+ }
+ // Go < 1.8
+ if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
+ return true
+ }
+ // Go >= 1.8 returns *os.PathError instead
+ if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL {
+ return true
+ }
+ return false
+}
+
+func syncDir(name string) error {
+ // As per the fsync manpage, Linux expects fsync on a directory; however,
+ // some systems don't support this, so we ignore syscall.EINVAL.
+ //
+ // From fsync(2):
+ // Calling fsync() does not necessarily ensure that the entry in the
+ // directory containing the file has also reached disk. For that an
+ // explicit fsync() on a file descriptor for the directory is also needed.
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil && !isErrInvalid(err) {
+ return err
+ }
+ return nil
+}
diff --git a/cmd/geth/os_windows.go b/cmd/geth/os_windows.go
new file mode 100644
index 000000000..f2406ec9b
--- /dev/null
+++ b/cmd/geth/os_windows.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license.
+
+package main
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+const _MOVEFILE_REPLACE_EXISTING = 1
+
+func moveFileEx(from *uint16, to *uint16, flags uint32) error {
+ r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+ if r1 == 0 {
+ if e1 != 0 {
+ return error(e1)
+ }
+ return syscall.EINVAL
+ }
+ return nil
+}
+
+func rename(oldpath, newpath string) error {
+ from, err := syscall.UTF16PtrFromString(oldpath)
+ if err != nil {
+ return err
+ }
+ to, err := syscall.UTF16PtrFromString(newpath)
+ if err != nil {
+ return err
+ }
+ return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING)
+}
+
+func syncDir(name string) error { return nil }
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
index 74a8c7f39..a3ee45ba7 100644
--- a/cmd/utils/cmd.go
+++ b/cmd/utils/cmd.go
@@ -302,6 +302,8 @@ func ExportPreimages(db ethdb.Database, fn string) error {
}
// Iterate over the preimages and export them
it := db.NewIteratorWithPrefix([]byte("secure-key-"))
+ defer it.Release()
+
for it.Next() {
if err := rlp.Encode(writer, it.Value()); err != nil {
return err
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index c40da85b0..ddeb44f34 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -1573,7 +1573,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
if ctx.GlobalString(SyncModeFlag.Name) == "light" {
name = "lightchaindata"
}
- chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, "", "")
+ chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "")
if err != nil {
Fatalf("Could not open database: %v", err)
}
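
Editor's note: before this patch the freezer argument to OpenDatabaseWithFreezer was hard-coded to the empty string, so MakeChainDatabase always fell back to the default <chaindata>/ancient directory; after it, the value of AncientFlag is forwarded, matching the resolution rule sketched earlier. A minimal illustration, assuming the same ctx, stack, name, cache and handles as in the surrounding function:

// An empty flag value still means "use the default <chaindata>/ancient
// directory"; any path supplied via AncientFlag is forwarded to the
// freezer-backed database.
freezer := ctx.GlobalString(AncientFlag.Name)
chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, freezer, "")
if err != nil {
	Fatalf("Could not open database: %v", err)
}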