| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832 |
- // Copyright 2021 The go-ethereum Authors
- // This file is part of go-ethereum.
- //
- // go-ethereum is free software: you can redistribute it and/or modify
- // it under the terms of the GNU General Public License as published by
- // the Free Software Foundation, either version 3 of the License, or
- // (at your option) any later version.
- //
- // go-ethereum is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU General Public License for more details.
- //
- // You should have received a copy of the GNU General Public License
- // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
- package main
- import (
- "bytes"
- "fmt"
- "os"
- "os/signal"
- "path/filepath"
- "strconv"
- "strings"
- "syscall"
- "time"
- "github.com/ethereum/go-ethereum/cmd/utils"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/console/prompt"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state/snapshot"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/internal/flags"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/olekukonko/tablewriter"
- "github.com/urfave/cli/v2"
- )
- var (
- removedbCommand = &cli.Command{
- Action: removeDB,
- Name: "removedb",
- Usage: "Remove blockchain and state databases",
- ArgsUsage: "",
- Flags: utils.DatabasePathFlags,
- Description: `
- Remove blockchain and state databases`,
- }
- dbCommand = &cli.Command{
- Name: "db",
- Usage: "Low level database operations",
- ArgsUsage: "",
- Subcommands: []*cli.Command{
- dbInspectCmd,
- dbStatCmd,
- dbCompactCmd,
- dbGetCmd,
- dbDeleteCmd,
- dbPutCmd,
- dbGetSlotsCmd,
- dbDumpFreezerIndex,
- dbImportCmd,
- dbExportCmd,
- dbMetadataCmd,
- dbMigrateFreezerCmd,
- dbCheckStateContentCmd,
- },
- }
- dbInspectCmd = &cli.Command{
- Action: inspect,
- Name: "inspect",
- ArgsUsage: "<prefix> <start>",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Usage: "Inspect the storage size for each type of data in the database",
- Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
- }
- dbCheckStateContentCmd = &cli.Command{
- Action: checkStateContent,
- Name: "check-state-content",
- ArgsUsage: "<start (optional)>",
- Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
- Usage: "Verify that state data is cryptographically correct",
- Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
- For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
- a data corruption.`,
- }
- dbStatCmd = &cli.Command{
- Action: dbStats,
- Name: "stats",
- Usage: "Print leveldb statistics",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- }
- dbCompactCmd = &cli.Command{
- Action: dbCompact,
- Name: "compact",
- Usage: "Compact leveldb database. WARNING: May take a very long time",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- utils.CacheFlag,
- utils.CacheDatabaseFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Description: `This command performs a database compaction.
- WARNING: This operation may take a very long time to finish, and may cause database
- corruption if it is aborted during execution'!`,
- }
- dbGetCmd = &cli.Command{
- Action: dbGet,
- Name: "get",
- Usage: "Show the value of a database key",
- ArgsUsage: "<hex-encoded key>",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Description: "This command looks up the specified database key from the database.",
- }
- dbDeleteCmd = &cli.Command{
- Action: dbDelete,
- Name: "delete",
- Usage: "Delete a database key (WARNING: may corrupt your database)",
- ArgsUsage: "<hex-encoded key>",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Description: `This command deletes the specified database key from the database.
- WARNING: This is a low-level operation which may cause database corruption!`,
- }
- dbPutCmd = &cli.Command{
- Action: dbPut,
- Name: "put",
- Usage: "Set the value of a database key (WARNING: may corrupt your database)",
- ArgsUsage: "<hex-encoded key> <hex-encoded value>",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Description: `This command sets a given database key to the given value.
- WARNING: This is a low-level operation which may cause database corruption!`,
- }
- dbGetSlotsCmd = &cli.Command{
- Action: dbDumpTrie,
- Name: "dumptrie",
- Usage: "Show the storage key/values of a given storage trie",
- ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Description: "This command looks up the specified database key from the database.",
- }
- dbDumpFreezerIndex = &cli.Command{
- Action: freezerInspect,
- Name: "freezer-index",
- Usage: "Dump out the index of a specific freezer table",
- ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Description: "This command displays information about the freezer index.",
- }
- dbImportCmd = &cli.Command{
- Action: importLDBdata,
- Name: "import",
- Usage: "Imports leveldb-data from an exported RLP dump.",
- ArgsUsage: "<dumpfile> <start (optional)",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Description: "The import command imports the specific chain data from an RLP encoded stream.",
- }
- dbExportCmd = &cli.Command{
- Action: exportChaindata,
- Name: "export",
- Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
- ArgsUsage: "<type> <dumpfile>",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
- }
- dbMetadataCmd = &cli.Command{
- Action: showMetaData,
- Name: "metadata",
- Usage: "Shows metadata about the chain status.",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Description: "Shows metadata about the chain status.",
- }
- dbMigrateFreezerCmd = &cli.Command{
- Action: freezerMigrate,
- Name: "freezer-migrate",
- Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
- ArgsUsage: "",
- Flags: flags.Merge([]cli.Flag{
- utils.SyncModeFlag,
- }, utils.NetworkFlags, utils.DatabasePathFlags),
- Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
- WARNING: please back-up the receipt files in your ancients before running this command.`,
- }
- )
- func removeDB(ctx *cli.Context) error {
- stack, config := makeConfigNode(ctx)
- // Remove the full node state database
- path := stack.ResolvePath("chaindata")
- if common.FileExist(path) {
- confirmAndRemoveDB(path, "full node state database")
- } else {
- log.Info("Full node state database missing", "path", path)
- }
- // Remove the full node ancient database
- path = config.Eth.DatabaseFreezer
- switch {
- case path == "":
- path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
- case !filepath.IsAbs(path):
- path = config.Node.ResolvePath(path)
- }
- if common.FileExist(path) {
- confirmAndRemoveDB(path, "full node ancient database")
- } else {
- log.Info("Full node ancient database missing", "path", path)
- }
- // Remove the light node database
- path = stack.ResolvePath("lightchaindata")
- if common.FileExist(path) {
- confirmAndRemoveDB(path, "light node database")
- } else {
- log.Info("Light node database missing", "path", path)
- }
- return nil
- }
- // confirmAndRemoveDB prompts the user for a last confirmation and removes the
- // folder if accepted.
- func confirmAndRemoveDB(database string, kind string) {
- confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
- switch {
- case err != nil:
- utils.Fatalf("%v", err)
- case !confirm:
- log.Info("Database deletion skipped", "path", database)
- default:
- start := time.Now()
- filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
- // If we're at the top level folder, recurse into
- if path == database {
- return nil
- }
- // Delete all the files, but not subfolders
- if !info.IsDir() {
- os.Remove(path)
- return nil
- }
- return filepath.SkipDir
- })
- log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
- }
- }
- func inspect(ctx *cli.Context) error {
- var (
- prefix []byte
- start []byte
- )
- if ctx.NArg() > 2 {
- return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
- }
- if ctx.NArg() >= 1 {
- if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
- return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
- } else {
- prefix = d
- }
- }
- if ctx.NArg() >= 2 {
- if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
- return fmt.Errorf("failed to hex-decode 'start': %v", err)
- } else {
- start = d
- }
- }
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, true)
- defer db.Close()
- return rawdb.InspectDatabase(db, prefix, start)
- }
- func checkStateContent(ctx *cli.Context) error {
- var (
- prefix []byte
- start []byte
- )
- if ctx.NArg() > 1 {
- return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
- }
- if ctx.NArg() > 0 {
- if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
- return fmt.Errorf("failed to hex-decode 'start': %v", err)
- } else {
- start = d
- }
- }
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, true)
- defer db.Close()
- var (
- it = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
- hasher = crypto.NewKeccakState()
- got = make([]byte, 32)
- errs int
- count int
- startTime = time.Now()
- lastLog = time.Now()
- )
- for it.Next() {
- count++
- k := it.Key()
- v := it.Value()
- hasher.Reset()
- hasher.Write(v)
- hasher.Read(got)
- if !bytes.Equal(k, got) {
- errs++
- fmt.Printf("Error at %#x\n", k)
- fmt.Printf(" Hash: %#x\n", got)
- fmt.Printf(" Data: %#x\n", v)
- }
- if time.Since(lastLog) > 8*time.Second {
- log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
- lastLog = time.Now()
- }
- }
- if err := it.Error(); err != nil {
- return err
- }
- log.Info("Iterated the state content", "errors", errs, "items", count)
- return nil
- }
- func showLeveldbStats(db ethdb.KeyValueStater) {
- if stats, err := db.Stat("leveldb.stats"); err != nil {
- log.Warn("Failed to read database stats", "error", err)
- } else {
- fmt.Println(stats)
- }
- if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
- log.Warn("Failed to read database iostats", "error", err)
- } else {
- fmt.Println(ioStats)
- }
- }
- func dbStats(ctx *cli.Context) error {
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, true)
- defer db.Close()
- showLeveldbStats(db)
- return nil
- }
- func dbCompact(ctx *cli.Context) error {
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, false)
- defer db.Close()
- log.Info("Stats before compaction")
- showLeveldbStats(db)
- log.Info("Triggering compaction")
- if err := db.Compact(nil, nil); err != nil {
- log.Info("Compact err", "error", err)
- return err
- }
- log.Info("Stats after compaction")
- showLeveldbStats(db)
- return nil
- }
- // dbGet shows the value of a given database key
- func dbGet(ctx *cli.Context) error {
- if ctx.NArg() != 1 {
- return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
- }
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, true)
- defer db.Close()
- key, err := common.ParseHexOrString(ctx.Args().Get(0))
- if err != nil {
- log.Info("Could not decode the key", "error", err)
- return err
- }
- data, err := db.Get(key)
- if err != nil {
- log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
- return err
- }
- fmt.Printf("key %#x: %#x\n", key, data)
- return nil
- }
- // dbDelete deletes a key from the database
- func dbDelete(ctx *cli.Context) error {
- if ctx.NArg() != 1 {
- return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
- }
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, false)
- defer db.Close()
- key, err := common.ParseHexOrString(ctx.Args().Get(0))
- if err != nil {
- log.Info("Could not decode the key", "error", err)
- return err
- }
- data, err := db.Get(key)
- if err == nil {
- fmt.Printf("Previous value: %#x\n", data)
- }
- if err = db.Delete(key); err != nil {
- log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
- return err
- }
- return nil
- }
- // dbPut overwrite a value in the database
- func dbPut(ctx *cli.Context) error {
- if ctx.NArg() != 2 {
- return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
- }
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, false)
- defer db.Close()
- var (
- key []byte
- value []byte
- data []byte
- err error
- )
- key, err = common.ParseHexOrString(ctx.Args().Get(0))
- if err != nil {
- log.Info("Could not decode the key", "error", err)
- return err
- }
- value, err = hexutil.Decode(ctx.Args().Get(1))
- if err != nil {
- log.Info("Could not decode the value", "error", err)
- return err
- }
- data, err = db.Get(key)
- if err == nil {
- fmt.Printf("Previous value: %#x\n", data)
- }
- return db.Put(key, value)
- }
- // dbDumpTrie shows the key-value slots of a given storage trie
- func dbDumpTrie(ctx *cli.Context) error {
- if ctx.NArg() < 1 {
- return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
- }
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, true)
- defer db.Close()
- var (
- root []byte
- start []byte
- max = int64(-1)
- err error
- )
- if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
- log.Info("Could not decode the root", "error", err)
- return err
- }
- stRoot := common.BytesToHash(root)
- if ctx.NArg() >= 2 {
- if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
- log.Info("Could not decode the seek position", "error", err)
- return err
- }
- }
- if ctx.NArg() >= 3 {
- if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
- log.Info("Could not decode the max count", "error", err)
- return err
- }
- }
- theTrie, err := trie.New(common.Hash{}, stRoot, trie.NewDatabase(db))
- if err != nil {
- return err
- }
- var count int64
- it := trie.NewIterator(theTrie.NodeIterator(start))
- for it.Next() {
- if max > 0 && count == max {
- fmt.Printf("Exiting after %d values\n", count)
- break
- }
- fmt.Printf(" %d. key %#x: %#x\n", count, it.Key, it.Value)
- count++
- }
- return it.Err
- }
- func freezerInspect(ctx *cli.Context) error {
- if ctx.NArg() < 4 {
- return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
- }
- var (
- freezer = ctx.Args().Get(0)
- table = ctx.Args().Get(1)
- )
- start, err := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
- if err != nil {
- log.Info("Could not read start-param", "err", err)
- return err
- }
- end, err := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
- if err != nil {
- log.Info("Could not read count param", "err", err)
- return err
- }
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, true)
- defer db.Close()
- ancient, err := db.AncientDatadir()
- if err != nil {
- log.Info("Failed to retrieve ancient root", "err", err)
- return err
- }
- return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
- }
- func importLDBdata(ctx *cli.Context) error {
- start := 0
- switch ctx.NArg() {
- case 1:
- break
- case 2:
- s, err := strconv.Atoi(ctx.Args().Get(1))
- if err != nil {
- return fmt.Errorf("second arg must be an integer: %v", err)
- }
- start = s
- default:
- return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
- }
- var (
- fName = ctx.Args().Get(0)
- stack, _ = makeConfigNode(ctx)
- interrupt = make(chan os.Signal, 1)
- stop = make(chan struct{})
- )
- defer stack.Close()
- signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
- defer signal.Stop(interrupt)
- defer close(interrupt)
- go func() {
- if _, ok := <-interrupt; ok {
- log.Info("Interrupted during ldb import, stopping at next batch")
- }
- close(stop)
- }()
- db := utils.MakeChainDatabase(ctx, stack, false)
- return utils.ImportLDBData(db, fName, int64(start), stop)
- }
// preimageIterator wraps a raw database iterator and yields preimage table
// entries in the operation format consumed by utils.ExportChaindata.
type preimageIterator struct {
	iter ethdb.Iterator // underlying raw database iterator
}
- func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
- for iter.iter.Next() {
- key := iter.iter.Key()
- if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
- return utils.OpBatchAdd, key, iter.iter.Value(), true
- }
- }
- return 0, nil, nil, false
- }
// Release disposes of the underlying database iterator.
func (iter *preimageIterator) Release() {
	iter.iter.Release()
}
// snapshotIterator yields account and storage snapshot entries in the
// operation format consumed by utils.ExportChaindata. The first emitted
// operation deletes the snapshot root key; see Next.
type snapshotIterator struct {
	init    bool           // whether the initial root-key deletion was already emitted
	account ethdb.Iterator // iterator over the account snapshot table
	storage ethdb.Iterator // iterator over the storage snapshot table
}
- func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
- if !iter.init {
- iter.init = true
- return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
- }
- for iter.account.Next() {
- key := iter.account.Key()
- if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
- return utils.OpBatchAdd, key, iter.account.Value(), true
- }
- }
- for iter.storage.Next() {
- key := iter.storage.Key()
- if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
- return utils.OpBatchAdd, key, iter.storage.Value(), true
- }
- }
- return 0, nil, nil, false
- }
// Release disposes of both underlying database iterators.
func (iter *snapshotIterator) Release() {
	iter.account.Release()
	iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
// Each entry maps a data-type name (as accepted by the 'db export' command)
// to a constructor producing the iterator that streams that data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	// "preimage" exports hash-preimage entries.
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	// "snapshot" exports the account and storage snapshot tables.
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}
- func exportChaindata(ctx *cli.Context) error {
- if ctx.NArg() < 2 {
- return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
- }
- // Parse the required chain data type, make sure it's supported.
- kind := ctx.Args().Get(0)
- kind = strings.ToLower(strings.Trim(kind, " "))
- exporter, ok := chainExporters[kind]
- if !ok {
- var kinds []string
- for kind := range chainExporters {
- kinds = append(kinds, kind)
- }
- return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
- }
- var (
- stack, _ = makeConfigNode(ctx)
- interrupt = make(chan os.Signal, 1)
- stop = make(chan struct{})
- )
- defer stack.Close()
- signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
- defer signal.Stop(interrupt)
- defer close(interrupt)
- go func() {
- if _, ok := <-interrupt; ok {
- log.Info("Interrupted during db export, stopping at next batch")
- }
- close(stop)
- }()
- db := utils.MakeChainDatabase(ctx, stack, true)
- return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
- }
- func showMetaData(ctx *cli.Context) error {
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, true)
- ancients, err := db.Ancients()
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
- }
- pp := func(val *uint64) string {
- if val == nil {
- return "<nil>"
- }
- return fmt.Sprintf("%d (%#x)", *val, *val)
- }
- data := [][]string{
- {"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
- {"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
- {"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
- {"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
- if b := rawdb.ReadHeadBlock(db); b != nil {
- data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
- data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
- data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
- }
- if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
- data = append(data, []string{"SkeletonSyncStatus", string(b)})
- }
- if h := rawdb.ReadHeadHeader(db); h != nil {
- data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
- data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
- data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
- }
- data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
- {"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
- {"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
- {"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
- {"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
- {"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
- {"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
- {"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
- {"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
- {"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
- }...)
- table := tablewriter.NewWriter(os.Stdout)
- table.SetHeader([]string{"Field", "Value"})
- table.AppendBulk(data)
- table.Render()
- return nil
- }
// freezerMigrate converts receipts stored in the legacy freezer format to the
// current format. It is a no-op when the freezer is empty or no legacy
// receipts are found.
func freezerMigrate(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return err
	}
	if numAncients < 1 {
		log.Info("No receipts in freezer to migrate")
		return nil
	}
	isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
	if err != nil {
		return err
	}
	if !isFirstLegacy {
		log.Info("No legacy receipts to migrate")
		return nil
	}
	log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
	start := time.Now()
	if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
		return err
	}
	// Close explicitly so any error surfacing from the close is reported
	// before success is declared (the deferred Close's error is discarded).
	if err := db.Close(); err != nil {
		return err
	}
	log.Info("Migration finished", "duration", time.Since(start))
	return nil
}
- // dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
- // non-empty receipt and checks its format. The index of this first non-empty element is
- // the second return parameter.
- func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
- // Check first block for legacy receipt format
- numAncients, err := db.Ancients()
- if err != nil {
- return false, 0, err
- }
- if numAncients < 1 {
- return false, 0, nil
- }
- if firstIdx >= numAncients {
- return false, firstIdx, nil
- }
- var (
- legacy bool
- blob []byte
- emptyRLPList = []byte{192}
- )
- // Find first block with non-empty receipt, only if
- // the index is not already provided.
- if firstIdx == 0 {
- for i := uint64(0); i < numAncients; i++ {
- blob, err = db.Ancient("receipts", i)
- if err != nil {
- return false, 0, err
- }
- if len(blob) == 0 {
- continue
- }
- if !bytes.Equal(blob, emptyRLPList) {
- firstIdx = i
- break
- }
- }
- }
- // Is first non-empty receipt legacy?
- first, err := db.Ancient("receipts", firstIdx)
- if err != nil {
- return false, 0, err
- }
- legacy, err = types.IsLegacyStoredReceipts(first)
- return legacy, firstIdx, err
- }
|