|
|
@@ -19,8 +19,14 @@ package main
|
|
|
import (
|
|
|
"bytes"
|
|
|
"errors"
|
|
|
+ "fmt"
|
|
|
+ "os"
|
|
|
+ "path/filepath"
|
|
|
"time"
|
|
|
|
|
|
+ "github.com/prometheus/tsdb/fileutil"
|
|
|
+ cli "gopkg.in/urfave/cli.v1"
|
|
|
+
|
|
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
|
|
"github.com/ethereum/go-ethereum/common"
|
|
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
|
|
@@ -28,10 +34,11 @@ import (
|
|
|
"github.com/ethereum/go-ethereum/core/state/pruner"
|
|
|
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
|
|
"github.com/ethereum/go-ethereum/crypto"
|
|
|
+ "github.com/ethereum/go-ethereum/ethdb"
|
|
|
"github.com/ethereum/go-ethereum/log"
|
|
|
+ "github.com/ethereum/go-ethereum/node"
|
|
|
"github.com/ethereum/go-ethereum/rlp"
|
|
|
"github.com/ethereum/go-ethereum/trie"
|
|
|
- cli "gopkg.in/urfave/cli.v1"
|
|
|
)
|
|
|
|
|
|
var (
|
|
|
@@ -78,6 +85,30 @@ WARNING: It's necessary to delete the trie clean cache after the pruning.
|
|
|
If you specify another directory for the trie clean cache via "--cache.trie.journal"
|
|
|
during the use of Geth, please also specify it here for correct deletion. Otherwise
|
|
|
the trie clean cache with default directory will be deleted.
|
|
|
+`,
|
|
|
+ },
|
|
|
+ {
|
|
|
+ Name: "prune-block",
|
|
|
+ Usage: "Prune block data offline",
|
|
|
+ Action: utils.MigrateFlags(pruneBlock),
|
|
|
+ Category: "MISCELLANEOUS COMMANDS",
|
|
|
+ Flags: []cli.Flag{
|
|
|
+ utils.DataDirFlag,
|
|
|
+ utils.AncientFlag,
|
|
|
+ utils.BlockAmountReserved,
|
|
|
+ utils.TriesInMemoryFlag,
|
|
|
+ utils.CheckSnapshotWithMPT,
|
|
|
+ },
|
|
|
+ Description: `
|
|
|
+geth offline prune-block for block data in ancientdb.
|
|
|
+The number of blocks to keep after pruning can be specified via block-amount-reserved in this command,
|
|
|
+will prune and only remain the specified amount of old block data in ancientdb.
|
|
|
+the brief workflow is to back up the specified number of most-recent blocks from the original ancientdb
|
|
|
+into new ancient_backup, then delete the original ancientdb dir and rename the ancient_backup to original one for replacement,
|
|
|
+finally assemble the statedb and new ancientDb together.
|
|
|
+The purpose of doing it is because the block data will be moved into the ancient store when it
|
|
|
+becomes old enough (exceeds the threshold of 90000 blocks); the disk usage grows very large over time and is occupied mainly by the ancientDb,
|
|
|
+so it's very necessary to do block data prune, this feature will handle it.
|
|
|
`,
|
|
|
},
|
|
|
{
|
|
|
@@ -149,11 +180,164 @@ It's also usable without snapshot enabled.
|
|
|
}
|
|
|
)
|
|
|
|
|
|
+func accessDb(ctx *cli.Context, stack *node.Node) (ethdb.Database, error) {
|
|
|
+ //The layer of tries trees that keep in memory.
|
|
|
+ TriesInMemory := int(ctx.GlobalUint64(utils.TriesInMemoryFlag.Name))
|
|
|
+ chaindb := utils.MakeChainDatabase(ctx, stack, false, true)
|
|
|
+ defer chaindb.Close()
|
|
|
+
|
|
|
+ if !ctx.GlobalBool(utils.CheckSnapshotWithMPT.Name) {
|
|
|
+ return chaindb, nil
|
|
|
+ }
|
|
|
+ headBlock := rawdb.ReadHeadBlock(chaindb)
|
|
|
+ if headBlock == nil {
|
|
|
+ return nil, errors.New("failed to load head block")
|
|
|
+ }
|
|
|
+ headHeader := headBlock.Header()
|
|
|
+ //Make sure the MPT and snapshot matches before pruning, otherwise the node can not start.
|
|
|
+ snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, TriesInMemory, headBlock.Root(), false, false, false)
|
|
|
+ if err != nil {
|
|
|
+ log.Error("snaptree error", "err", err)
|
|
|
+ return nil, err // The relevant snapshot(s) might not exist
|
|
|
+ }
|
|
|
+
|
|
|
+ // Use the HEAD-(n-1) as the target root. The reason for picking it is:
|
|
|
+ // - in most of the normal cases, the related state is available
|
|
|
+ // - the probability of this layer being reorg is very low
|
|
|
+
|
|
|
+ // Retrieve all snapshot layers from the current HEAD.
|
|
|
+ // In theory there are n difflayers + 1 disk layer present,
|
|
|
+ // so n diff layers are expected to be returned.
|
|
|
+ layers := snaptree.Snapshots(headHeader.Root, TriesInMemory, true)
|
|
|
+ if len(layers) != TriesInMemory {
|
|
|
+ // Reject if the accumulated diff layers are less than n. It
|
|
|
+ // means in most of normal cases, there is no associated state
|
|
|
+ // with bottom-most diff layer.
|
|
|
+ log.Error("snapshot layers != TriesInMemory", "err", err)
|
|
|
+ return nil, fmt.Errorf("snapshot not old enough yet: need %d more blocks", TriesInMemory-len(layers))
|
|
|
+ }
|
|
|
+ // Use the bottom-most diff layer as the target
|
|
|
+ targetRoot := layers[len(layers)-1].Root()
|
|
|
+
|
|
|
+ // Ensure the root is really present. The weak assumption
|
|
|
+ // is the presence of root can indicate the presence of the
|
|
|
+ // entire trie.
|
|
|
+ if blob := rawdb.ReadTrieNode(chaindb, targetRoot); len(blob) == 0 {
|
|
|
+ // The special case is for clique based networks(rinkeby, goerli
|
|
|
+ // and some other private networks), it's possible that two
|
|
|
+ // consecutive blocks will have same root. In this case snapshot
|
|
|
+ // difflayer won't be created. So HEAD-(n-1) may not paired with
|
|
|
+ // head-(n-1) layer. Instead the paired layer is higher than the
|
|
|
+ // bottom-most diff layer. Try to find the bottom-most snapshot
|
|
|
+ // layer with state available.
|
|
|
+ //
|
|
|
+ // Note HEAD is ignored. Usually there is the associated
|
|
|
+ // state available, but we don't want to use the topmost state
|
|
|
+ // as the pruning target.
|
|
|
+ var found bool
|
|
|
+ for i := len(layers) - 2; i >= 1; i-- {
|
|
|
+ if blob := rawdb.ReadTrieNode(chaindb, layers[i].Root()); len(blob) != 0 {
|
|
|
+ targetRoot = layers[i].Root()
|
|
|
+ found = true
|
|
|
+ log.Info("Selecting middle-layer as the pruning target", "root", targetRoot, "depth", i)
|
|
|
+ break
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if !found {
|
|
|
+ if blob := rawdb.ReadTrieNode(chaindb, snaptree.DiskRoot()); len(blob) != 0 {
|
|
|
+ targetRoot = snaptree.DiskRoot()
|
|
|
+ found = true
|
|
|
+ log.Info("Selecting disk-layer as the pruning target", "root", targetRoot)
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if !found {
|
|
|
+ if len(layers) > 0 {
|
|
|
+ log.Error("no snapshot paired state")
|
|
|
+ return nil, errors.New("no snapshot paired state")
|
|
|
+ }
|
|
|
+ return nil, fmt.Errorf("associated state[%x] is not present", targetRoot)
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ if len(layers) > 0 {
|
|
|
+ log.Info("Selecting bottom-most difflayer as the pruning target", "root", targetRoot, "height", headHeader.Number.Uint64()-uint64(len(layers)-1))
|
|
|
+ } else {
|
|
|
+ log.Info("Selecting user-specified state as the pruning target", "root", targetRoot)
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return chaindb, nil
|
|
|
+}
|
|
|
+
|
|
|
+func pruneBlock(ctx *cli.Context) error {
|
|
|
+ stack, config := makeConfigNode(ctx)
|
|
|
+ defer stack.Close()
|
|
|
+ blockAmountReserved := ctx.GlobalUint64(utils.BlockAmountReserved.Name)
|
|
|
+ chaindb, err := accessDb(ctx, stack)
|
|
|
+ if err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ var newAncientPath string
|
|
|
+ oldAncientPath := ctx.GlobalString(utils.AncientFlag.Name)
|
|
|
+ if !filepath.IsAbs(oldAncientPath) {
|
|
|
+ oldAncientPath = stack.ResolvePath(oldAncientPath)
|
|
|
+ }
|
|
|
+
|
|
|
+ path, _ := filepath.Split(oldAncientPath)
|
|
|
+ if path == "" {
|
|
|
+ return errors.New("prune failed, did not specify the AncientPath")
|
|
|
+ }
|
|
|
+ newAncientPath = filepath.Join(path, "ancient_back")
|
|
|
+
|
|
|
+ blockpruner := pruner.NewBlockPruner(chaindb, stack, oldAncientPath, newAncientPath, blockAmountReserved)
|
|
|
+
|
|
|
+ lock, exist, err := fileutil.Flock(filepath.Join(oldAncientPath, "PRUNEFLOCK"))
|
|
|
+ if err != nil {
|
|
|
+ log.Error("file lock error", "err", err)
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ if exist {
|
|
|
+ defer lock.Release()
|
|
|
+ log.Info("file lock existed, waiting for prune recovery and continue", "err", err)
|
|
|
+ if err := blockpruner.RecoverInterruption("chaindata", config.Eth.DatabaseCache, utils.MakeDatabaseHandles(), "", false); err != nil {
|
|
|
+ log.Error("Pruning failed", "err", err)
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ log.Info("Block prune successfully")
|
|
|
+ return nil
|
|
|
+ }
|
|
|
+
|
|
|
+ if _, err := os.Stat(newAncientPath); err == nil {
|
|
|
+ // No file lock found for old ancientDB but new ancientDB exsisted, indicating the geth was interrupted
|
|
|
+ // after old ancientDB removal, this happened after backup successfully, so just rename the new ancientDB
|
|
|
+ if err := blockpruner.AncientDbReplacer(); err != nil {
|
|
|
+ log.Error("Failed to rename new ancient directory")
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ log.Info("Block prune successfully")
|
|
|
+ return nil
|
|
|
+ }
|
|
|
+ name := "chaindata"
|
|
|
+ if err := blockpruner.BlockPruneBackUp(name, config.Eth.DatabaseCache, utils.MakeDatabaseHandles(), "", false, false); err != nil {
|
|
|
+ log.Error("Failed to back up block", "err", err)
|
|
|
+ return err
|
|
|
+ }
|
|
|
+
|
|
|
+ log.Info("backup block successfully")
|
|
|
+
|
|
|
+ //After backing up successfully, rename the new ancientdb name to the original one, and delete the old ancientdb
|
|
|
+ if err := blockpruner.AncientDbReplacer(); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+
|
|
|
+ lock.Release()
|
|
|
+ log.Info("Block prune successfully")
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
func pruneState(ctx *cli.Context) error {
|
|
|
stack, config := makeConfigNode(ctx)
|
|
|
defer stack.Close()
|
|
|
|
|
|
- chaindb := utils.MakeChainDatabase(ctx, stack, false)
|
|
|
+ chaindb := utils.MakeChainDatabase(ctx, stack, false, false)
|
|
|
pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name), ctx.GlobalUint64(utils.TriesInMemoryFlag.Name))
|
|
|
if err != nil {
|
|
|
log.Error("Failed to open snapshot tree", "err", err)
|
|
|
@@ -182,7 +366,7 @@ func verifyState(ctx *cli.Context) error {
|
|
|
stack, _ := makeConfigNode(ctx)
|
|
|
defer stack.Close()
|
|
|
|
|
|
- chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
|
|
+ chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
|
|
|
headBlock := rawdb.ReadHeadBlock(chaindb)
|
|
|
if headBlock == nil {
|
|
|
log.Error("Failed to load head block")
|
|
|
@@ -220,7 +404,7 @@ func traverseState(ctx *cli.Context) error {
|
|
|
stack, _ := makeConfigNode(ctx)
|
|
|
defer stack.Close()
|
|
|
|
|
|
- chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
|
|
+ chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
|
|
|
headBlock := rawdb.ReadHeadBlock(chaindb)
|
|
|
if headBlock == nil {
|
|
|
log.Error("Failed to load head block")
|
|
|
@@ -310,7 +494,7 @@ func traverseRawState(ctx *cli.Context) error {
|
|
|
stack, _ := makeConfigNode(ctx)
|
|
|
defer stack.Close()
|
|
|
|
|
|
- chaindb := utils.MakeChainDatabase(ctx, stack, true)
|
|
|
+ chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
|
|
|
headBlock := rawdb.ReadHeadBlock(chaindb)
|
|
|
if headBlock == nil {
|
|
|
log.Error("Failed to load head block")
|