| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539 |
- // Copyright 2015 The go-ethereum Authors
- // This file is part of go-ethereum.
- //
- // go-ethereum is free software: you can redistribute it and/or modify
- // it under the terms of the GNU General Public License as published by
- // the Free Software Foundation, either version 3 of the License, or
- // (at your option) any later version.
- //
- // go-ethereum is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU General Public License for more details.
- //
- // You should have received a copy of the GNU General Public License
- // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
- package main
- import (
- "encoding/json"
- "fmt"
- "net"
- "os"
- "path"
- "runtime"
- "strconv"
- "strings"
- "sync/atomic"
- "time"
- "github.com/ethereum/go-ethereum/cmd/utils"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "gopkg.in/urfave/cli.v1"
- )
// Command definitions for the blockchain-management subcommands. Each entry is
// wired into the CLI by the main command table; the Action funcs are defined
// below in this file.
var (
	// initCommand seeds both the full and light chain databases from a
	// user-supplied genesis spec (see initGenesis).
	initCommand = cli.Command{
		Action: utils.MigrateFlags(initGenesis),
		Name: "init",
		Usage: "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.
It expects the genesis file as argument.`,
	}
	// initNetworkCommand bootstraps genesis, node keys and per-node config
	// files for a whole multi-node network (see initNetwork).
	initNetworkCommand = cli.Command{
		Action: utils.MigrateFlags(initNetwork),
		Name: "init-network",
		Usage: "Bootstrap and initialize a new genesis block, and nodekey, config files for network nodes",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.InitNetworkDir,
			utils.InitNetworkPort,
			utils.InitNetworkSize,
			utils.InitNetworkIps,
			configFileFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init-network command initializes a new genesis block, definition for the network, config files for network nodes.
It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec selected by the network
	// flags as JSON on stdout (see dumpGenesis).
	dumpGenesisCommand = cli.Command{
		Action: utils.MigrateFlags(dumpGenesis),
		Name: "dumpgenesis",
		Usage: "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.YoloV3Flag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports RLP-encoded block files into the local chain
	// database, with optional metrics reporting (see importChain).
	importCommand = cli.Command{
		Action: utils.MigrateFlags(importChain),
		Name: "import",
		Usage: "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.
If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (or a block range) to an RLP file,
	// gzipped when the name ends in .gz (see exportChain).
	exportCommand = cli.Command{
		Action: utils.MigrateFlags(exportChain),
		Name: "export",
		Usage: "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream into the
	// database (see importPreimages).
	importPreimagesCommand = cli.Command{
		Action: utils.MigrateFlags(importPreimages),
		Name: "import-preimages",
		Usage: "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand streams the preimage database out as RLP
	// (see exportPreimages).
	exportPreimagesCommand = cli.Command{
		Action: utils.MigrateFlags(exportPreimages),
		Name: "export-preimages",
		Usage: "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// dumpCommand prints the state at given blocks, addressed by hash or
	// number (see dump).
	dumpCommand = cli.Command{
		Action: utils.MigrateFlags(dump),
		Name: "dump",
		Usage: "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
)
- // initGenesis will initialise the given JSON format genesis file and writes it as
- // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
- func initGenesis(ctx *cli.Context) error {
- // Make sure we have a valid genesis JSON
- genesisPath := ctx.Args().First()
- if len(genesisPath) == 0 {
- utils.Fatalf("Must supply path to genesis JSON file")
- }
- file, err := os.Open(genesisPath)
- if err != nil {
- utils.Fatalf("Failed to read genesis file: %v", err)
- }
- defer file.Close()
- genesis := new(core.Genesis)
- if err := json.NewDecoder(file).Decode(genesis); err != nil {
- utils.Fatalf("invalid genesis file: %v", err)
- }
- // Open and initialise both full and light databases
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- for _, name := range []string{"chaindata", "lightchaindata"} {
- chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
- if err != nil {
- utils.Fatalf("Failed to open database: %v", err)
- }
- _, hash, err := core.SetupGenesisBlock(chaindb, genesis)
- if err != nil {
- utils.Fatalf("Failed to write genesis block: %v", err)
- }
- chaindb.Close()
- log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
- }
- return nil
- }
- // initNetwork will bootstrap and initialize a new genesis block, and nodekey, config files for network nodes
- func initNetwork(ctx *cli.Context) error {
- initDir := ctx.String(utils.InitNetworkDir.Name)
- if len(initDir) == 0 {
- utils.Fatalf("init.dir is required")
- }
- size := ctx.Int(utils.InitNetworkSize.Name)
- port := ctx.Int(utils.InitNetworkPort.Name)
- ipStr := ctx.String(utils.InitNetworkIps.Name)
- cfgFile := ctx.String(configFileFlag.Name)
- if len(cfgFile) == 0 {
- utils.Fatalf("config file is required")
- }
- var ips []string
- if len(ipStr) != 0 {
- ips = strings.Split(ipStr, ",")
- if len(ips) != size {
- utils.Fatalf("mismatch of size and length of ips")
- }
- for i := 0; i < size; i++ {
- _, err := net.ResolveIPAddr("", ips[i])
- if err != nil {
- utils.Fatalf("invalid format of ip")
- return err
- }
- }
- } else {
- ips = make([]string, size)
- for i := 0; i < size; i++ {
- ips[i] = "127.0.0.1"
- }
- }
- // Make sure we have a valid genesis JSON
- genesisPath := ctx.Args().First()
- if len(genesisPath) == 0 {
- utils.Fatalf("Must supply path to genesis JSON file")
- }
- file, err := os.Open(genesisPath)
- if err != nil {
- utils.Fatalf("Failed to read genesis file: %v", err)
- }
- defer file.Close()
- genesis := new(core.Genesis)
- if err := json.NewDecoder(file).Decode(genesis); err != nil {
- utils.Fatalf("invalid genesis file: %v", err)
- }
- enodes := make([]*enode.Node, size)
- // load config
- var config gethConfig
- err = loadConfig(cfgFile, &config)
- if err != nil {
- return err
- }
- config.Eth.Genesis = genesis
- for i := 0; i < size; i++ {
- stack, err := node.New(&config.Node)
- if err != nil {
- return err
- }
- stack.Config().DataDir = path.Join(initDir, fmt.Sprintf("node%d", i))
- pk := stack.Config().NodeKey()
- enodes[i] = enode.NewV4(&pk.PublicKey, net.ParseIP(ips[i]), port, port)
- }
- for i := 0; i < size; i++ {
- config.Node.HTTPHost = ips[i]
- config.Node.P2P.StaticNodes = make([]*enode.Node, size-1)
- for j := 0; j < i; j++ {
- config.Node.P2P.StaticNodes[j] = enodes[j]
- }
- for j := i + 1; j < size; j++ {
- config.Node.P2P.StaticNodes[j-1] = enodes[j]
- }
- out, err := tomlSettings.Marshal(config)
- if err != nil {
- return err
- }
- dump, err := os.OpenFile(path.Join(initDir, fmt.Sprintf("node%d", i), "config.toml"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
- if err != nil {
- return err
- }
- defer dump.Close()
- dump.Write(out)
- }
- return nil
- }
- func dumpGenesis(ctx *cli.Context) error {
- // TODO(rjl493456442) support loading from the custom datadir
- genesis := utils.MakeGenesis(ctx)
- if genesis == nil {
- genesis = core.DefaultGenesisBlock()
- }
- if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
- utils.Fatalf("could not encode genesis")
- }
- return nil
- }
// importChain imports one or more RLP-encoded chain files into the local
// database, printing timing, memory and (unless disabled) compaction stats.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()
	// Start periodically gathering memory profiles.
	// NOTE(review): this sampler goroutine has no stop signal; it relies on
	// the process exiting when the command returns.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			// Track high-water marks atomically; the main goroutine reads
			// them after the import completes.
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain. A single file stops on the first error; with several
	// files processing continues and the last error is returned at the end.
	start := time.Now()
	var importErr error
	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))
	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)
	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)
	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))
	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}
	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	showLeveldbStats(db)
	return importErr
}
- func exportChain(ctx *cli.Context) error {
- if len(ctx.Args()) < 1 {
- utils.Fatalf("This command requires an argument.")
- }
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- chain, _ := utils.MakeChain(ctx, stack)
- start := time.Now()
- var err error
- fp := ctx.Args().First()
- if len(ctx.Args()) < 3 {
- err = utils.ExportChain(chain, fp)
- } else {
- // This can be improved to allow for numbers larger than 9223372036854775807
- first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
- last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
- if ferr != nil || lerr != nil {
- utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
- }
- if first < 0 || last < 0 {
- utils.Fatalf("Export error: block number must be greater than 0\n")
- }
- if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
- utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
- }
- err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
- }
- if err != nil {
- utils.Fatalf("Export error: %v\n", err)
- }
- fmt.Printf("Export done in %v\n", time.Since(start))
- return nil
- }
- // importPreimages imports preimage data from the specified file.
- func importPreimages(ctx *cli.Context) error {
- if len(ctx.Args()) < 1 {
- utils.Fatalf("This command requires an argument.")
- }
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, false, false)
- start := time.Now()
- if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
- utils.Fatalf("Import error: %v\n", err)
- }
- fmt.Printf("Import done in %v\n", time.Since(start))
- return nil
- }
- // exportPreimages dumps the preimage data to specified json file in streaming way.
- func exportPreimages(ctx *cli.Context) error {
- if len(ctx.Args()) < 1 {
- utils.Fatalf("This command requires an argument.")
- }
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, true, false)
- start := time.Now()
- if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
- utils.Fatalf("Export error: %v\n", err)
- }
- fmt.Printf("Export done in %v\n", time.Since(start))
- return nil
- }
- func dump(ctx *cli.Context) error {
- stack, _ := makeConfigNode(ctx)
- defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, true, false)
- for _, arg := range ctx.Args() {
- var header *types.Header
- if hashish(arg) {
- hash := common.HexToHash(arg)
- number := rawdb.ReadHeaderNumber(db, hash)
- if number != nil {
- header = rawdb.ReadHeader(db, hash, *number)
- }
- } else {
- number, _ := strconv.Atoi(arg)
- hash := rawdb.ReadCanonicalHash(db, uint64(number))
- if hash != (common.Hash{}) {
- header = rawdb.ReadHeader(db, hash, uint64(number))
- }
- }
- if header == nil {
- fmt.Println("{}")
- utils.Fatalf("block not found")
- } else {
- state, err := state.New(header.Root, state.NewDatabase(db), nil)
- if err != nil {
- utils.Fatalf("could not create new state: %v", err)
- }
- excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
- excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
- includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
- if ctx.Bool(utils.IterativeOutputFlag.Name) {
- state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
- } else {
- if includeMissing {
- fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
- " otherwise the accounts will overwrite each other in the resulting mapping.")
- }
- fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
- }
- }
- }
- return nil
- }
// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	// Heuristic: anything that fails to parse as a base-10 integer is
	// assumed to be a block hash rather than a block number.
	if _, err := strconv.Atoi(x); err != nil {
		return true
	}
	return false
}
|