dbcmd.go 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832
  1. // Copyright 2021 The go-ethereum Authors
  2. // This file is part of go-ethereum.
  3. //
  4. // go-ethereum is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // go-ethereum is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU General Public License
  15. // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
  16. package main
  17. import (
  18. "bytes"
  19. "fmt"
  20. "os"
  21. "os/signal"
  22. "path/filepath"
  23. "strconv"
  24. "strings"
  25. "syscall"
  26. "time"
  27. "github.com/ethereum/go-ethereum/cmd/utils"
  28. "github.com/ethereum/go-ethereum/common"
  29. "github.com/ethereum/go-ethereum/common/hexutil"
  30. "github.com/ethereum/go-ethereum/console/prompt"
  31. "github.com/ethereum/go-ethereum/core/rawdb"
  32. "github.com/ethereum/go-ethereum/core/state/snapshot"
  33. "github.com/ethereum/go-ethereum/core/types"
  34. "github.com/ethereum/go-ethereum/crypto"
  35. "github.com/ethereum/go-ethereum/ethdb"
  36. "github.com/ethereum/go-ethereum/internal/flags"
  37. "github.com/ethereum/go-ethereum/log"
  38. "github.com/ethereum/go-ethereum/trie"
  39. "github.com/olekukonko/tablewriter"
  40. "github.com/urfave/cli/v2"
  41. )
  42. var (
  43. removedbCommand = &cli.Command{
  44. Action: removeDB,
  45. Name: "removedb",
  46. Usage: "Remove blockchain and state databases",
  47. ArgsUsage: "",
  48. Flags: utils.DatabasePathFlags,
  49. Description: `
  50. Remove blockchain and state databases`,
  51. }
  52. dbCommand = &cli.Command{
  53. Name: "db",
  54. Usage: "Low level database operations",
  55. ArgsUsage: "",
  56. Subcommands: []*cli.Command{
  57. dbInspectCmd,
  58. dbStatCmd,
  59. dbCompactCmd,
  60. dbGetCmd,
  61. dbDeleteCmd,
  62. dbPutCmd,
  63. dbGetSlotsCmd,
  64. dbDumpFreezerIndex,
  65. dbImportCmd,
  66. dbExportCmd,
  67. dbMetadataCmd,
  68. dbMigrateFreezerCmd,
  69. dbCheckStateContentCmd,
  70. },
  71. }
  72. dbInspectCmd = &cli.Command{
  73. Action: inspect,
  74. Name: "inspect",
  75. ArgsUsage: "<prefix> <start>",
  76. Flags: flags.Merge([]cli.Flag{
  77. utils.SyncModeFlag,
  78. }, utils.NetworkFlags, utils.DatabasePathFlags),
  79. Usage: "Inspect the storage size for each type of data in the database",
  80. Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
  81. }
  82. dbCheckStateContentCmd = &cli.Command{
  83. Action: checkStateContent,
  84. Name: "check-state-content",
  85. ArgsUsage: "<start (optional)>",
  86. Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
  87. Usage: "Verify that state data is cryptographically correct",
  88. Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
  89. For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
  90. a data corruption.`,
  91. }
  92. dbStatCmd = &cli.Command{
  93. Action: dbStats,
  94. Name: "stats",
  95. Usage: "Print leveldb statistics",
  96. Flags: flags.Merge([]cli.Flag{
  97. utils.SyncModeFlag,
  98. }, utils.NetworkFlags, utils.DatabasePathFlags),
  99. }
  100. dbCompactCmd = &cli.Command{
  101. Action: dbCompact,
  102. Name: "compact",
  103. Usage: "Compact leveldb database. WARNING: May take a very long time",
  104. Flags: flags.Merge([]cli.Flag{
  105. utils.SyncModeFlag,
  106. utils.CacheFlag,
  107. utils.CacheDatabaseFlag,
  108. }, utils.NetworkFlags, utils.DatabasePathFlags),
  109. Description: `This command performs a database compaction.
  110. WARNING: This operation may take a very long time to finish, and may cause database
  111. corruption if it is aborted during execution'!`,
  112. }
  113. dbGetCmd = &cli.Command{
  114. Action: dbGet,
  115. Name: "get",
  116. Usage: "Show the value of a database key",
  117. ArgsUsage: "<hex-encoded key>",
  118. Flags: flags.Merge([]cli.Flag{
  119. utils.SyncModeFlag,
  120. }, utils.NetworkFlags, utils.DatabasePathFlags),
  121. Description: "This command looks up the specified database key from the database.",
  122. }
  123. dbDeleteCmd = &cli.Command{
  124. Action: dbDelete,
  125. Name: "delete",
  126. Usage: "Delete a database key (WARNING: may corrupt your database)",
  127. ArgsUsage: "<hex-encoded key>",
  128. Flags: flags.Merge([]cli.Flag{
  129. utils.SyncModeFlag,
  130. }, utils.NetworkFlags, utils.DatabasePathFlags),
  131. Description: `This command deletes the specified database key from the database.
  132. WARNING: This is a low-level operation which may cause database corruption!`,
  133. }
  134. dbPutCmd = &cli.Command{
  135. Action: dbPut,
  136. Name: "put",
  137. Usage: "Set the value of a database key (WARNING: may corrupt your database)",
  138. ArgsUsage: "<hex-encoded key> <hex-encoded value>",
  139. Flags: flags.Merge([]cli.Flag{
  140. utils.SyncModeFlag,
  141. }, utils.NetworkFlags, utils.DatabasePathFlags),
  142. Description: `This command sets a given database key to the given value.
  143. WARNING: This is a low-level operation which may cause database corruption!`,
  144. }
  145. dbGetSlotsCmd = &cli.Command{
  146. Action: dbDumpTrie,
  147. Name: "dumptrie",
  148. Usage: "Show the storage key/values of a given storage trie",
  149. ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
  150. Flags: flags.Merge([]cli.Flag{
  151. utils.SyncModeFlag,
  152. }, utils.NetworkFlags, utils.DatabasePathFlags),
  153. Description: "This command looks up the specified database key from the database.",
  154. }
  155. dbDumpFreezerIndex = &cli.Command{
  156. Action: freezerInspect,
  157. Name: "freezer-index",
  158. Usage: "Dump out the index of a specific freezer table",
  159. ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
  160. Flags: flags.Merge([]cli.Flag{
  161. utils.SyncModeFlag,
  162. }, utils.NetworkFlags, utils.DatabasePathFlags),
  163. Description: "This command displays information about the freezer index.",
  164. }
  165. dbImportCmd = &cli.Command{
  166. Action: importLDBdata,
  167. Name: "import",
  168. Usage: "Imports leveldb-data from an exported RLP dump.",
  169. ArgsUsage: "<dumpfile> <start (optional)",
  170. Flags: flags.Merge([]cli.Flag{
  171. utils.SyncModeFlag,
  172. }, utils.NetworkFlags, utils.DatabasePathFlags),
  173. Description: "The import command imports the specific chain data from an RLP encoded stream.",
  174. }
  175. dbExportCmd = &cli.Command{
  176. Action: exportChaindata,
  177. Name: "export",
  178. Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
  179. ArgsUsage: "<type> <dumpfile>",
  180. Flags: flags.Merge([]cli.Flag{
  181. utils.SyncModeFlag,
  182. }, utils.NetworkFlags, utils.DatabasePathFlags),
  183. Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
  184. }
  185. dbMetadataCmd = &cli.Command{
  186. Action: showMetaData,
  187. Name: "metadata",
  188. Usage: "Shows metadata about the chain status.",
  189. Flags: flags.Merge([]cli.Flag{
  190. utils.SyncModeFlag,
  191. }, utils.NetworkFlags, utils.DatabasePathFlags),
  192. Description: "Shows metadata about the chain status.",
  193. }
  194. dbMigrateFreezerCmd = &cli.Command{
  195. Action: freezerMigrate,
  196. Name: "freezer-migrate",
  197. Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
  198. ArgsUsage: "",
  199. Flags: flags.Merge([]cli.Flag{
  200. utils.SyncModeFlag,
  201. }, utils.NetworkFlags, utils.DatabasePathFlags),
  202. Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
  203. WARNING: please back-up the receipt files in your ancients before running this command.`,
  204. }
  205. )
// removeDB implements the `geth removedb` command. It locates the full node
// state database, the ancient (freezer) database and the light node database,
// and for each one that exists asks the user to confirm its deletion via
// confirmAndRemoveDB. Missing databases are merely logged.
func removeDB(ctx *cli.Context) error {
	stack, config := makeConfigNode(ctx)
	// Remove the full node state database
	path := stack.ResolvePath("chaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node state database")
	} else {
		log.Info("Full node state database missing", "path", path)
	}
	// Remove the full node ancient database
	path = config.Eth.DatabaseFreezer
	switch {
	case path == "":
		// No explicit freezer path configured: the default location is the
		// "ancient" folder nested inside the chaindata directory.
		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
	case !filepath.IsAbs(path):
		// Relative freezer paths are resolved against the node's data dir.
		path = config.Node.ResolvePath(path)
	}
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node ancient database")
	} else {
		log.Info("Full node ancient database missing", "path", path)
	}
	// Remove the light node database
	path = stack.ResolvePath("lightchaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "light node database")
	} else {
		log.Info("Light node database missing", "path", path)
	}
	return nil
}
  237. // confirmAndRemoveDB prompts the user for a last confirmation and removes the
  238. // folder if accepted.
  239. func confirmAndRemoveDB(database string, kind string) {
  240. confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
  241. switch {
  242. case err != nil:
  243. utils.Fatalf("%v", err)
  244. case !confirm:
  245. log.Info("Database deletion skipped", "path", database)
  246. default:
  247. start := time.Now()
  248. filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
  249. // If we're at the top level folder, recurse into
  250. if path == database {
  251. return nil
  252. }
  253. // Delete all the files, but not subfolders
  254. if !info.IsDir() {
  255. os.Remove(path)
  256. return nil
  257. }
  258. return filepath.SkipDir
  259. })
  260. log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
  261. }
  262. }
  263. func inspect(ctx *cli.Context) error {
  264. var (
  265. prefix []byte
  266. start []byte
  267. )
  268. if ctx.NArg() > 2 {
  269. return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
  270. }
  271. if ctx.NArg() >= 1 {
  272. if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
  273. return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
  274. } else {
  275. prefix = d
  276. }
  277. }
  278. if ctx.NArg() >= 2 {
  279. if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
  280. return fmt.Errorf("failed to hex-decode 'start': %v", err)
  281. } else {
  282. start = d
  283. }
  284. }
  285. stack, _ := makeConfigNode(ctx)
  286. defer stack.Close()
  287. db := utils.MakeChainDatabase(ctx, stack, true)
  288. defer db.Close()
  289. return rawdb.InspectDatabase(db, prefix, start)
  290. }
// checkStateContent iterates every 32-byte key in the database (the key
// length used for trie nodes) and verifies that each key equals the
// keccak256 hash of its stored value. Any mismatch indicates data corruption
// and is printed to stdout. An optional hex-encoded start key allows
// resuming a partial scan. Returns the first iterator error, if any.
func checkStateContent(ctx *cli.Context) error {
	var (
		prefix []byte // left nil: the full keyspace is scanned
		start  []byte
	)
	if ctx.NArg() > 1 {
		return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
	}
	if ctx.NArg() > 0 {
		if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
			return fmt.Errorf("failed to hex-decode 'start': %v", err)
		} else {
			start = d
		}
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()
	var (
		// Only 32-byte keys are candidates for trie nodes.
		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
		hasher    = crypto.NewKeccakState()
		got       = make([]byte, 32) // reusable hash output buffer
		errs      int
		count     int
		startTime = time.Now()
		lastLog   = time.Now()
	)
	for it.Next() {
		count++
		k := it.Key()
		v := it.Value()
		// Recompute the hash of the value and compare it to the key.
		hasher.Reset()
		hasher.Write(v)
		hasher.Read(got)
		if !bytes.Equal(k, got) {
			errs++
			fmt.Printf("Error at %#x\n", k)
			fmt.Printf(" Hash: %#x\n", got)
			fmt.Printf(" Data: %#x\n", v)
		}
		// Emit a progress log roughly every 8 seconds.
		if time.Since(lastLog) > 8*time.Second {
			log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
			lastLog = time.Now()
		}
	}
	if err := it.Error(); err != nil {
		return err
	}
	log.Info("Iterated the state content", "errors", errs, "items", count)
	return nil
}
  343. func showLeveldbStats(db ethdb.KeyValueStater) {
  344. if stats, err := db.Stat("leveldb.stats"); err != nil {
  345. log.Warn("Failed to read database stats", "error", err)
  346. } else {
  347. fmt.Println(stats)
  348. }
  349. if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
  350. log.Warn("Failed to read database iostats", "error", err)
  351. } else {
  352. fmt.Println(ioStats)
  353. }
  354. }
  355. func dbStats(ctx *cli.Context) error {
  356. stack, _ := makeConfigNode(ctx)
  357. defer stack.Close()
  358. db := utils.MakeChainDatabase(ctx, stack, true)
  359. defer db.Close()
  360. showLeveldbStats(db)
  361. return nil
  362. }
  363. func dbCompact(ctx *cli.Context) error {
  364. stack, _ := makeConfigNode(ctx)
  365. defer stack.Close()
  366. db := utils.MakeChainDatabase(ctx, stack, false)
  367. defer db.Close()
  368. log.Info("Stats before compaction")
  369. showLeveldbStats(db)
  370. log.Info("Triggering compaction")
  371. if err := db.Compact(nil, nil); err != nil {
  372. log.Info("Compact err", "error", err)
  373. return err
  374. }
  375. log.Info("Stats after compaction")
  376. showLeveldbStats(db)
  377. return nil
  378. }
  379. // dbGet shows the value of a given database key
  380. func dbGet(ctx *cli.Context) error {
  381. if ctx.NArg() != 1 {
  382. return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
  383. }
  384. stack, _ := makeConfigNode(ctx)
  385. defer stack.Close()
  386. db := utils.MakeChainDatabase(ctx, stack, true)
  387. defer db.Close()
  388. key, err := common.ParseHexOrString(ctx.Args().Get(0))
  389. if err != nil {
  390. log.Info("Could not decode the key", "error", err)
  391. return err
  392. }
  393. data, err := db.Get(key)
  394. if err != nil {
  395. log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
  396. return err
  397. }
  398. fmt.Printf("key %#x: %#x\n", key, data)
  399. return nil
  400. }
  401. // dbDelete deletes a key from the database
  402. func dbDelete(ctx *cli.Context) error {
  403. if ctx.NArg() != 1 {
  404. return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
  405. }
  406. stack, _ := makeConfigNode(ctx)
  407. defer stack.Close()
  408. db := utils.MakeChainDatabase(ctx, stack, false)
  409. defer db.Close()
  410. key, err := common.ParseHexOrString(ctx.Args().Get(0))
  411. if err != nil {
  412. log.Info("Could not decode the key", "error", err)
  413. return err
  414. }
  415. data, err := db.Get(key)
  416. if err == nil {
  417. fmt.Printf("Previous value: %#x\n", data)
  418. }
  419. if err = db.Delete(key); err != nil {
  420. log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
  421. return err
  422. }
  423. return nil
  424. }
  425. // dbPut overwrite a value in the database
  426. func dbPut(ctx *cli.Context) error {
  427. if ctx.NArg() != 2 {
  428. return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
  429. }
  430. stack, _ := makeConfigNode(ctx)
  431. defer stack.Close()
  432. db := utils.MakeChainDatabase(ctx, stack, false)
  433. defer db.Close()
  434. var (
  435. key []byte
  436. value []byte
  437. data []byte
  438. err error
  439. )
  440. key, err = common.ParseHexOrString(ctx.Args().Get(0))
  441. if err != nil {
  442. log.Info("Could not decode the key", "error", err)
  443. return err
  444. }
  445. value, err = hexutil.Decode(ctx.Args().Get(1))
  446. if err != nil {
  447. log.Info("Could not decode the value", "error", err)
  448. return err
  449. }
  450. data, err = db.Get(key)
  451. if err == nil {
  452. fmt.Printf("Previous value: %#x\n", data)
  453. }
  454. return db.Put(key, value)
  455. }
// dbDumpTrie shows the key-value slots of a given storage trie. The first
// argument is the hex-encoded storage trie root; an optional hex-encoded
// seek position and an optional maximum element count may follow.
func dbDumpTrie(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()
	var (
		root  []byte
		start []byte
		max   = int64(-1) // negative means no element limit
		err   error
	)
	if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
		log.Info("Could not decode the root", "error", err)
		return err
	}
	stRoot := common.BytesToHash(root)
	if ctx.NArg() >= 2 {
		if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
			log.Info("Could not decode the seek position", "error", err)
			return err
		}
	}
	if ctx.NArg() >= 3 {
		if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
			log.Info("Could not decode the max count", "error", err)
			return err
		}
	}
	// Open the trie by root; the owner argument is the zero hash here —
	// NOTE(review): presumably fine for standalone lookup by root, confirm
	// against the trie.New contract for this geth version.
	theTrie, err := trie.New(common.Hash{}, stRoot, trie.NewDatabase(db))
	if err != nil {
		return err
	}
	var count int64
	it := trie.NewIterator(theTrie.NodeIterator(start))
	for it.Next() {
		// Stop once the requested number of elements has been printed.
		if max > 0 && count == max {
			fmt.Printf("Exiting after %d values\n", count)
			break
		}
		fmt.Printf(" %d. key %#x: %#x\n", count, it.Key, it.Value)
		count++
	}
	return it.Err
}
  504. func freezerInspect(ctx *cli.Context) error {
  505. if ctx.NArg() < 4 {
  506. return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
  507. }
  508. var (
  509. freezer = ctx.Args().Get(0)
  510. table = ctx.Args().Get(1)
  511. )
  512. start, err := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
  513. if err != nil {
  514. log.Info("Could not read start-param", "err", err)
  515. return err
  516. }
  517. end, err := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
  518. if err != nil {
  519. log.Info("Could not read count param", "err", err)
  520. return err
  521. }
  522. stack, _ := makeConfigNode(ctx)
  523. defer stack.Close()
  524. db := utils.MakeChainDatabase(ctx, stack, true)
  525. defer db.Close()
  526. ancient, err := db.AncientDatadir()
  527. if err != nil {
  528. log.Info("Failed to retrieve ancient root", "err", err)
  529. return err
  530. }
  531. return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
  532. }
  533. func importLDBdata(ctx *cli.Context) error {
  534. start := 0
  535. switch ctx.NArg() {
  536. case 1:
  537. break
  538. case 2:
  539. s, err := strconv.Atoi(ctx.Args().Get(1))
  540. if err != nil {
  541. return fmt.Errorf("second arg must be an integer: %v", err)
  542. }
  543. start = s
  544. default:
  545. return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
  546. }
  547. var (
  548. fName = ctx.Args().Get(0)
  549. stack, _ = makeConfigNode(ctx)
  550. interrupt = make(chan os.Signal, 1)
  551. stop = make(chan struct{})
  552. )
  553. defer stack.Close()
  554. signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
  555. defer signal.Stop(interrupt)
  556. defer close(interrupt)
  557. go func() {
  558. if _, ok := <-interrupt; ok {
  559. log.Info("Interrupted during ldb import, stopping at next batch")
  560. }
  561. close(stop)
  562. }()
  563. db := utils.MakeChainDatabase(ctx, stack, false)
  564. return utils.ImportLDBData(db, fName, int64(start), stop)
  565. }
  566. type preimageIterator struct {
  567. iter ethdb.Iterator
  568. }
  569. func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
  570. for iter.iter.Next() {
  571. key := iter.iter.Key()
  572. if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
  573. return utils.OpBatchAdd, key, iter.iter.Value(), true
  574. }
  575. }
  576. return 0, nil, nil, false
  577. }
  578. func (iter *preimageIterator) Release() {
  579. iter.iter.Release()
  580. }
// snapshotIterator streams snapshot data for export: the first call emits a
// deletion of the snapshot root key (NOTE(review): presumably so that the
// snapshot is regenerated on the importing side — confirm against the import
// logic), followed by all well-formed account and then storage entries.
type snapshotIterator struct {
	init    bool // set once the initial root-key deletion has been emitted
	account ethdb.Iterator
	storage ethdb.Iterator
}

// Next returns the next snapshot operation: first the root-key delete, then
// account entries, then storage entries. Keys with the right prefix but the
// wrong length are skipped. The final boolean is false when both iterators
// are exhausted.
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
	if !iter.init {
		iter.init = true
		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
	}
	for iter.account.Next() {
		key := iter.account.Key()
		// Account entries: prefix + one 32-byte hash.
		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
			return utils.OpBatchAdd, key, iter.account.Value(), true
		}
	}
	for iter.storage.Next() {
		key := iter.storage.Key()
		// Storage entries: prefix + two 32-byte hashes (account + slot).
		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
			return utils.OpBatchAdd, key, iter.storage.Value(), true
		}
	}
	return 0, nil, nil, false
}

// Release frees both underlying database iterators.
func (iter *snapshotIterator) Release() {
	iter.account.Release()
	iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
// Each entry maps a user-facing type name to a constructor that builds the
// iterator streaming that data out of the given database.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	// "preimage" streams all stored trie-key preimages.
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	// "snapshot" streams account and storage snapshot entries.
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}
  621. func exportChaindata(ctx *cli.Context) error {
  622. if ctx.NArg() < 2 {
  623. return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
  624. }
  625. // Parse the required chain data type, make sure it's supported.
  626. kind := ctx.Args().Get(0)
  627. kind = strings.ToLower(strings.Trim(kind, " "))
  628. exporter, ok := chainExporters[kind]
  629. if !ok {
  630. var kinds []string
  631. for kind := range chainExporters {
  632. kinds = append(kinds, kind)
  633. }
  634. return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
  635. }
  636. var (
  637. stack, _ = makeConfigNode(ctx)
  638. interrupt = make(chan os.Signal, 1)
  639. stop = make(chan struct{})
  640. )
  641. defer stack.Close()
  642. signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
  643. defer signal.Stop(interrupt)
  644. defer close(interrupt)
  645. go func() {
  646. if _, ok := <-interrupt; ok {
  647. log.Info("Interrupted during db export, stopping at next batch")
  648. }
  649. close(stop)
  650. }()
  651. db := utils.MakeChainDatabase(ctx, stack, true)
  652. return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
  653. }
  654. func showMetaData(ctx *cli.Context) error {
  655. stack, _ := makeConfigNode(ctx)
  656. defer stack.Close()
  657. db := utils.MakeChainDatabase(ctx, stack, true)
  658. ancients, err := db.Ancients()
  659. if err != nil {
  660. fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
  661. }
  662. pp := func(val *uint64) string {
  663. if val == nil {
  664. return "<nil>"
  665. }
  666. return fmt.Sprintf("%d (%#x)", *val, *val)
  667. }
  668. data := [][]string{
  669. {"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
  670. {"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
  671. {"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
  672. {"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
  673. if b := rawdb.ReadHeadBlock(db); b != nil {
  674. data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
  675. data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
  676. data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
  677. }
  678. if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
  679. data = append(data, []string{"SkeletonSyncStatus", string(b)})
  680. }
  681. if h := rawdb.ReadHeadHeader(db); h != nil {
  682. data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
  683. data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
  684. data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
  685. }
  686. data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
  687. {"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
  688. {"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
  689. {"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
  690. {"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
  691. {"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
  692. {"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
  693. {"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
  694. {"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
  695. {"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
  696. }...)
  697. table := tablewriter.NewWriter(os.Stdout)
  698. table.SetHeader([]string{"Field", "Value"})
  699. table.AppendBulk(data)
  700. table.Render()
  701. return nil
  702. }
// freezerMigrate checks the freezer for receipts stored in the legacy format
// and, if any are found, rewrites the whole receipts table into the current
// format via MigrateTable. It is a no-op when the freezer is empty or the
// first non-empty receipt is already in the new format.
func freezerMigrate(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()
	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return err
	}
	if numAncients < 1 {
		log.Info("No receipts in freezer to migrate")
		return nil
	}
	isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
	if err != nil {
		return err
	}
	if !isFirstLegacy {
		log.Info("No legacy receipts to migrate")
		return nil
	}
	log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
	start := time.Now()
	if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
		return err
	}
	// Close explicitly so a flush/close failure surfaces as a command error.
	// NOTE(review): db is also closed by the defer above — confirm the
	// second Close on an already-closed database is harmless here.
	if err := db.Close(); err != nil {
		return err
	}
	log.Info("Migration finished", "duration", time.Since(start))
	return nil
}
// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
// non-empty receipt and checks its format. The index of this first non-empty element is
// the second return parameter.
func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return false, 0, err
	}
	// An empty freezer trivially has no legacy receipts.
	if numAncients < 1 {
		return false, 0, nil
	}
	// An out-of-range hint index cannot be checked; report no legacy data.
	if firstIdx >= numAncients {
		return false, firstIdx, nil
	}
	var (
		legacy       bool
		blob         []byte
		emptyRLPList = []byte{192} // RLP encoding of an empty list
	)
	// Find first block with non-empty receipt, only if
	// the index is not already provided.
	if firstIdx == 0 {
		for i := uint64(0); i < numAncients; i++ {
			blob, err = db.Ancient("receipts", i)
			if err != nil {
				return false, 0, err
			}
			// Skip entries with no receipt payload at all.
			if len(blob) == 0 {
				continue
			}
			// Skip entries that encode an empty receipt list.
			if !bytes.Equal(blob, emptyRLPList) {
				firstIdx = i
				break
			}
		}
	}
	// Is first non-empty receipt legacy?
	first, err := db.Ancient("receipts", firstIdx)
	if err != nil {
		return false, 0, err
	}
	legacy, err = types.IsLegacyStoredReceipts(first)
	return legacy, firstIdx, err
}