// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"bufio"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/urfave/cli/v2"
)

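// importBatchSize is the number of blocks decoded and inserted per batch
// while importing a chain from a file.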
const (
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

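// StartNode boots up the given node and registers a handler that shuts it
// down gracefully on SIGINT or SIGTERM. In console mode SIGINT is left to
// the console itself; only SIGTERM triggers a shutdown. It also spawns the
// free-disk-space monitor if a positive threshold is configured.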
func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := 2 * ethconfig.Defaults.TrieDirtyCache // Default 2 * 256MB
		if ctx.IsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.Int(MinFreeDiskSpaceFlag.Name)
		} else if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = 2 * ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		shutdown := func() {
			log.Info("Got interrupt, shutting down...")
			go stack.Close()
			for i := 10; i > 0; i-- {
				<-sigc
				if i > 1 {
					log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
				}
			}
			debug.Exit() // ensure trace and CPU profile data is flushed.
			debug.LoudPanic("boom")
		}
		if isConsole {
			// In JS console mode, SIGINT is ignored because it's handled by the console.
			// However, SIGTERM still shuts down the node.
			for {
				sig := <-sigc
				if sig == syscall.SIGTERM {
					shutdown()
					return
				}
			}
		} else {
			<-sigc
			shutdown()
		}
	}()
}

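// monitorFreeDiskSpace polls the free disk space of the given path every 30
// seconds and sends SIGTERM on the provided channel once it drops below the
// critical threshold, triggering a graceful shutdown.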
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace))
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical))
		}
		time.Sleep(30 * time.Second)
	}
}

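// ImportChain imports blocks from an RLP-encoded file (optionally gzipped)
// into the given blockchain in batches of importBatchSize, stopping at the
// next batch boundary if an interrupt signal is received.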
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}
	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// Don't import the genesis block
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}

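// missingBlocks returns the suffix of the given batch that is not yet known
// to the chain: below the current head only the block itself must be present,
// above it the associated state must be available too.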
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block presence; the state
		// is available at the head.
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)
	return nil
}

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}

// ImportPreimages imports a batch of exported hash preimages into the database.
// It's part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)
	for {
		// Read the next entry and ensure it's not junk
		var blob []byte
		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough were gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It's part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}

// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
	Magic    string // Always set to 'gethdbdump' for disambiguation
	Version  uint64
	Kind     string
	UnixTime uint64
}

const exportMagic = "gethdbdump"

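// Operation types for entries in an exported data stream: each entry is
// either a key/value insertion or a key deletion.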
const (
	OpBatchAdd = 0
	OpBatchDel = 1
)

// ImportLDBData imports a batch of snapshot data into the database
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
	log.Info("Importing leveldb data", "file", f)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(f)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(f, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Read the header
	var header exportHeader
	if err := stream.Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	if header.Magic != exportMagic {
		return errors.New("incompatible data, wrong magic")
	}
	if header.Version != 0 {
		return fmt.Errorf("incompatible version %d (only version 0 is supported)", header.Version)
	}
	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))

	// Import the snapshot in batches to prevent disk thrashing
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
		batch  = db.NewBatch()
	)
	for {
		// Read the next entry
		var (
			op       byte
			key, val []byte
		)
		if err := stream.Decode(&op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := stream.Decode(&key); err != nil {
			return err
		}
		if err := stream.Decode(&val); err != nil {
			return err
		}
		// Skip entries that were already processed before startIndex
		if count < startIndex {
			count++
			continue
		}
		switch op {
		case OpBatchDel:
			batch.Delete(key)
		case OpBatchAdd:
			batch.Put(key, val)
		default:
			return fmt.Errorf("unknown op %d", op)
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Check interruption emitted by ctrl+c
		if count%1000 == 0 {
			select {
			case <-interrupt:
				if err := batch.Write(); err != nil {
					return err
				}
				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
		}
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
		count++
	}
	// Flush the last batch snapshot data
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return err
		}
	}
	log.Info("Imported chain data", "file", f, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}

// ChainDataIterator is an interface that wraps all the necessary functions
// to iterate over the chain data being exported.
type ChainDataIterator interface {
	// Next returns the key-value pair for the next entry to be exported.
	// When the end is reached, it will return (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and can
	// be called multiple times without causing error.
	Release()
}

// ExportChaindata exports the given data type into the specified file,
// truncating any data already present. If the suffix is '.gz', gzip
// compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Write the header
	if err := rlp.Encode(writer, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// Extract data from source iterator and dump them out to file
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		if err := rlp.Encode(writer, op); err != nil {
			return err
		}
		if err := rlp.Encode(writer, key); err != nil {
			return err
		}
		if err := rlp.Encode(writer, val); err != nil {
			return err
		}
		if count%1000 == 0 {
			// Check interruption emitted by ctrl+c
			select {
			case <-interrupt:
				log.Info("Chain data exporting interrupted", "file", fn,
					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		count++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}