pruner.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package pruner
import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)
const (
	// stateBloomFilePrefix is the filename prefix of state bloom filter.
	stateBloomFilePrefix = "statebloom"

	// stateBloomFileSuffix is the filename suffix of state bloom filter.
	stateBloomFileSuffix = "bf.gz"

	// stateBloomFileTempSuffix is the filename suffix of state bloom filter
	// while it is being written out to detect write aborts.
	stateBloomFileTempSuffix = ".tmp"

	// rangeCompactionThreshold is the minimal number of deleted entries that
	// triggers a range compaction. It's a fairly arbitrary number, chosen just
	// to avoid triggering a range compaction for small deletions.
	rangeCompactionThreshold = 100000
)
var (
	// emptyRoot is the known root hash of an empty trie.
	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

	// emptyCode is the known hash of the empty EVM bytecode.
	emptyCode = crypto.Keccak256(nil)
)
// Pruner is an offline tool to prune the stale state with the
// help of the snapshot. The workflow of the pruner is very simple:
//
// - iterate the snapshot, reconstruct the relevant state
// - iterate the database, delete all other state entries which
//   don't belong to the target state or the genesis state
//
// It can take several hours (around 2 hours for mainnet) to finish
// the whole pruning work. It's recommended to run this offline tool
// periodically in order to release disk usage and improve disk read
// performance to some extent.
type Pruner struct {
	db            ethdb.Database
	stateBloom    *stateBloom
	datadir       string
	trieCachePath string
	headHeader    *types.Header
	snaptree      *snapshot.Tree
}
// NewPruner creates the pruner instance.
func NewPruner(db ethdb.Database, headHeader *types.Header, datadir, trieCachePath string, bloomSize uint64) (*Pruner, error) {
	snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headHeader.Root, false, false, false)
	if err != nil {
		return nil, err // The relevant snapshot(s) might not exist
	}
	// Sanitize the bloom filter size if it's too small.
	if bloomSize < 256 {
		log.Warn("Sanitizing bloomfilter size", "provided(MB)", bloomSize, "updated(MB)", 256)
		bloomSize = 256
	}
	stateBloom, err := newStateBloomWithSize(bloomSize)
	if err != nil {
		return nil, err
	}
	return &Pruner{
		db:            db,
		stateBloom:    stateBloom,
		datadir:       datadir,
		trieCachePath: trieCachePath,
		headHeader:    headHeader,
		snaptree:      snaptree,
	}, nil
}
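// prune deletes every database entry that is neither referenced by the target
// state (as recorded in the state bloom) nor part of the genesis state, and
// triggers a range compaction afterwards if enough entries were removed.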
func prune(maindb ethdb.Database, stateBloom *stateBloom, middleStateRoots map[common.Hash]struct{}, start time.Time) error {
	// Delete all stale trie nodes on disk. With the help of the state bloom,
	// the trie nodes (and codes) belonging to the active state will be filtered
	// out. A very small part of the stale tries will also be filtered out because
	// of the false-positive rate of the bloom filter, but the assumption here is
	// that the false-positive rate is low enough (~0.05%). The probability that a
	// dangling node is a state root is extremely low, so in theory the dangling
	// nodes will never be visited again.
	var (
		count  int
		size   common.StorageSize
		pstart = time.Now()
		logged = time.Now()
		batch  = maindb.NewBatch()
		iter   = maindb.NewIterator(nil, nil)
	)
	for iter.Next() {
		key := iter.Key()

		// All state entries that don't belong to the specific state or the
		// genesis are deleted here:
		// - trie node
		// - legacy contract code
		// - new-scheme contract code
		isCode, codeKey := rawdb.IsCodeKey(key)
		if len(key) == common.HashLength || isCode {
			checkKey := key
			if isCode {
				checkKey = codeKey
			}
			if _, exist := middleStateRoots[common.BytesToHash(checkKey)]; exist {
				log.Debug("Forcibly delete the middle state roots", "hash", common.BytesToHash(checkKey))
			} else {
				if ok, err := stateBloom.Contain(checkKey); err != nil {
					return err
				} else if ok {
					continue
				}
			}
			count += 1
			size += common.StorageSize(len(key) + len(iter.Value()))
			batch.Delete(key)
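			// The iterator walks keys in ascending order, so interpreting the
			// first 8 bytes of the current key as a big-endian integer gives a
			// rough position within the 64-bit keyspace, from which the progress
			// and the remaining time are estimated below.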
			var eta time.Duration // Realistically will never remain uninited
			if done := binary.BigEndian.Uint64(key[:8]); done > 0 {
				var (
					left  = math.MaxUint64 - binary.BigEndian.Uint64(key[:8])
					speed = done/uint64(time.Since(pstart)/time.Millisecond+1) + 1 // +1 to avoid division by zero
				)
				eta = time.Duration(left/speed) * time.Millisecond
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Pruning state data", "nodes", count, "size", size,
					"elapsed", common.PrettyDuration(time.Since(pstart)), "eta", common.PrettyDuration(eta))
				logged = time.Now()
			}
			// Recreate the iterator after every batch commit in order
			// to allow the underlying compactor to delete the entries.
			if batch.ValueSize() >= ethdb.IdealBatchSize {
				batch.Write()
				batch.Reset()

				iter.Release()
				iter = maindb.NewIterator(nil, key)
			}
		}
	}
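	// Flush any leftover deletions that never reached the ideal batch size.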
	if batch.ValueSize() > 0 {
		batch.Write()
		batch.Reset()
	}
	iter.Release()
	log.Info("Pruned state data", "nodes", count, "size", size, "elapsed", common.PrettyDuration(time.Since(pstart)))

	// Start compactions, will remove the deleted data from the disk immediately.
	// Note for small pruning, the compaction is skipped.
	if count >= rangeCompactionThreshold {
		cstart := time.Now()
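		// Compact the key space one nibble prefix at a time ([0x00, 0x10),
		// [0x10, 0x20), ...) to bound the amount of work per compaction call;
		// the final range is left open-ended to cover the rest of the key space.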
		for b := byte(0); b < byte(16); b++ {
			var (
				start = []byte{b << 4}
				end   = []byte{(b + 1) << 4}
			)
			if b == 15 {
				end = nil
			}
			log.Info("Compacting database", "range", fmt.Sprintf("%#x-%#x", start, end), "elapsed", common.PrettyDuration(time.Since(cstart)))
			if err := maindb.Compact(start, end); err != nil {
				log.Error("Database compaction failed", "error", err)
				return err
			}
		}
		log.Info("Database compaction finished", "elapsed", common.PrettyDuration(time.Since(cstart)))
	}
	log.Info("State pruning successful", "pruned", size, "elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}
// Prune deletes all historical state nodes except the nodes belonging to the
// specified state version. If the user doesn't specify the state version, the
// bottom-most snapshot diff layer is used as the target.
func (p *Pruner) Prune(root common.Hash) error {
	// If the state bloom filter was already committed previously, reuse it for
	// pruning instead of generating a new one. This is mandatory because a part
	// of the state may already have been deleted, so the recovery procedure is
	// necessary.
	_, stateBloomRoot, err := findBloomFilter(p.datadir)
	if err != nil {
		return err
	}
	if stateBloomRoot != (common.Hash{}) {
		return RecoverPruning(p.datadir, p.db, p.trieCachePath)
	}
	// If the target state root is not specified, use HEAD-127 as the target.
	// The reasons for picking it are:
	// - in most normal cases, the related state is available
	// - the probability of this layer being reorged is very low
	var layers []snapshot.Snapshot
	if root == (common.Hash{}) {
		// Retrieve all snapshot layers from the current HEAD.
		// In theory there are 128 diff layers + 1 disk layer present,
		// so 128 diff layers are expected to be returned.
		layers = p.snaptree.Snapshots(p.headHeader.Root, 128, true)
		if len(layers) != 128 {
			// Reject if fewer than 128 diff layers have accumulated. In most
			// normal cases this means there is no state associated with the
			// bottom-most diff layer.
			return fmt.Errorf("snapshot not old enough yet: need %d more blocks", 128-len(layers))
		}
		// Use the bottom-most diff layer as the target
		root = layers[len(layers)-1].Root()
	}
	// Ensure the root is really present. The weak assumption is that the
	// presence of the root indicates the presence of the entire trie.
	if blob := rawdb.ReadTrieNode(p.db, root); len(blob) == 0 {
		// The special case is for clique-based networks (rinkeby, goerli and
		// some other private networks): it's possible that two consecutive
		// blocks have the same root. In this case no snapshot difflayer is
		// created, so HEAD-127 may not be paired with the HEAD-127 layer.
		// Instead the paired layer is higher than the bottom-most diff layer.
		// Try to find the bottom-most snapshot layer with state available.
		//
		// Note HEAD and HEAD-1 are ignored. Usually the associated state is
		// available, but we don't want to use the topmost state as the
		// pruning target.
		var found bool
		for i := len(layers) - 2; i >= 2; i-- {
			if blob := rawdb.ReadTrieNode(p.db, layers[i].Root()); len(blob) != 0 {
				root = layers[i].Root()
				found = true
				log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)
				break
			}
		}
		if !found {
			if len(layers) > 0 {
				return errors.New("no snapshot paired state")
			}
			return fmt.Errorf("associated state[%x] is not present", root)
		}
	} else {
		if len(layers) > 0 {
			log.Info("Selecting bottom-most difflayer as the pruning target", "root", root, "height", p.headHeader.Number.Uint64()-127)
		} else {
			log.Info("Selecting user-specified state as the pruning target", "root", root)
		}
	}
	// Before starting the pruning, delete the clean trie cache first. This is
	// necessary, otherwise on the next restart we would hit the deleted state
	// root in the "clean cache" and the incomplete state would be picked up
	// for usage.
	deleteCleanTrieCache(p.trieCachePath)

	// All the state roots of the middle layers should be forcibly pruned,
	// otherwise dangling state will be left behind.
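	// Snapshots returns the layers ordered from the head downwards, so every
	// layer encountered before reaching the target root sits above it and is
	// collected as a middle root.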
	middleRoots := make(map[common.Hash]struct{})
	for _, layer := range layers {
		if layer.Root() == root {
			break
		}
		middleRoots[layer.Root()] = struct{}{}
	}
	// Traverse the target state, re-construct the whole state trie and
	// commit to the given bloom filter.
	start := time.Now()
	if err := snapshot.GenerateTrie(p.snaptree, root, p.db, p.stateBloom); err != nil {
		return err
	}
	// Traverse the genesis, put all genesis state entries into the
	// bloom filter too.
	if err := extractGenesis(p.db, p.stateBloom); err != nil {
		return err
	}
	filterName := bloomFilterName(p.datadir, root)

	log.Info("Writing state bloom to disk", "name", filterName)
	if err := p.stateBloom.Commit(filterName, filterName+stateBloomFileTempSuffix); err != nil {
		return err
	}
	log.Info("State bloom filter committed", "name", filterName)

	if err := prune(p.db, p.stateBloom, middleRoots, start); err != nil {
		return err
	}
	// Pruning is done, now drop the "useless" layers from the snapshot.
	// Firstly, flush the target layer to disk. After that, all diff layers
	// below the target will be merged into the disk layer.
	if err := p.snaptree.Cap(root, 0); err != nil {
		return err
	}
	// Secondly, flush the snapshot journal to disk. All diff layers above the
	// target layer are dropped silently. Eventually the entire snapshot tree
	// is converted into a single disk layer with the pruning target as the root.
	if _, err := p.snaptree.Journal(root); err != nil {
		return err
	}
	// Delete the state bloom; it marks that the entire pruning procedure is
	// finished. If any crash or manual exit happens before this,
	// `RecoverPruning` will pick it up on the next restart to redo all
	// the work.
	os.RemoveAll(filterName)
	return nil
}
// RecoverPruning will resume the pruning procedure during the system restart.
// This function is used in the following case: the user tries to prune state
// data, but the system was interrupted midway because of a crash or manual
// kill. In this case, if the bloom filter for filtering active state has
// already been constructed, the pruning can be resumed. What's more, if the
// bloom filter is constructed, the pruning **has to be resumed**, otherwise a
// lot of dangling nodes may be left on disk.
func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) error {
	stateBloomPath, stateBloomRoot, err := findBloomFilter(datadir)
	if err != nil {
		return err
	}
	if stateBloomPath == "" {
		return nil // nothing to recover
	}
	headHeader, err := getHeadHeader(db)
	if err != nil {
		return err
	}
	// Initialize the snapshot tree in recovery mode to handle this special case:
	// - The user runs the `prune-state` command multiple times
	// - None of these `prune-state` runs finished (e.g. interrupted manually)
	// - The state bloom filter is already generated and a part of the state is
	//   deleted, so resuming the pruning here is mandatory
	// - The state HEAD is already rewound because of multiple incomplete `prune-state` runs
	// In this case, even if the state HEAD does not exactly match the snapshot,
	// it's still feasible to recover the pruning correctly.
	snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headHeader.Root, false, false, true)
	if err != nil {
		return err // The relevant snapshot(s) might not exist
	}
	stateBloom, err := NewStateBloomFromDisk(stateBloomPath)
	if err != nil {
		return err
	}
	log.Info("Loaded state bloom filter", "path", stateBloomPath)

	// Before starting the pruning, delete the clean trie cache first. This is
	// necessary, otherwise on the next restart we would hit the deleted state
	// root in the "clean cache" and the incomplete state would be picked up
	// for usage.
	deleteCleanTrieCache(trieCachePath)

	// All the state roots of the middle layers should be forcibly pruned,
	// otherwise dangling state will be left behind.
	var (
		found       bool
		layers      = snaptree.Snapshots(headHeader.Root, 128, true)
		middleRoots = make(map[common.Hash]struct{})
	)
	for _, layer := range layers {
		if layer.Root() == stateBloomRoot {
			found = true
			break
		}
		middleRoots[layer.Root()] = struct{}{}
	}
	if !found {
		log.Error("Pruning target state is not existent")
		return errors.New("non-existent target state")
	}
	if err := prune(db, stateBloom, middleRoots, time.Now()); err != nil {
		return err
	}
	// Pruning is done, now drop the "useless" layers from the snapshot.
	// Firstly, flush the target layer to disk. After that, all diff layers
	// below the target will be merged into the disk layer.
	if err := snaptree.Cap(stateBloomRoot, 0); err != nil {
		return err
	}
	// Secondly, flush the snapshot journal to disk. All diff layers above the
	// target are dropped silently. Eventually the entire snapshot tree is
	// converted into a single disk layer with the pruning target as the root.
	if _, err := snaptree.Journal(stateBloomRoot); err != nil {
		return err
	}
	// Delete the state bloom; it marks that the entire pruning procedure is
	// finished. If any crash or manual exit happens before this,
	// `RecoverPruning` will pick it up on the next restart to redo all
	// the work.
	os.RemoveAll(stateBloomPath)
	return nil
}
// extractGenesis loads the genesis state and commits all the state entries
// into the given bloom filter.
func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
	genesisHash := rawdb.ReadCanonicalHash(db, 0)
	if genesisHash == (common.Hash{}) {
		return errors.New("missing genesis hash")
	}
	genesis := rawdb.ReadBlock(db, genesisHash, 0)
	if genesis == nil {
		return errors.New("missing genesis block")
	}
	t, err := trie.NewSecure(genesis.Root(), trie.NewDatabase(db))
	if err != nil {
		return err
	}
	accIter := t.NodeIterator(nil)
	for accIter.Next(true) {
		hash := accIter.Hash()

		// Embedded nodes don't have a hash.
		if hash != (common.Hash{}) {
			stateBloom.Put(hash.Bytes(), nil)
		}
		// If it's a leaf node, we're touching an account; dig into the
		// storage trie further.
		if accIter.Leaf() {
			var acc state.Account
			if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil {
				return err
			}
			if acc.Root != emptyRoot {
				storageTrie, err := trie.NewSecure(acc.Root, trie.NewDatabase(db))
				if err != nil {
					return err
				}
				storageIter := storageTrie.NodeIterator(nil)
				for storageIter.Next(true) {
					hash := storageIter.Hash()
					if hash != (common.Hash{}) {
						stateBloom.Put(hash.Bytes(), nil)
					}
				}
				if storageIter.Error() != nil {
					return storageIter.Error()
				}
			}
			if !bytes.Equal(acc.CodeHash, emptyCode) {
				stateBloom.Put(acc.CodeHash, nil)
			}
		}
	}
	return accIter.Error()
}
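// bloomFilterName assembles the on-disk filename of a committed state bloom:
// "<datadir>/statebloom.<root>.bf.gz", where <root> is the hex-encoded state root.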
func bloomFilterName(datadir string, hash common.Hash) string {
	return filepath.Join(datadir, fmt.Sprintf("%s.%s.%s", stateBloomFilePrefix, hash.Hex(), stateBloomFileSuffix))
}
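// isBloomFilter reports whether the given file looks like a committed state
// bloom and, if so, returns the state root parsed from its filename.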
func isBloomFilter(filename string) (bool, common.Hash) {
	filename = filepath.Base(filename)
	if strings.HasPrefix(filename, stateBloomFilePrefix) && strings.HasSuffix(filename, stateBloomFileSuffix) {
		return true, common.HexToHash(filename[len(stateBloomFilePrefix)+1 : len(filename)-len(stateBloomFileSuffix)-1])
	}
	return false, common.Hash{}
}
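// findBloomFilter walks the data directory looking for a previously committed
// state bloom and returns its path and the state root it was generated for.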
func findBloomFilter(datadir string) (string, common.Hash, error) {
	var (
		stateBloomPath string
		stateBloomRoot common.Hash
	)
	if err := filepath.Walk(datadir, func(path string, info os.FileInfo, err error) error {
		if info != nil && !info.IsDir() {
			ok, root := isBloomFilter(path)
			if ok {
				stateBloomPath = path
				stateBloomRoot = root
			}
		}
		return nil
	}); err != nil {
		return "", common.Hash{}, err
	}
	return stateBloomPath, stateBloomRoot, nil
}
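// getHeadHeader resolves the current head block header from the database.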
func getHeadHeader(db ethdb.Database) (*types.Header, error) {
	headHeaderHash := rawdb.ReadHeadBlockHash(db)
	if headHeaderHash == (common.Hash{}) {
		return nil, errors.New("empty head block hash")
	}
	headHeaderNumber := rawdb.ReadHeaderNumber(db, headHeaderHash)
	if headHeaderNumber == nil {
		return nil, errors.New("empty head block number")
	}
	headHeader := rawdb.ReadHeader(db, headHeaderHash, *headHeaderNumber)
	if headHeader == nil {
		return nil, errors.New("empty head header")
	}
	return headHeader, nil
}
const warningLog = `

WARNING!

The clean trie cache is not found. Please delete it by yourself after the
pruning. Remember: don't start Geth without deleting the clean trie cache,
otherwise the entire database may be damaged!

Check the command description "geth snapshot prune-state --help" for more details.
`
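// deleteCleanTrieCache removes the persisted clean trie cache at the given
// path so that a deleted state root cannot be served from it after a restart.
// If the cache file is not found, only a warning is printed instead.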
func deleteCleanTrieCache(path string) {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		log.Warn(warningLog)
		return
	}
	os.RemoveAll(path)
	log.Info("Deleted trie clean cache", "path", path)
}