snapshot.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package snapshot implements a journalled, dynamic state dump.
package snapshot

import (
	"bytes"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/trie"
)

var (
	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)

	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)

	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)

	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)

	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))

	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
	snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)

	snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
	snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)

	snapshotBloomAccountTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
	snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
	snapshotBloomAccountMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)

	snapshotBloomStorageTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
	snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
	snapshotBloomStorageMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)

	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
	// layer had been invalidated due to the chain progressing forward far enough
	// to not maintain the layer's original state.
	ErrSnapshotStale = errors.New("snapshot stale")

	// ErrNotCoveredYet is returned from data accessors if the underlying snapshot
	// is being generated currently and the requested data item is not yet in the
	// range of accounts covered.
	ErrNotCoveredYet = errors.New("not covered yet")

	// errSnapshotCycle is returned if a snapshot is attempted to be inserted
	// that forms a cycle in the snapshot tree.
	errSnapshotCycle = errors.New("snapshot cycle")
)

// Snapshot represents the functionality supported by a snapshot storage layer.
type Snapshot interface {
	// Root returns the root hash for which this snapshot was made.
	Root() common.Hash

	// Account directly retrieves the account associated with a particular hash in
	// the snapshot slim data format.
	Account(hash common.Hash) (*Account, error)

	// AccountRLP directly retrieves the account RLP associated with a particular
	// hash in the snapshot slim data format.
	AccountRLP(hash common.Hash) ([]byte, error)

	// Storage directly retrieves the storage data associated with a particular hash,
	// within a particular account.
	Storage(accountHash, storageHash common.Hash) ([]byte, error)
}
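
// exampleReadAccount is an illustrative sketch added for documentation and is
// not part of the original file: it shows how a caller might read an account
// through the public Snapshot interface and react to the package's sentinel
// errors. The function name is hypothetical.
func exampleReadAccount(snap Snapshot, accountHash common.Hash) (*Account, error) {
	acc, err := snap.Account(accountHash)
	switch err {
	case nil:
		// A nil account with a nil error means the account does not exist
		return acc, nil
	case ErrNotCoveredYet:
		// The background generator has not reached this account yet; callers
		// typically fall back to a direct trie lookup
		return nil, err
	case ErrSnapshotStale:
		// The layer was flattened away; re-resolve the snapshot and retry
		return nil, err
	default:
		return nil, err
	}
}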

// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
type snapshot interface {
	Snapshot

	// Parent returns the subsequent layer of a snapshot, or nil if the base was
	// reached.
	//
	// Note, the method is an internal helper to avoid type switching between the
	// disk and diff layers. There is no locking involved.
	Parent() snapshot

	// Update creates a new layer on top of the existing snapshot diff tree with
	// the specified data items.
	//
	// Note, the maps are retained by the method to avoid copying everything.
	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer

	// Journal commits an entire diff hierarchy to disk into a single journal entry.
	// This is meant to be used during shutdown to persist the snapshot without
	// flattening everything down (bad for reorgs).
	Journal(buffer *bytes.Buffer) (common.Hash, error)

	// Stale returns whether this layer has become stale (was flattened across) or
	// if it's still live.
	Stale() bool

	// AccountIterator creates an account iterator over an arbitrary layer.
	AccountIterator(seek common.Hash) AccountIterator

	// StorageIterator creates a storage iterator over an arbitrary layer.
	StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
}

// Tree is an Ethereum state snapshot tree. It consists of one persistent base
// layer backed by a key-value store, on top of which arbitrarily many in-memory
// diff layers are stacked. The memory diffs can form a tree with branching, but
// the disk layer is singleton and common to all. If a reorg goes deeper than the
// disk layer, everything needs to be deleted.
//
// The goal of a state snapshot is twofold: to allow direct access to account and
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type Tree struct {
	diskdb ethdb.KeyValueStore      // Persistent database to store the snapshot
	triedb *trie.Database           // In-memory cache to access the trie through
	cache  int                      // Megabytes permitted to use for read caches
	layers map[common.Hash]snapshot // Collection of all known layers
	lock   sync.RWMutex
}

// New attempts to load an already existing snapshot from a persistent key-value
// store (with a number of memory layers from a journal), ensuring that the head
// of the snapshot matches the expected one.
//
// If the snapshot is missing or inconsistent, the entirety is deleted and will
// be reconstructed from scratch based on the tries in the key-value store, on a
// background thread.
func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool) *Tree {
	// Create a new, empty snapshot tree
	snap := &Tree{
		diskdb: diskdb,
		triedb: triedb,
		cache:  cache,
		layers: make(map[common.Hash]snapshot),
	}
	if !async {
		defer snap.waitBuild()
	}
	// Attempt to load a previously persisted snapshot and rebuild one if failed
	head, err := loadSnapshot(diskdb, triedb, cache, root)
	if err != nil {
		log.Warn("Failed to load snapshot, regenerating", "err", err)
		snap.Rebuild(root)
		return snap
	}
	// Existing snapshot loaded, seed all the layers
	for head != nil {
		snap.layers[head.Root()] = head
		head = head.Parent()
	}
	return snap
}
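
// exampleOpenTree is an illustrative sketch, not part of the original file:
// it shows how a node might construct the snapshot tree on startup. The
// function name, the 256MB cache size and the async flag value are assumptions
// for illustration, not defaults mandated by this package.
func exampleOpenTree(diskdb ethdb.KeyValueStore, triedb *trie.Database, headRoot common.Hash) *Tree {
	// Load the persisted snapshot (plus any journalled diff layers) if present;
	// on failure New schedules a background rebuild from the state trie.
	return New(diskdb, triedb, 256, headRoot, true)
}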

// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
// to be used by tests to ensure we're testing what we believe we are.
func (t *Tree) waitBuild() {
	// Find the rebuild termination channel
	var done chan struct{}

	t.lock.RLock()
	for _, layer := range t.layers {
		if layer, ok := layer.(*diskLayer); ok {
			done = layer.genPending
			break
		}
	}
	t.lock.RUnlock()

	// Wait until the snapshot is generated
	if done != nil {
		<-done
	}
}

// Snapshot retrieves a snapshot belonging to the given block root, or nil if no
// snapshot is maintained for that block.
func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.layers[blockRoot]
}

// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
	// special case that can only happen for Clique networks where empty blocks
	// don't modify the state (0 block subsidy).
	//
	// Although we could silently ignore this internally, it should be the caller's
	// responsibility to avoid even attempting to insert such a snapshot.
	if blockRoot == parentRoot {
		return errSnapshotCycle
	}
	// Generate a new snapshot on top of the parent. Check for the parent before
	// asserting the internal interface, otherwise a missing parent panics instead
	// of returning an error.
	parent := t.Snapshot(parentRoot)
	if parent == nil {
		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
	}
	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)

	// Save the new snapshot for later
	t.lock.Lock()
	defer t.lock.Unlock()

	t.layers[snap.root] = snap
	return nil
}
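
// exampleApplyBlock is an illustrative sketch, not part of the original file:
// after executing a block, a caller assembles the destructed account set plus
// the dirty account and storage maps (all keyed by hashes) and links the
// resulting diff layer above the parent state root. The function name and the
// empty maps are placeholders for illustration.
func exampleApplyBlock(t *Tree, parentRoot, blockRoot common.Hash) error {
	destructs := make(map[common.Hash]struct{})             // accounts deleted in this block
	accounts := make(map[common.Hash][]byte)                // account hash -> slim-RLP account
	storage := make(map[common.Hash]map[common.Hash][]byte) // account hash -> slot hash -> value
	return t.Update(blockRoot, parentRoot, destructs, accounts, storage)
}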

// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers is crossed. All layers beyond the permitted number
// are flattened downwards.
func (t *Tree) Cap(root common.Hash, layers int) error {
	// Retrieve the head snapshot to cap from
	snap := t.Snapshot(root)
	if snap == nil {
		return fmt.Errorf("snapshot [%#x] missing", root)
	}
	diff, ok := snap.(*diffLayer)
	if !ok {
		return fmt.Errorf("snapshot [%#x] is disk layer", root)
	}
	// If the generator is still running, use a more aggressive cap
	diff.origin.lock.RLock()
	if diff.origin.genMarker != nil && layers > 8 {
		layers = 8
	}
	diff.origin.lock.RUnlock()

	// Run the internal capping and discard all stale layers
	t.lock.Lock()
	defer t.lock.Unlock()

	// Flattening the bottom-most diff layer requires special casing since there's
	// no child to rewire to the grandparent. In that case we can fake a temporary
	// child for the capping and then remove it.
	var persisted *diskLayer

	switch layers {
	case 0:
		// If full commit was requested, flatten the diffs and merge onto disk
		diff.lock.RLock()
		base := diffToDisk(diff.flatten().(*diffLayer))
		diff.lock.RUnlock()

		// Replace the entire snapshot tree with the flat base
		t.layers = map[common.Hash]snapshot{base.root: base}
		return nil

	case 1:
		// If full flattening was requested, flatten the diffs but only merge if the
		// memory limit was reached
		var (
			bottom *diffLayer
			base   *diskLayer
		)
		diff.lock.RLock()
		bottom = diff.flatten().(*diffLayer)
		if bottom.memory >= aggregatorMemoryLimit {
			base = diffToDisk(bottom)
		}
		diff.lock.RUnlock()

		// If all diff layers were removed, replace the entire snapshot tree
		if base != nil {
			t.layers = map[common.Hash]snapshot{base.root: base}
			return nil
		}
		// Merge the new aggregated layer into the snapshot tree, clean stales below
		t.layers[bottom.root] = bottom

	default:
		// Many layers requested to be retained, cap normally
		persisted = t.cap(diff, layers)
	}
	// Remove any layer that is stale or links into a stale layer
	children := make(map[common.Hash][]common.Hash)
	for root, snap := range t.layers {
		if diff, ok := snap.(*diffLayer); ok {
			parent := diff.parent.Root()
			children[parent] = append(children[parent], root)
		}
	}
	var remove func(root common.Hash)
	remove = func(root common.Hash) {
		delete(t.layers, root)
		for _, child := range children[root] {
			remove(child)
		}
		delete(children, root)
	}
	for root, snap := range t.layers {
		if snap.Stale() {
			remove(root)
		}
	}
	// If the disk layer was modified, regenerate all the cumulative blooms
	if persisted != nil {
		var rebloom func(root common.Hash)
		rebloom = func(root common.Hash) {
			if diff, ok := t.layers[root].(*diffLayer); ok {
				diff.rebloom(persisted)
			}
			for _, child := range children[root] {
				rebloom(child)
			}
		}
		rebloom(persisted.root)
	}
	return nil
}
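
// exampleCapDepth is an illustrative sketch, not part of the original file:
// after inserting a new diff layer, the caller bounds the in-memory tree so
// only a fixed number of diff layers stays above the disk layer. The value of
// 128 retained layers is an assumption for illustration, not a constant
// defined by this package.
func exampleCapDepth(t *Tree, headRoot common.Hash) error {
	return t.Cap(headRoot, 128)
}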

// cap traverses downwards the diff tree until the number of allowed layers is
// crossed. All diffs beyond the permitted number are flattened downwards. If the
// layer limit is reached, memory cap is also enforced (but not before).
//
// The method returns the new disk layer if diffs were persisted into it.
func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
	// Dive until we run out of layers or reach the persistent database
	for ; layers > 2; layers-- {
		// If we still have diff layers below, continue down
		if parent, ok := diff.parent.(*diffLayer); ok {
			diff = parent
		} else {
			// Diff stack too shallow, return without modifications
			return nil
		}
	}
	// We're out of layers, flatten anything below, stopping if it's the disk or if
	// the memory limit is not yet exceeded.
	switch parent := diff.parent.(type) {
	case *diskLayer:
		return nil

	case *diffLayer:
		// Flatten the parent into the grandparent. The flattening internally obtains a
		// write lock on grandparent.
		flattened := parent.flatten().(*diffLayer)
		t.layers[flattened.root] = flattened

		diff.lock.Lock()
		defer diff.lock.Unlock()

		diff.parent = flattened
		if flattened.memory < aggregatorMemoryLimit {
			// Accumulator layer is smaller than the limit, so we can abort, unless
			// there's a snapshot being generated currently. In that case, the trie
			// will move from underneath the generator so we **must** merge all the
			// partial data down into the snapshot and restart the generation.
			if flattened.parent.(*diskLayer).genAbort == nil {
				return nil
			}
		}
	default:
		panic(fmt.Sprintf("unknown data layer: %T", parent))
	}
	// If the bottom-most layer is larger than our memory cap, persist to disk
	bottom := diff.parent.(*diffLayer)

	bottom.lock.RLock()
	base := diffToDisk(bottom)
	bottom.lock.RUnlock()

	t.layers[base.root] = base
	diff.parent = base
	return base
}

// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
// it. The method will panic if called onto a non-bottom-most diff layer.
func diffToDisk(bottom *diffLayer) *diskLayer {
	var (
		base  = bottom.parent.(*diskLayer)
		batch = base.diskdb.NewBatch()
		stats *generatorStats
	)
	// If the disk layer is running a snapshot generator, abort it
	if base.genAbort != nil {
		abort := make(chan *generatorStats)
		base.genAbort <- abort
		stats = <-abort
	}
	// Start by temporarily deleting the current snapshot block marker. This
	// ensures that in the case of a crash, the entire snapshot is invalidated.
	rawdb.DeleteSnapshotRoot(batch)

	// Mark the original base as stale as we're going to create a new wrapper
	base.lock.Lock()
	if base.stale {
		panic("parent disk layer is stale") // we've committed into the same base from two children, boo
	}
	base.stale = true
	base.lock.Unlock()

	// Destroy all the destructed accounts from the database
	for hash := range bottom.destructSet {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
			continue
		}
		// Remove all storage slots
		rawdb.DeleteAccountSnapshot(batch, hash)
		base.cache.Set(hash[:], nil)

		it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
		for it.Next() {
			if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
				batch.Delete(key)
				base.cache.Del(key[1:])

				snapshotFlushStorageItemMeter.Mark(1)
			}
		}
		it.Release()
	}
	// Push all updated accounts into the database
	for hash, data := range bottom.accountData {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
			continue
		}
		// Push the account to disk
		rawdb.WriteAccountSnapshot(batch, hash, data)
		base.cache.Set(hash[:], data)
		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))

		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Crit("Failed to write account snapshot", "err", err)
			}
			batch.Reset()
		}
		snapshotFlushAccountItemMeter.Mark(1)
		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
	}
	// Push all the storage slots into the database
	for accountHash, storage := range bottom.storageData {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 {
			continue
		}
		// Generation might be mid-account, track that case too
		midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength])

		for storageHash, data := range storage {
			// Skip any slot not covered yet by the snapshot
			if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 {
				continue
			}
			if len(data) > 0 {
				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
				base.cache.Set(append(accountHash[:], storageHash[:]...), data)
				snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
			} else {
				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
				base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
			}
			snapshotFlushStorageItemMeter.Mark(1)
			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Crit("Failed to write storage snapshot", "err", err)
			}
			batch.Reset()
		}
	}
	// Update the snapshot block marker and write any remainder data
	rawdb.WriteSnapshotRoot(batch, bottom.root)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write leftover snapshot", "err", err)
	}
	res := &diskLayer{
		root:       bottom.root,
		cache:      base.cache,
		diskdb:     base.diskdb,
		triedb:     base.triedb,
		genMarker:  base.genMarker,
		genPending: base.genPending,
	}
	// If snapshot generation hasn't finished yet, port over the generator state
	// and continue where the previous round left off.
	//
	// Note, the `base.genAbort` comparison is not used normally, it's checked
	// to allow the tests to play with the marker without triggering this path.
	if base.genMarker != nil && base.genAbort != nil {
		res.genMarker = base.genMarker
		res.genAbort = make(chan chan *generatorStats)
		go res.generate(stats)
	}
	return res
}
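
// exampleBatchedWrites is an illustrative sketch, not part of the original
// file, of the batching idiom diffToDisk relies on: accumulate writes in an
// ethdb batch and flush whenever it grows past ethdb.IdealBatchSize, so a
// large layer never turns into one oversized database write. The raw keys are
// placeholders for illustration; the real snapshot entries go through the
// prefixed rawdb helpers instead.
func exampleBatchedWrites(db ethdb.KeyValueStore, items map[common.Hash][]byte) error {
	batch := db.NewBatch()
	for hash, data := range items {
		if err := batch.Put(hash[:], data); err != nil {
			return err
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	return batch.Write() // flush whatever is left below the threshold
}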

// Journal commits an entire diff hierarchy to disk into a single journal entry.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs).
//
// The method returns the root hash of the base layer that needs to be persisted
// to disk as a trie too to allow continuing any pending generation op.
func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
	// Retrieve the head snapshot to journal from
	snap := t.Snapshot(root)
	if snap == nil {
		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
	}
	// Run the journaling
	t.lock.Lock()
	defer t.lock.Unlock()

	journal := new(bytes.Buffer)
	base, err := snap.(snapshot).Journal(journal)
	if err != nil {
		return common.Hash{}, err
	}
	// Store the journal into the database and return
	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
	return base, nil
}
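
// exampleShutdown is an illustrative sketch, not part of the original file:
// on shutdown the diff tree is journalled rather than flattened, so a reorg
// after restart can still branch off an older layer. The function name is
// hypothetical.
func exampleShutdown(t *Tree, headRoot common.Hash) (common.Hash, error) {
	baseRoot, err := t.Journal(headRoot)
	if err != nil {
		return common.Hash{}, err
	}
	// baseRoot identifies the disk layer; its trie must also be persisted so a
	// restarted node can resume any half-finished generation.
	return baseRoot, nil
}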

// Rebuild wipes all available snapshot data from the persistent database and
// discards all caches and diff layers. Afterwards, it starts a new snapshot
// generator with the given root hash.
func (t *Tree) Rebuild(root common.Hash) {
	t.lock.Lock()
	defer t.lock.Unlock()

	// Track whether there's a wipe currently running and keep it alive if so
	var wiper chan struct{}

	// Iterate over and mark all layers stale
	for _, layer := range t.layers {
		switch layer := layer.(type) {
		case *diskLayer:
			// If the base layer is generating, abort it and save
			if layer.genAbort != nil {
				abort := make(chan *generatorStats)
				layer.genAbort <- abort

				if stats := <-abort; stats != nil {
					wiper = stats.wiping
				}
			}
			// Layer should be inactive now, mark it as stale
			layer.lock.Lock()
			layer.stale = true
			layer.lock.Unlock()

		case *diffLayer:
			// If the layer is a simple diff, simply mark as stale
			layer.lock.Lock()
			atomic.StoreUint32(&layer.stale, 1)
			layer.lock.Unlock()

		default:
			panic(fmt.Sprintf("unknown layer type: %T", layer))
		}
	}
	// Start generating a new snapshot from scratch on a background thread. The
	// generator will run a wiper first if there's not one running right now.
	log.Info("Rebuilding state snapshot")
	t.layers = map[common.Hash]snapshot{
		root: generateSnapshot(t.diskdb, t.triedb, t.cache, root, wiper),
	}
}

// AccountIterator creates a new account iterator for the specified root hash and
// seeks to a starting account hash.
func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
	return newFastAccountIterator(t, root, seek)
}

// StorageIterator creates a new storage iterator for the specified root hash and
// account. The iterator will be moved to the specified start position.
func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
	return newFastStorageIterator(t, root, account, seek)
}
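
// exampleIterateAccounts is an illustrative sketch, not part of the original
// file: it walks all accounts of a snapshot in hash order, the cheap sorted
// iteration the package doc advertises as a sync aid. The function name is
// hypothetical.
func exampleIterateAccounts(t *Tree, root common.Hash) error {
	it, err := t.AccountIterator(root, common.Hash{})
	if err != nil {
		return err
	}
	defer it.Release()

	for it.Next() {
		_ = it.Hash()    // account hash in iteration order
		_ = it.Account() // slim-RLP encoded account data
	}
	return it.Error()
}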