// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package snapshot implements a journalled, dynamic state dump.
package snapshot

import (
	"errors"
	"fmt"
	"os"
	"sync"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
)

var (
	snapshotCleanHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/hit", nil)
	snapshotCleanMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/miss", nil)
	snapshotCleanReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/read", nil)
	snapshotCleanWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/write", nil)

	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
	// layer had been invalidated due to the chain progressing forward far enough
	// to not maintain the layer's original state.
	ErrSnapshotStale = errors.New("snapshot stale")

	// errSnapshotCycle is returned if a snapshot is attempted to be inserted
	// that forms a cycle in the snapshot tree.
	errSnapshotCycle = errors.New("snapshot cycle")
)

// Snapshot represents the functionality supported by a snapshot storage layer.
type Snapshot interface {
	// Root returns the root hash for which this snapshot was made.
	Root() common.Hash

	// Account directly retrieves the account associated with a particular hash in
	// the snapshot slim data format.
	Account(hash common.Hash) (*Account, error)

	// AccountRLP directly retrieves the account RLP associated with a particular
	// hash in the snapshot slim data format.
	AccountRLP(hash common.Hash) ([]byte, error)

	// Storage directly retrieves the storage data associated with a particular hash,
	// within a particular account.
	Storage(accountHash, storageHash common.Hash) ([]byte, error)
}

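// A minimal read-path sketch; the snap handle and the hashes below are
// illustrative assumptions, not names defined in this file:
//
//	acc, err := snap.Account(accountHash) // decoded slim-format account
//	if err != nil {
//		// e.g. ErrSnapshotStale if the layer was flattened away meanwhile
//	}
//	blob, err := snap.Storage(accountHash, slotHash) // raw storage slot bytes
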
// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
type snapshot interface {
	Snapshot

	// Update creates a new layer on top of the existing snapshot diff tree with
	// the specified data items. Note, the maps are retained by the method to avoid
	// copying everything.
	Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer

	// Journal commits an entire diff hierarchy to disk into a single journal file.
	// This is meant to be used during shutdown to persist the snapshot without
	// flattening everything down (bad for reorgs).
	Journal() error

	// Stale returns whether this layer has become stale (was flattened across) or
	// if it's still live.
	Stale() bool
}

// Tree is an Ethereum state snapshot tree. It consists of one persistent base
// layer backed by a key-value store, on top of which arbitrarily many in-memory
// diff layers are stacked. The memory diffs can form a tree with branching, but
// the disk layer is singleton and common to all. If a reorg goes deeper than the
// disk layer, everything needs to be deleted.
//
// The goal of a state snapshot is twofold: to allow direct access to account and
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type Tree struct {
	layers map[common.Hash]snapshot // Collection of all known layers // TODO(karalabe): split Clique overlaps
	lock   sync.RWMutex
}

// New attempts to load an already existing snapshot from a persistent key-value
// store (with a number of memory layers from a journal), ensuring that the head
// of the snapshot matches the expected one.
//
// If the snapshot is missing or inconsistent, the entirety is deleted and will
// be reconstructed from scratch based on the tries in the key-value store.
func New(db ethdb.KeyValueStore, journal string, root common.Hash) (*Tree, error) {
	// Attempt to load a previously persisted snapshot
	head, err := loadSnapshot(db, journal, root)
	if err != nil {
		log.Warn("Failed to load snapshot, regenerating", "err", err)
		if head, err = generateSnapshot(db, journal, root); err != nil {
			return nil, err
		}
	}
	// Existing snapshot loaded or one regenerated, seed all the layers
	snap := &Tree{
		layers: make(map[common.Hash]snapshot),
	}
	for head != nil {
		snap.layers[head.Root()] = head

		switch self := head.(type) {
		case *diffLayer:
			head = self.parent
		case *diskLayer:
			head = nil
		default:
			panic(fmt.Sprintf("unknown data layer: %T", self))
		}
	}
	return snap, nil
}

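// A hedged construction sketch; db is any ethdb.KeyValueStore, and the journal
// path and head root are illustrative assumptions:
//
//	tree, err := New(db, "/tmp/snapshot.journal", headRoot)
//	if err != nil {
//		log.Crit("Snapshot unavailable", "err", err) // regeneration also failed
//	}
//	head := tree.Snapshot(headRoot) // the freshly seeded top layer
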
// Snapshot retrieves a snapshot belonging to the given block root, or nil if no
// snapshot is maintained for that block.
func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.layers[blockRoot]
}

// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
	// special case that can only happen for Clique networks where empty blocks
	// don't modify the state (0 block subsidy).
	//
	// Although we could silently ignore this internally, it should be the caller's
	// responsibility to avoid even attempting to insert such a snapshot.
	if blockRoot == parentRoot {
		return errSnapshotCycle
	}
	// Generate a new snapshot on top of the parent. Note, the nil check must run
	// before the type assertion, since asserting a nil interface would panic.
	parent := t.Snapshot(parentRoot)
	if parent == nil {
		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
	}
	snap := parent.(snapshot).Update(blockRoot, accounts, storage)

	// Save the new snapshot for later
	t.lock.Lock()
	defer t.lock.Unlock()

	t.layers[snap.root] = snap
	return nil
}

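// A minimal update sketch; the roots and RLP blobs below are illustrative
// assumptions, and deletions are expressed as nil values in the maps:
//
//	accounts := map[common.Hash][]byte{accountHash: slimAccountRLP}
//	storage := map[common.Hash]map[common.Hash][]byte{
//		accountHash: {slotHash: slotValueRLP},
//	}
//	if err := tree.Update(childRoot, parentRoot, accounts, storage); err != nil {
//		// errSnapshotCycle or a missing parent layer
//	}
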
// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers are crossed. All layers beyond the permitted number
// are flattened downwards.
func (t *Tree) Cap(root common.Hash, layers int, memory uint64) error {
	// Retrieve the head snapshot to cap from
	snap := t.Snapshot(root)
	if snap == nil {
		return fmt.Errorf("snapshot [%#x] missing", root)
	}
	diff, ok := snap.(*diffLayer)
	if !ok {
		return fmt.Errorf("snapshot [%#x] is disk layer", root)
	}
	// Run the internal capping and discard all stale layers
	t.lock.Lock()
	defer t.lock.Unlock()

	// Flattening the bottom-most diff layer requires special casing since there's
	// no child to rewire to the grandparent. In that case we can fake a temporary
	// child for the capping and then remove it.
	switch layers {
	case 0:
		// If full commit was requested, flatten the diffs and merge onto disk
		diff.lock.RLock()
		base := diffToDisk(diff.flatten().(*diffLayer))
		diff.lock.RUnlock()

		// Replace the entire snapshot tree with the flat base
		t.layers = map[common.Hash]snapshot{base.root: base}
		return nil

	case 1:
		// If full flattening was requested, flatten the diffs but only merge if the
		// memory limit was reached
		var (
			bottom *diffLayer
			base   *diskLayer
		)
		diff.lock.RLock()
		bottom = diff.flatten().(*diffLayer)
		if bottom.memory >= memory {
			base = diffToDisk(bottom)
		}
		diff.lock.RUnlock()

		// If all diff layers were removed, replace the entire snapshot tree
		if base != nil {
			t.layers = map[common.Hash]snapshot{base.root: base}
			return nil
		}
		// Merge the new aggregated layer into the snapshot tree, clean stales below
		t.layers[bottom.root] = bottom

	default:
		// Many layers requested to be retained, cap normally
		t.cap(diff, layers, memory)
	}
	// Remove any layer that is stale or links into a stale layer
	children := make(map[common.Hash][]common.Hash)
	for root, snap := range t.layers {
		if diff, ok := snap.(*diffLayer); ok {
			parent := diff.parent.Root()
			children[parent] = append(children[parent], root)
		}
	}
	var remove func(root common.Hash)
	remove = func(root common.Hash) {
		delete(t.layers, root)
		for _, child := range children[root] {
			remove(child)
		}
		delete(children, root)
	}
	for root, snap := range t.layers {
		if snap.Stale() {
			remove(root)
		}
	}
	return nil
}

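// A hedged maintenance sketch, e.g. after importing a block; the 128-layer and
// 4 MiB figures are illustrative assumptions, not tuned values:
//
//	if err := tree.Cap(headRoot, 128, 4*1024*1024); err != nil {
//		log.Error("Failed to cap snapshot tree", "err", err)
//	}
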
// cap traverses downwards the diff tree until the number of allowed layers are
// crossed. All diffs beyond the permitted number are flattened downwards. If the
// layer limit is reached, memory cap is also enforced (but not before).
func (t *Tree) cap(diff *diffLayer, layers int, memory uint64) {
	// Dive until we run out of layers or reach the persistent database
	for ; layers > 2; layers-- {
		// If we still have diff layers below, continue down
		if parent, ok := diff.parent.(*diffLayer); ok {
			diff = parent
		} else {
			// Diff stack too shallow, return without modifications
			return
		}
	}
	// We're out of layers, flatten anything below, stopping if it's the disk or if
	// the memory limit is not yet exceeded.
	switch parent := diff.parent.(type) {
	case *diskLayer:
		return

	case *diffLayer:
		// Flatten the parent into the grandparent. The flattening internally obtains a
		// write lock on grandparent.
		flattened := parent.flatten().(*diffLayer)
		t.layers[flattened.root] = flattened

		diff.lock.Lock()
		defer diff.lock.Unlock()

		diff.parent = flattened
		if flattened.memory < memory {
			return
		}
	default:
		panic(fmt.Sprintf("unknown data layer: %T", parent))
	}
	// If the bottom-most layer is larger than our memory cap, persist to disk
	bottom := diff.parent.(*diffLayer)

	bottom.lock.RLock()
	base := diffToDisk(bottom)
	bottom.lock.RUnlock()

	t.layers[base.root] = base
	diff.parent = base
}

// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
// it. The method will panic if called onto a non-bottom-most diff layer.
func diffToDisk(bottom *diffLayer) *diskLayer {
	var (
		base  = bottom.parent.(*diskLayer)
		batch = base.db.NewBatch()
	)
	// Start by temporarily deleting the current snapshot block marker. This
	// ensures that in the case of a crash, the entire snapshot is invalidated.
	rawdb.DeleteSnapshotRoot(batch)

	// Mark the original base as stale as we're going to create a new wrapper
	base.lock.Lock()
	if base.stale {
		panic("parent disk layer is stale") // we've committed into the same base from two children, boo
	}
	base.stale = true
	base.lock.Unlock()

	// Push all the accounts into the database
	for hash, data := range bottom.accountData {
		if len(data) > 0 {
			// Account was updated, push to disk
			rawdb.WriteAccountSnapshot(batch, hash, data)
			base.cache.Set(hash[:], data)

			if batch.ValueSize() > ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					log.Crit("Failed to write account snapshot", "err", err)
				}
				batch.Reset()
			}
		} else {
			// Account was deleted, remove all storage slots too
			rawdb.DeleteAccountSnapshot(batch, hash)
			base.cache.Set(hash[:], nil)

			it := rawdb.IterateStorageSnapshots(base.db, hash)
			for it.Next() {
				if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
					batch.Delete(key)
					base.cache.Del(key[1:])
				}
			}
			it.Release()
		}
	}
	// Push all the storage slots into the database
	for accountHash, storage := range bottom.storageData {
		for storageHash, data := range storage {
			if len(data) > 0 {
				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
				base.cache.Set(append(accountHash[:], storageHash[:]...), data)
			} else {
				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
				base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
			}
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Crit("Failed to write storage snapshot", "err", err)
			}
			batch.Reset()
		}
	}
	// Update the snapshot block marker and write any remainder data
	rawdb.WriteSnapshotRoot(batch, bottom.root)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write leftover snapshot", "err", err)
	}
	return &diskLayer{
		root:    bottom.root,
		cache:   base.cache,
		db:      base.db,
		journal: base.journal,
	}
}

// Journal commits an entire diff hierarchy to disk into a single journal file.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs).
func (t *Tree) Journal(blockRoot common.Hash) error {
	// Retrieve the head snapshot to journal from
	snap := t.Snapshot(blockRoot)
	if snap == nil {
		return fmt.Errorf("snapshot [%#x] missing", blockRoot)
	}
	// Run the journaling
	t.lock.Lock()
	defer t.lock.Unlock()

	return snap.(snapshot).Journal()
}

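// A hedged shutdown sketch; headRoot is an illustrative assumption. Journaling
// preserves the diff layers so reorgs survive a restart:
//
//	if err := tree.Journal(headRoot); err != nil {
//		log.Error("Failed to journal snapshot", "err", err)
//	}
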
// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
func loadSnapshot(db ethdb.KeyValueStore, journal string, root common.Hash) (snapshot, error) {
	// Retrieve the root of the persisted snapshot, failing if no snapshot is
	// present in the database (or it crashed mid-update).
	baseRoot := rawdb.ReadSnapshotRoot(db)
	if baseRoot == (common.Hash{}) {
		return nil, errors.New("missing or corrupted snapshot")
	}
	base := &diskLayer{
		journal: journal,
		db:      db,
		cache:   fastcache.New(512 * 1024 * 1024),
		root:    baseRoot,
	}
	// Load all the snapshot diffs from the journal, failing if their chain is broken
	// or does not lead from the disk snapshot to the specified head.
	if _, err := os.Stat(journal); os.IsNotExist(err) {
		// Journal doesn't exist, don't worry if it's not supposed to
		if baseRoot != root {
			return nil, fmt.Errorf("snapshot journal missing, head doesn't match snapshot: have %#x, want %#x", baseRoot, root)
		}
		return base, nil
	}
	file, err := os.Open(journal)
	if err != nil {
		return nil, err
	}
	defer file.Close() // the journal is fully consumed by loadDiffLayer below

	snap, err := loadDiffLayer(base, rlp.NewStream(file, 0))
	if err != nil {
		return nil, err
	}
	// Entire snapshot journal loaded, sanity check the head and return
	if head := snap.Root(); head != root {
		return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
	}
	return snap, nil
}