snapshot.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package snapshot implements a journalled, dynamic state dump.
package snapshot

import (
	"errors"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/allegro/bigcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
)

var (
	snapshotCleanHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/hit", nil)
	snapshotCleanMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/miss", nil)
	snapshotCleanReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/read", nil)
	snapshotCleanWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/write", nil)

	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
	// layer had been invalidated due to the chain progressing forward far enough
	// to not maintain the layer's original state.
	ErrSnapshotStale = errors.New("snapshot stale")
)

// Snapshot represents the functionality supported by a snapshot storage layer.
type Snapshot interface {
	// Info returns the block number and root hash for which this snapshot was made.
	Info() (uint64, common.Hash)

	// Account directly retrieves the account associated with a particular hash in
	// the snapshot slim data format.
	Account(hash common.Hash) (*Account, error)

	// AccountRLP directly retrieves the account RLP associated with a particular
	// hash in the snapshot slim data format.
	AccountRLP(hash common.Hash) ([]byte, error)

	// Storage directly retrieves the storage data associated with a particular hash,
	// within a particular account.
	Storage(accountHash, storageHash common.Hash) ([]byte, error)
}

// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
type snapshot interface {
	Snapshot

	// Update creates a new layer on top of the existing snapshot diff tree with
	// the specified data items. Note, the maps are retained by the method to avoid
	// copying everything.
	Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer

	// Journal commits an entire diff hierarchy to disk into a single journal file.
	// This is meant to be used during shutdown to persist the snapshot without
	// flattening everything down (bad for reorgs).
	Journal() error
}

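// readAccount is an illustrative sketch, not part of the original API: it shows
// how a caller resolves a layer from the tree and reads a single account in the
// slim snapshot format. The state root and account hash are assumed to be
// supplied by the caller (e.g. taken from the current head block).
func readAccount(tree *SnapshotTree, stateRoot, accountHash common.Hash) (*Account, error) {
	snap := tree.Snapshot(stateRoot)
	if snap == nil {
		return nil, fmt.Errorf("no snapshot maintained for root %#x", stateRoot)
	}
	return snap.Account(accountHash)
}
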
// SnapshotTree is an Ethereum state snapshot tree. It consists of one persistent
// base layer backed by a key-value store, on top of which arbitrarily many in-
// memory diff layers are topped. The memory diffs can form a tree with branching,
// but the disk layer is singleton and common to all. If a reorg goes deeper than
// the disk layer, everything needs to be deleted.
//
// The goal of a state snapshot is twofold: to allow direct access to account and
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type SnapshotTree struct {
	layers map[common.Hash]snapshot // Collection of all known layers // TODO(karalabe): split Clique overlaps
	lock   sync.RWMutex
}

// New attempts to load an already existing snapshot from a persistent key-value
// store (with a number of memory layers from a journal), ensuring that the head
// of the snapshot matches the expected one.
//
// If the snapshot is missing or inconsistent, the entirety is deleted and will
// be reconstructed from scratch based on the tries in the key-value store.
func New(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot common.Hash) (*SnapshotTree, error) {
	// Attempt to load a previously persisted snapshot
	head, err := loadSnapshot(db, journal, headNumber, headRoot)
	if err != nil {
		log.Warn("Failed to load snapshot, regenerating", "err", err)
		if head, err = generateSnapshot(db, journal, headNumber, headRoot); err != nil {
			return nil, err
		}
	}
	// Existing snapshot loaded or one regenerated, seed all the layers
	snap := &SnapshotTree{
		layers: make(map[common.Hash]snapshot),
	}
	for head != nil {
		_, root := head.Info()
		snap.layers[root] = head

		switch self := head.(type) {
		case *diffLayer:
			head = self.parent
		case *diskLayer:
			head = nil
		default:
			panic(fmt.Sprintf("unknown data layer: %T", self))
		}
	}
	return snap, nil
}

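// openTreeSketch is an illustrative (non-original) startup call: a node would
// hand New its chain key-value store, the path of the journal file written by a
// previous Journal call, and the number and state root of the current head
// block. The journal file name used here is purely a placeholder assumption.
func openTreeSketch(db ethdb.KeyValueStore, headNumber uint64, headRoot common.Hash) (*SnapshotTree, error) {
	return New(db, "snapshot.journal", headNumber, headRoot)
}
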
// Snapshot retrieves a snapshot belonging to the given block root, or nil if no
// snapshot is maintained for that block.
func (st *SnapshotTree) Snapshot(blockRoot common.Hash) Snapshot {
	st.lock.RLock()
	defer st.lock.RUnlock()

	return st.layers[blockRoot]
}

// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
func (st *SnapshotTree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	// Generate a new snapshot on top of the parent, checking for the parent
	// before type asserting so a missing layer reports an error instead of
	// panicking on the nil interface.
	parent := st.Snapshot(parentRoot)
	if parent == nil {
		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
	}
	snap := parent.(snapshot).Update(blockRoot, accounts, storage)

	// Save the new snapshot for later
	st.lock.Lock()
	defer st.lock.Unlock()

	st.layers[snap.root] = snap
	return nil
}

// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers is crossed. All layers beyond the permitted number
// are flattened downwards.
func (st *SnapshotTree) Cap(blockRoot common.Hash, layers int, memory uint64) error {
	// Retrieve the head snapshot to cap from
	var snap snapshot
	if s := st.Snapshot(blockRoot); s == nil {
		return fmt.Errorf("snapshot [%#x] missing", blockRoot)
	} else {
		snap = s.(snapshot)
	}
	diff, ok := snap.(*diffLayer)
	if !ok {
		return fmt.Errorf("snapshot [%#x] is disk layer", blockRoot)
	}
	// Run the internal capping and discard all stale layers
	st.lock.Lock()
	defer st.lock.Unlock()

	var (
		diskNumber uint64
		diffNumber uint64
	)
	// Flattening the bottom-most diff layer requires special casing since there's
	// no child to rewire to the grandparent. In that case we can fake a temporary
	// child for the capping and then remove it.
	switch layers {
	case 0:
		// If full commit was requested, flatten the diffs and merge onto disk
		diff.lock.RLock()
		base := diffToDisk(diff.flatten().(*diffLayer))
		diff.lock.RUnlock()

		st.layers[base.root] = base
		diskNumber, diffNumber = base.number, base.number

	case 1:
		// If full flattening was requested, flatten the diffs but only merge if the
		// memory limit was reached
		var (
			bottom *diffLayer
			base   *diskLayer
		)
		diff.lock.RLock()
		bottom = diff.flatten().(*diffLayer)
		if bottom.memory >= memory {
			base = diffToDisk(bottom)
		}
		diff.lock.RUnlock()

		if base != nil {
			st.layers[base.root] = base
			diskNumber, diffNumber = base.number, base.number
		} else {
			st.layers[bottom.root] = bottom
			diskNumber, diffNumber = bottom.parent.(*diskLayer).number, bottom.number
		}

	default:
		diskNumber, diffNumber = st.cap(diff, layers, memory)
	}
	// Discard any layer that now sits below the bottom-most diff (the disk layer excepted)
	for root, snap := range st.layers {
		if number, _ := snap.Info(); number != diskNumber && number < diffNumber {
			delete(st.layers, root)
		}
	}
	return nil
}

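// applyBlockSketch is an illustrative (non-original) per-block flow: after a
// block has been processed, its state diff is linked on top of the parent's
// layer and the tree is capped so only a bounded number of diff layers (and a
// bounded amount of aggregated diff memory) stays in memory. The layer count
// and memory limit used here are placeholder assumptions, not values mandated
// by this package.
func applyBlockSketch(tree *SnapshotTree, blockRoot, parentRoot common.Hash,
	accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	if err := tree.Update(blockRoot, parentRoot, accounts, storage); err != nil {
		return err
	}
	return tree.Cap(blockRoot, 128, 4*1024*1024)
}
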
// cap traverses downwards the diff tree until the number of allowed layers is
// crossed. All diffs beyond the permitted number are flattened downwards. If
// the layer limit is reached, memory cap is also enforced (but not before). The
// block numbers for the disk layer and first diff layer are returned for GC.
func (st *SnapshotTree) cap(diff *diffLayer, layers int, memory uint64) (uint64, uint64) {
	// Dive until we run out of layers or reach the persistent database
	for ; layers > 2; layers-- {
		// If we still have diff layers below, continue down
		if parent, ok := diff.parent.(*diffLayer); ok {
			diff = parent
		} else {
			// Diff stack too shallow, return block numbers without modifications
			return diff.parent.(*diskLayer).number, diff.number
		}
	}
	// We're out of layers, flatten anything below, stopping if it's the disk or if
	// the memory limit is not yet exceeded.
	switch parent := diff.parent.(type) {
	case *diskLayer:
		return parent.number, diff.number

	case *diffLayer:
		// Flatten the parent into the grandparent. The flattening internally obtains a
		// write lock on grandparent.
		flattened := parent.flatten().(*diffLayer)
		st.layers[flattened.root] = flattened

		diff.lock.Lock()
		defer diff.lock.Unlock()

		diff.parent = flattened
		if flattened.memory < memory {
			diskNumber, _ := flattened.parent.Info()
			return diskNumber, flattened.number
		}
	default:
		panic(fmt.Sprintf("unknown data layer: %T", parent))
	}
	// If the bottom-most layer is larger than our memory cap, persist to disk
	bottom := diff.parent.(*diffLayer)

	bottom.lock.RLock()
	base := diffToDisk(bottom)
	bottom.lock.RUnlock()

	st.layers[base.root] = base
	diff.parent = base
	return base.number, diff.number
}

// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
// it. The method will panic if called onto a non-bottom-most diff layer.
func diffToDisk(bottom *diffLayer) *diskLayer {
	var (
		base  = bottom.parent.(*diskLayer)
		batch = base.db.NewBatch()
	)
	// Start by temporarily deleting the current snapshot block marker. This
	// ensures that in the case of a crash, the entire snapshot is invalidated.
	rawdb.DeleteSnapshotBlock(batch)

	// Mark the original base as stale as we're going to create a new wrapper
	base.lock.Lock()
	if base.stale {
		panic("parent disk layer is stale") // we've committed into the same base from two children, boo
	}
	base.stale = true
	base.lock.Unlock()

	// Push all the accounts into the database
	for hash, data := range bottom.accountData {
		if len(data) > 0 {
			// Account was updated, push to disk
			rawdb.WriteAccountSnapshot(batch, hash, data)
			base.cache.Set(string(hash[:]), data)

			if batch.ValueSize() > ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					log.Crit("Failed to write account snapshot", "err", err)
				}
				batch.Reset()
			}
		} else {
			// Account was deleted, remove all storage slots too
			rawdb.DeleteAccountSnapshot(batch, hash)
			base.cache.Set(string(hash[:]), nil)

			it := rawdb.IterateStorageSnapshots(base.db, hash)
			for it.Next() {
				if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
					batch.Delete(key)
					base.cache.Delete(string(key[1:]))
				}
			}
			it.Release()
		}
	}
	// Push all the storage slots into the database
	for accountHash, storage := range bottom.storageData {
		for storageHash, data := range storage {
			if len(data) > 0 {
				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
				base.cache.Set(string(append(accountHash[:], storageHash[:]...)), data)
			} else {
				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
				base.cache.Set(string(append(accountHash[:], storageHash[:]...)), nil)
			}
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Crit("Failed to write storage snapshot", "err", err)
			}
			batch.Reset()
		}
	}
	// Update the snapshot block marker and write any remainder data
	rawdb.WriteSnapshotBlock(batch, bottom.number, bottom.root)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write leftover snapshot", "err", err)
	}
	return &diskLayer{
		root:    bottom.root,
		number:  bottom.number,
		cache:   base.cache,
		db:      base.db,
		journal: base.journal,
	}
}

// Journal commits an entire diff hierarchy to disk into a single journal file.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs).
func (st *SnapshotTree) Journal(blockRoot common.Hash) error {
	// Retrieve the head snapshot to journal from
	var snap snapshot
	if s := st.Snapshot(blockRoot); s == nil {
		return fmt.Errorf("snapshot [%#x] missing", blockRoot)
	} else {
		snap = s.(snapshot)
	}
	// Run the journaling
	st.lock.Lock()
	defer st.lock.Unlock()

	return snap.Journal()
}

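// persistOnShutdown is an illustrative (non-original) shutdown hook: journalling
// the diff hierarchy beneath the current head root lets New rebuild the same
// layers on the next start instead of regenerating the snapshot from the state
// tries. The head root is assumed to be tracked by the caller.
func persistOnShutdown(tree *SnapshotTree, headRoot common.Hash) {
	if err := tree.Journal(headRoot); err != nil {
		log.Warn("Failed to journal state snapshot", "root", headRoot, "err", err)
	}
}
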
// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
func loadSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot common.Hash) (snapshot, error) {
	// Retrieve the block number and hash of the snapshot, failing if no snapshot
	// is present in the database (or crashed mid-update).
	number, root := rawdb.ReadSnapshotBlock(db)
	if root == (common.Hash{}) {
		return nil, errors.New("missing or corrupted snapshot")
	}
	cache, _ := bigcache.NewBigCache(bigcache.Config{ // TODO(karalabe): dedup
		Shards:             1024,
		LifeWindow:         time.Hour,
		MaxEntriesInWindow: 512 * 1024,
		MaxEntrySize:       512,
		HardMaxCacheSize:   512,
	})
	base := &diskLayer{
		journal: journal,
		db:      db,
		cache:   cache,
		number:  number,
		root:    root,
	}
	// Load all the snapshot diffs from the journal, failing if their chain is broken
	// or does not lead from the disk snapshot to the specified head.
	if _, err := os.Stat(journal); os.IsNotExist(err) {
		// Journal doesn't exist, don't worry if it's not supposed to
		if number != headNumber || root != headRoot {
			return nil, fmt.Errorf("snapshot journal missing, head doesn't match snapshot: #%d [%#x] vs. #%d [%#x]",
				headNumber, headRoot, number, root)
		}
		return base, nil
	}
	file, err := os.Open(journal)
	if err != nil {
		return nil, err
	}
	snapshot, err := loadDiffLayer(base, rlp.NewStream(file, 0))
	if err != nil {
		return nil, err
	}
	// Entire snapshot journal loaded, sanity check the head and return
	number, root = snapshot.Info()
	if number != headNumber || root != headRoot {
		return nil, fmt.Errorf("head doesn't match snapshot: #%d [%#x] vs. #%d [%#x]",
			headNumber, headRoot, number, root)
	}
	return snapshot, nil
}