snapshot.go 9.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249
  1. // Copyright 2019 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. // Package snapshot implements a journalled, dynamic state dump.
  17. package snapshot
  18. import (
  19. "errors"
  20. "fmt"
  21. "os"
  22. "sync"
  23. "time"
  24. "github.com/allegro/bigcache"
  25. "github.com/ethereum/go-ethereum/common"
  26. "github.com/ethereum/go-ethereum/core/rawdb"
  27. "github.com/ethereum/go-ethereum/ethdb"
  28. "github.com/ethereum/go-ethereum/log"
  29. "github.com/ethereum/go-ethereum/metrics"
  30. "github.com/ethereum/go-ethereum/rlp"
  31. )
var (
	// Meters gauging the performance of the snapshot clean cache (hits, misses
	// and the volume of data read/written through it).
	snapshotCleanHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/hit", nil)
	snapshotCleanMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/miss", nil)
	snapshotCleanReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/read", nil)
	snapshotCleanWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/write", nil)

	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
	// layer had been invalidated due to the chain progressing forward far enough
	// to not maintain the layer's original state.
	ErrSnapshotStale = errors.New("snapshot stale")
)
// Snapshot represents the functionality supported by a snapshot storage layer.
type Snapshot interface {
	// Info returns the block number and root hash for which this snapshot was made.
	Info() (uint64, common.Hash)

	// Account directly retrieves the account associated with a particular hash in
	// the snapshot slim data format.
	Account(hash common.Hash) (*Account, error)

	// AccountRLP directly retrieves the account RLP associated with a particular
	// hash in the snapshot slim data format.
	AccountRLP(hash common.Hash) ([]byte, error)

	// Storage directly retrieves the storage data associated with a particular hash,
	// within a particular account.
	Storage(accountHash, storageHash common.Hash) ([]byte, error)
}
// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
type snapshot interface {
	Snapshot

	// Update creates a new layer on top of the existing snapshot diff tree with
	// the specified data items. Note, the maps are retained by the method to avoid
	// copying everything.
	Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer

	// Cap traverses downwards the diff tree until the number of allowed layers are
	// crossed. All diffs beyond the permitted number are flattened downwards. The
	// block numbers for the disk layer and first diff layer are returned for GC.
	Cap(layers int, memory uint64) (uint64, uint64)

	// Journal commits an entire diff hierarchy to disk into a single journal file.
	// This is meant to be used during shutdown to persist the snapshot without
	// flattening everything down (bad for reorgs).
	Journal() error
}
// SnapshotTree is an Ethereum state snapshot tree. It consists of one persistent
// base layer backed by a key-value store, on top of which arbitrarily many in-
// memory diff layers are topped. The memory diffs can form a tree with branching,
// but the disk layer is singleton and common to all. If a reorg goes deeper than
// the disk layer, everything needs to be deleted.
//
// The goal of a state snapshot is twofold: to allow direct access to account and
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type SnapshotTree struct {
	layers map[common.Hash]snapshot // Collection of all known layers, keyed by state root // TODO(karalabe): split Clique overlaps
	lock   sync.RWMutex             // Guards concurrent access to the layers map
}
  86. // New attempts to load an already existing snapshot from a persistent key-value
  87. // store (with a number of memory layers from a journal), ensuring that the head
  88. // of the snapshot matches the expected one.
  89. //
  90. // If the snapshot is missing or inconsistent, the entirety is deleted and will
  91. // be reconstructed from scratch based on the tries in the key-value store.
  92. func New(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot common.Hash) (*SnapshotTree, error) {
  93. // Attempt to load a previously persisted snapshot
  94. head, err := loadSnapshot(db, journal, headNumber, headRoot)
  95. if err != nil {
  96. log.Warn("Failed to load snapshot, regenerating", "err", err)
  97. if head, err = generateSnapshot(db, journal, headNumber, headRoot); err != nil {
  98. return nil, err
  99. }
  100. }
  101. // Existing snapshot loaded or one regenerated, seed all the layers
  102. snap := &SnapshotTree{
  103. layers: make(map[common.Hash]snapshot),
  104. }
  105. for head != nil {
  106. _, root := head.Info()
  107. snap.layers[root] = head
  108. switch self := head.(type) {
  109. case *diffLayer:
  110. head = self.parent
  111. case *diskLayer:
  112. head = nil
  113. default:
  114. panic(fmt.Sprintf("unknown data layer: %T", self))
  115. }
  116. }
  117. return snap, nil
  118. }
  119. // Snapshot retrieves a snapshot belonging to the given block root, or nil if no
  120. // snapshot is maintained for that block.
  121. func (st *SnapshotTree) Snapshot(blockRoot common.Hash) Snapshot {
  122. st.lock.RLock()
  123. defer st.lock.RUnlock()
  124. return st.layers[blockRoot]
  125. }
  126. // Update adds a new snapshot into the tree, if that can be linked to an existing
  127. // old parent. It is disallowed to insert a disk layer (the origin of all).
  128. func (st *SnapshotTree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
  129. // Generate a new snapshot on top of the parent
  130. parent := st.Snapshot(parentRoot).(snapshot)
  131. if parent == nil {
  132. return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
  133. }
  134. snap := parent.Update(blockRoot, accounts, storage)
  135. // Save the new snapshot for later
  136. st.lock.Lock()
  137. defer st.lock.Unlock()
  138. st.layers[snap.root] = snap
  139. return nil
  140. }
  141. // Cap traverses downwards the snapshot tree from a head block hash until the
  142. // number of allowed layers are crossed. All layers beyond the permitted number
  143. // are flattened downwards.
  144. func (st *SnapshotTree) Cap(blockRoot common.Hash, layers int, memory uint64) error {
  145. // Retrieve the head snapshot to cap from
  146. snap := st.Snapshot(blockRoot).(snapshot)
  147. if snap == nil {
  148. return fmt.Errorf("snapshot [%#x] missing", blockRoot)
  149. }
  150. // Run the internal capping and discard all stale layers
  151. st.lock.Lock()
  152. defer st.lock.Unlock()
  153. diskNumber, diffNumber := snap.Cap(layers, memory)
  154. for root, snap := range st.layers {
  155. if number, _ := snap.Info(); number != diskNumber && number < diffNumber {
  156. delete(st.layers, root)
  157. }
  158. }
  159. return nil
  160. }
  161. // Journal commits an entire diff hierarchy to disk into a single journal file.
  162. // This is meant to be used during shutdown to persist the snapshot without
  163. // flattening everything down (bad for reorgs).
  164. func (st *SnapshotTree) Journal(blockRoot common.Hash) error {
  165. // Retrieve the head snapshot to journal from
  166. snap := st.Snapshot(blockRoot).(snapshot)
  167. if snap == nil {
  168. return fmt.Errorf("snapshot [%#x] missing", blockRoot)
  169. }
  170. // Run the journaling
  171. st.lock.Lock()
  172. defer st.lock.Unlock()
  173. return snap.Journal()
  174. }
  175. // loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
  176. func loadSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot common.Hash) (snapshot, error) {
  177. // Retrieve the block number and hash of the snapshot, failing if no snapshot
  178. // is present in the database (or crashed mid-update).
  179. number, root := rawdb.ReadSnapshotBlock(db)
  180. if root == (common.Hash{}) {
  181. return nil, errors.New("missing or corrupted snapshot")
  182. }
  183. cache, _ := bigcache.NewBigCache(bigcache.Config{ // TODO(karalabe): dedup
  184. Shards: 1024,
  185. LifeWindow: time.Hour,
  186. MaxEntriesInWindow: 512 * 1024,
  187. MaxEntrySize: 512,
  188. HardMaxCacheSize: 512,
  189. })
  190. base := &diskLayer{
  191. journal: journal,
  192. db: db,
  193. cache: cache,
  194. number: number,
  195. root: root,
  196. }
  197. // Load all the snapshot diffs from the journal, failing if their chain is broken
  198. // or does not lead from the disk snapshot to the specified head.
  199. if _, err := os.Stat(journal); os.IsNotExist(err) {
  200. // Journal doesn't exist, don't worry if it's not supposed to
  201. if number != headNumber || root != headRoot {
  202. return nil, fmt.Errorf("snapshot journal missing, head doesn't match snapshot: #%d [%#x] vs. #%d [%#x]",
  203. headNumber, headRoot, number, root)
  204. }
  205. return base, nil
  206. }
  207. file, err := os.Open(journal)
  208. if err != nil {
  209. return nil, err
  210. }
  211. snapshot, err := loadDiffLayer(base, rlp.NewStream(file, 0))
  212. if err != nil {
  213. return nil, err
  214. }
  215. // Entire snapshot journal loaded, sanity check the head and return
  216. // Journal doesn't exist, don't worry if it's not supposed to
  217. number, root = snapshot.Info()
  218. if number != headNumber || root != headRoot {
  219. return nil, fmt.Errorf("head doesn't match snapshot: #%d [%#x] vs. #%d [%#x]",
  220. headNumber, headRoot, number, root)
  221. }
  222. return snapshot, nil
  223. }