// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
    "fmt"
    "sort"
    "sync"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/rlp"
)

// diffLayer represents a collection of modifications made to a state snapshot
// after running a block on top. It contains one sorted list for the account trie
// and one sorted list for each storage trie.
//
// The goal of a diff layer is to act as a journal, tracking recent modifications
// made to the state, that have not yet graduated into a semi-immutable state.
type diffLayer struct {
    parent snapshot    // Parent snapshot modified by this one, never nil
    memory uint64      // Approximate guess as to how much memory we use
    number uint64      // Block number to which this snapshot diff belongs
    root   common.Hash // Root hash to which this snapshot diff belongs
    stale  bool        // Signals that the layer became stale (state progressed)

    accountList []common.Hash                          // List of account hashes for iteration. If it exists, it's sorted, otherwise it's nil
    accountData map[common.Hash][]byte                 // Keyed accounts for direct retrieval (nil means deleted)
    storageList map[common.Hash][]common.Hash          // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
    storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval, one per account (nil means deleted)

    lock sync.RWMutex
}
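
// Layering sketch (descriptive comment added by the editor, not original): each
// diffLayer wraps a parent snapshot, so the recent chain of blocks typically
// forms a stack of the shape
//
//     persistent disk layer  <-  diff #N-2  <-  diff #N-1  <-  diff #N (head)
//
// where reads fall through towards the disk layer and Cap/flatten push data
// in the opposite direction.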

// newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
// level persistent database or a hierarchical diff already.
func newDiffLayer(parent snapshot, number uint64, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
    // Create the new layer with some pre-allocated data segments
    dl := &diffLayer{
        parent:      parent,
        number:      number,
        root:        root,
        accountData: accounts,
        storageData: storage,
    }
    // Determine mem size
    for _, data := range accounts {
        dl.memory += uint64(len(data))
    }
    // Fill the storage hashes and sort them for the iterator
    dl.storageList = make(map[common.Hash][]common.Hash)

    for accountHash, slots := range storage {
        // If the slots are nil, sanity check that it's a deleted account
        if slots == nil {
            // Ensure that the account was just marked as deleted
            if account, ok := accounts[accountHash]; account != nil || !ok {
                panic(fmt.Sprintf("storage in %#x nil, but account conflicts (%#x, exists: %v)", accountHash, account, ok))
            }
            // Everything ok, store the deletion mark and continue
            dl.storageList[accountHash] = nil
            continue
        }
        // Storage slots are not nil so entire contract was not deleted, ensure the
        // account was just updated.
        if account, ok := accounts[accountHash]; account == nil || !ok {
            log.Error(fmt.Sprintf("storage in %#x exists, but account nil (exists: %v)", accountHash, ok))
        }
        // Determine mem size
        for _, data := range slots {
            dl.memory += uint64(len(data))
        }
    }
    dl.memory += uint64(len(dl.storageList) * common.HashLength)

    return dl
}
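
// Illustrative usage (a sketch with placeholder names, not taken from this
// file): assuming `base` is an existing disk layer and the account/storage
// maps were gathered while executing a block, a diff is stacked on top of it
// and subsequent blocks extend the chain via Update:
//
//     diff := newDiffLayer(base, base.number+1, blockRoot, accounts, storage)
//     next := diff.Update(childRoot, childAccounts, childStorage)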

// Info returns the block number and root hash for which this snapshot was made.
func (dl *diffLayer) Info() (uint64, common.Hash) {
    return dl.number, dl.root
}

// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
    data, err := dl.AccountRLP(hash)
    if err != nil {
        return nil, err
    }
    if len(data) == 0 { // can be both nil and []byte{}
        return nil, nil
    }
    account := new(Account)
    if err := rlp.DecodeBytes(data, account); err != nil {
        panic(err)
    }
    return account, nil
}

// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    // If the layer was flattened into, consider it invalid (any live reference to
    // the original should be marked as unusable).
    if dl.stale {
        return nil, ErrSnapshotStale
    }
    // If the account is known locally, return it. Note, a nil account means it was
    // deleted, and is a different notion than an unknown account!
    if data, ok := dl.accountData[hash]; ok {
        return data, nil
    }
    // Account unknown to this diff, resolve from parent
    return dl.parent.AccountRLP(hash)
}
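
// Resolution order (descriptive sketch, not part of the original comments): a
// hash present in accountData is answered by this layer, with nil meaning the
// account was deleted; anything else recurses into the parent, bottoming out
// at the persistent disk layer:
//
//     data, err := dl.AccountRLP(hash) // may walk several diff layers downwards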

// Storage directly retrieves the storage data associated with a particular hash,
// within a particular account. If the slot is unknown to this diff, its parent
// is consulted.
func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    // If the layer was flattened into, consider it invalid (any live reference to
    // the original should be marked as unusable).
    if dl.stale {
        return nil, ErrSnapshotStale
    }
    // If the account is known locally, try to resolve the slot locally. Note, a nil
    // account means it was deleted, and is a different notion than an unknown account!
    if storage, ok := dl.storageData[accountHash]; ok {
        if storage == nil {
            return nil, nil
        }
        if data, ok := storage[storageHash]; ok {
            return data, nil
        }
    }
    // Account - or slot within - unknown to this diff, resolve from parent
    return dl.parent.Storage(accountHash, storageHash)
}

// Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items.
func (dl *diffLayer) Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
    return newDiffLayer(dl, dl.number+1, blockRoot, accounts, storage)
}

// Cap traverses downwards the diff tree until the number of allowed layers is
// crossed. All diffs beyond the permitted number are flattened downwards. If
// the layer limit is reached, memory cap is also enforced (but not before). The
// block numbers for the disk layer and first diff layer are returned for GC.
func (dl *diffLayer) Cap(layers int, memory uint64) (uint64, uint64) {
    // Dive until we run out of layers or reach the persistent database
    if layers > 2 {
        // If we still have diff layers below, recurse
        if parent, ok := dl.parent.(*diffLayer); ok {
            return parent.Cap(layers-1, memory)
        }
        // Diff stack too shallow, return block numbers without modifications
        return dl.parent.(*diskLayer).number, dl.number
    }
    // We're out of layers, flatten anything below, stopping if it's the disk or if
    // the memory limit is not yet exceeded.
    switch parent := dl.parent.(type) {
    case *diskLayer:
        return parent.number, dl.number

    case *diffLayer:
        // Flatten the parent into the grandparent. The flattening internally obtains a
        // write lock on grandparent.
        flattened := parent.flatten().(*diffLayer)

        dl.lock.Lock()
        defer dl.lock.Unlock()

        dl.parent = flattened
        if flattened.memory < memory {
            diskNumber, _ := flattened.parent.Info()
            return diskNumber, flattened.number
        }
    default:
        panic(fmt.Sprintf("unknown data layer: %T", parent))
    }
    // If the bottommost layer is larger than our memory cap, persist to disk
    var (
        parent = dl.parent.(*diffLayer)
        base   = parent.parent.(*diskLayer)
        batch  = base.db.NewBatch()
    )
    parent.lock.RLock()
    defer parent.lock.RUnlock()

    // Start by temporarily deleting the current snapshot block marker. This
    // ensures that in the case of a crash, the entire snapshot is invalidated.
    rawdb.DeleteSnapshotBlock(batch)

    // Mark the original base as stale as we're going to create a new wrapper
    base.lock.Lock()
    if base.stale {
        panic("parent disk layer is stale") // we've committed into the same base from two children, boo
    }
    base.stale = true
    base.lock.Unlock()

    // Push all the accounts into the database
    for hash, data := range parent.accountData {
        if len(data) > 0 {
            // Account was updated, push to disk
            rawdb.WriteAccountSnapshot(batch, hash, data)
            base.cache.Set(string(hash[:]), data)

            if batch.ValueSize() > ethdb.IdealBatchSize {
                if err := batch.Write(); err != nil {
                    log.Crit("Failed to write account snapshot", "err", err)
                }
                batch.Reset()
            }
        } else {
            // Account was deleted, remove all storage slots too
            rawdb.DeleteAccountSnapshot(batch, hash)
            base.cache.Set(string(hash[:]), nil)

            it := rawdb.IterateStorageSnapshots(base.db, hash)
            for it.Next() {
                if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
                    batch.Delete(key)
                    base.cache.Delete(string(key[1:]))
                }
            }
            it.Release()
        }
    }
    // Push all the storage slots into the database
    for accountHash, storage := range parent.storageData {
        for storageHash, data := range storage {
            if len(data) > 0 {
                rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
                base.cache.Set(string(append(accountHash[:], storageHash[:]...)), data)
            } else {
                rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
                base.cache.Set(string(append(accountHash[:], storageHash[:]...)), nil)
            }
        }
        if batch.ValueSize() > ethdb.IdealBatchSize {
            if err := batch.Write(); err != nil {
                log.Crit("Failed to write storage snapshot", "err", err)
            }
            batch.Reset()
        }
    }
    // Update the snapshot block marker and write any remainder data
    newBase := &diskLayer{
        root:    parent.root,
        number:  parent.number,
        cache:   base.cache,
        db:      base.db,
        journal: base.journal,
    }
    rawdb.WriteSnapshotBlock(batch, newBase.number, newBase.root)
    if err := batch.Write(); err != nil {
        log.Crit("Failed to write leftover snapshot", "err", err)
    }
    dl.parent = newBase

    return newBase.number, dl.number
}
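
// Hypothetical call site (a sketch with arbitrary limits, not taken from this
// repository): the owner of the layer stack would periodically cap the chain
// of diffs from the head layer and use the returned block numbers, which
// identify the disk layer and the bottom-most diff layer, to garbage collect
// journal data below them:
//
//     diskNumber, diffNumber := head.Cap(128, 4*1024*1024)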

// flatten pushes all data from this point downwards, flattening everything into
// a single diff at the bottom. Since usually the lowermost diff is the largest,
// the flattening builds up from there in reverse.
func (dl *diffLayer) flatten() snapshot {
    // If the parent is not diff, we're the first in line, return unmodified
    parent, ok := dl.parent.(*diffLayer)
    if !ok {
        return dl
    }
    // Parent is a diff, flatten it first (note, apart from weird corner cases,
    // flatten will realistically only ever merge 1 layer, so there's no need to
    // be smarter about grouping flattens together).
    parent = parent.flatten().(*diffLayer)

    parent.lock.Lock()
    defer parent.lock.Unlock()

    // Before actually writing all our data to the parent, first ensure that the
    // parent hasn't been 'corrupted' by someone else already flattening into it
    if parent.stale {
        panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
    }
    parent.stale = true

    // Overwrite all the updated accounts blindly, merge the sorted list
    for hash, data := range dl.accountData {
        parent.accountData[hash] = data
    }
    // Overwrite all the updated storage slots (individually)
    for accountHash, storage := range dl.storageData {
        // If storage didn't exist (or was deleted) in the parent; or if the storage
        // was freshly deleted in the child, overwrite blindly
        if parent.storageData[accountHash] == nil || storage == nil {
            parent.storageData[accountHash] = storage
            continue
        }
        // Storage exists in both parent and child, merge the slots
        comboData := parent.storageData[accountHash]
        for storageHash, data := range storage {
            comboData[storageHash] = data
        }
        parent.storageData[accountHash] = comboData
    }
    // Return the combo parent
    return &diffLayer{
        parent:      parent.parent,
        number:      dl.number,
        root:        dl.root,
        storageList: parent.storageList,
        storageData: parent.storageData,
        accountList: parent.accountList,
        accountData: parent.accountData,
        memory:      parent.memory + dl.memory,
    }
}
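
// Merge semantics (descriptive sketch with hypothetical values): during
// flattening the child's entries always win, and a nil entry keeps meaning
// deletion:
//
//     parent.accountData[h] = oldRLP // account updated in the parent layer
//     dl.accountData[h]     = nil    // then deleted in the child layer
//     // after flatten(): accountData[h] == nil, i.e. the account stays deleted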

// Journal commits an entire diff hierarchy to disk into a single journal file.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs).
func (dl *diffLayer) Journal() error {
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    writer, err := dl.journal()
    if err != nil {
        return err
    }
    writer.Close()
    return nil
}

// AccountList returns a sorted list of all accounts in this difflayer.
func (dl *diffLayer) AccountList() []common.Hash {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    if dl.accountList != nil {
        return dl.accountList
    }
    accountList := make([]common.Hash, len(dl.accountData))

    i := 0
    for k := range dl.accountData {
        accountList[i] = k
        i++
        // This would be a pretty good opportunity to also
        // calculate the size, if we want to
    }
    sort.Sort(hashes(accountList))
    dl.accountList = accountList

    return dl.accountList
}

// StorageList returns a sorted list of all storage slot hashes
// in this difflayer for the given account.
func (dl *diffLayer) StorageList(accountHash common.Hash) []common.Hash {
    dl.lock.Lock()
    defer dl.lock.Unlock()

    if dl.storageList[accountHash] != nil {
        return dl.storageList[accountHash]
    }
    accountStorageMap := dl.storageData[accountHash]
    accountStorageList := make([]common.Hash, len(accountStorageMap))

    i := 0
    for k := range accountStorageMap {
        accountStorageList[i] = k
        i++
        // This would be a pretty good opportunity to also
        // calculate the size, if we want to
    }
    sort.Sort(hashes(accountStorageList))
    dl.storageList[accountHash] = accountStorageList

    return accountStorageList
}
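
// Illustrative consumer (a sketch with placeholder names; the `bytes` package
// would need to be imported wherever this runs): the sorted lists returned
// above allow ordered iteration and binary searching for a starting slot:
//
//     list := dl.StorageList(accountHash)
//     idx := sort.Search(len(list), func(i int) bool {
//         return bytes.Compare(list[i][:], seek[:]) >= 0
//     })
//     for _, slot := range list[idx:] {
//         // visit slots >= seek in ascending hash order
//     }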