journal.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

// journalGenerator is a disk layer entry containing the generator progress marker.
type journalGenerator struct {
	Wiping bool // Whether the database was in progress of being wiped
	Done   bool // Whether the generator finished creating the snapshot

	Marker   []byte // Last account (and optionally storage slot) hash covered by the generator
	Accounts uint64 // Number of accounts indexed so far
	Slots    uint64 // Number of storage slots indexed so far
	Storage  uint64 // Total amount of account and storage slot data indexed, in bytes
}

// journalDestruct is an account deletion entry in a diffLayer's disk journal.
type journalDestruct struct {
	Hash common.Hash
}

// journalAccount is an account entry in a diffLayer's disk journal.
type journalAccount struct {
	Hash common.Hash
	Blob []byte
}

// journalStorage is an account's storage map in a diffLayer's disk journal.
type journalStorage struct {
	Hash common.Hash
	Keys []common.Hash
	Vals [][]byte
}
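
// For reference, the journal these entry types produce and consume is a single
// RLP stream: one journalGenerator for the disk layer, followed by one
// (root, []journalDestruct, []journalAccount, []journalStorage) tuple per diff
// layer, written bottom-up (oldest diff first). loadDiffLayer below replays
// these tuples until the stream hits io.EOF.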

// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) (snapshot, error) {
	// Retrieve the root of the persisted disk layer, failing if no snapshot is
	// present in the database (or it crashed mid-update).
	baseRoot := rawdb.ReadSnapshotRoot(diskdb)
	if baseRoot == (common.Hash{}) {
		return nil, errors.New("missing or corrupted snapshot")
	}
	base := &diskLayer{
		diskdb: diskdb,
		triedb: triedb,
		cache:  fastcache.New(cache * 1024 * 1024),
		root:   baseRoot,
	}
	// Retrieve the journal; it must exist, since even with zero diff layers it
	// records whether snapshot generation has finished or is still in progress.
	journal := rawdb.ReadSnapshotJournal(diskdb)
	if len(journal) == 0 {
		return nil, errors.New("missing or corrupted snapshot journal")
	}
	r := rlp.NewStream(bytes.NewReader(journal), 0)

	// Read the snapshot generation progress for the disk layer
	var generator journalGenerator
	if err := r.Decode(&generator); err != nil {
		return nil, fmt.Errorf("failed to load snapshot progress marker: %v", err)
	}
	// Load all the snapshot diffs from the journal
	snapshot, err := loadDiffLayer(base, r)
	if err != nil {
		return nil, err
	}
	// Entire snapshot journal loaded, sanity check the head and return
	if head := snapshot.Root(); head != root {
		return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
	}
	// Everything loaded correctly, resume any suspended operations
	if !generator.Done {
		// If the generator was still wiping, restart one from scratch (fine for
		// now as it's rare and the wiper deletes the stuff it touches anyway, so
		// restarting won't incur a lot of extra database hops).
		var wiper chan struct{}
		if generator.Wiping {
			log.Info("Resuming previous snapshot wipe")
			wiper = wipeSnapshot(diskdb, false)
		}
		// Whether or not wiping was in progress, load any generator progress too
		base.genMarker = generator.Marker
		if base.genMarker == nil {
			base.genMarker = []byte{}
		}
		base.genPending = make(chan struct{})
		base.genAbort = make(chan chan *generatorStats)

		var origin uint64
		if len(generator.Marker) >= 8 {
			origin = binary.BigEndian.Uint64(generator.Marker)
		}
		go base.generate(&generatorStats{
			wiping:   wiper,
			origin:   origin,
			start:    time.Now(),
			accounts: generator.Accounts,
			slots:    generator.Slots,
			storage:  common.StorageSize(generator.Storage),
		})
	}
	return snapshot, nil
}
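
// As a rough usage sketch (illustrative only; the caller-side identifiers such
// as headRoot are assumed, not part of this file): the package's snapshot tree
// would rebuild its layer stack at startup with something like
//
//	head, err := loadSnapshot(diskdb, triedb, cache, headRoot)
//	if err != nil {
//		// No usable journal: fall back to regenerating the snapshot from scratch.
//	}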

// loadDiffLayer reads the next sections of a snapshot journal, reconstructing a new
// diff and verifying that it can be linked to the requested parent.
func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
	// Read the next diff journal entry
	var root common.Hash
	if err := r.Decode(&root); err != nil {
		// The first read may fail with EOF, marking the end of the journal
		if err == io.EOF {
			return parent, nil
		}
		return nil, fmt.Errorf("load diff root: %v", err)
	}
	var destructs []journalDestruct
	if err := r.Decode(&destructs); err != nil {
		return nil, fmt.Errorf("load diff destructs: %v", err)
	}
	destructSet := make(map[common.Hash]struct{})
	for _, entry := range destructs {
		destructSet[entry.Hash] = struct{}{}
	}
	var accounts []journalAccount
	if err := r.Decode(&accounts); err != nil {
		return nil, fmt.Errorf("load diff accounts: %v", err)
	}
	accountData := make(map[common.Hash][]byte)
	for _, entry := range accounts {
		if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
			accountData[entry.Hash] = entry.Blob
		} else {
			accountData[entry.Hash] = nil
		}
	}
	var storage []journalStorage
	if err := r.Decode(&storage); err != nil {
		return nil, fmt.Errorf("load diff storage: %v", err)
	}
	storageData := make(map[common.Hash]map[common.Hash][]byte)
	for _, entry := range storage {
		slots := make(map[common.Hash][]byte)
		for i, key := range entry.Keys {
			if len(entry.Vals[i]) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
				slots[key] = entry.Vals[i]
			} else {
				slots[key] = nil
			}
		}
		storageData[entry.Hash] = slots
	}
	return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r)
}

// Journal writes the persistent layer generator stats into a buffer to be stored
// in the database as the snapshot journal.
func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
	// If the snapshot is currently being generated, abort it
	var stats *generatorStats
	if dl.genAbort != nil {
		abort := make(chan *generatorStats)
		dl.genAbort <- abort

		if stats = <-abort; stats != nil {
			stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker)
		}
	}
	// Ensure the layer didn't get stale
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if dl.stale {
		return common.Hash{}, ErrSnapshotStale
	}
	// Write out the generator marker
	entry := journalGenerator{
		Done:   dl.genMarker == nil,
		Marker: dl.genMarker,
	}
	if stats != nil {
		entry.Wiping = (stats.wiping != nil)
		entry.Accounts = stats.accounts
		entry.Slots = stats.slots
		entry.Storage = uint64(stats.storage)
	}
	if err := rlp.Encode(buffer, entry); err != nil {
		return common.Hash{}, err
	}
	return dl.root, nil
}

// Journal writes the memory layer contents into a buffer to be stored in the
// database as the snapshot journal.
func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
	// Journal the parent first
	base, err := dl.parent.Journal(buffer)
	if err != nil {
		return common.Hash{}, err
	}
	// Ensure the layer didn't get stale
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if dl.Stale() {
		return common.Hash{}, ErrSnapshotStale
	}
	// Everything below was journalled, persist this layer too
	if err := rlp.Encode(buffer, dl.root); err != nil {
		return common.Hash{}, err
	}
	destructs := make([]journalDestruct, 0, len(dl.destructSet))
	for hash := range dl.destructSet {
		destructs = append(destructs, journalDestruct{Hash: hash})
	}
	if err := rlp.Encode(buffer, destructs); err != nil {
		return common.Hash{}, err
	}
	accounts := make([]journalAccount, 0, len(dl.accountData))
	for hash, blob := range dl.accountData {
		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
	}
	if err := rlp.Encode(buffer, accounts); err != nil {
		return common.Hash{}, err
	}
	storage := make([]journalStorage, 0, len(dl.storageData))
	for hash, slots := range dl.storageData {
		keys := make([]common.Hash, 0, len(slots))
		vals := make([][]byte, 0, len(slots))
		for key, val := range slots {
			keys = append(keys, key)
			vals = append(vals, val)
		}
		storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
	}
	if err := rlp.Encode(buffer, storage); err != nil {
		return common.Hash{}, err
	}
	return base, nil
}
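
// A minimal sketch of how these Journal methods are driven (illustrative only;
// head stands for whatever the topmost layer currently is, and
// rawdb.WriteSnapshotJournal is the counterpart of the ReadSnapshotJournal call
// used in loadSnapshot above):
//
//	journal := new(bytes.Buffer)
//	if base, err := head.Journal(journal); err == nil {
//		rawdb.WriteSnapshotJournal(diskdb, journal.Bytes())
//		_ = base // root of the persisted disk layer
//	}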