@@ -148,11 +148,11 @@ type snapshot interface {
 	StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
 }

-// SnapshotTree is an Ethereum state snapshot tree. It consists of one persistent
-// base layer backed by a key-value store, on top of which arbitrarily many in-
-// memory diff layers are topped. The memory diffs can form a tree with branching,
-// but the disk layer is singleton and common to all. If a reorg goes deeper than
-// the disk layer, everything needs to be deleted.
+// Tree is an Ethereum state snapshot tree. It consists of one persistent base
+// layer backed by a key-value store, on top of which arbitrarily many in-memory
+// diff layers are topped. The memory diffs can form a tree with branching, but
+// the disk layer is singleton and common to all. If a reorg goes deeper than the
+// disk layer, everything needs to be deleted.
 //
 // The goal of a state snapshot is twofold: to allow direct access to account and
 // storage data to avoid expensive multi-level trie lookups; and to allow sorted,
@@ -186,7 +186,11 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 		defer snap.waitBuild()
 	}
 	// Attempt to load a previously persisted snapshot and rebuild one if failed
-	head, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
+	head, disabled, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
+	if disabled {
+		log.Warn("Snapshot maintenance disabled (syncing)")
+		return snap, nil
+	}
 	if err != nil {
 		if rebuild {
 			log.Warn("Failed to load snapshot, regenerating", "err", err)
@@ -224,6 +228,55 @@ func (t *Tree) waitBuild() {
 	}
 }

+// Disable interrupts any pending snapshot generator, deletes all the snapshot
+// layers in memory and marks snapshots disabled globally. In order to resume
+// the snapshot functionality, the caller must invoke Rebuild.
+func (t *Tree) Disable() {
+	// Interrupt any live snapshot layers
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	for _, layer := range t.layers {
+		switch layer := layer.(type) {
+		case *diskLayer:
+			// If the base layer is generating, abort it
+			if layer.genAbort != nil {
+				abort := make(chan *generatorStats)
+				layer.genAbort <- abort
+				<-abort
+			}
+			// Layer should be inactive now, mark it as stale
+			layer.lock.Lock()
+			layer.stale = true
+			layer.lock.Unlock()
+
+		case *diffLayer:
+			// If the layer is a simple diff, simply mark as stale
+			layer.lock.Lock()
+			atomic.StoreUint32(&layer.stale, 1)
+			layer.lock.Unlock()
+
+		default:
+			panic(fmt.Sprintf("unknown layer type: %T", layer))
+		}
+	}
+	t.layers = map[common.Hash]snapshot{}
+
+	// Delete all snapshot liveness information from the database
+	batch := t.diskdb.NewBatch()
+
+	rawdb.WriteSnapshotDisabled(batch)
+	rawdb.DeleteSnapshotRoot(batch)
+	rawdb.DeleteSnapshotJournal(batch)
+	rawdb.DeleteSnapshotGenerator(batch)
+	rawdb.DeleteSnapshotRecoveryNumber(batch)
+	// Note, we don't delete the sync progress
+
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to disable snapshots", "err", err)
+	}
+}
+
 // Snapshot retrieves a snapshot belonging to the given block root, or nil if no
 // snapshot is maintained for that block.
 func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
@@ -626,8 +679,9 @@ func (t *Tree) Rebuild(root common.Hash) {
 	defer t.lock.Unlock()

 	// Firstly delete any recovery flag in the database. Because now we are
-	// building a brand new snapshot.
+	// building a brand new snapshot. Also reenable the snapshot feature.
 	rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
+	rawdb.DeleteSnapshotDisabled(t.diskdb)

 	// Iterate over and mark all layers stale
 	for _, layer := range t.layers {
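
Taken together, these hunks let snapshot maintenance be switched off for the duration of a bulk state rewrite and switched back on afterwards: Disable tears down the layers and persists a global disabled marker, New short-circuits when it finds that marker, and Rebuild clears it before regenerating. A minimal usage sketch follows; resyncWithSnapshots and bulkImportState are hypothetical names used purely for illustration, and only Tree.Disable, Tree.Rebuild and the persisted disabled marker come from the change above.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// bulkImportState is a hypothetical stand-in for whatever rewrites the state
// wholesale (e.g. a state sync); it is not part of the change above.
func bulkImportState() common.Hash { return common.Hash{} }

// resyncWithSnapshots sketches the intended call sequence: tear the snapshot
// tree down before rewriting state, then regenerate it at the new head root.
func resyncWithSnapshots(snaps *snapshot.Tree) {
	// Abort any running generator, drop all layers and persist the global
	// "disabled" marker so that a restart mid-rewrite keeps snapshots off
	// (New returns early when it loads that marker).
	snaps.Disable()

	// Rewrite the state while the snapshot tree is inert.
	newRoot := bulkImportState()

	// Clear the disabled marker and regenerate the snapshot at the new root.
	snaps.Rebuild(newRoot)
}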