database.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
	"os"
	"sync/atomic"
	"time"

	"github.com/olekukonko/tablewriter"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/leveldb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/log"
)

// freezerdb is a database wrapper that enables freezer data retrievals.
type freezerdb struct {
	ethdb.KeyValueStore
	ethdb.AncientStore
	diffStore ethdb.KeyValueStore
}

// Close implements io.Closer, closing both the fast key-value store as well as
// the slow ancient tables.
func (frdb *freezerdb) Close() error {
	var errs []error
	if err := frdb.AncientStore.Close(); err != nil {
		errs = append(errs, err)
	}
	if err := frdb.KeyValueStore.Close(); err != nil {
		errs = append(errs, err)
	}
	if frdb.diffStore != nil {
		if err := frdb.diffStore.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) != 0 {
		return fmt.Errorf("%v", errs)
	}
	return nil
}

// DiffStore returns the diff layer store attached to the database, if any.
func (frdb *freezerdb) DiffStore() ethdb.KeyValueStore {
	return frdb.diffStore
}

// SetDiffStore attaches a diff layer store, closing any previously attached one.
func (frdb *freezerdb) SetDiffStore(diff ethdb.KeyValueStore) {
	if frdb.diffStore != nil {
		frdb.diffStore.Close()
	}
	frdb.diffStore = diff
}

// Freeze is a helper method used for external testing to trigger and block until
// a freeze cycle completes, without having to sleep for a minute to trigger the
// automatic background run.
func (frdb *freezerdb) Freeze(threshold uint64) error {
	if frdb.AncientStore.(*freezer).readonly {
		return errReadOnly
	}
	// Set the freezer threshold to a temporary value
	defer func(old uint64) {
		atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, old)
	}(atomic.LoadUint64(&frdb.AncientStore.(*freezer).threshold))
	atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, threshold)

	// Trigger a freeze cycle and block until it's done
	trigger := make(chan struct{}, 1)
	frdb.AncientStore.(*freezer).trigger <- trigger
	<-trigger
	return nil
}
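
// A minimal usage sketch for Freeze (hypothetical test code, for illustration
// only; the concrete freezerdb type is unexported, so a caller would reach the
// method through an interface assertion on the returned ethdb.Database):
//
//	if f, ok := db.(interface{ Freeze(threshold uint64) error }); ok {
//		// Temporarily lower the threshold so that everything except the most
//		// recent block is moved into the ancient store, blocking until done.
//		_ = f.Freeze(1)
//	}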

// nofreezedb is a database wrapper that disables freezer data retrievals.
type nofreezedb struct {
	ethdb.KeyValueStore
	diffStore ethdb.KeyValueStore
}

// HasAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
	return false, errNotSupported
}

// Ancient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
	return nil, errNotSupported
}

// Ancients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancients() (uint64, error) {
	return 0, errNotSupported
}

// ItemAmountInAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) ItemAmountInAncient() (uint64, error) {
	return 0, errNotSupported
}

// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
	return 0, errNotSupported
}

// AppendAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
	return errNotSupported
}

// TruncateAncients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateAncients(items uint64) error {
	return errNotSupported
}

// Sync returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Sync() error {
	return errNotSupported
}

// DiffStore returns the diff layer store attached to the database, if any.
func (db *nofreezedb) DiffStore() ethdb.KeyValueStore {
	return db.diffStore
}

// SetDiffStore attaches a diff layer store to the database.
func (db *nofreezedb) SetDiffStore(diff ethdb.KeyValueStore) {
	db.diffStore = diff
}

// AncientOffSet returns 0 as we don't have a backing chain freezer.
func (db *nofreezedb) AncientOffSet() uint64 {
	return 0
}

// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
	return &nofreezedb{
		KeyValueStore: db,
	}
}

// ReadOffSetOfCurrentAncientFreezer reads the offset of the current ancient
// freezer from the key-value store.
func ReadOffSetOfCurrentAncientFreezer(db ethdb.KeyValueReader) uint64 {
	offset, _ := db.Get(offSetOfCurrentAncientFreezer)
	if offset == nil {
		return 0
	}
	return new(big.Int).SetBytes(offset).Uint64()
}

// ReadOffSetOfLastAncientFreezer reads the offset of the last ancient freezer
// from the key-value store.
func ReadOffSetOfLastAncientFreezer(db ethdb.KeyValueReader) uint64 {
	offset, _ := db.Get(offSetOfLastAncientFreezer)
	if offset == nil {
		return 0
	}
	return new(big.Int).SetBytes(offset).Uint64()
}

// WriteOffSetOfCurrentAncientFreezer stores the offset of the current ancient freezer.
func WriteOffSetOfCurrentAncientFreezer(db ethdb.KeyValueWriter, offset uint64) {
	if err := db.Put(offSetOfCurrentAncientFreezer, new(big.Int).SetUint64(offset).Bytes()); err != nil {
		log.Crit("Failed to store offSetOfCurrentAncientFreezer", "err", err)
	}
}

// WriteOffSetOfLastAncientFreezer stores the offset of the last ancient freezer.
func WriteOffSetOfLastAncientFreezer(db ethdb.KeyValueWriter, offset uint64) {
	if err := db.Put(offSetOfLastAncientFreezer, new(big.Int).SetUint64(offset).Bytes()); err != nil {
		log.Crit("Failed to store offSetOfLastAncientFreezer", "err", err)
	}
}

// NewFreezerDb only creates a freezer, without a state database.
func NewFreezerDb(db ethdb.KeyValueStore, frz, namespace string, readonly bool, newOffSet uint64) (*freezer, error) {
	// Create the idle freezer instance. This operation should be atomic to avoid
	// a mismatch between the offset and the ancient DB.
	frdb, err := newFreezer(frz, namespace, readonly)
	if err != nil {
		return nil, err
	}
	frdb.offset = newOffSet
	frdb.frozen += newOffSet
	return frdb, nil
}
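
// A minimal usage sketch for NewFreezerDb (hypothetical variables, for
// illustration only): open a standalone freezer whose item numbering starts at
// startBlock, e.g. when the earlier part of the chain has been pruned away.
//
//	frz, err := NewFreezerDb(kvdb, ancientDir, "eth/db/chaindata/", false, startBlock)
//	if err != nil {
//		return err
//	}
//	defer frz.Close()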

// NewDatabaseWithFreezer creates a high level database on top of a given key-
// value data store with a freezer moving immutable chain segments into cold
// storage.
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly, disableFreeze, isLastOffset bool) (ethdb.Database, error) {
	// Create the idle freezer instance
	frdb, err := newFreezer(freezer, namespace, readonly)
	if err != nil {
		return nil, err
	}
	var offset uint64
	// The offset of the ancient DB should be handled differently in different scenarios.
	if isLastOffset {
		offset = ReadOffSetOfLastAncientFreezer(db)
	} else {
		offset = ReadOffSetOfCurrentAncientFreezer(db)
	}
	frdb.offset = offset

	// Some blocks in the ancient DB may have already been frozen and then pruned,
	// so add the offset to represent the absolute number of blocks already frozen.
	frdb.frozen += offset

	// Since the freezer can be stored separately from the user's key-value database,
	// there's a fairly high probability that the user requests invalid combinations
	// of the freezer and database. Ensure that we don't shoot ourselves in the foot
	// by serving up conflicting data, leading to both datastores getting corrupted.
	//
	//   - If both the freezer and key-value store are empty (no genesis), we just
	//     initialized a new empty freezer, so everything's fine.
	//   - If the key-value store is empty, but the freezer is not, we need to make
	//     sure the user's genesis matches the freezer. That will be checked in the
	//     blockchain, since we don't have the genesis block here (nor should we at
	//     this point care, the key-value/freezer combo is valid).
	//   - If neither the key-value store nor the freezer is empty, cross validate
	//     the genesis hashes to make sure they are compatible. If they are, also
	//     ensure that there's no gap between the freezer and the subsequent leveldb.
	//   - If the key-value store is not empty, but the freezer is, we might just be
	//     upgrading to the freezer release, or we might have had a small chain and
	//     not frozen anything yet. Ensure that no blocks are missing yet from the
	//     key-value store, since that would mean we already had an old freezer.

	// If the genesis hash is empty, we have a new key-value store, so nothing to
	// validate in this method. If, however, the genesis hash is not nil, compare
	// it to the freezer content.
	// Only perform the following checks when the offset is 0; otherwise the block
	// numbers in the ancient DB do not start at 0 and it holds no genesis block either.
	if kvgenesis, _ := db.Get(headerHashKey(0)); offset == 0 && len(kvgenesis) > 0 {
		if frozen, _ := frdb.Ancients(); frozen > 0 {
			// If the freezer already contains something, ensure that the genesis blocks
			// match, otherwise we might mix up freezers across chains and destroy both
			// the freezer and the key-value store.
			frgenesis, err := frdb.Ancient(freezerHashTable, 0)
			if err != nil {
				return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err)
			} else if !bytes.Equal(kvgenesis, frgenesis) {
				return nil, fmt.Errorf("genesis mismatch: %#x (leveldb) != %#x (ancients)", kvgenesis, frgenesis)
			}
			// Key-value store and freezer belong to the same network. Ensure that they
			// are contiguous, otherwise we might end up with a non-functional freezer.
			if kvhash, _ := db.Get(headerHashKey(frozen)); len(kvhash) == 0 {
				// Subsequent header after the freezer limit is missing from the database.
				// Reject startup if the database has a more recent head.
				if *ReadHeaderNumber(db, ReadHeadHeaderHash(db)) > frozen-1 {
					return nil, fmt.Errorf("gap (#%d) in the chain between ancients and leveldb", frozen)
				}
				// Database contains only older data than the freezer, this happens if the
				// state was wiped and reinited from an existing freezer.
			}
			// Otherwise, key-value store continues where the freezer left off, all is fine.
			// We might have duplicate blocks (crash after freezer write but before key-value
			// store deletion, but that's fine).
		} else {
			// If the freezer is empty, ensure nothing was moved yet from the key-value
			// store, otherwise we'll end up missing data. We check block #1 to decide
			// if we froze anything previously or not, but do take care of databases with
			// only the genesis block.
			if ReadHeadHeaderHash(db) != common.BytesToHash(kvgenesis) {
				// Key-value store contains more data than the genesis block, make sure we
				// didn't freeze anything yet.
				if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 {
					return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
				}
				// Block #1 is still in the database, we're allowed to init a new freezer.
			}
			// Otherwise, the head header is still the genesis, we're allowed to init a new
			// freezer.
		}
	}
	// Freezer is consistent with the key-value database, permit combining the two
	if !disableFreeze && !frdb.readonly {
		go frdb.freeze(db)
	}
	return &freezerdb{
		KeyValueStore: db,
		AncientStore:  frdb,
	}, nil
}
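
// A minimal usage sketch for NewDatabaseWithFreezer (hypothetical paths, for
// illustration only): wrap an existing key-value store with an ancient store
// kept in a separate directory, using the current (not the last) offset.
//
//	kvdb, err := leveldb.New("chaindata", 512, 512, "eth/db/chaindata/", false)
//	if err != nil {
//		return err
//	}
//	db, err := NewDatabaseWithFreezer(kvdb, "chaindata/ancient", "eth/db/chaindata/", false, false, false)
//	if err != nil {
//		kvdb.Close()
//		return err
//	}
//	defer db.Close()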

// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
// freezer moving immutable chain segments into cold storage.
func NewMemoryDatabase() ethdb.Database {
	return NewDatabase(memorydb.New())
}

// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
// with an initial starting capacity, but without a freezer moving immutable
// chain segments into cold storage.
func NewMemoryDatabaseWithCap(size int) ethdb.Database {
	return NewDatabase(memorydb.NewWithCap(size))
}

// NewLevelDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	db, err := leveldb.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	return NewDatabase(db), nil
}

// NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a
// freezer moving immutable chain segments into cold storage.
func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string, readonly, disableFreeze, isLastOffset bool) (ethdb.Database, error) {
	kvdb, err := leveldb.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace, readonly, disableFreeze, isLastOffset)
	if err != nil {
		kvdb.Close()
		return nil, err
	}
	return frdb, nil
}

// counter is a helper type for pretty-printing item counts.
type counter uint64

func (c counter) String() string {
	return fmt.Sprintf("%d", c)
}

func (c counter) Percentage(current uint64) string {
	return fmt.Sprintf("%d", current*100/uint64(c))
}

// stat stores sizes and count for a parameter
type stat struct {
	size  common.StorageSize
	count counter
}

// Add size to the stat and increase the counter by 1
func (s *stat) Add(size common.StorageSize) {
	s.size += size
	s.count++
}

func (s *stat) Size() string {
	return s.size.String()
}

func (s *stat) Count() string {
	return s.count.String()
}

// AncientInspect prints a summary of the ancient store: its offset (the first
// block number it holds), the number of remaining items and the last block number.
func AncientInspect(db ethdb.Database) error {
	offset := counter(ReadOffSetOfCurrentAncientFreezer(db))
	// Get the number of ancient rows inside the freezer.
	ancients := counter(0)
	if count, err := db.ItemAmountInAncient(); err != nil {
		log.Error("failed to get the items amount in ancientDB", "err", err)
		return err
	} else {
		ancients = counter(count)
	}
	var endNumber counter
	if offset+ancients <= 0 {
		endNumber = 0
	} else {
		endNumber = offset + ancients - 1
	}
	stats := [][]string{
		{"Offset/StartBlockNumber", "Offset/StartBlockNumber of ancientDB", offset.String()},
		{"Amount of remaining items in AncientStore", "Remaining items in ancientDB", ancients.String()},
		{"The last BlockNumber within ancientDB", "The last BlockNumber", endNumber.String()},
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Database", "Category", "Items"})
	table.SetFooter([]string{"", "AncientStore information", ""})
	table.AppendBulk(stats)
	table.Render()
	return nil
}
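
// A worked example of the numbers reported above (hypothetical values, for
// illustration only): with an offset of 1000 and 500 items remaining in the
// ancient store, the store covers blocks 1000 through 1499, so the reported
// last block number is 1000 + 500 - 1 = 1499.
//
//	if err := AncientInspect(db); err != nil {
//		log.Error("Failed to inspect ancient store", "err", err)
//	}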

// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
	it := db.NewIterator(keyPrefix, keyStart)
	defer it.Release()

	var (
		count  int64
		start  = time.Now()
		logged = time.Now()

		// Key-value store statistics
		headers         stat
		bodies          stat
		receipts        stat
		tds             stat
		numHashPairings stat
		hashNumPairings stat
		tries           stat
		codes           stat
		txLookups       stat
		accountSnaps    stat
		storageSnaps    stat
		preimages       stat
		bloomBits       stat
		cliqueSnaps     stat
		parliaSnaps     stat

		// Ancient store statistics
		ancientHeadersSize  common.StorageSize
		ancientBodiesSize   common.StorageSize
		ancientReceiptsSize common.StorageSize
		ancientTdsSize      common.StorageSize
		ancientHashesSize   common.StorageSize

		// Les statistics
		chtTrieNodes   stat
		bloomTrieNodes stat

		// Meta- and unaccounted data
		metadata     stat
		unaccounted  stat
		shutdownInfo stat

		// Totals
		total common.StorageSize
	)
	// Inspect key-value database first.
	for it.Next() {
		var (
			key  = it.Key()
			size = common.StorageSize(len(key) + len(it.Value()))
		)
		total += size
		switch {
		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
			headers.Add(size)
		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
			bodies.Add(size)
		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
			receipts.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
			tds.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
			numHashPairings.Add(size)
		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
			hashNumPairings.Add(size)
		case len(key) == common.HashLength:
			tries.Add(size)
		case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
			codes.Add(size)
		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
			txLookups.Add(size)
		case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
			accountSnaps.Add(size)
		case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
			storageSnaps.Add(size)
		case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength):
			preimages.Add(size)
		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, BloomBitsIndexPrefix):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
			cliqueSnaps.Add(size)
		case bytes.HasPrefix(key, []byte("parlia-")) && len(key) == 7+common.HashLength:
			parliaSnaps.Add(size)
		case bytes.HasPrefix(key, []byte("cht-")) ||
			bytes.HasPrefix(key, []byte("chtIndexV2-")) ||
			bytes.HasPrefix(key, []byte("chtRootV2-")): // Canonical hash trie
			chtTrieNodes.Add(size)
		case bytes.HasPrefix(key, []byte("blt-")) ||
			bytes.HasPrefix(key, []byte("bltIndex-")) ||
			bytes.HasPrefix(key, []byte("bltRoot-")): // Bloomtrie sub
			bloomTrieNodes.Add(size)
		case bytes.Equal(key, uncleanShutdownKey):
			shutdownInfo.Add(size)
		default:
			var accounted bool
			for _, meta := range [][]byte{
				databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
				fastTrieProgressKey, snapshotDisabledKey, snapshotRootKey, snapshotJournalKey,
				snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
				uncleanShutdownKey, badBlockKey,
			} {
				if bytes.Equal(key, meta) {
					metadata.Add(size)
					accounted = true
					break
				}
			}
			if !accounted {
				unaccounted.Add(size)
			}
		}
		count++
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
	}
	// Inspect the append-only file store then.
	ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize}
	for i, category := range []string{freezerHeaderTable, freezerBodiesTable, freezerReceiptTable, freezerHashTable, freezerDifficultyTable} {
		if size, err := db.AncientSize(category); err == nil {
			*ancientSizes[i] += common.StorageSize(size)
			total += common.StorageSize(size)
		}
	}
	// Get the number of ancient rows inside the freezer
	ancients := counter(0)
	if count, err := db.ItemAmountInAncient(); err == nil {
		ancients = counter(count)
	}
	// Display the database statistics.
	stats := [][]string{
		{"Key-Value store", "Headers", headers.Size(), headers.Count()},
		{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
		{"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
		{"Key-Value store", "Difficulties", tds.Size(), tds.Count()},
		{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
		{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
		{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
		{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
		{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
		{"Key-Value store", "Trie nodes", tries.Size(), tries.Count()},
		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
		{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
		{"Key-Value store", "Parlia snapshots", parliaSnaps.Size(), parliaSnaps.Count()},
		{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
		{"Key-Value store", "Shutdown metadata", shutdownInfo.Size(), shutdownInfo.Count()},
		{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
		{"Ancient store", "Bodies", ancientBodiesSize.String(), ancients.String()},
		{"Ancient store", "Receipt lists", ancientReceiptsSize.String(), ancients.String()},
		{"Ancient store", "Difficulties", ancientTdsSize.String(), ancients.String()},
		{"Ancient store", "Block number->hash", ancientHashesSize.String(), ancients.String()},
		{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
		{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Database", "Category", "Size", "Items"})
	table.SetFooter([]string{"", "Total", total.String(), " "})
	table.AppendBulk(stats)
	table.Render()

	if unaccounted.size > 0 {
		log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
	}
	return nil
}
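
// A minimal usage sketch for InspectDatabase (for illustration only): passing
// nil for both the prefix and the start key scans the whole key-value store;
// a non-nil prefix restricts the scan to keys beginning with it.
//
//	if err := InspectDatabase(db, nil, nil); err != nil {
//		log.Error("Database inspection failed", "err", err)
//	}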