database.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"path"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/leveldb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/olekukonko/tablewriter"
)

// freezerdb is a database wrapper that enables freezer data retrievals.
type freezerdb struct {
	ancientRoot string
	ethdb.KeyValueStore
	ethdb.AncientStore
}

// AncientDatadir returns the path of the root ancient directory.
func (frdb *freezerdb) AncientDatadir() (string, error) {
	return frdb.ancientRoot, nil
}

// Close implements io.Closer, closing both the fast key-value store as well as
// the slow ancient tables.
func (frdb *freezerdb) Close() error {
	var errs []error
	if err := frdb.AncientStore.Close(); err != nil {
		errs = append(errs, err)
	}
	if err := frdb.KeyValueStore.Close(); err != nil {
		errs = append(errs, err)
	}
	if len(errs) != 0 {
		return fmt.Errorf("%v", errs)
	}
	return nil
}

// Freeze is a helper method used for external testing to trigger and block until
// a freeze cycle completes, without having to sleep for a minute to trigger the
// automatic background run.
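//
// A hypothetical call from a test, assuming threshold is the number of most
// recent blocks to keep outside the freezer:
//
//	if err := db.(interface{ Freeze(uint64) error }).Freeze(16); err != nil {
//		t.Fatal(err)
//	}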
func (frdb *freezerdb) Freeze(threshold uint64) error {
	if frdb.AncientStore.(*chainFreezer).readonly {
		return errReadOnly
	}
	// Set the freezer threshold to a temporary value
	defer func(old uint64) {
		atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, old)
	}(atomic.LoadUint64(&frdb.AncientStore.(*chainFreezer).threshold))
	atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, threshold)

	// Trigger a freeze cycle and block until it's done
	trigger := make(chan struct{}, 1)
	frdb.AncientStore.(*chainFreezer).trigger <- trigger
	<-trigger
	return nil
}

// nofreezedb is a database wrapper that disables freezer data retrievals.
type nofreezedb struct {
	ethdb.KeyValueStore
}

// HasAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
	return false, errNotSupported
}

// Ancient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
	return nil, errNotSupported
}

// AncientRange returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
	return nil, errNotSupported
}

// Ancients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancients() (uint64, error) {
	return 0, errNotSupported
}

// Tail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Tail() (uint64, error) {
	return 0, errNotSupported
}

// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
	return 0, errNotSupported
}

// ModifyAncients is not supported.
func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
	return 0, errNotSupported
}

// TruncateHead returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateHead(items uint64) error {
	return errNotSupported
}

// TruncateTail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateTail(items uint64) error {
	return errNotSupported
}

// Sync returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Sync() error {
	return errNotSupported
}
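
// ReadAncients runs the given read callback against the database itself: any
// ancient lookups inside fn will fail with errNotSupported, while key-value
// reads proceed normally. See the note inside for why it does not fail outright.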
func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
	// Unlike other ancient-related methods, this method does not return
	// errNotSupported when invoked.
	// The reason for this is that the caller might want to do several things:
	// 1. Check if something is in the freezer,
	// 2. If not, check leveldb.
	//
	// This will work, since the ancient-checks inside 'fn' will return errors,
	// and the leveldb work will continue.
	//
	// If we instead were to return errNotSupported here, then the caller would
	// have to explicitly check for that, having an extra clause to do the
	// non-ancient operations.
	return fn(db)
}

// MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format.
func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
	return errNotSupported
}

// AncientDatadir returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientDatadir() (string, error) {
	return "", errNotSupported
}

// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
	return &nofreezedb{KeyValueStore: db}
}

// resolveChainFreezerDir is a helper function which resolves the absolute path
// of the chain freezer by considering backward compatibility.
func resolveChainFreezerDir(ancient string) string {
	// Check if the chain freezer is already present in the specified
	// sub folder; if not, there are two possibilities:
	// - the chain freezer is not initialized
	// - the chain freezer exists in the legacy location (root ancient folder)
	freezer := path.Join(ancient, chainFreezerName)
	if !common.FileExist(freezer) {
		if !common.FileExist(ancient) {
			// The entire ancient store is not initialized, still use the sub
			// folder for initialization.
		} else {
			// The ancient root is already initialized, so we assume the chain
			// freezer is also initialized and located in the root folder.
			// In this case, fall back to the legacy location.
			freezer = ancient
			log.Info("Found legacy ancient chain path", "location", ancient)
		}
	}
	return freezer
}

// NewDatabaseWithFreezer creates a high level database on top of a given key-
// value data store with a freezer moving immutable chain segments into cold
// storage. The passed ancient indicates the path of the root ancient directory
// where the chain freezer can be opened.
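//
// A minimal usage sketch (the paths, cache sizes and namespace below are
// hypothetical placeholders, not defaults):
//
//	kvdb, _ := leveldb.New("/data/geth/chaindata", 512, 512, "eth/db/chaindata/", false)
//	db, err := NewDatabaseWithFreezer(kvdb, "/data/geth/chaindata/ancient", "eth/db/chaindata/", false)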
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
	// Create the idle freezer instance
	frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, freezerTableSize, chainFreezerNoSnappy)
	if err != nil {
		return nil, err
	}
	// Since the freezer can be stored separately from the user's key-value database,
	// there's a fairly high probability that the user requests invalid combinations
	// of the freezer and database. Ensure that we don't shoot ourselves in the foot
	// by serving up conflicting data, leading to both datastores getting corrupted.
	//
	//   - If both the freezer and key-value store are empty (no genesis), we just
	//     initialized a new empty freezer, so everything's fine.
	//   - If the key-value store is empty, but the freezer is not, we need to make
	//     sure the user's genesis matches the freezer. That will be checked in the
	//     blockchain, since we don't have the genesis block here (nor should we at
	//     this point care, the key-value/freezer combo is valid).
	//   - If neither the key-value store nor the freezer is empty, cross validate
	//     the genesis hashes to make sure they are compatible. If they are, also
	//     ensure that there's no gap between the freezer and the subsequent leveldb data.
	//   - If the key-value store is not empty, but the freezer is, we might just be
	//     upgrading to the freezer release, or we might have had a small chain and
	//     not frozen anything yet. Ensure that no blocks are missing yet from the
	//     key-value store, since that would mean we already had an old freezer.

	// If the genesis hash is empty, we have a new key-value store, so nothing to
	// validate in this method. If, however, the genesis hash is not nil, compare
	// it to the freezer content.
	if kvgenesis, _ := db.Get(headerHashKey(0)); len(kvgenesis) > 0 {
		if frozen, _ := frdb.Ancients(); frozen > 0 {
			// If the freezer already contains something, ensure that the genesis blocks
			// match, otherwise we might mix up freezers across chains and destroy both
			// the freezer and the key-value store.
			frgenesis, err := frdb.Ancient(chainFreezerHashTable, 0)
			if err != nil {
				return nil, fmt.Errorf("failed to retrieve genesis from ancient: %v", err)
			} else if !bytes.Equal(kvgenesis, frgenesis) {
				return nil, fmt.Errorf("genesis mismatch: %#x (leveldb) != %#x (ancients)", kvgenesis, frgenesis)
			}
			// Key-value store and freezer belong to the same network. Ensure that they
			// are contiguous, otherwise we might end up with a non-functional freezer.
			if kvhash, _ := db.Get(headerHashKey(frozen)); len(kvhash) == 0 {
				// Subsequent header after the freezer limit is missing from the database.
				// Reject startup if the database has a more recent head.
				if *ReadHeaderNumber(db, ReadHeadHeaderHash(db)) > frozen-1 {
					return nil, fmt.Errorf("gap (#%d) in the chain between ancients and leveldb", frozen)
				}
				// Database contains only older data than the freezer, this happens if the
				// state was wiped and reinited from an existing freezer.
			}
			// Otherwise, the key-value store continues where the freezer left off, all is fine.
			// We might have duplicate blocks (crash after freezer write but before key-value
			// store deletion), but that's fine.
		} else {
			// If the freezer is empty, ensure nothing was moved yet from the key-value
			// store, otherwise we'll end up missing data. We check block #1 to decide
			// if we froze anything previously or not, but do take care of databases with
			// only the genesis block.
			if ReadHeadHeaderHash(db) != common.BytesToHash(kvgenesis) {
				// Key-value store contains more data than the genesis block, make sure we
				// didn't freeze anything yet.
				if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 {
					return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
				}
				// Block #1 is still in the database, we're allowed to init a new freezer.
			}
			// Otherwise, the head header is still the genesis, we're allowed to init a new
			// freezer.
		}
	}
	// Freezer is consistent with the key-value database, permit combining the two
	if !frdb.readonly {
		frdb.wg.Add(1)
		go func() {
			frdb.freeze(db)
			frdb.wg.Done()
		}()
	}
	return &freezerdb{
		ancientRoot:   ancient,
		KeyValueStore: db,
		AncientStore:  frdb,
	}, nil
}

// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
// freezer moving immutable chain segments into cold storage.
func NewMemoryDatabase() ethdb.Database {
	return NewDatabase(memorydb.New())
}

// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
// with an initial starting capacity, but without a freezer moving immutable
// chain segments into cold storage.
func NewMemoryDatabaseWithCap(size int) ethdb.Database {
	return NewDatabase(memorydb.NewWithCap(size))
}

// NewLevelDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	db, err := leveldb.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	return NewDatabase(db), nil
}

// NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a
// freezer moving immutable chain segments into cold storage. The passed ancient
// indicates the path of the root ancient directory where the chain freezer can
// be opened.
func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
	kvdb, err := leveldb.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	frdb, err := NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly)
	if err != nil {
		kvdb.Close()
		return nil, err
	}
	return frdb, nil
}
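
// counter is a helper type for rendering item counts in the database
// inspection report.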
type counter uint64
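
// String implements the fmt.Stringer interface, rendering the counter as a
// plain decimal number.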
func (c counter) String() string {
	return fmt.Sprintf("%d", c)
}
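
// Percentage returns, as a string, what percentage of the counter's value the
// given current value represents.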
func (c counter) Percentage(current uint64) string {
	return fmt.Sprintf("%d", current*100/uint64(c))
}

// stat stores the size and count of a category of database entries.
type stat struct {
	size  common.StorageSize
	count counter
}

// Add adds the given size to the stat and increases the entry count by one.
func (s *stat) Add(size common.StorageSize) {
	s.size += size
	s.count++
}
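
// Size returns the accumulated size as a human-readable string.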
func (s *stat) Size() string {
	return s.size.String()
}
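
// Count returns the accumulated number of entries as a string.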
func (s *stat) Count() string {
	return s.count.String()
}

// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
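//
// keyPrefix and keyStart bound the key-value iteration; a hypothetical call
// inspecting the whole store would pass nil for both:
//
//	if err := InspectDatabase(db, nil, nil); err != nil {
//		log.Error("Database inspection failed", "err", err)
//	}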
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
	it := db.NewIterator(keyPrefix, keyStart)
	defer it.Release()

	var (
		count  int64
		start  = time.Now()
		logged = time.Now()

		// Key-value store statistics
		headers         stat
		bodies          stat
		receipts        stat
		tds             stat
		numHashPairings stat
		hashNumPairings stat
		tries           stat
		codes           stat
		txLookups       stat
		accountSnaps    stat
		storageSnaps    stat
		preimages       stat
		bloomBits       stat
		beaconHeaders   stat
		cliqueSnaps     stat

		// Ancient store statistics
		ancientHeadersSize  common.StorageSize
		ancientBodiesSize   common.StorageSize
		ancientReceiptsSize common.StorageSize
		ancientTdsSize      common.StorageSize
		ancientHashesSize   common.StorageSize

		// Les statistics
		chtTrieNodes   stat
		bloomTrieNodes stat

		// Meta- and unaccounted data
		metadata    stat
		unaccounted stat

		// Totals
		total common.StorageSize
	)
	// Inspect the key-value database first.
	for it.Next() {
		var (
			key  = it.Key()
			size = common.StorageSize(len(key) + len(it.Value()))
		)
		total += size
		switch {
		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
			headers.Add(size)
		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
			bodies.Add(size)
		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
			receipts.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
			tds.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
			numHashPairings.Add(size)
		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
			hashNumPairings.Add(size)
		case len(key) == common.HashLength:
			tries.Add(size)
		case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
			codes.Add(size)
		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
			txLookups.Add(size)
		case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
			accountSnaps.Add(size)
		case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
			storageSnaps.Add(size)
		case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
			preimages.Add(size)
		case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, BloomBitsIndexPrefix):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
			beaconHeaders.Add(size)
		case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
			cliqueSnaps.Add(size)
		case bytes.HasPrefix(key, []byte("cht-")) ||
			bytes.HasPrefix(key, []byte("chtIndexV2-")) ||
			bytes.HasPrefix(key, []byte("chtRootV2-")): // Canonical hash trie
			chtTrieNodes.Add(size)
		case bytes.HasPrefix(key, []byte("blt-")) ||
			bytes.HasPrefix(key, []byte("bltIndex-")) ||
			bytes.HasPrefix(key, []byte("bltRoot-")): // Bloomtrie sub
			bloomTrieNodes.Add(size)
		default:
			var accounted bool
			for _, meta := range [][]byte{
				databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
				lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
				snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
				uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
			} {
				if bytes.Equal(key, meta) {
					metadata.Add(size)
					accounted = true
					break
				}
			}
			if !accounted {
				unaccounted.Add(size)
			}
		}
		count++
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
	}
	// Inspect the append-only file store next.
	ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize}
	for i, category := range []string{chainFreezerHeaderTable, chainFreezerBodiesTable, chainFreezerReceiptTable, chainFreezerHashTable, chainFreezerDifficultyTable} {
		if size, err := db.AncientSize(category); err == nil {
			*ancientSizes[i] += common.StorageSize(size)
			total += common.StorageSize(size)
		}
	}
	// Get the number of ancient rows inside the freezer.
	ancients := counter(0)
	if count, err := db.Ancients(); err == nil {
		ancients = counter(count)
	}
	// Display the database statistics.
	stats := [][]string{
		{"Key-Value store", "Headers", headers.Size(), headers.Count()},
		{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
		{"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
		{"Key-Value store", "Difficulties", tds.Size(), tds.Count()},
		{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
		{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
		{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
		{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
		{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
		{"Key-Value store", "Trie nodes", tries.Size(), tries.Count()},
		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
		{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
		{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
		{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
		{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
		{"Ancient store", "Bodies", ancientBodiesSize.String(), ancients.String()},
		{"Ancient store", "Receipt lists", ancientReceiptsSize.String(), ancients.String()},
		{"Ancient store", "Difficulties", ancientTdsSize.String(), ancients.String()},
		{"Ancient store", "Block number->hash", ancientHashesSize.String(), ancients.String()},
		{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
		{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Database", "Category", "Size", "Items"})
	table.SetFooter([]string{"", "Total", total.String(), " "})
	table.AppendBulk(stats)
	table.Render()

	if unaccounted.size > 0 {
		log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
	}
	return nil
}