// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"math/big"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
	data, _ := db.Ancient(freezerHashTable, number)
	if len(data) == 0 {
		data, _ = db.Get(headerHashKey(number))
		// In the background the freezer is moving data from leveldb into flat files.
		// During the first check the ancient database may not contain the data yet,
		// but by the time we reach into leveldb the data may already have been moved.
		// That would result in a spurious not-found error, so check the freezer again.
		if len(data) == 0 {
			data, _ = db.Ancient(freezerHashTable, number)
		}
	}
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}
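
// canonicalHeader is an illustrative sketch, not part of the upstream rawdb API: it shows
// how the number->hash mapping above composes with the header accessor further down in
// this file to resolve the canonical header at a given height.
func canonicalHeader(db ethdb.Reader, number uint64) *types.Header {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return nil // no canonical mapping at this height
	}
	return ReadHeader(db, hash, number)
}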

// WriteCanonicalHash stores the hash assigned to a canonical block number.
func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
		log.Crit("Failed to store number to hash mapping", "err", err)
	}
}

// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Delete(headerHashKey(number)); err != nil {
		log.Crit("Failed to delete number to hash mapping", "err", err)
	}
}

// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
// both canonical and reorged forks included.
func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
	prefix := headerKeyPrefix(number)

	hashes := make([]common.Hash, 0, 1)
	it := db.NewIterator(prefix, nil)
	defer it.Release()

	for it.Next() {
		if key := it.Key(); len(key) == len(prefix)+32 {
			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
		}
	}
	return hashes
}

// ReadAllCanonicalHashes retrieves all canonical number and hash mappings in the
// given chain range. If the number of accumulated entries reaches the given
// threshold, the iteration is aborted and the partial result is returned.
func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
	// Short circuit if the limit is 0.
	if limit == 0 {
		return nil, nil
	}
	var (
		numbers []uint64
		hashes  []common.Hash
	)
	// Construct the key prefix of the start point.
	start, end := headerHashKey(from), headerHashKey(to)
	it := db.NewIterator(nil, start)
	defer it.Release()

	for it.Next() {
		if bytes.Compare(it.Key(), end) >= 0 {
			break
		}
		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
			hashes = append(hashes, common.BytesToHash(it.Value()))
			// If the accumulated entries reach the limit threshold, return.
			if len(numbers) >= limit {
				break
			}
		}
	}
	return numbers, hashes
}
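
// dumpCanonicalRange is an illustrative sketch, not part of the upstream rawdb API: it
// pages through the canonical mappings in bounded batches, using the limit parameter of
// ReadAllCanonicalHashes to cap the amount of data pulled per call. The batch size of
// 1024 is an arbitrary choice for the example.
func dumpCanonicalRange(db ethdb.Iteratee, from, to uint64) {
	for from < to {
		numbers, hashes := ReadAllCanonicalHashes(db, from, to, 1024)
		if len(numbers) == 0 {
			return // no more canonical entries in the range
		}
		for i, number := range numbers {
			log.Info("Canonical mapping", "number", number, "hash", hashes[i])
		}
		// Resume right after the last returned entry.
		from = numbers[len(numbers)-1] + 1
	}
}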

// ReadHeaderNumber returns the header number assigned to a hash.
func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
	data, _ := db.Get(headerNumberKey(hash))
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteHeaderNumber stores the hash->number mapping.
func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	key := headerNumberKey(hash)
	enc := encodeBlockNumber(number)
	if err := db.Put(key, enc); err != nil {
		log.Crit("Failed to store hash to number mapping", "err", err)
	}
}

// DeleteHeaderNumber removes the hash->number mapping.
func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headHeaderKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadHeaderHash stores the hash of the current canonical head header.
func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last header's hash", "err", err)
	}
}

// ReadHeadBlockHash retrieves the hash of the current canonical head block.
func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last block's hash", "err", err)
	}
}

// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headFastBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last fast block's hash", "err", err)
	}
}

// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
// is fully synced, the last pivot will always be nil.
func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(lastPivotKey)
	if len(data) == 0 {
		return nil
	}
	var pivot uint64
	if err := rlp.DecodeBytes(data, &pivot); err != nil {
		log.Error("Invalid pivot block number in database", "err", err)
		return nil
	}
	return &pivot
}

// WriteLastPivotNumber stores the number of the last pivot block.
func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
	enc, err := rlp.EncodeToBytes(pivot)
	if err != nil {
		log.Crit("Failed to encode pivot block number", "err", err)
	}
	if err := db.Put(lastPivotKey, enc); err != nil {
		log.Crit("Failed to store pivot block number", "err", err)
	}
}

// ReadFastTrieProgress retrieves the number of trie nodes fast synced to allow
// reporting correct numbers across restarts.
func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
	data, _ := db.Get(fastTrieProgressKey)
	if len(data) == 0 {
		return 0
	}
	return new(big.Int).SetBytes(data).Uint64()
}

// WriteFastTrieProgress stores the fast sync trie progress counter to support
// retrieving it across restarts.
func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
		log.Crit("Failed to store fast sync trie progress", "err", err)
	}
}

// ReadTxIndexTail retrieves the number of the oldest indexed block whose
// transaction indices have been indexed. If the corresponding entry does not
// exist in the database, the indexing has been finished.
func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(txIndexTailKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteTxIndexTail stores the number of the oldest indexed block into the
// database.
func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store the transaction index tail", "err", err)
	}
}

// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(fastTxLookupLimitKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteFastTxLookupLimit stores the tx lookup limit used in fast sync into the database.
func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
	}
}

// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// canonical data.
	data, _ := db.Ancient(freezerHeaderTable, number)
	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
		return data
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(headerKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first check the ancient database may not contain the data yet,
	// but by the time we reach into leveldb the data may already have been moved.
	// That would result in a spurious not-found error, so check the freezer again.
	data, _ = db.Ancient(freezerHeaderTable, number)
	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
		return data
	}
	return nil // Can't find the data anywhere.
}

// HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadHeader retrieves the block header corresponding to the hash.
func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
	data := ReadHeaderRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	header := new(types.Header)
	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
		log.Error("Invalid block header RLP", "hash", hash, "err", err)
		return nil
	}
	return header
}
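
// headerByHash is an illustrative sketch, not part of the upstream rawdb API: since
// headers are keyed by number and hash, a lookup by hash alone first resolves the
// hash->number mapping and then reads the header itself.
func headerByHash(db ethdb.Reader, hash common.Hash) *types.Header {
	number := ReadHeaderNumber(db, hash)
	if number == nil {
		return nil // unknown hash, no number mapping stored
	}
	return ReadHeader(db, hash, *number)
}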

// WriteHeader stores a block header into the database and also stores the hash-
// to-number mapping.
func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
	var (
		hash   = header.Hash()
		number = header.Number.Uint64()
	)
	// Write the hash -> number mapping
	WriteHeaderNumber(db, hash, number)

	// Write the encoded header
	data, err := rlp.EncodeToBytes(header)
	if err != nil {
		log.Crit("Failed to RLP encode header", "err", err)
	}
	key := headerKey(number, hash)
	if err := db.Put(key, data); err != nil {
		log.Crit("Failed to store header", "err", err)
	}
}

// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	deleteHeaderWithoutNumber(db, hash, number)
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// deleteHeaderWithoutNumber removes only the block header but does not remove
// the hash to number mapping.
func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerKey(number, hash)); err != nil {
		log.Crit("Failed to delete header", "err", err)
	}
}

// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// canonical data.
	data, _ := db.Ancient(freezerBodiesTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(blockBodyKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first check the ancient database may not contain the data yet,
	// but by the time we reach into leveldb the data may already have been moved.
	// That would result in a spurious not-found error, so check the freezer again.
	data, _ = db.Ancient(freezerBodiesTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the
// canonical block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
	// If it's an ancient one, we don't need the canonical hash
	data, _ := db.Ancient(freezerBodiesTable, number)
	if len(data) == 0 {
		// Need to get the hash
		data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
		// In the background the freezer is moving data from leveldb into flat files.
		// During the first check the ancient database may not contain the data yet,
		// but by the time we reach into leveldb the data may already have been moved.
		// That would result in a spurious not-found error, so check the freezer again.
		if len(data) == 0 {
			data, _ = db.Ancient(freezerBodiesTable, number)
		}
	}
	return data
}

// WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
		log.Crit("Failed to store block body", "err", err)
	}
}

// HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadBody retrieves the block body corresponding to the hash.
func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
	data := ReadBodyRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	body := new(types.Body)
	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
		log.Error("Invalid block body RLP", "hash", hash, "err", err)
		return nil
	}
	return body
}

// WriteBody stores a block body into the database.
func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
	data, err := rlp.EncodeToBytes(body)
	if err != nil {
		log.Crit("Failed to RLP encode body", "err", err)
	}
	WriteBodyRLP(db, hash, number, data)
}

// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
		log.Crit("Failed to delete block body", "err", err)
	}
}

// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// canonical data.
	data, _ := db.Ancient(freezerDifficultyTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(headerTDKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first check the ancient database may not contain the data yet,
	// but by the time we reach into leveldb the data may already have been moved.
	// That would result in a spurious not-found error, so check the freezer again.
	data, _ = db.Ancient(freezerDifficultyTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
	data := ReadTdRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	td := new(big.Int)
	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
		log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
		return nil
	}
	return td
}

// WriteTd stores the total difficulty of a block into the database.
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
	data, err := rlp.EncodeToBytes(td)
	if err != nil {
		log.Crit("Failed to RLP encode block total difficulty", "err", err)
	}
	if err := db.Put(headerTDKey(number, hash), data); err != nil {
		log.Crit("Failed to store block total difficulty", "err", err)
	}
}

// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerTDKey(number, hash)); err != nil {
		log.Crit("Failed to delete block total difficulty", "err", err)
	}
}

// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// canonical data.
	data, _ := db.Ancient(freezerReceiptTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(blockReceiptsKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first check the ancient database may not contain the data yet,
	// but by the time we reach into leveldb the data may already have been moved.
	// That would result in a spurious not-found error, so check the freezer again.
	data, _ = db.Ancient(freezerReceiptTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
// The receipt metadata fields are not guaranteed to be populated, so they
// should not be used. Use ReadReceipts instead if the metadata is needed.
func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	// Convert the receipts from their storage form to their internal representation
	storageReceipts := []*types.ReceiptForStorage{}
	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}
	receipts := make(types.Receipts, len(storageReceipts))
	for i, storageReceipt := range storageReceipts {
		receipts[i] = (*types.Receipt)(storageReceipt)
	}
	return receipts
}

// ReadReceipts retrieves all the transaction receipts belonging to a block, including
// its corresponding metadata fields. If it is unable to populate these metadata
// fields then nil is returned.
//
// The current implementation populates these metadata fields by reading the receipts'
// corresponding block body, so if the block body is not found it will return nil even
// if the receipt itself is stored.
func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
	// We're deriving many fields from the block body, retrieve beside the receipt
	receipts := ReadRawReceipts(db, hash, number)
	if receipts == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	return receipts
}
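
// canonicalReceipts is an illustrative sketch, not part of the upstream rawdb API: it
// resolves the canonical hash for a block number and then reads the fully derived
// receipts for it, returning nil when either the mapping or the receipts are missing.
func canonicalReceipts(db ethdb.Reader, number uint64, config *params.ChainConfig) types.Receipts {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return nil // no canonical block at this height
	}
	return ReadReceipts(db, hash, number, config)
}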

// WriteReceipts stores all the transaction receipts belonging to a block.
func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
	// Convert the receipts into their storage form and serialize them
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		log.Crit("Failed to encode block receipts", "err", err)
	}
	// Store the flattened receipt slice
	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
		log.Crit("Failed to store block receipts", "err", err)
	}
}

// DeleteReceipts removes all receipt data associated with a block hash.
func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
		log.Crit("Failed to delete block receipts", "err", err)
	}
}

// ReadBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body. If either the header or body could not
// be retrieved nil is returned.
//
// Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet).
func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
	header := ReadHeader(db, hash, number)
	if header == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		return nil
	}
	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
}

// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
	WriteHeader(db, block.Header())
}
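
// writeBlockWithMetadata is an illustrative sketch, not part of the upstream rawdb API: it
// groups the usual per-block writes (header and body via WriteBlock, plus the block's
// total difficulty and receipts) into one call for callers that persist complete blocks.
func writeBlockWithMetadata(db ethdb.KeyValueWriter, block *types.Block, td *big.Int, receipts types.Receipts) {
	WriteBlock(db, block)
	WriteTd(db, block.Hash(), block.NumberU64(), td)
	WriteReceipts(db, block.Hash(), block.NumberU64(), receipts)
}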

// WriteAncientBlock writes entire block data into the ancient store and returns the total written size.
func WriteAncientBlock(db ethdb.AncientWriter, block *types.Block, receipts types.Receipts, td *big.Int) int {
	// Encode all block components to RLP format.
	headerBlob, err := rlp.EncodeToBytes(block.Header())
	if err != nil {
		log.Crit("Failed to RLP encode block header", "err", err)
	}
	bodyBlob, err := rlp.EncodeToBytes(block.Body())
	if err != nil {
		log.Crit("Failed to RLP encode body", "err", err)
	}
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	receiptBlob, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		log.Crit("Failed to RLP encode block receipts", "err", err)
	}
	tdBlob, err := rlp.EncodeToBytes(td)
	if err != nil {
		log.Crit("Failed to RLP encode block total difficulty", "err", err)
	}
	// Write all blobs into the flat files.
	err = db.AppendAncient(block.NumberU64(), block.Hash().Bytes(), headerBlob, bodyBlob, receiptBlob, tdBlob)
	if err != nil {
		log.Crit("Failed to write block data to ancient store", "err", err)
	}
	return len(headerBlob) + len(bodyBlob) + len(receiptBlob) + len(tdBlob) + common.HashLength
}

// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	DeleteHeader(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

// DeleteBlockWithoutNumber removes all block data associated with a hash, except
// the hash to number mapping.
func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	deleteHeaderWithoutNumber(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

const badBlockToKeep = 10

type badBlock struct {
	Header *types.Header
	Body   *types.Body
}

// badBlockList implements the sort interface to allow sorting a list of
// bad blocks by their number in the reverse order.
type badBlockList []*badBlock

func (s badBlockList) Len() int { return len(s) }
func (s badBlockList) Less(i, j int) bool {
	return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
}
func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// ReadBadBlock retrieves the bad block with the corresponding block hash.
func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks badBlockList
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	for _, bad := range badBlocks {
		if bad.Header.Hash() == hash {
			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)
		}
	}
	return nil
}

// ReadAllBadBlocks retrieves all the bad blocks in the database.
// All returned blocks are sorted in reverse order by number.
func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks badBlockList
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	var blocks []*types.Block
	for _, bad := range badBlocks {
		blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles))
	}
	return blocks
}

// WriteBadBlock serializes the bad block into the database. If the number of
// accumulated bad blocks exceeds the limit, the oldest will be dropped.
func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		log.Warn("Failed to load old bad blocks", "error", err)
	}
	var badBlocks badBlockList
	if len(blob) > 0 {
		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
			log.Crit("Failed to decode old bad blocks", "error", err)
		}
	}
	for _, b := range badBlocks {
		if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
			log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
			return
		}
	}
	badBlocks = append(badBlocks, &badBlock{
		Header: block.Header(),
		Body:   block.Body(),
	})
	sort.Sort(sort.Reverse(badBlocks))
	if len(badBlocks) > badBlockToKeep {
		badBlocks = badBlocks[:badBlockToKeep]
	}
	data, err := rlp.EncodeToBytes(badBlocks)
	if err != nil {
		log.Crit("Failed to encode bad blocks", "err", err)
	}
	if err := db.Put(badBlockKey, data); err != nil {
		log.Crit("Failed to write bad blocks", "err", err)
	}
}

// DeleteBadBlocks deletes all the bad blocks from the database.
func DeleteBadBlocks(db ethdb.KeyValueWriter) {
	if err := db.Delete(badBlockKey); err != nil {
		log.Crit("Failed to delete bad blocks", "err", err)
	}
}

// FindCommonAncestor returns the last common ancestor of two block headers.
func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
	// Walk the higher of the two headers down to the height of the lower one.
	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
	}
	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	// Step both chains back in lockstep until the headers converge.
	for a.Hash() != b.Hash() {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	return a
}

// ReadHeadHeader returns the current canonical head header.
func ReadHeadHeader(db ethdb.Reader) *types.Header {
	headHeaderHash := ReadHeadHeaderHash(db)
	if headHeaderHash == (common.Hash{}) {
		return nil
	}
	headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
	if headHeaderNumber == nil {
		return nil
	}
	return ReadHeader(db, headHeaderHash, *headHeaderNumber)
}

// ReadHeadBlock returns the current canonical head block.
func ReadHeadBlock(db ethdb.Reader) *types.Block {
	headBlockHash := ReadHeadBlockHash(db)
	if headBlockHash == (common.Hash{}) {
		return nil
	}
	headBlockNumber := ReadHeaderNumber(db, headBlockHash)
	if headBlockNumber == nil {
		return nil
	}
	return ReadBlock(db, headBlockHash, *headBlockNumber)
}
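
// headBlockReceipts is an illustrative sketch, not part of the upstream rawdb API: it
// combines the head accessors above, loading the current head block together with the
// derived receipts for it (nil receipts if none are stored for the head).
func headBlockReceipts(db ethdb.Reader, config *params.ChainConfig) (*types.Block, types.Receipts) {
	block := ReadHeadBlock(db)
	if block == nil {
		return nil, nil
	}
	return block, ReadReceipts(db, block.Hash(), block.NumberU64(), config)
}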