|
|
@@ -30,10 +30,17 @@ import (
|
|
|
)
|
|
|
|
|
|
// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
|
|
|
-func ReadCanonicalHash(db ethdb.AncientReader, number uint64) common.Hash {
|
|
|
- data, _ := db.Ancient("hashes", number)
|
|
|
+func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
|
|
|
+ data, _ := db.Ancient(freezerHashTable, number)
|
|
|
if len(data) == 0 {
|
|
|
data, _ = db.Get(headerHashKey(number))
|
|
|
+ // In the background, the freezer is moving data from leveldb into flat files.
|
|
|
+ // So during the first check of the ancient db the data may not be there yet,
|
|
|
+ // but by the time we look in leveldb it may already have been moved. That
|
|
|
+ // would result in a spurious not-found error.
|
|
|
+ if len(data) == 0 {
|
|
|
+ data, _ = db.Ancient(freezerHashTable, number)
|
|
|
+ }
|
|
|
}
|
|
|
if len(data) == 0 {
|
|
|
return common.Hash{}
|
|
|
@@ -42,29 +49,28 @@ func ReadCanonicalHash(db ethdb.AncientReader, number uint64) common.Hash {
|
|
|
}
|
|
|
|
|
|
// WriteCanonicalHash stores the hash assigned to a canonical block number.
|
|
|
-func WriteCanonicalHash(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
+func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
|
|
|
log.Crit("Failed to store number to hash mapping", "err", err)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
// DeleteCanonicalHash removes the number to hash canonical mapping.
|
|
|
-func DeleteCanonicalHash(db ethdb.Writer, number uint64) {
|
|
|
+func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
|
|
|
if err := db.Delete(headerHashKey(number)); err != nil {
|
|
|
log.Crit("Failed to delete number to hash mapping", "err", err)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-// readAllHashes retrieves all the hashes assigned to blocks at a certain heights,
|
|
|
+// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
|
|
|
// both canonical and reorged forks included.
|
|
|
-//
|
|
|
-// This method is a helper for the chain reader. It should never be exposed to the
|
|
|
-// outside world.
|
|
|
-func readAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
|
|
|
+func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
|
|
|
prefix := headerKeyPrefix(number)
|
|
|
|
|
|
hashes := make([]common.Hash, 0, 1)
|
|
|
it := db.NewIteratorWithPrefix(prefix)
|
|
|
+ defer it.Release()
|
|
|
+
|
|
|
for it.Next() {
|
|
|
if key := it.Key(); len(key) == len(prefix)+32 {
|
|
|
hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
|
|
|
@@ -74,7 +80,7 @@ func readAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
|
|
|
}
|
|
|
|
|
|
// ReadHeaderNumber returns the header number assigned to a hash.
|
|
|
-func ReadHeaderNumber(db ethdb.Reader, hash common.Hash) *uint64 {
|
|
|
+func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
|
|
|
data, _ := db.Get(headerNumberKey(hash))
|
|
|
if len(data) != 8 {
|
|
|
return nil
|
|
|
@@ -83,8 +89,15 @@ func ReadHeaderNumber(db ethdb.Reader, hash common.Hash) *uint64 {
|
|
|
return &number
|
|
|
}
|
|
|
|
|
|
+// DeleteHeaderNumber removes hash to number mapping.
|
|
|
+func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
|
|
|
+ if err := db.Delete(headerNumberKey(hash)); err != nil {
|
|
|
+ log.Crit("Failed to delete hash to number mapping", "err", err)
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
|
|
|
-func ReadHeadHeaderHash(db ethdb.Reader) common.Hash {
|
|
|
+func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
|
|
|
data, _ := db.Get(headHeaderKey)
|
|
|
if len(data) == 0 {
|
|
|
return common.Hash{}
|
|
|
@@ -93,14 +106,14 @@ func ReadHeadHeaderHash(db ethdb.Reader) common.Hash {
|
|
|
}
|
|
|
|
|
|
// WriteHeadHeaderHash stores the hash of the current canonical head header.
|
|
|
-func WriteHeadHeaderHash(db ethdb.Writer, hash common.Hash) {
|
|
|
+func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
|
|
|
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
|
|
|
log.Crit("Failed to store last header's hash", "err", err)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
// ReadHeadBlockHash retrieves the hash of the current canonical head block.
|
|
|
-func ReadHeadBlockHash(db ethdb.Reader) common.Hash {
|
|
|
+func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
|
|
|
data, _ := db.Get(headBlockKey)
|
|
|
if len(data) == 0 {
|
|
|
return common.Hash{}
|
|
|
@@ -109,14 +122,14 @@ func ReadHeadBlockHash(db ethdb.Reader) common.Hash {
|
|
|
}
|
|
|
|
|
|
// WriteHeadBlockHash stores the head block's hash.
|
|
|
-func WriteHeadBlockHash(db ethdb.Writer, hash common.Hash) {
|
|
|
+func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
|
|
|
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
|
|
|
log.Crit("Failed to store last block's hash", "err", err)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
|
|
|
-func ReadHeadFastBlockHash(db ethdb.Reader) common.Hash {
|
|
|
+func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
|
|
|
data, _ := db.Get(headFastBlockKey)
|
|
|
if len(data) == 0 {
|
|
|
return common.Hash{}
|
|
|
@@ -125,7 +138,7 @@ func ReadHeadFastBlockHash(db ethdb.Reader) common.Hash {
|
|
|
}
|
|
|
|
|
|
// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
|
|
|
-func WriteHeadFastBlockHash(db ethdb.Writer, hash common.Hash) {
|
|
|
+func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
|
|
|
if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
|
|
|
log.Crit("Failed to store last fast block's hash", "err", err)
|
|
|
}
|
|
|
@@ -133,7 +146,7 @@ func WriteHeadFastBlockHash(db ethdb.Writer, hash common.Hash) {
|
|
|
|
|
|
// ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow
|
|
|
// reporting correct numbers across restarts.
|
|
|
-func ReadFastTrieProgress(db ethdb.Reader) uint64 {
|
|
|
+func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
|
|
|
data, _ := db.Get(fastTrieProgressKey)
|
|
|
if len(data) == 0 {
|
|
|
return 0
|
|
|
@@ -143,24 +156,31 @@ func ReadFastTrieProgress(db ethdb.Reader) uint64 {
|
|
|
|
|
|
// WriteFastTrieProgress stores the fast sync trie process counter to support
|
|
|
// retrieving it across restarts.
|
|
|
-func WriteFastTrieProgress(db ethdb.Writer, count uint64) {
|
|
|
+func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
|
|
|
if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
|
|
|
log.Crit("Failed to store fast sync trie progress", "err", err)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
|
|
|
-func ReadHeaderRLP(db ethdb.AncientReader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
- data, _ := db.Ancient("headers", number)
|
|
|
+func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
+ data, _ := db.Ancient(freezerHeaderTable, number)
|
|
|
if len(data) == 0 {
|
|
|
data, _ = db.Get(headerKey(number, hash))
|
|
|
+ // In the background, the freezer is moving data from leveldb into flat files.
|
|
|
+ // So during the first check of the ancient db the data may not be there yet,
|
|
|
+ // but by the time we look in leveldb it may already have been moved. That
|
|
|
+ // would result in a spurious not-found error.
|
|
|
+ if len(data) == 0 {
|
|
|
+ data, _ = db.Ancient(freezerHeaderTable, number)
|
|
|
+ }
|
|
|
}
|
|
|
return data
|
|
|
}
|
|
|
|
|
|
// HasHeader verifies the existence of a block header corresponding to the hash.
|
|
|
-func HasHeader(db ethdb.AncientReader, hash common.Hash, number uint64) bool {
|
|
|
- if has, err := db.Ancient("hashes", number); err == nil && common.BytesToHash(has) == hash {
|
|
|
+func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
|
|
+ if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
|
|
|
return true
|
|
|
}
|
|
|
if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
|
|
|
@@ -170,7 +190,7 @@ func HasHeader(db ethdb.AncientReader, hash common.Hash, number uint64) bool {
|
|
|
}
|
|
|
|
|
|
// ReadHeader retrieves the block header corresponding to the hash.
|
|
|
-func ReadHeader(db ethdb.AncientReader, hash common.Hash, number uint64) *types.Header {
|
|
|
+func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
|
|
|
data := ReadHeaderRLP(db, hash, number)
|
|
|
if len(data) == 0 {
|
|
|
return nil
|
|
|
@@ -185,7 +205,7 @@ func ReadHeader(db ethdb.AncientReader, hash common.Hash, number uint64) *types.
|
|
|
|
|
|
// WriteHeader stores a block header into the database and also stores the hash-
|
|
|
// to-number mapping.
|
|
|
-func WriteHeader(db ethdb.Writer, header *types.Header) {
|
|
|
+func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
|
|
|
// Write the hash -> number mapping
|
|
|
var (
|
|
|
hash = header.Hash()
|
|
|
@@ -208,7 +228,7 @@ func WriteHeader(db ethdb.Writer, header *types.Header) {
|
|
|
}
|
|
|
|
|
|
// DeleteHeader removes all block header data associated with a hash.
|
|
|
-func DeleteHeader(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
+func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
deleteHeaderWithoutNumber(db, hash, number)
|
|
|
if err := db.Delete(headerNumberKey(hash)); err != nil {
|
|
|
log.Crit("Failed to delete hash to number mapping", "err", err)
|
|
|
@@ -217,31 +237,38 @@ func DeleteHeader(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
|
|
|
// deleteHeaderWithoutNumber removes only the block header but does not remove
|
|
|
// the hash to number mapping.
|
|
|
-func deleteHeaderWithoutNumber(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
+func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
if err := db.Delete(headerKey(number, hash)); err != nil {
|
|
|
log.Crit("Failed to delete header", "err", err)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
|
|
|
-func ReadBodyRLP(db ethdb.AncientReader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
- data, _ := db.Ancient("bodies", number)
|
|
|
+func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
+ data, _ := db.Ancient(freezerBodiesTable, number)
|
|
|
if len(data) == 0 {
|
|
|
data, _ = db.Get(blockBodyKey(number, hash))
|
|
|
+ // In the background, the freezer is moving data from leveldb into flat files.
|
|
|
+ // So during the first check of the ancient db the data may not be there yet,
|
|
|
+ // but by the time we look in leveldb it may already have been moved. That
|
|
|
+ // would result in a spurious not-found error.
|
|
|
+ if len(data) == 0 {
|
|
|
+ data, _ = db.Ancient(freezerBodiesTable, number)
|
|
|
+ }
|
|
|
}
|
|
|
return data
|
|
|
}
|
|
|
|
|
|
// WriteBodyRLP stores an RLP encoded block body into the database.
|
|
|
-func WriteBodyRLP(db ethdb.Writer, hash common.Hash, number uint64, rlp rlp.RawValue) {
|
|
|
+func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
|
|
|
if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
|
|
|
log.Crit("Failed to store block body", "err", err)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
// HasBody verifies the existence of a block body corresponding to the hash.
|
|
|
-func HasBody(db ethdb.AncientReader, hash common.Hash, number uint64) bool {
|
|
|
- if has, err := db.Ancient("hashes", number); err == nil && common.BytesToHash(has) == hash {
|
|
|
+func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
|
|
+ if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
|
|
|
return true
|
|
|
}
|
|
|
if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
|
|
|
@@ -251,7 +278,7 @@ func HasBody(db ethdb.AncientReader, hash common.Hash, number uint64) bool {
|
|
|
}
|
|
|
|
|
|
// ReadBody retrieves the block body corresponding to the hash.
|
|
|
-func ReadBody(db ethdb.AncientReader, hash common.Hash, number uint64) *types.Body {
|
|
|
+func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
|
|
|
data := ReadBodyRLP(db, hash, number)
|
|
|
if len(data) == 0 {
|
|
|
return nil
|
|
|
@@ -265,7 +292,7 @@ func ReadBody(db ethdb.AncientReader, hash common.Hash, number uint64) *types.Bo
|
|
|
}
|
|
|
|
|
|
// WriteBody stores a block body into the database.
|
|
|
-func WriteBody(db ethdb.Writer, hash common.Hash, number uint64, body *types.Body) {
|
|
|
+func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
|
|
|
data, err := rlp.EncodeToBytes(body)
|
|
|
if err != nil {
|
|
|
log.Crit("Failed to RLP encode body", "err", err)
|
|
|
@@ -274,23 +301,30 @@ func WriteBody(db ethdb.Writer, hash common.Hash, number uint64, body *types.Bod
|
|
|
}
|
|
|
|
|
|
// DeleteBody removes all block body data associated with a hash.
|
|
|
-func DeleteBody(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
+func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
if err := db.Delete(blockBodyKey(number, hash)); err != nil {
|
|
|
log.Crit("Failed to delete block body", "err", err)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
|
|
|
-func ReadTdRLP(db ethdb.AncientReader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
- data, _ := db.Ancient("diffs", number)
|
|
|
+func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
+ data, _ := db.Ancient(freezerDifficultyTable, number)
|
|
|
if len(data) == 0 {
|
|
|
data, _ = db.Get(headerTDKey(number, hash))
|
|
|
+ // In the background, the freezer is moving data from leveldb into flat files.
|
|
|
+ // So during the first check of the ancient db the data may not be there yet,
|
|
|
+ // but by the time we look in leveldb it may already have been moved. That
|
|
|
+ // would result in a spurious not-found error.
|
|
|
+ if len(data) == 0 {
|
|
|
+ data, _ = db.Ancient(freezerDifficultyTable, number)
|
|
|
+ }
|
|
|
}
|
|
|
return data
|
|
|
}
|
|
|
|
|
|
// ReadTd retrieves a block's total difficulty corresponding to the hash.
|
|
|
-func ReadTd(db ethdb.AncientReader, hash common.Hash, number uint64) *big.Int {
|
|
|
+func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
|
|
|
data := ReadTdRLP(db, hash, number)
|
|
|
if len(data) == 0 {
|
|
|
return nil
|
|
|
@@ -304,7 +338,7 @@ func ReadTd(db ethdb.AncientReader, hash common.Hash, number uint64) *big.Int {
|
|
|
}
|
|
|
|
|
|
// WriteTd stores the total difficulty of a block into the database.
|
|
|
-func WriteTd(db ethdb.Writer, hash common.Hash, number uint64, td *big.Int) {
|
|
|
+func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
|
|
|
data, err := rlp.EncodeToBytes(td)
|
|
|
if err != nil {
|
|
|
log.Crit("Failed to RLP encode block total difficulty", "err", err)
|
|
|
@@ -315,7 +349,7 @@ func WriteTd(db ethdb.Writer, hash common.Hash, number uint64, td *big.Int) {
|
|
|
}
|
|
|
|
|
|
// DeleteTd removes all block total difficulty data associated with a hash.
|
|
|
-func DeleteTd(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
+func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
if err := db.Delete(headerTDKey(number, hash)); err != nil {
|
|
|
log.Crit("Failed to delete block total difficulty", "err", err)
|
|
|
}
|
|
|
@@ -323,8 +357,8 @@ func DeleteTd(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
|
|
|
// HasReceipts verifies the existence of all the transaction receipts belonging
|
|
|
// to a block.
|
|
|
-func HasReceipts(db ethdb.AncientReader, hash common.Hash, number uint64) bool {
|
|
|
- if has, err := db.Ancient("hashes", number); err == nil && common.BytesToHash(has) == hash {
|
|
|
+func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
|
|
+ if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
|
|
|
return true
|
|
|
}
|
|
|
if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
|
|
|
@@ -334,10 +368,17 @@ func HasReceipts(db ethdb.AncientReader, hash common.Hash, number uint64) bool {
|
|
|
}
|
|
|
|
|
|
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
|
|
|
-func ReadReceiptsRLP(db ethdb.AncientReader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
- data, _ := db.Ancient("receipts", number)
|
|
|
+func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
+ data, _ := db.Ancient(freezerReceiptTable, number)
|
|
|
if len(data) == 0 {
|
|
|
data, _ = db.Get(blockReceiptsKey(number, hash))
|
|
|
+ // In the background, the freezer is moving data from leveldb into flat files.
|
|
|
+ // So during the first check of the ancient db the data may not be there yet,
|
|
|
+ // but by the time we look in leveldb it may already have been moved. That
|
|
|
+ // would result in a spurious not-found error.
|
|
|
+ if len(data) == 0 {
|
|
|
+ data, _ = db.Ancient(freezerReceiptTable, number)
|
|
|
+ }
|
|
|
}
|
|
|
return data
|
|
|
}
|
|
|
@@ -345,7 +386,7 @@ func ReadReceiptsRLP(db ethdb.AncientReader, hash common.Hash, number uint64) rl
|
|
|
// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
|
|
|
// The receipt metadata fields are not guaranteed to be populated, so they
|
|
|
// should not be used. Use ReadReceipts instead if the metadata is needed.
|
|
|
-func ReadRawReceipts(db ethdb.AncientReader, hash common.Hash, number uint64) types.Receipts {
|
|
|
+func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
|
|
|
// Retrieve the flattened receipt slice
|
|
|
data := ReadReceiptsRLP(db, hash, number)
|
|
|
if len(data) == 0 {
|
|
|
@@ -371,7 +412,7 @@ func ReadRawReceipts(db ethdb.AncientReader, hash common.Hash, number uint64) ty
|
|
|
// The current implementation populates these metadata fields by reading the receipts'
|
|
|
// corresponding block body, so if the block body is not found it will return nil even
|
|
|
// if the receipt itself is stored.
|
|
|
-func ReadReceipts(db ethdb.AncientReader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
|
|
|
+func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
|
|
|
// We're deriving many fields from the block body, retrieve beside the receipt
|
|
|
receipts := ReadRawReceipts(db, hash, number)
|
|
|
if receipts == nil {
|
|
|
@@ -390,7 +431,7 @@ func ReadReceipts(db ethdb.AncientReader, hash common.Hash, number uint64, confi
|
|
|
}
|
|
|
|
|
|
// WriteReceipts stores all the transaction receipts belonging to a block.
|
|
|
-func WriteReceipts(db ethdb.Writer, hash common.Hash, number uint64, receipts types.Receipts) {
|
|
|
+func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
|
|
|
// Convert the receipts into their storage form and serialize them
|
|
|
storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
|
|
|
for i, receipt := range receipts {
|
|
|
@@ -407,7 +448,7 @@ func WriteReceipts(db ethdb.Writer, hash common.Hash, number uint64, receipts ty
|
|
|
}
|
|
|
|
|
|
// DeleteReceipts removes all receipt data associated with a block hash.
|
|
|
-func DeleteReceipts(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
+func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
|
|
|
log.Crit("Failed to delete block receipts", "err", err)
|
|
|
}
|
|
|
@@ -419,7 +460,7 @@ func DeleteReceipts(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
//
|
|
|
// Note, due to concurrent download of header and block body the header and thus
|
|
|
// canonical hash can be stored in the database but the body data not (yet).
|
|
|
-func ReadBlock(db ethdb.AncientReader, hash common.Hash, number uint64) *types.Block {
|
|
|
+func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
|
|
|
header := ReadHeader(db, hash, number)
|
|
|
if header == nil {
|
|
|
return nil
|
|
|
@@ -432,22 +473,53 @@ func ReadBlock(db ethdb.AncientReader, hash common.Hash, number uint64) *types.B
|
|
|
}
|
|
|
|
|
|
// WriteBlock serializes a block into the database, header and body separately.
|
|
|
-func WriteBlock(db ethdb.Writer, block *types.Block) {
|
|
|
+func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
|
|
|
WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
|
|
|
WriteHeader(db, block.Header())
|
|
|
}
|
|
|
|
|
|
+// WriteAncientBlock writes an entire block's data into the ancient store and returns the total number of bytes written.
|
|
|
+func WriteAncientBlock(db ethdb.AncientWriter, block *types.Block, receipts types.Receipts, td *big.Int) int {
|
|
|
+ // Encode all block components to RLP format.
|
|
|
+ headerBlob, err := rlp.EncodeToBytes(block.Header())
|
|
|
+ if err != nil {
|
|
|
+ log.Crit("Failed to RLP encode block header", "err", err)
|
|
|
+ }
|
|
|
+ bodyBlob, err := rlp.EncodeToBytes(block.Body())
|
|
|
+ if err != nil {
|
|
|
+ log.Crit("Failed to RLP encode body", "err", err)
|
|
|
+ }
|
|
|
+ storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
|
|
|
+ for i, receipt := range receipts {
|
|
|
+ storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
|
|
|
+ }
|
|
|
+ receiptBlob, err := rlp.EncodeToBytes(storageReceipts)
|
|
|
+ if err != nil {
|
|
|
+ log.Crit("Failed to RLP encode block receipts", "err", err)
|
|
|
+ }
|
|
|
+ tdBlob, err := rlp.EncodeToBytes(td)
|
|
|
+ if err != nil {
|
|
|
+ log.Crit("Failed to RLP encode block total difficulty", "err", err)
|
|
|
+ }
|
|
|
+ // Write all blobs to the flat files of the ancient store.
|
|
|
+ err = db.AppendAncient(block.NumberU64(), block.Hash().Bytes(), headerBlob, bodyBlob, receiptBlob, tdBlob)
|
|
|
+ if err != nil {
|
|
|
+ log.Crit("Failed to write block data to ancient store", "err", err)
|
|
|
+ }
|
|
|
+ return len(headerBlob) + len(bodyBlob) + len(receiptBlob) + len(tdBlob) + common.HashLength
|
|
|
+}
|
|
|
+
|
|
|
// DeleteBlock removes all block data associated with a hash.
|
|
|
-func DeleteBlock(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
+func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
DeleteReceipts(db, hash, number)
|
|
|
DeleteHeader(db, hash, number)
|
|
|
DeleteBody(db, hash, number)
|
|
|
DeleteTd(db, hash, number)
|
|
|
}
|
|
|
|
|
|
-// deleteBlockWithoutNumber removes all block data associated with a hash, except
|
|
|
+// DeleteBlockWithoutNumber removes all block data associated with a hash, except
|
|
|
// the hash to number mapping.
|
|
|
-func deleteBlockWithoutNumber(db ethdb.Writer, hash common.Hash, number uint64) {
|
|
|
+func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
DeleteReceipts(db, hash, number)
|
|
|
deleteHeaderWithoutNumber(db, hash, number)
|
|
|
DeleteBody(db, hash, number)
|
|
|
@@ -455,7 +527,7 @@ func deleteBlockWithoutNumber(db ethdb.Writer, hash common.Hash, number uint64)
|
|
|
}
|
|
|
|
|
|
// FindCommonAncestor returns the last common ancestor of two block headers
|
|
|
-func FindCommonAncestor(db ethdb.AncientReader, a, b *types.Header) *types.Header {
|
|
|
+func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
|
|
|
for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
|
|
|
a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
|
|
|
if a == nil {
|