blockchain.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/hashicorp/golang-lru"
)

var (
	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)

	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)

	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)

	ErrNoGenesis = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
	receiptsCacheLimit  = 32
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	badBlockLimit       = 10
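
	// triesInMemory is the number of recent blocks whose state tries are kept
	// in memory before older tries become eligible for garbage collection or
	// flushing to disk (see Stop and writeBlockWithState below).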
	triesInMemory = 128

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// Changelog:
	//
	// - Version 4
	//   The following incompatible database changes were added:
	//     * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	//     * the `Bloom` field of receipt is deleted
	//     * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	// - Version 5
	//   The following incompatible database changes were added:
	//     * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
	//     * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
	//       receipts' corresponding block
	// - Version 6
	//   The following incompatible database changes were added:
	//     * Transaction lookup information stores the corresponding block number instead of block hash
	BlockChainVersion uint64 = 6
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
}

// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks into the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transactions. The validation of the state
// is done in the second part of the Validator. A failure aborts the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represent the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one, whereas GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	blockProcFeed event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	chainmu sync.RWMutex // blockchain insertion lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be accessed atomically
	// procInterrupt must be accessed atomically
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine     consensus.Engine
	validator  Validator  // Block and state validator interface
	prefetcher Prefetcher // Block state prefetcher interface
	processor  Processor  // Block transaction processor interface
	vmConfig   vm.Config

	badBlocks      *lru.Cache               // Bad block cache
	shouldPreserve func(*types.Block) bool  // Function used to determine whether to preserve the given block.
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
		}
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:    chainConfig,
		cacheConfig:    cacheConfig,
		db:             db,
		triegc:         prque.New(nil),
		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
		badBlocks:      badBlocks,
	}
	bc.validator = NewBlockValidator(chainConfig, bc, engine)
	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
	bc.processor = NewStateProcessor(chainConfig, bc, engine)

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.SetHead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}

func (bc *BlockChain) getProcInterrupt() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
	return &bc.vmConfig
}

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
		}
	}
	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))

	return nil
}

// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db ethdb.Writer, hash common.Hash, num uint64) {
		rawdb.DeleteBody(db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either block reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()

	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())

	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash,
// regardless of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exist
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.chainmu.Lock()
	bc.currentBlock.Store(block)
	bc.chainmu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache)
}

// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
	return bc.stateCache
}

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	rawdb.WriteBlock(bc.db, genesis)

	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)

	return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
	for {
		// Abort if we've rewound to a head block that does have associated state
		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
			return nil
		}
		// Otherwise rewind one block and recheck state availability there
		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
		if block == nil {
			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
		}
		*head = block
	}
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.chainmu.RLock()
	defer bc.chainmu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}
		if err := block.EncodeRLP(w); err != nil {
			return err
		}
		if time.Since(reported) >= statsReportLimit {
			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}
	return nil
}

// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `chainmu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
	rawdb.WriteHeadBlockHash(bc.db, block.Hash())

	bc.currentBlock.Store(block)

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())
		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())

		bc.currentFastBlock.Store(block)
	}
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBody(bc.db, hash, *number)
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyRLPCache.Add(hash, body)
	return body
}

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	if bc.blockCache.Contains(hash) {
		return true
	}
	return rawdb.HasBody(bc.db, hash, number)
}

// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
	if !bc.HasBlock(hash, number) {
		return false
	}
	if bc.receiptsCache.Contains(hash) {
		return true
	}
	return rawdb.HasReceipts(bc.db, hash, number)
}

// HasState checks if the state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and its associated state trie are fully
// present in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := rawdb.ReadBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return bc.GetBlock(hash, *number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := rawdb.ReadCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	if receipts, ok := bc.receiptsCache.Get(hash); ok {
		return receipts.(types.Receipts)
	}
	number := rawdb.ReadHeaderNumber(bc.db, hash)
	if number == nil {
		return nil
	}
	receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
	if receipts == nil {
		return nil
	}
	bc.receiptsCache.Add(hash, receipts)
	return receipts
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, *number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		*number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.TrieDirtyDisabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus is the status of a block write.
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := receipts.DeriveFields(bc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
			return i, fmt.Errorf("failed to derive receipts data: %v", err)
		}
		// Write all the data out into the database
		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
		rawdb.WriteTxLookupEntries(batch, block)

		stats.processed++
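		// Flush the accumulated writes whenever the batch grows past the ideal
		// size, so memory usage stays bounded during large receipt imports.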
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.chainmu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
			bc.currentFastBlock.Store(head)
		}
	}
	bc.chainmu.Unlock()

	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
		"size", common.StorageSize(bytes),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}
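
// lastWrite tracks the block number whose state trie was most recently flushed
// to disk by writeBlockWithState; it is used to pace the periodic trie commits.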
var lastWrite uint64

// writeBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	rawdb.WriteBlock(bc.db, block)

	return nil
}

// writeKnownBlock updates the head block flag with a known block
// and introduces a chain reorg if necessary.
func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
	bc.wg.Add(1)
	defer bc.wg.Done()

	current := bc.CurrentBlock()
	if block.ParentHash() != current.Hash() {
		if err := bc.reorg(current, block); err != nil {
			return err
		}
	}
	// Write the positional metadata for transaction/receipt lookups.
	// Preimages here is empty, ignore it.
	rawdb.WriteTxLookupEntries(bc.db, block)

	bc.insert(block)
	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	return bc.writeBlockWithState(block, receipts, state)
}

// writeBlockWithState writes the block and all associated state to the database,
// but it expects the chain mutex to be held.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Regardless of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	rawdb.WriteBlock(bc.db, block)

	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.TrieDirtyDisabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
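		// Track the block number alongside the trie root so old tries can be
		// dereferenced once they fall out of the retention window (see the GC
		// loop below).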
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				triedb.Cap(limit - ethdb.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			chosen := current - triesInMemory

			// If we exceeded our time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If the header is missing (canonical chain behind), we're reorging a low
				// diff sidechain. Suspend committing until this operation is completed.
				header := bc.GetHeaderByNumber(chosen)
				if header == nil {
					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
				} else {
					// If we're exceeding limits but haven't reached a large enough memory gap,
					// warn the user that the system is becoming unstable.
					if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					}
					// Flush an entire trie and restart the counters
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash))
			}
		}
	}

	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then preferentially select
		// the block generated by the local miner as the canonical block.
		if block.NumberU64() < currentBlock.NumberU64() {
			reorg = true
		} else if block.NumberU64() == currentBlock.NumberU64() {
			var currentPreserve, blockPreserve bool
			if bc.shouldPreserve != nil {
				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
			}
			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
		}
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction/receipt lookups and preimages
		rawdb.WriteTxLookupEntries(batch, block)
		rawdb.WritePreimages(batch, state.Preimages())

		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}

// addFutureBlock checks if the block is within the max allowed window to get
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
	if block.Time() > max {
		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
	}
	bc.futureBlocks.Add(block.Hash(), block)
	return nil
}

// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well as an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil
	}
	bc.blockProcFeed.Send(true)
	defer bc.blockProcFeed.Send(false)

	// Remove already known canon-blocks
	var (
		block, prev *types.Block
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		block = chain[i]
		prev = chain[i-1]
		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	bc.chainmu.Lock()
	n, events, logs, err := bc.insertChain(chain, true)
	bc.chainmu.Unlock()
	bc.wg.Done()

	bc.PostChainEvents(events, logs)
	return n, err
}

// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then a new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again.
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
	// If the chain is terminating, don't even bother starting up
	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
		return 0, nil, nil, nil
	}
	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = verifySeals
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Peek the error for the first block to decide the directing import logic
	it := newInsertIterator(chain, results, bc.validator)

	block, err := it.next()

	// Left-trim all the known blocks
	if err == ErrKnownBlock {
		// First block (and state) is known
		//   1. We did a roll-back, and should now do a re-import
		//   2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
		//      from the canonical chain, which has not been verified.
		// Skip all known blocks that are behind us
		var (
			current  = bc.CurrentBlock()
			localTd  = bc.GetTd(current.Hash(), current.NumberU64())
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
		)
		for block != nil && err == ErrKnownBlock {
			externTd = new(big.Int).Add(externTd, block.Difficulty())
			if localTd.Cmp(externTd) < 0 {
				break
			}
			stats.ignored++
			block, err = it.next()
		}
		// The remaining blocks are still known blocks, the only scenario here is:
		// During the fast sync, the pivot point is already submitted but rollback
		// happens. Then node resets the head full block to a lower height via `rollback`
		// and leaves a few known blocks in the database.
		//
		// When node runs a fast sync again, it can re-import a batch of known blocks via
		// `insertChain` while a part of them have higher total difficulty than current
		// head full block (new pivot point).
		for block != nil && err == ErrKnownBlock {
			if err := bc.writeKnownBlock(block); err != nil {
				return it.index, nil, nil, err
			}
			lastCanon = block

			block, err = it.next()
		}
		// Falls through to the block import
	}
  1062. switch {
  1063. // First block is pruned, insert as sidechain and reorg only if TD grows enough
  1064. case err == consensus.ErrPrunedAncestor:
  1065. return bc.insertSideChain(block, it)
  1066. // First block is future, shove it (and all children) to the future queue (unknown ancestor)
  1067. case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
  1068. for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
  1069. if err := bc.addFutureBlock(block); err != nil {
  1070. return it.index, events, coalescedLogs, err
  1071. }
  1072. block, err = it.next()
  1073. }
  1074. stats.queued += it.processed()
  1075. stats.ignored += it.remaining()
  1076. // If there are any still remaining, mark as ignored
  1077. return it.index, events, coalescedLogs, err
  1078. // Some other error occurred, abort
  1079. case err != nil:
  1080. stats.ignored += len(it.chain)
  1081. bc.reportBlock(block, nil, err)
  1082. return it.index, events, coalescedLogs, err
  1083. }
  1084. // No validation errors for the first block (or chain prefix skipped)
  1085. for ; block != nil && err == nil; block, err = it.next() {
  1086. // If the chain is terminating, stop processing blocks
  1087. if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1088. log.Debug("Premature abort during blocks processing")
  1089. break
  1090. }
  1091. // If the header is a banned one, straight out abort
  1092. if BadHashes[block.Hash()] {
  1093. bc.reportBlock(block, nil, ErrBlacklistedHash)
  1094. return it.index, events, coalescedLogs, ErrBlacklistedHash
  1095. }
// Retrieve the parent block and its state to execute on top
  1097. start := time.Now()
  1098. parent := it.previous()
  1099. if parent == nil {
  1100. parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
  1101. }
  1102. statedb, err := state.New(parent.Root, bc.stateCache)
  1103. if err != nil {
  1104. return it.index, events, coalescedLogs, err
  1105. }
  1106. // If we have a followup block, run that against the current state to pre-cache
  1107. // transactions and probabilistically some of the account/storage trie nodes.
  1108. var followupInterrupt uint32
  1109. if !bc.cacheConfig.TrieCleanNoPrefetch {
  1110. if followup, err := it.peek(); followup != nil && err == nil {
  1111. go func(start time.Time) {
  1112. throwaway, _ := state.New(parent.Root, bc.stateCache)
  1113. bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
  1114. blockPrefetchExecuteTimer.Update(time.Since(start))
  1115. if atomic.LoadUint32(&followupInterrupt) == 1 {
  1116. blockPrefetchInterruptMeter.Mark(1)
  1117. }
  1118. }(time.Now())
  1119. }
  1120. }
  1121. // Process block using the parent state as reference point
  1122. substart := time.Now()
  1123. receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
  1124. if err != nil {
  1125. bc.reportBlock(block, receipts, err)
  1126. atomic.StoreUint32(&followupInterrupt, 1)
  1127. return it.index, events, coalescedLogs, err
  1128. }
  1129. // Update the metrics touched during block processing
  1130. accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them
  1131. storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them
  1132. accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
  1133. storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them
  1134. triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
  1135. trieproc := statedb.AccountReads + statedb.AccountUpdates
  1136. trieproc += statedb.StorageReads + statedb.StorageUpdates
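// Subtract the time spent on trie reads/updates and hashing so the timer below reflects pure execution.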
  1137. blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
  1138. // Validate the state using the default validator
  1139. substart = time.Now()
  1140. if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
  1141. bc.reportBlock(block, receipts, err)
  1142. atomic.StoreUint32(&followupInterrupt, 1)
  1143. return it.index, events, coalescedLogs, err
  1144. }
  1145. proctime := time.Since(start)
  1146. // Update the metrics touched during block validation
  1147. accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
  1148. storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them
  1149. blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))
  1150. // Write the block to the chain and get the status.
  1151. substart = time.Now()
  1152. status, err := bc.writeBlockWithState(block, receipts, statedb)
  1153. if err != nil {
  1154. atomic.StoreUint32(&followupInterrupt, 1)
  1155. return it.index, events, coalescedLogs, err
  1156. }
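// Signal the follow-up block prefetcher (if one is running) to stop now that this block has been written.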
  1157. atomic.StoreUint32(&followupInterrupt, 1)
  1158. // Update the metrics touched during block commit
  1159. accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
  1160. storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
  1161. blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits)
  1162. blockInsertTimer.UpdateSince(start)
  1163. switch status {
  1164. case CanonStatTy:
  1165. log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1166. "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1167. "elapsed", common.PrettyDuration(time.Since(start)),
  1168. "root", block.Root())
  1169. coalescedLogs = append(coalescedLogs, logs...)
  1170. events = append(events, ChainEvent{block, block.Hash(), logs})
  1171. lastCanon = block
  1172. // Only count canonical blocks for GC processing time
  1173. bc.gcproc += proctime
  1174. case SideStatTy:
  1175. log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
  1176. "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1177. "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1178. "root", block.Root())
  1179. events = append(events, ChainSideEvent{block})
  1180. }
  1181. stats.processed++
  1182. stats.usedGas += usedGas
  1183. dirty, _ := bc.stateCache.TrieDB().Size()
  1184. stats.report(chain, it.index, dirty)
  1185. }
  1186. // Any blocks remaining here? The only ones we care about are the future ones
  1187. if block != nil && err == consensus.ErrFutureBlock {
  1188. if err := bc.addFutureBlock(block); err != nil {
  1189. return it.index, events, coalescedLogs, err
  1190. }
  1191. block, err = it.next()
  1192. for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
  1193. if err := bc.addFutureBlock(block); err != nil {
  1194. return it.index, events, coalescedLogs, err
  1195. }
  1196. stats.queued++
  1197. }
  1198. }
  1199. stats.ignored += it.remaining()
  1200. // Append a single chain head event if we've progressed the chain
  1201. if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1202. events = append(events, ChainHeadEvent{lastCanon})
  1203. }
  1204. return it.index, events, coalescedLogs, err
  1205. }
  1206. // insertSideChain is called when an import batch hits upon a pruned ancestor
  1207. // error, which happens when a sidechain with a sufficiently old fork-block is
  1208. // found.
  1209. //
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if its TD exceeds that of the current chain.
  1212. func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
  1213. var (
  1214. externTd *big.Int
  1215. current = bc.CurrentBlock()
  1216. )
  1217. // The first sidechain block error is already verified to be ErrPrunedAncestor.
  1218. // Since we don't import them here, we expect ErrUnknownAncestor for the remaining
// ones. Any other error means that the block is invalid, and should not be written
  1220. // to disk.
  1221. err := consensus.ErrPrunedAncestor
  1222. for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
  1223. // Check the canonical state root for that number
  1224. if number := block.NumberU64(); current.NumberU64() >= number {
  1225. canonical := bc.GetBlockByNumber(number)
  1226. if canonical != nil && canonical.Hash() == block.Hash() {
// Not a sidechain block, this is a re-import of a canon block which has its state pruned
  1228. continue
  1229. }
  1230. if canonical != nil && canonical.Root() == block.Root() {
  1231. // This is most likely a shadow-state attack. When a fork is imported into the
  1232. // database, and it eventually reaches a block height which is not pruned, we
// just found that the state already exists! This means that the sidechain block
  1234. // refers to a state which already exists in our canon chain.
  1235. //
  1236. // If left unchecked, we would now proceed importing the blocks, without actually
  1237. // having verified the state of the previous blocks.
  1238. log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
  1239. // If someone legitimately side-mines blocks, they would still be imported as usual. However,
  1240. // we cannot risk writing unverified blocks to disk when they obviously target the pruning
  1241. // mechanism.
  1242. return it.index, nil, nil, errors.New("sidechain ghost-state attack")
  1243. }
  1244. }
  1245. if externTd == nil {
  1246. externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
  1247. }
  1248. externTd = new(big.Int).Add(externTd, block.Difficulty())
  1249. if !bc.HasBlock(block.Hash(), block.NumberU64()) {
  1250. start := time.Now()
  1251. if err := bc.writeBlockWithoutState(block, externTd); err != nil {
  1252. return it.index, nil, nil, err
  1253. }
  1254. log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
  1255. "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1256. "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1257. "root", block.Root())
  1258. }
  1259. }
// At this point, we've written all sidechain blocks to the database. The loop ended
// either on some other error or because all blocks were processed. If there was some
// other error, we can ignore the rest of those blocks.
  1263. //
  1264. // If the externTd was larger than our local TD, we now need to reimport the previous
  1265. // blocks to regenerate the required state
  1266. localTd := bc.GetTd(current.Hash(), current.NumberU64())
  1267. if localTd.Cmp(externTd) > 0 {
  1268. log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
  1269. return it.index, nil, nil, err
  1270. }
  1271. // Gather all the sidechain hashes (full blocks may be memory heavy)
  1272. var (
  1273. hashes []common.Hash
  1274. numbers []uint64
  1275. )
  1276. parent := it.previous()
  1277. for parent != nil && !bc.HasState(parent.Root) {
  1278. hashes = append(hashes, parent.Hash())
  1279. numbers = append(numbers, parent.Number.Uint64())
  1280. parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
  1281. }
  1282. if parent == nil {
  1283. return it.index, nil, nil, errors.New("missing parent")
  1284. }
  1285. // Import all the pruned blocks to make the state available
  1286. var (
  1287. blocks []*types.Block
  1288. memory common.StorageSize
  1289. )
  1290. for i := len(hashes) - 1; i >= 0; i-- {
  1291. // Append the next block to our batch
  1292. block := bc.GetBlock(hashes[i], numbers[i])
  1293. blocks = append(blocks, block)
  1294. memory += block.Size()
  1295. // If memory use grew too large, import and continue. Sadly we need to discard
  1296. // all raised events and logs from notifications since we're too heavy on the
  1297. // memory here.
  1298. if len(blocks) >= 2048 || memory > 64*1024*1024 {
  1299. log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
  1300. if _, _, _, err := bc.insertChain(blocks, false); err != nil {
  1301. return 0, nil, nil, err
  1302. }
  1303. blocks, memory = blocks[:0], 0
  1304. // If the chain is terminating, stop processing blocks
  1305. if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1306. log.Debug("Premature abort during blocks processing")
  1307. return 0, nil, nil, nil
  1308. }
  1309. }
  1310. }
  1311. if len(blocks) > 0 {
  1312. log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
  1313. return bc.insertChain(blocks, false)
  1314. }
  1315. return 0, nil, nil, nil
  1316. }
// reorg takes two blocks, an old chain and a new chain, and will reconstruct the
// blocks and insert them to be part of the new canonical chain. It also accumulates
// potential missing transactions and posts an event about them.
  1320. func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
  1321. var (
  1322. newChain types.Blocks
  1323. oldChain types.Blocks
  1324. commonBlock *types.Block
  1325. deletedTxs types.Transactions
  1326. addedTxs types.Transactions
  1327. deletedLogs []*types.Log
  1328. rebirthLogs []*types.Log
  1329. // collectLogs collects the logs that were generated during the
  1330. // processing of the block that corresponds with the given hash.
  1331. // These logs are later announced as deleted or reborn
  1332. collectLogs = func(hash common.Hash, removed bool) {
  1333. number := bc.hc.GetBlockNumber(hash)
  1334. if number == nil {
  1335. return
  1336. }
  1337. receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
  1338. for _, receipt := range receipts {
  1339. for _, log := range receipt.Logs {
  1340. l := *log
  1341. if removed {
  1342. l.Removed = true
  1343. deletedLogs = append(deletedLogs, &l)
  1344. } else {
  1345. rebirthLogs = append(rebirthLogs, &l)
  1346. }
  1347. }
  1348. }
  1349. }
  1350. )
  1351. // Reduce the longer chain to the same number as the shorter one
  1352. if oldBlock.NumberU64() > newBlock.NumberU64() {
  1353. // Old chain is longer, gather all transactions and logs as deleted ones
  1354. for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
  1355. oldChain = append(oldChain, oldBlock)
  1356. deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1357. collectLogs(oldBlock.Hash(), true)
  1358. }
  1359. } else {
  1360. // New chain is longer, stash all blocks away for subsequent insertion
  1361. for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
  1362. newChain = append(newChain, newBlock)
  1363. }
  1364. }
  1365. if oldBlock == nil {
  1366. return fmt.Errorf("invalid old chain")
  1367. }
  1368. if newBlock == nil {
  1369. return fmt.Errorf("invalid new chain")
  1370. }
  1371. // Both sides of the reorg are at the same number, reduce both until the common
  1372. // ancestor is found
  1373. for {
  1374. // If the common ancestor was found, bail out
  1375. if oldBlock.Hash() == newBlock.Hash() {
  1376. commonBlock = oldBlock
  1377. break
  1378. }
  1379. // Remove an old block as well as stash away a new block
  1380. oldChain = append(oldChain, oldBlock)
  1381. deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
  1382. collectLogs(oldBlock.Hash(), true)
  1383. newChain = append(newChain, newBlock)
  1384. // Step back with both chains
  1385. oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
  1386. if oldBlock == nil {
  1387. return fmt.Errorf("invalid old chain")
  1388. }
  1389. newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
  1390. if newBlock == nil {
  1391. return fmt.Errorf("invalid new chain")
  1392. }
  1393. }
  1394. // Ensure the user sees large reorgs
  1395. if len(oldChain) > 0 && len(newChain) > 0 {
  1396. logFn := log.Debug
  1397. if len(oldChain) > 63 {
  1398. logFn = log.Warn
  1399. }
  1400. logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
  1401. "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
  1402. } else {
  1403. log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
  1404. }
// Insert the new chain (skipping the head block at index 0), taking care of the
// proper incremental order by iterating newChain in reverse, since it was collected
// from head towards the common ancestor.
  1407. for i := len(newChain) - 1; i >= 1; i-- {
  1408. // Insert the block in the canonical way, re-writing history
  1409. bc.insert(newChain[i])
  1410. // Collect reborn logs due to chain reorg
  1411. collectLogs(newChain[i].Hash(), false)
  1412. // Write lookup entries for hash based transaction/receipt searches
  1413. rawdb.WriteTxLookupEntries(bc.db, newChain[i])
  1414. addedTxs = append(addedTxs, newChain[i].Transactions()...)
  1415. }
  1416. // When transactions get deleted from the database, the receipts that were
  1417. // created in the fork must also be deleted
  1418. batch := bc.db.NewBatch()
  1419. for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
  1420. rawdb.DeleteTxLookupEntry(batch, tx.Hash())
  1421. }
  1422. // Delete any canonical number assignments above the new head
  1423. number := bc.CurrentBlock().NumberU64()
  1424. for i := number + 1; ; i++ {
  1425. hash := rawdb.ReadCanonicalHash(bc.db, i)
  1426. if hash == (common.Hash{}) {
  1427. break
  1428. }
  1429. rawdb.DeleteCanonicalHash(batch, i)
  1430. }
  1431. batch.Write()
  1432. // If any logs need to be fired, do it now. In theory we could avoid creating
// this goroutine if there are no events to fire, but realistically that only
  1434. // ever happens if we're reorging empty blocks, which will only happen on idle
  1435. // networks where performance is not an issue either way.
  1436. //
  1437. // TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
  1438. // event ordering?
  1439. go func() {
  1440. if len(deletedLogs) > 0 {
  1441. bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
  1442. }
  1443. if len(rebirthLogs) > 0 {
  1444. bc.logsFeed.Send(rebirthLogs)
  1445. }
  1446. if len(oldChain) > 0 {
  1447. for _, block := range oldChain {
  1448. bc.chainSideFeed.Send(ChainSideEvent{Block: block})
  1449. }
  1450. }
  1451. }()
  1452. return nil
  1453. }
  1454. // PostChainEvents iterates over the events generated by a chain insertion and
  1455. // posts them into the event feed.
  1456. // TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
  1457. func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
  1458. // post event logs for further processing
  1459. if logs != nil {
  1460. bc.logsFeed.Send(logs)
  1461. }
  1462. for _, event := range events {
  1463. switch ev := event.(type) {
  1464. case ChainEvent:
  1465. bc.chainFeed.Send(ev)
  1466. case ChainHeadEvent:
  1467. bc.chainHeadFeed.Send(ev)
  1468. case ChainSideEvent:
  1469. bc.chainSideFeed.Send(ev)
  1470. }
  1471. }
  1472. }
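// postChainEventsExample is an illustrative sketch, not part of the original file,
// showing how the events and logs produced by an insert are handed to PostChainEvents.
// The direct insertChain call, the locking and the variable names are assumptions of
// this sketch; the exported insert path performs additional bookkeeping.
func postChainEventsExample(bc *BlockChain, blocks types.Blocks) error {
	// insertChain expects the chain mutex to be held by its caller.
	bc.chainmu.Lock()
	_, events, logs, err := bc.insertChain(blocks, true)
	bc.chainmu.Unlock()
	// Forward whatever was generated, even if the import stopped early.
	bc.PostChainEvents(events, logs)
	return err
}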
  1473. func (bc *BlockChain) update() {
  1474. futureTimer := time.NewTicker(5 * time.Second)
  1475. defer futureTimer.Stop()
  1476. for {
  1477. select {
  1478. case <-futureTimer.C:
  1479. bc.procFutureBlocks()
  1480. case <-bc.quit:
  1481. return
  1482. }
  1483. }
  1484. }
  1485. // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
  1486. func (bc *BlockChain) BadBlocks() []*types.Block {
  1487. blocks := make([]*types.Block, 0, bc.badBlocks.Len())
  1488. for _, hash := range bc.badBlocks.Keys() {
  1489. if blk, exist := bc.badBlocks.Peek(hash); exist {
  1490. block := blk.(*types.Block)
  1491. blocks = append(blocks, block)
  1492. }
  1493. }
  1494. return blocks
  1495. }
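// dumpBadBlocksExample is an illustrative sketch, not part of the original file,
// showing how the bad-block cache above might be inspected for debugging.
func dumpBadBlocksExample(bc *BlockChain) {
	for _, block := range bc.BadBlocks() {
		log.Info("Bad block seen", "number", block.NumberU64(), "hash", block.Hash())
	}
}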
  1496. // addBadBlock adds a bad block to the bad-block LRU cache
  1497. func (bc *BlockChain) addBadBlock(block *types.Block) {
  1498. bc.badBlocks.Add(block.Hash(), block)
  1499. }
  1500. // reportBlock logs a bad block error.
  1501. func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
  1502. bc.addBadBlock(block)
  1503. var receiptString string
  1504. for i, receipt := range receipts {
  1505. receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
  1506. i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
  1507. receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
  1508. }
  1509. log.Error(fmt.Sprintf(`
  1510. ########## BAD BLOCK #########
  1511. Chain config: %v
  1512. Number: %v
  1513. Hash: 0x%x
  1514. %v
  1515. Error: %v
  1516. ##############################
  1517. `, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
  1518. }
// InsertHeaderChain attempts to insert the given header chain into the local
// chain, possibly creating a reorg. If an error is returned, it will return the
// index number of the failing header as well as an error describing what went wrong.
  1522. //
// The checkFreq parameter can be used to fine-tune whether nonce verification
// should be done or not. The reason behind the optional check is that some
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
  1527. func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
  1528. start := time.Now()
  1529. if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
  1530. return i, err
  1531. }
  1532. // Make sure only one thread manipulates the chain at once
  1533. bc.chainmu.Lock()
  1534. defer bc.chainmu.Unlock()
  1535. bc.wg.Add(1)
  1536. defer bc.wg.Done()
  1537. whFunc := func(header *types.Header) error {
  1538. _, err := bc.hc.WriteHeader(header)
  1539. return err
  1540. }
  1541. return bc.hc.InsertHeaderChain(chain, whFunc, start)
  1542. }
  1543. // CurrentHeader retrieves the current head header of the canonical chain. The
  1544. // header is retrieved from the HeaderChain's internal cache.
  1545. func (bc *BlockChain) CurrentHeader() *types.Header {
  1546. return bc.hc.CurrentHeader()
  1547. }
  1548. // GetTd retrieves a block's total difficulty in the canonical chain from the
  1549. // database by hash and number, caching it if found.
  1550. func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
  1551. return bc.hc.GetTd(hash, number)
  1552. }
  1553. // GetTdByHash retrieves a block's total difficulty in the canonical chain from the
  1554. // database by hash, caching it if found.
  1555. func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
  1556. return bc.hc.GetTdByHash(hash)
  1557. }
  1558. // GetHeader retrieves a block header from the database by hash and number,
  1559. // caching it if found.
  1560. func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
  1561. return bc.hc.GetHeader(hash, number)
  1562. }
  1563. // GetHeaderByHash retrieves a block header from the database by hash, caching it if
  1564. // found.
  1565. func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
  1566. return bc.hc.GetHeaderByHash(hash)
  1567. }
  1568. // HasHeader checks if a block header is present in the database or not, caching
  1569. // it if present.
  1570. func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
  1571. return bc.hc.HasHeader(hash, number)
  1572. }
  1573. // GetBlockHashesFromHash retrieves a number of block hashes starting at a given
  1574. // hash, fetching towards the genesis block.
  1575. func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
  1576. return bc.hc.GetBlockHashesFromHash(hash, max)
  1577. }
  1578. // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
  1579. // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
  1580. // number of blocks to be individually checked before we reach the canonical chain.
  1581. //
  1582. // Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
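//
// For example (illustrative), GetAncestor(h, 100, 10, &limit) would resolve the hash
// and number of the block at height 90 on h's chain, subject to the maxNonCanonical
// budget.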
  1583. func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
  1584. bc.chainmu.RLock()
  1585. defer bc.chainmu.RUnlock()
  1586. return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
  1587. }
  1588. // GetHeaderByNumber retrieves a block header from the database by number,
  1589. // caching it (associated with its hash) if found.
  1590. func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
  1591. return bc.hc.GetHeaderByNumber(number)
  1592. }
  1593. // Config retrieves the blockchain's chain configuration.
  1594. func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
  1595. // Engine retrieves the blockchain's consensus engine.
  1596. func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
  1597. // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
  1598. func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
  1599. return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
  1600. }
  1601. // SubscribeChainEvent registers a subscription of ChainEvent.
  1602. func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
  1603. return bc.scope.Track(bc.chainFeed.Subscribe(ch))
  1604. }
  1605. // SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
  1606. func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
  1607. return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
  1608. }
  1609. // SubscribeChainSideEvent registers a subscription of ChainSideEvent.
  1610. func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
  1611. return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
  1612. }
  1613. // SubscribeLogsEvent registers a subscription of []*types.Log.
  1614. func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
  1615. return bc.scope.Track(bc.logsFeed.Subscribe(ch))
  1616. }
  1617. // SubscribeBlockProcessingEvent registers a subscription of bool where true means
  1618. // block processing has started while false means it has stopped.
  1619. func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
  1620. return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
  1621. }
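// subscribeChainHeadExample is an illustrative sketch, not part of the original file,
// showing how a consumer might track chain head events via the subscription API above.
// The channel size and log message are arbitrary choices of this sketch.
func subscribeChainHeadExample(bc *BlockChain) {
	ch := make(chan ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(ch)
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-ch:
			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
		case <-sub.Err():
			return
		}
	}
}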