blockchain.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/hashicorp/golang-lru"
)

var (
	blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
	ErrNoGenesis     = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
	receiptsCacheLimit  = 32
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	badBlockLimit       = 10
	triesInMemory       = 128

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	BlockChainVersion = 3
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	Disabled       bool          // Whether to disable trie write caching (archive node)
	TrieCleanLimit int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieDirtyLimit int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieTimeLimit  time.Duration // Time limit after which to flush the current in-memory trie to disk
}
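
// Illustrative sketch (not part of the original file): two common CacheConfig
// choices. Passing nil to NewBlockChain below is equivalent to the second
// literal; the first is a minimal archive-style configuration.
//
//	archive := &CacheConfig{Disabled: true} // archive node: flush every trie to disk
//	pruning := &CacheConfig{                // default full node: in-memory trie GC
//		TrieCleanLimit: 256,
//		TrieDirtyLimit: 256,
//		TrieTimeLimit:  5 * time.Minute,
//	}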

// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks into the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transactions. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represent the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one whereas GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	mu      sync.RWMutex // global mutex for locking chain operations
	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be accessed atomically
	// procInterrupt must be accessed atomically
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine
	processor Processor // block processor interface
	validator Validator // block and state validator interface
	vmConfig  vm.Config

	badBlocks      *lru.Cache              // Bad block cache
	shouldPreserve func(*types.Block) bool // Function used to determine whether to preserve the given block.
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
		}
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:    chainConfig,
		cacheConfig:    cacheConfig,
		db:             db,
		triegc:         prque.New(nil),
		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
		badBlocks:      badBlocks,
	}
	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.SetHead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}
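
// Illustrative usage sketch (not part of the original file): wiring a chain up
// over an in-memory database. ethdb.NewMemDatabase, Genesis.MustCommit and
// ethash.NewFaker are assumed helpers from this era of the codebase.
//
//	db := ethdb.NewMemDatabase()
//	genesis := new(Genesis).MustCommit(db) // writes block 0 so NewBlockChain can find it
//	chain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, ethash.NewFaker(), vm.Config{}, nil)
//	if err != nil {
//		log.Crit("Failed to create chain", "err", err)
//	}
//	defer chain.Stop()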

func (bc *BlockChain) getProcInterrupt() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
		}
	}
	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)))

	return nil
}

// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) {
		rawdb.DeleteBody(db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either blocks reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()

	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())

	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash,
// regardless of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exists
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB(), 0); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.mu.Lock()
	bc.currentBlock.Store(block)
	bc.mu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// SetProcessor sets the processor required for making state modifications.
func (bc *BlockChain) SetProcessor(processor Processor) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.processor = processor
}

// SetValidator sets the validator which is used to validate incoming blocks.
func (bc *BlockChain) SetValidator(validator Validator) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.validator = validator
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache)
}

// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
	return bc.stateCache
}
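
// Illustrative sketch (not part of the original file): reading an account
// balance from the state at the current head; addr is a hypothetical address.
//
//	statedb, err := bc.State()
//	if err != nil {
//		return err
//	}
//	balance := statedb.GetBalance(addr) // *big.Int, in wei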

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	rawdb.WriteBlock(bc.db, genesis)

	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)

	return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
	for {
		// Abort if we've rewound to a head block that does have associated state
		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
			return nil
		}
		// Otherwise rewind one block and recheck state availability there
		(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
	}
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.mu.RLock()
	defer bc.mu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}
		if err := block.EncodeRLP(w); err != nil {
			return err
		}
		if time.Since(reported) >= statsReportLimit {
			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}
	return nil
}
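
// Illustrative sketch (not part of the original file): dumping the chain to an
// RLP file on disk; the path is hypothetical.
//
//	f, err := os.Create("chain.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := bc.Export(f); err != nil { // or bc.ExportN(f, 0, 1000) for a prefix
//		return err
//	}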

// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
	rawdb.WriteHeadBlockHash(bc.db, block.Hash())

	bc.currentBlock.Store(block)

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())
		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())

		bc.currentFastBlock.Store(block)
	}
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBody(bc.db, hash, *number)
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyRLPCache.Add(hash, body)
	return body
}

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	if bc.blockCache.Contains(hash) {
		return true
	}
	return rawdb.HasBody(bc.db, hash, number)
}

// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := rawdb.ReadBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return bc.GetBlock(hash, *number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := rawdb.ReadCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}
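
// Illustrative sketch (not part of the original file): walking the canonical
// chain backwards from the head using the getters above.
//
//	for number := bc.CurrentBlock().NumberU64(); number > 0; number-- {
//		block := bc.GetBlockByNumber(number)
//		if block == nil {
//			break // canonical mapping or body missing (e.g. below a fast-sync pivot)
//		}
//		_ = block.Hash()
//	}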

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	if receipts, ok := bc.receiptsCache.Get(hash); ok {
		return receipts.(types.Receipts)
	}
	number := rawdb.ReadHeaderNumber(bc.db, hash)
	if number == nil {
		return nil
	}
	receipts := rawdb.ReadReceipts(bc.db, hash, *number)
	bc.receiptsCache.Add(hash, receipts)
	return receipts
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, *number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		*number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.Disabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}
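
// Worked example (not part of the original file): with triesInMemory = 128 and
// the head at block 1000, Stop above commits the state roots of blocks 1000
// (offset 0), 999 (offset 1) and 873 (offset 127) before shutting down.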

func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus is the status result of writing a block to the chain.
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)
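
// Illustrative sketch (not part of the original file): interpreting the status
// returned by WriteBlockWithState further below.
//
//	status, err := bc.WriteBlockWithState(block, receipts, statedb)
//	if err != nil {
//		return err
//	}
//	switch status {
//	case CanonStatTy:
//		// block extended (or reorganised) the canonical chain
//	case SideStatTy:
//		// block was stored on a side fork with lower total difficulty
//	}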

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)
	if len(transactions) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}

	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
	return nil
}
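
// Worked example (not part of the original file): with cumulative gas used of
// [21000, 63000, 84000] across three receipts, SetReceiptsData derives per-tx
// GasUsed of [21000, 42000, 21000] (each value minus the previous cumulative).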

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
			return i, fmt.Errorf("failed to set receipts data: %v", err)
		}
		// Write all the data out into the database
		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
		rawdb.WriteTxLookupEntries(batch, block)

		stats.processed++

		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.mu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
			bc.currentFastBlock.Store(head)
		}
	}
	bc.mu.Unlock()

	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
		"size", common.StorageSize(bytes),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}

var lastWrite uint64

// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	rawdb.WriteBlock(bc.db, block)

	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	bc.mu.Lock()
	defer bc.mu.Unlock()

	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrelevant of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	rawdb.WriteBlock(bc.db, block)

	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.Disabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				triedb.Cap(limit - ethdb.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			header := bc.GetHeaderByNumber(current - triesInMemory)
			chosen := header.Number.Uint64()
			// If we exceeded our time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If we're exceeding limits but haven't reached a large enough memory gap,
				// warn the user that the system is becoming unstable.
				if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
				}
				// Flush an entire trie and restart the counters
				triedb.Commit(header.Root, true)
				lastWrite = chosen
				bc.gcproc = 0
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash))
			}
		}
	}

	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then preferentially select
		// the block generated by the local miner as the canonical block.
		if block.NumberU64() < currentBlock.NumberU64() {
			reorg = true
		} else if block.NumberU64() == currentBlock.NumberU64() {
			var currentPreserve, blockPreserve bool
			if bc.shouldPreserve != nil {
				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
			}
			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
		}
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction/receipt lookups and preimages
		rawdb.WriteTxLookupEntries(batch, block)
		rawdb.WritePreimages(batch, state.Preimages())

		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}
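
// Worked example (not part of the original file): if the local head has total
// difficulty 1000 and the incoming block's parent TD is 980 with a block
// difficulty of 25, then externTd = 1005 > 1000 and the write above reorganises
// the canonical chain; at exactly 1000 the tie is broken by block number, the
// shouldPreserve callback and finally a coin flip.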

// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well as an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	n, events, logs, err := bc.insertChain(chain)
	bc.PostChainEvents(events, logs)
	return n, err
}
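
// Illustrative sketch (not part of the original file): extending the chain with
// freshly generated blocks. GenerateChain and ethash.NewFaker are assumed from
// this era of the codebase; db and genesis come from the NewBlockChain sketch above.
//
//	blocks, _ := GenerateChain(params.AllEthashProtocolChanges, genesis, ethash.NewFaker(), db, 10, nil)
//	if i, err := chain.InsertChain(blocks); err != nil {
//		log.Error("Insert failed", "index", i, "err", err)
//	}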

// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil, nil, nil
	}
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())

			return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
				chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	defer bc.wg.Done()

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = true
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// Iterate over the blocks and insert when the verifier permits
	for i, block := range chain {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return i, events, coalescedLogs, ErrBlacklistedHash
		}
		// Wait for the block's verification to complete
		bstart := time.Now()

		err := <-results
		if err == nil {
			err = bc.Validator().ValidateBody(block)
		}
		switch {
		case err == ErrKnownBlock:
			// Block and state both already known. However if the current block is below
			// this number we did a rollback and we should reimport it nonetheless.
			if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
				stats.ignored++
				continue
			}

		case err == consensus.ErrFutureBlock:
			// Allow up to maxTimeFutureBlocks seconds into the future. If this limit is exceeded
			// the chain is discarded and processed at a later time if given.
  994. max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
  995. if block.Time().Cmp(max) > 0 {
  996. return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
  997. }
  998. bc.futureBlocks.Add(block.Hash(), block)
  999. stats.queued++
  1000. continue
  1001. case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
  1002. bc.futureBlocks.Add(block.Hash(), block)
  1003. stats.queued++
  1004. continue
  1005. case err == consensus.ErrPrunedAncestor:
  1006. // Block competing with the canonical chain, store in the db, but don't process
  1007. // until the competitor TD goes above the canonical TD
  1008. currentBlock := bc.CurrentBlock()
  1009. localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
  1010. externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
  1011. if localTd.Cmp(externTd) > 0 {
  1012. if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
  1013. return i, events, coalescedLogs, err
  1014. }
  1015. continue
  1016. }
  1017. // Competitor chain beat canonical, gather all blocks from the common ancestor
  1018. var winner []*types.Block
  1019. parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1020. for !bc.HasState(parent.Root()) {
  1021. winner = append(winner, parent)
  1022. parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
  1023. }
  1024. for j := 0; j < len(winner)/2; j++ {
  1025. winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
  1026. }
  1027. // Import all the pruned blocks to make the state available
  1028. bc.chainmu.Unlock()
  1029. _, evs, logs, err := bc.insertChain(winner)
  1030. bc.chainmu.Lock()
  1031. events, coalescedLogs = evs, logs
  1032. if err != nil {
  1033. return i, events, coalescedLogs, err
  1034. }
  1035. case err != nil:
  1036. bc.reportBlock(block, nil, err)
  1037. return i, events, coalescedLogs, err
  1038. }
  1039. // Create a new statedb using the parent block and report an
  1040. // error if it fails.
  1041. var parent *types.Block
  1042. if i == 0 {
  1043. parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
  1044. } else {
  1045. parent = chain[i-1]
  1046. }
  1047. state, err := state.New(parent.Root(), bc.stateCache)
  1048. if err != nil {
  1049. return i, events, coalescedLogs, err
  1050. }
  1051. // Process block using the parent state as reference point.
  1052. receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
  1053. if err != nil {
  1054. bc.reportBlock(block, receipts, err)
  1055. return i, events, coalescedLogs, err
  1056. }
  1057. // Validate the state using the default validator
  1058. err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
  1059. if err != nil {
  1060. bc.reportBlock(block, receipts, err)
  1061. return i, events, coalescedLogs, err
  1062. }
  1063. proctime := time.Since(bstart)
  1064. // Write the block to the chain and get the status.
  1065. status, err := bc.WriteBlockWithState(block, receipts, state)
  1066. if err != nil {
  1067. return i, events, coalescedLogs, err
  1068. }
  1069. switch status {
  1070. case CanonStatTy:
  1071. log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
  1072. "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
  1073. coalescedLogs = append(coalescedLogs, logs...)
  1074. blockInsertTimer.UpdateSince(bstart)
  1075. events = append(events, ChainEvent{block, block.Hash(), logs})
  1076. lastCanon = block
  1077. // Only count canonical blocks for GC processing time
  1078. bc.gcproc += proctime
  1079. case SideStatTy:
  1080. log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
  1081. common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
  1082. blockInsertTimer.UpdateSince(bstart)
  1083. events = append(events, ChainSideEvent{block})
  1084. }
  1085. stats.processed++
  1086. stats.usedGas += usedGas
  1087. cache, _ := bc.stateCache.TrieDB().Size()
  1088. stats.report(chain, i, cache)
  1089. }
  1090. // Append a single chain head event if we've progressed the chain
  1091. if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1092. events = append(events, ChainHeadEvent{lastCanon})
  1093. }
  1094. return 0, events, coalescedLogs, nil
  1095. }
// insertStats tracks and reports on block insertion.
type insertStats struct {
	queued, processed, ignored int
	usedGas                    uint64
	lastIndex                  int
	startTime                  mclock.AbsTime
}

// statsReportLimit is the time limit during import and export after which we
// always print out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
	// Fetch the timings for the batch
	var (
		now     = mclock.Now()
		elapsed = time.Duration(now) - time.Duration(st.startTime)
	)
	// If we're at the last block of the batch or report period reached, log
	if index == len(chain)-1 || elapsed >= statsReportLimit {
		var (
			end = chain[index]
			txs = countTransactions(chain[st.lastIndex : index+1])
		)
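		// Note: elapsed is a time.Duration, i.e. nanoseconds, so the "mgasps"
		// value below is (usedGas/1e6 Mgas) / (elapsed/1e9 s), which reduces
		// to usedGas*1000/elapsed.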
		context := []interface{}{
			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
			"number", end.Number(), "hash", end.Hash(),
		}
		if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
		}
		context = append(context, []interface{}{"cache", cache}...)
		if st.queued > 0 {
			context = append(context, []interface{}{"queued", st.queued}...)
		}
		if st.ignored > 0 {
			context = append(context, []interface{}{"ignored", st.ignored}...)
		}
		log.Info("Imported new chain segment", context...)
		*st = insertStats{startTime: now, lastIndex: index + 1}
	}
}

func countTransactions(chain []*types.Block) (c int) {
	for _, b := range chain {
		c += len(b.Transactions())
	}
	return c
}

// reorg takes two blocks, an old chain and a new chain, and reconstructs the
// blocks so that they become part of the new canonical chain. It also
// accumulates potentially missing transactions and posts events about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block
		deletedTxs  types.Transactions
		deletedLogs []*types.Log
		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted.
		collectLogs = func(hash common.Hash) {
			// Coalesce logs and set 'Removed'.
			number := bc.hc.GetBlockNumber(hash)
			if number == nil {
				return
			}
			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					del := *log
					del.Removed = true
					deletedLogs = append(deletedLogs, &del)
				}
			}
		}
	)
	// First reduce whichever chain is currently the higher one, so both sides
	// end up at the same height.
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// Reduce the old chain, collecting its transactions and logs as deleted
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
			collectLogs(oldBlock.Hash())
		}
	} else {
		// Reduce the new chain and remember its blocks for insertion later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}
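	// Both chains are now at the same height: walk them back in lockstep,
	// collecting the dropped blocks, transactions and logs, until a common
	// ancestor is found.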
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		oldChain = append(oldChain, oldBlock)
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash())
		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 63 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain, taking care of the proper incremental order
	var addedTxs types.Transactions
	for i := len(newChain) - 1; i >= 0; i-- {
		// insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])
		// write lookup entries for hash based transaction/receipt searches
		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database, the receipts that were
	// created in the fork must also be deleted
	batch := bc.db.NewBatch()
	for _, tx := range diff {
		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
	}
	batch.Write()
	if len(deletedLogs) > 0 {
		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
	}
	if len(oldChain) > 0 {
		go func() {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}()
	}
	return nil
}

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
	// post event logs for further processing
	if logs != nil {
		bc.logsFeed.Send(logs)
	}
	for _, event := range events {
		switch ev := event.(type) {
		case ChainEvent:
			bc.chainFeed.Send(ev)
		case ChainHeadEvent:
			bc.chainHeadFeed.Send(ev)
		case ChainSideEvent:
			bc.chainSideFeed.Send(ev)
		}
	}
}
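
// update periodically retries the blocks queued as "future" blocks (see
// procFutureBlocks), checking every few seconds whether any of them have
// become processable, until the chain is stopped.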
func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() []*types.Block {
	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
	for _, hash := range bc.badBlocks.Keys() {
		if blk, exist := bc.badBlocks.Peek(hash); exist {
			block := blk.(*types.Block)
			blocks = append(blocks, block)
		}
	}
	return blocks
}

// addBadBlock adds a bad block to the bad-block LRU cache
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Hash(), block)
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
	bc.addBadBlock(block)
	var receiptString string
	for _, receipt := range receipts {
		receiptString += fmt.Sprintf("\t%v\n", receipt)
	}
	log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v
Number: %v
Hash: 0x%x
%v
Error: %v
##############################
`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
}

// InsertHeaderChain attempts to insert the given header chain into the local
// chain, possibly creating a reorg. If an error is returned, it will return the
// index number of the failing header as well as an error describing what went
// wrong.
//
// The verify parameter can be used to fine tune whether nonce verification
// should be done or not. The reason behind the optional check is because some
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	start := time.Now()
	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
		return i, err
	}
	// Make sure only one thread manipulates the chain at once
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()
	bc.wg.Add(1)
	defer bc.wg.Done()
	whFunc := func(header *types.Header) error {
		bc.mu.Lock()
		defer bc.mu.Unlock()
		_, err := bc.hc.WriteHeader(header)
		return err
	}
	return bc.hc.InsertHeaderChain(chain, whFunc, start)
}
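
// NOTE: illustrative sketch, not part of the original file. It shows one way a
// caller might import a batch of headers via InsertHeaderChain above; the
// checkFreq value of 100 is an arbitrary choice here and, per the doc comment,
// only tunes how sparsely nonces are verified.
func exampleImportHeaders(bc *BlockChain, headers []*types.Header) error {
	if n, err := bc.InsertHeaderChain(headers, 100); err != nil {
		// Log the index of the offending header and surface the error.
		log.Warn("Header import aborted", "failed-index", n, "err", err)
		return err
	}
	return nil
}
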
// writeHeader writes a header into the local chain, given that its parent is
// already known. If the total difficulty of the newly inserted header becomes
// greater than the current known TD, the canonical chain is re-routed.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (bc *BlockChain) writeHeader(header *types.Header) error {
	bc.wg.Add(1)
	defer bc.wg.Done()
	bc.mu.Lock()
	defer bc.mu.Unlock()
	_, err := bc.hc.WriteHeader(header)
	return err
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()
	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}
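
// NOTE: illustrative sketch, not part of the original file. It resolves the
// hash and number of the block ten behind the current head via GetAncestor;
// the maxNonCanonical budget of 64 is an arbitrary choice.
func exampleTenthAncestor(bc *BlockChain) (common.Hash, uint64) {
	head := bc.CurrentBlock()
	maxNonCanonical := uint64(64)
	return bc.GetAncestor(head.Hash(), head.NumberU64(), 10, &maxNonCanonical)
}
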
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// Config retrieves the blockchain's chain configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
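
// NOTE: illustrative sketch, not part of the original file. It shows a minimal
// consumer of the subscription API above, following canonical head updates
// until the subscription ends; the channel name and buffer size are arbitrary.
func exampleFollowHead(bc *BlockChain) {
	heads := make(chan ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(heads)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-heads:
			log.Info("New canonical head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
		case <-sub.Err():
			// Subscription was torn down (e.g. the chain is shutting down).
			return
		}
	}
}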