// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	lru "github.com/hashicorp/golang-lru"
)

var (
	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)

	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)

	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

	ErrNoGenesis = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
	receiptsCacheLimit  = 32
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	badBlockLimit       = 10
	triesInMemory       = 128

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// During the process of upgrading the database version from 3 to 4,
	// the following incompatible database changes were added.
	// * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	// * the `Bloom` field of receipt is deleted
	// * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	BlockChainVersion uint64 = 4
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	Disabled       bool          // Whether to disable trie write caching (archive node)
	TrieCleanLimit int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieDirtyLimit int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieTimeLimit  time.Duration // Time limit after which to flush the current in-memory trie to disk
}
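
// Editor's note, illustrative sketch (not part of the upstream file): the zero
// value of CacheConfig is not meaningful, so callers either pass nil to
// NewBlockChain to get the defaults, or build a config explicitly. For example,
// an archive node that never prunes trie data might use something like:
//
//	archiveConfig := &CacheConfig{
//		Disabled:       true,            // keep every state trie on disk
//		TrieCleanLimit: 256,             // MB of clean-node cache
//		TrieDirtyLimit: 256,             // MB of dirty nodes before flushing
//		TrieTimeLimit:  5 * time.Minute, // max time between forced flushes
//	}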

// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
// Importing blocks into the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transaction. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represent the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one, whereas GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	blockProcFeed event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	chainmu sync.RWMutex // blockchain insertion lock
	procmu  sync.RWMutex // block processor lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be called atomically
	// procInterrupt must be atomically called
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine    consensus.Engine
	processor Processor // block processor interface
	validator Validator // block and state validator interface
	vmConfig  vm.Config

	badBlocks      *lru.Cache               // Bad block cache
	shouldPreserve func(*types.Block) bool  // Function used to determine whether should preserve the given block.
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
		}
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:    chainConfig,
		cacheConfig:    cacheConfig,
		db:             db,
		triegc:         prque.New(nil),
		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
		badBlocks:      badBlocks,
	}
	bc.SetValidator(NewBlockValidator(chainConfig, bc, engine))
	bc.SetProcessor(NewStateProcessor(chainConfig, bc, engine))

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.SetHead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}
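
// Editor's note, usage sketch (illustrative, not part of the upstream file): a
// node backend would typically construct the chain once at startup and tear it
// down on shutdown. Assuming an already-open ethdb.Database `db`, a chain
// config and a consensus engine `engine` obtained elsewhere:
//
//	chain, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, nil)
//	if err != nil {
//		return err // e.g. ErrNoGenesis if the database was never initialised
//	}
//	defer chain.Stop()
//
// Passing a nil *CacheConfig selects the defaults shown above; passing a nil
// shouldPreserve callback means same-difficulty ties fall back to the random
// choice in writeBlockWithState.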

func (bc *BlockChain) getProcInterrupt() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
	return &bc.vmConfig
}

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
		}
	}
	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(currentHeader.Time.Int64(), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)))

	return nil
}

// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db ethdb.Writer, hash common.Hash, num uint64) {
		rawdb.DeleteBody(db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either blocks reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()

	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())

	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash
// irrelevant what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exists
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.chainmu.Lock()
	bc.currentBlock.Store(block)
	bc.chainmu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// SetProcessor sets the processor required for making state modifications.
func (bc *BlockChain) SetProcessor(processor Processor) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.processor = processor
}

// SetValidator sets the validator which is used to validate incoming blocks.
func (bc *BlockChain) SetValidator(validator Validator) {
	bc.procmu.Lock()
	defer bc.procmu.Unlock()
	bc.validator = validator
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	bc.procmu.RLock()
	defer bc.procmu.RUnlock()
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache)
}
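
// Editor's note, illustrative sketch (not part of the upstream file): State and
// StateAt hand back a mutable state.StateDB that callers commonly use for
// read-only queries against a particular root. Assuming `chain` is a
// *BlockChain and `addr` a common.Address of interest:
//
//	statedb, err := chain.State() // state at the current head block
//	if err != nil {
//		return err // the trie for this root may be missing or pruned
//	}
//	balance := statedb.GetBalance(addr)
//	nonce := statedb.GetNonce(addr)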

// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
	return bc.stateCache
}

// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	rawdb.WriteBlock(bc.db, genesis)

	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)

	return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
	for {
		// Abort if we've rewound to a head block that does have associated state
		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
			return nil
		}
		// Otherwise rewind one block and recheck state availability there
		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
		if block == nil {
			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
		}
		*head = block
	}
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.chainmu.RLock()
	defer bc.chainmu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}
		if err := block.EncodeRLP(w); err != nil {
			return err
		}
		if time.Since(reported) >= statsReportLimit {
			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}
	return nil
}
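
// Editor's note, usage sketch (illustrative, not part of the upstream file):
// Export streams RLP-encoded blocks to any io.Writer, so dumping the chain to a
// file is just:
//
//	f, err := os.Create("chain.rlp")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := chain.Export(f); err != nil { // or chain.ExportN(f, 0, 1000)
//		return err
//	}
//
// The `os` import and the file name are assumptions made for the example only.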

// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
	rawdb.WriteHeadBlockHash(bc.db, block.Hash())

	bc.currentBlock.Store(block)

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())
		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())

		bc.currentFastBlock.Store(block)
	}
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBody(bc.db, hash, *number)
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyRLPCache.Add(hash, body)
	return body
}

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	if bc.blockCache.Contains(hash) {
		return true
	}
	return rawdb.HasBody(bc.db, hash, number)
}

// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
	if !bc.HasBlock(hash, number) {
		return false
	}
	if bc.receiptsCache.Contains(hash) {
		return true
	}
	return rawdb.HasReceipts(bc.db, hash, number)
}

// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := rawdb.ReadBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return bc.GetBlock(hash, *number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := rawdb.ReadCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}
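
// Editor's note, illustrative sketch (not part of the upstream file): the
// lookups above differ only in how the block is addressed. GetBlockByNumber
// follows the canonical chain, while GetBlock/GetBlockByHash return whatever
// block matches the hash, canonical or not:
//
//	head := chain.CurrentBlock()
//	byNumber := chain.GetBlockByNumber(head.NumberU64()) // canonical block at that height
//	byHash := chain.GetBlockByHash(head.Hash())          // same block, addressed by hash
//	// A nil result simply means the block is not in the database.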

// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	if receipts, ok := bc.receiptsCache.Get(hash); ok {
		return receipts.(types.Receipts)
	}
	number := rawdb.ReadHeaderNumber(bc.db, hash)
	if number == nil {
		return nil
	}
	receipts := rawdb.ReadReceipts(bc.db, hash, *number)
	if receipts == nil {
		return nil
	}
	bc.receiptsCache.Add(hash, receipts)
	return receipts
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, *number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		*number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.Disabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus status of write
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)
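
// Editor's note, illustrative sketch (not part of the upstream file): callers
// of WriteBlockWithState typically switch on the returned status to decide what
// to do next, roughly:
//
//	status, err := chain.WriteBlockWithState(block, receipts, statedb)
//	if err != nil {
//		return err
//	}
//	switch status {
//	case CanonStatTy:
//		// the block extended (or reorganised) the canonical chain
//	case SideStatTy:
//		// the block was stored on a side fork with lower total difficulty
//	}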

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)
	if len(transactions) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}
	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// block location fields
		receipts[j].BlockHash = block.Hash()
		receipts[j].BlockNumber = block.Number()
		receipts[j].TransactionIndex = uint(j)

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
	return nil
}
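
// Editor's note, worked example (illustrative, not part of the upstream file):
// the stored receipts carry only CumulativeGasUsed, so the per-transaction
// GasUsed field above is recovered by differencing. For cumulative values
// [21000, 74000, 95000] the derived GasUsed fields become [21000, 53000, 21000].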

// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
			return i, fmt.Errorf("failed to set receipts data: %v", err)
		}
		// Write all the data out into the database
		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
		rawdb.WriteTxLookupEntries(batch, block)

		stats.processed++

		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.chainmu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
			bc.currentFastBlock.Store(head)
		}
	}
	bc.chainmu.Unlock()

	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)),
		"size", common.StorageSize(bytes),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}
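
// Editor's note, usage sketch (illustrative, not part of the upstream file):
// InsertReceiptChain is how fast sync fills in bodies and receipts for headers
// that are already known; the loop pairs blockChain[i] with receiptChain[i], so
// the two slices must be index-aligned and contiguous:
//
//	if n, err := chain.InsertReceiptChain(blocks, receipts); err != nil {
//		log.Error("Receipt import failed", "index", n, "err", err)
//	}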

var lastWrite uint64

// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	rawdb.WriteBlock(bc.db, block)

	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	return bc.writeBlockWithState(block, receipts, state)
}

// writeBlockWithState writes the block and all associated state to the database,
// but expects the chain mutex to be held.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrespective of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	rawdb.WriteBlock(bc.db, block)

	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.Disabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				triedb.Cap(limit - ethdb.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			chosen := current - triesInMemory

			// If we exceeded our time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If the header is missing (canonical chain behind), we're reorging a low
				// diff sidechain. Suspend committing until this operation is completed.
				header := bc.GetHeaderByNumber(chosen)
				if header == nil {
					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
				} else {
					// If we're exceeding limits but haven't reached a large enough memory gap,
					// warn the user that the system is becoming unstable.
					if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					}
					// Flush an entire trie and restart the counters
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash))
			}
		}
	}

	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then preferentially select
		// the block generated by the local miner as the canonical block.
		if block.NumberU64() < currentBlock.NumberU64() {
			reorg = true
		} else if block.NumberU64() == currentBlock.NumberU64() {
			var currentPreserve, blockPreserve bool
			if bc.shouldPreserve != nil {
				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
			}
			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
		}
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction/receipt lookups and preimages
		rawdb.WriteTxLookupEntries(batch, block)
		rawdb.WritePreimages(batch, state.Preimages())

		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}

// addFutureBlock checks if the block is within the max allowed window to get
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
	max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
	if block.Time().Cmp(max) > 0 {
		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
	}
	bc.futureBlocks.Add(block.Hash(), block)
	return nil
}

// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well as an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil
	}
	bc.blockProcFeed.Send(true)
	defer bc.blockProcFeed.Send(false)

	// Remove already known canon-blocks
	var (
		block, prev *types.Block
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		block = chain[i]
		prev = chain[i-1]
		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	bc.chainmu.Lock()
	n, events, logs, err := bc.insertChain(chain, true)
	bc.chainmu.Unlock()
	bc.wg.Done()

	bc.PostChainEvents(events, logs)
	return n, err
}
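
// Editor's note, usage sketch (illustrative, not part of the upstream file):
// block propagation and batch import both funnel through InsertChain. A caller
// holding a contiguous batch of blocks would do roughly:
//
//	if n, err := chain.InsertChain(blocks); err != nil {
//		log.Error("Block import failed", "index", n, "err", err)
//	}
//
// The returned index points at the block that failed to import; blocks before
// it have already been written to the chain.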
  1013. // insertChain is the internal implementation of insertChain, which assumes that
  1014. // 1) chains are contiguous, and 2) The chain mutex is held.
  1015. //
  1016. // This method is split out so that import batches that require re-injecting
  1017. // historical blocks can do so without releasing the lock, which could lead to
  1018. // racey behaviour. If a sidechain import is in progress, and the historic state
  1019. // is imported, but then new canon-head is added before the actual sidechain
  1020. // completes, then the historic state could be pruned again
  1021. func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
  1022. // If the chain is terminating, don't even bother starting u
  1023. if atomic.LoadInt32(&bc.procInterrupt) == 1 {
  1024. return 0, nil, nil, nil
  1025. }
  1026. // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
  1027. senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
  1028. // A queued approach to delivering events. This is generally
  1029. // faster than direct delivery and requires much less mutex
  1030. // acquiring.
  1031. var (
  1032. stats = insertStats{startTime: mclock.Now()}
  1033. events = make([]interface{}, 0, len(chain))
  1034. lastCanon *types.Block
  1035. coalescedLogs []*types.Log
  1036. )
  1037. // Start the parallel header verifier
  1038. headers := make([]*types.Header, len(chain))
  1039. seals := make([]bool, len(chain))
  1040. for i, block := range chain {
  1041. headers[i] = block.Header()
  1042. seals[i] = verifySeals
  1043. }
  1044. abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
  1045. defer close(abort)
  1046. // Peek the error for the first block to decide the directing import logic
  1047. it := newInsertIterator(chain, results, bc.Validator())
  1048. block, err := it.next()
  1049. // Left-trim all the known blocks
  1050. if err == ErrKnownBlock {
  1051. // First block (and state) is known
  1052. // 1. We did a roll-back, and should now do a re-import
  1053. // 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
  1054. // from the canonical chain, which has not been verified.
  1055. // Skip all known blocks that are behind us
  1056. current := bc.CurrentBlock().NumberU64()
  1057. for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
  1058. stats.ignored++
  1059. block, err = it.next()
  1060. }
  1061. // Falls through to the block import
  1062. }
  1063. switch {
  1064. // First block is pruned, insert as sidechain and reorg only if TD grows enough
  1065. case err == consensus.ErrPrunedAncestor:
  1066. return bc.insertSidechain(block, it)
  1067. // First block is future, shove it (and all children) to the future queue (unknown ancestor)
  1068. case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
  1069. for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
  1070. if err := bc.addFutureBlock(block); err != nil {
  1071. return it.index, events, coalescedLogs, err
  1072. }
  1073. block, err = it.next()
  1074. }
  1075. stats.queued += it.processed()
  1076. stats.ignored += it.remaining()
  1077. // If there are any still remaining, mark as ignored
  1078. return it.index, events, coalescedLogs, err
  1079. // Some other error occurred, abort
  1080. case err != nil:
  1081. stats.ignored += len(it.chain)
  1082. bc.reportBlock(block, nil, err)
  1083. return it.index, events, coalescedLogs, err
  1084. }
	// No validation errors for the first block (or chain prefix skipped)
	for ; block != nil && err == nil; block, err = it.next() {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return it.index, events, coalescedLogs, ErrBlacklistedHash
		}
		// Retrieve the parent block and its state to execute on top
		start := time.Now()
		parent := it.previous()
		if parent == nil {
			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
		}
		state, err := state.New(parent.Root, bc.stateCache)
		if err != nil {
			return it.index, events, coalescedLogs, err
		}
		// Process block using the parent state as reference point.
		substart := time.Now()
		receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			return it.index, events, coalescedLogs, err
		}
		// Update the metrics touched during block processing
		accountReadTimer.Update(state.AccountReads)     // Account reads are complete, we can mark them
		storageReadTimer.Update(state.StorageReads)     // Storage reads are complete, we can mark them
		accountUpdateTimer.Update(state.AccountUpdates) // Account updates are complete, we can mark them
		storageUpdateTimer.Update(state.StorageUpdates) // Storage updates are complete, we can mark them
		triehash := state.AccountHashes + state.StorageHashes // Save to not double count in validation
		trieproc := state.AccountReads + state.AccountUpdates
		trieproc += state.StorageReads + state.StorageUpdates
		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
		// Validate the state using the default validator
		substart = time.Now()
		if err := bc.Validator().ValidateState(block, state, receipts, usedGas); err != nil {
			bc.reportBlock(block, receipts, err)
			return it.index, events, coalescedLogs, err
		}
		proctime := time.Since(start)
		// Update the metrics touched during block validation
		accountHashTimer.Update(state.AccountHashes) // Account hashes are complete, we can mark them
		storageHashTimer.Update(state.StorageHashes) // Storage hashes are complete, we can mark them
		blockValidationTimer.Update(time.Since(substart) - (state.AccountHashes + state.StorageHashes - triehash))
		// Write the block to the chain and get the status.
		substart = time.Now()
		status, err := bc.writeBlockWithState(block, receipts, state)
		if err != nil {
			return it.index, events, coalescedLogs, err
		}
		// Update the metrics touched during block commit
		accountCommitTimer.Update(state.AccountCommits) // Account commits are complete, we can mark them
		storageCommitTimer.Update(state.StorageCommits) // Storage commits are complete, we can mark them
		blockWriteTimer.Update(time.Since(substart) - state.AccountCommits - state.StorageCommits)
		blockInsertTimer.UpdateSince(start)
		switch status {
		case CanonStatTy:
			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
				"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
				"elapsed", common.PrettyDuration(time.Since(start)),
				"root", block.Root())
			coalescedLogs = append(coalescedLogs, logs...)
			events = append(events, ChainEvent{block, block.Hash(), logs})
			lastCanon = block
			// Only count canonical blocks for GC processing time
			bc.gcproc += proctime
		case SideStatTy:
			log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
			events = append(events, ChainSideEvent{block})
		}
		stats.processed++
		stats.usedGas += usedGas
		dirty, _ := bc.stateCache.TrieDB().Size()
		stats.report(chain, it.index, dirty)
	}
	// Any blocks remaining here? The only ones we care about are the future ones
	if block != nil && err == consensus.ErrFutureBlock {
		if err := bc.addFutureBlock(block); err != nil {
			return it.index, events, coalescedLogs, err
		}
		block, err = it.next()
		for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			stats.queued++
		}
	}
	stats.ignored += it.remaining()
	// Append a single chain head event if we've progressed the chain
	if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
		events = append(events, ChainHeadEvent{lastCanon})
	}
	return it.index, events, coalescedLogs, err
}

// insertSidechain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if its TD exceeds that of the current chain.
func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
	var (
		externTd *big.Int
		current  = bc.CurrentBlock()
	)
	// The first sidechain block error is already verified to be ErrPrunedAncestor.
	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
	// ones. Any other error means that the block is invalid, and should not be written
	// to disk.
	err := consensus.ErrPrunedAncestor
	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
		// Check the canonical state root for that number
		if number := block.NumberU64(); current.NumberU64() >= number {
			canonical := bc.GetBlockByNumber(number)
			if canonical != nil && canonical.Hash() == block.Hash() {
				// Not a sidechain block, this is a re-import of a canon block which has its state pruned
				continue
			}
			if canonical != nil && canonical.Root() == block.Root() {
				// This is most likely a shadow-state attack. When a fork is imported into the
				// database, and it eventually reaches a block height which is not pruned, we
				// just found that the state already exists! This means that the sidechain block
				// refers to a state which already exists in our canon chain.
				//
				// If left unchecked, we would now proceed importing the blocks, without actually
				// having verified the state of the previous blocks.
				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
				// mechanism.
				return it.index, nil, nil, errors.New("sidechain ghost-state attack")
			}
		}
		if externTd == nil {
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
		}
		externTd = new(big.Int).Add(externTd, block.Difficulty())
		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
			start := time.Now()
			if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
				return it.index, nil, nil, err
			}
			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}
	// At this point, we've written all sidechain blocks to the database. The loop
	// ended either on some other error or all were processed. If there was some other
	// error, we can ignore the rest of those blocks.
	//
	// If the externTd was larger than our local TD, we now need to reimport the previous
	// blocks to regenerate the required state
	localTd := bc.GetTd(current.Hash(), current.NumberU64())
	if localTd.Cmp(externTd) > 0 {
		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
		return it.index, nil, nil, err
	}
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	var (
		hashes  []common.Hash
		numbers []uint64
	)
	parent := it.previous()
	for parent != nil && !bc.HasState(parent.Root) {
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.Number.Uint64())
		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
	}
	if parent == nil {
		return it.index, nil, nil, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	var (
		blocks []*types.Block
		memory common.StorageSize
	)
	for i := len(hashes) - 1; i >= 0; i-- {
		// Append the next block to our batch
		block := bc.GetBlock(hashes[i], numbers[i])
		blocks = append(blocks, block)
		memory += block.Size()
		// If memory use grew too large, import and continue. Sadly we need to discard
		// all raised events and logs from notifications since we're too heavy on the
		// memory here.
		if len(blocks) >= 2048 || memory > 64*1024*1024 {
			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
			if _, _, _, err := bc.insertChain(blocks, false); err != nil {
				return 0, nil, nil, err
			}
			blocks, memory = blocks[:0], 0
			// If the chain is terminating, stop processing blocks
			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
				log.Debug("Premature abort during blocks processing")
				return 0, nil, nil, nil
			}
		}
	}
	if len(blocks) > 0 {
		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
		return bc.insertChain(blocks, false)
	}
	return 0, nil, nil, nil
}
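
// Note (illustrative, not part of the upstream code): the reorg decision above
// reduces to a total-difficulty comparison. A minimal sketch, where the
// hypothetical names firstSideBlock/sideBlocks stand in for the iterator state:
//
//	externTd := bc.GetTd(firstSideBlock.ParentHash(), firstSideBlock.NumberU64()-1)
//	for _, b := range sideBlocks {
//		externTd = new(big.Int).Add(externTd, b.Difficulty())
//	}
//	// Only when externTd is at least the local head's TD are the pruned
//	// ancestors re-imported via insertChain to regenerate their state.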

// reorg takes two blocks, an old chain and a new chain, and will reconstruct the
// blocks and insert them to be part of the new canonical chain. It also accumulates
// potential missing transactions and posts an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block
		deletedTxs  types.Transactions
		deletedLogs []*types.Log
		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted.
		collectLogs = func(hash common.Hash) {
			// Coalesce logs and set 'Removed'.
			number := bc.hc.GetBlockNumber(hash)
			if number == nil {
				return
			}
			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					del := *log
					del.Removed = true
					deletedLogs = append(deletedLogs, &del)
				}
			}
		}
	)
	// First reduce whichever chain is higher, so both sit at the same height
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
			collectLogs(oldBlock.Hash())
		}
	} else {
		// reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("Invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("Invalid new chain")
	}
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		oldChain = append(oldChain, oldBlock)
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash())
		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("Invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("Invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 63 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain, taking care of the proper incremental order
	var addedTxs types.Transactions
	for i := len(newChain) - 1; i >= 0; i-- {
		// insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])
		// write lookup entries for hash based transaction/receipt searches
		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)
	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	batch := bc.db.NewBatch()
	for _, tx := range diff {
		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
	}
	batch.Write()
	if len(deletedLogs) > 0 {
		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
	}
	if len(oldChain) > 0 {
		go func() {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}()
	}
	return nil
}
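
// Worked example (illustrative, not part of the upstream code): with the old
// head at height 105 and the new head at height 103, reorg first walks the old
// chain down to height 103, then steps both chains down in lockstep until the
// block hashes match. Every old block passed on the way contributes its
// transactions and logs to the deletion sets, and every new block is queued in
// newChain and re-inserted canonically from the common ancestor upwards.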

// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
	// post event logs for further processing
	if logs != nil {
		bc.logsFeed.Send(logs)
	}
	for _, event := range events {
		switch ev := event.(type) {
		case ChainEvent:
			bc.chainFeed.Send(ev)
		case ChainHeadEvent:
			bc.chainHeadFeed.Send(ev)
		case ChainSideEvent:
			bc.chainSideFeed.Send(ev)
		}
	}
}

// update is the background loop that periodically retries importing queued
// future blocks until the chain is shut down.
func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() []*types.Block {
	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
	for _, hash := range bc.badBlocks.Keys() {
		if blk, exist := bc.badBlocks.Peek(hash); exist {
			block := blk.(*types.Block)
			blocks = append(blocks, block)
		}
	}
	return blocks
}

// addBadBlock adds a bad block to the bad-block LRU cache
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Hash(), block)
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
	bc.addBadBlock(block)
	var receiptString string
	for i, receipt := range receipts {
		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
	}
	log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v
Number: %v
Hash: 0x%x
%v
Error: %v
##############################
`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
}

// InsertHeaderChain attempts to insert the given header chain into the local
// chain, possibly creating a reorg. If an error is returned, it will return the
// index number of the failing header as well as an error describing what went wrong.
//
// The checkFreq parameter can be used to fine-tune whether nonce verification
// should be done or not. The reason behind the optional check is because some
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	start := time.Now()
	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
		return i, err
	}
	// Make sure only one thread manipulates the chain at once
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()
	bc.wg.Add(1)
	defer bc.wg.Done()
	whFunc := func(header *types.Header) error {
		_, err := bc.hc.WriteHeader(header)
		return err
	}
	return bc.hc.InsertHeaderChain(chain, whFunc, start)
}
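
// Example (illustrative, not part of the upstream code): a caller importing an
// already-ordered batch of headers, verifying roughly one seal per hundred
// headers; `headers` is an assumed []*types.Header.
//
//	if n, err := bc.InsertHeaderChain(headers, 100); err != nil {
//		log.Error("Header import failed", "index", n, "err", err)
//	}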

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	bc.chainmu.RLock()
	defer bc.chainmu.RUnlock()
	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}
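
// Example (illustrative, not part of the upstream code): fetching the hash of
// the block eight generations above a known header `head` (an assumed
// *types.Header), allowing at most 16 non-canonical hops before reaching the
// canonical chain.
//
//	limit := uint64(16)
//	ancestorHash, ancestorNum := bc.GetAncestor(head.Hash(), head.Number.Uint64(), 8, &limit)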

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// Config retrieves the blockchain's chain configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}
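
// Example (illustrative, not part of the upstream code): consuming head events
// from the feed inside a consumer goroutine; unsubscribe when done and watch
// the subscription's Err channel for feed shutdown.
//
//	heads := make(chan ChainHeadEvent, 16)
//	sub := bc.SubscribeChainHeadEvent(heads)
//	defer sub.Unsubscribe()
//	for {
//		select {
//		case ev := <-heads:
//			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
//		case <-sub.Err():
//			return // feed closed, stop consuming
//		}
//	}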

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}

// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
	return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}
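
// Example (illustrative, not part of the upstream code): folding the true/false
// notifications into a single "busy" flag inside a consumer goroutine.
//
//	proc := make(chan bool, 8)
//	sub := bc.SubscribeBlockProcessingEvent(proc)
//	defer sub.Unsubscribe()
//	for busy := range proc {
//		log.Debug("Block processing state changed", "busy", busy)
//	}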