blockchain.go

// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package core implements the Ethereum consensus protocol.
package core

import (
	"errors"
	"fmt"
	"io"
	"math/big"
	mrand "math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	lru "github.com/hashicorp/golang-lru"
)

var (
	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)

	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)

	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)

	blockPrefetchExecuteTimer   = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
	blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)

	ErrNoGenesis = errors.New("Genesis not found in chain")
)

const (
	bodyCacheLimit      = 256
	blockCacheLimit     = 256
	receiptsCacheLimit  = 32
	maxFutureBlocks     = 256
	maxTimeFutureBlocks = 30
	badBlockLimit       = 10
	triesInMemory       = 128

	// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
	//
	// During the process of upgrading the database version from 3 to 4,
	// the following incompatible database changes were added.
	// * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
	// * the `Bloom` field of receipt is deleted
	// * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
	BlockChainVersion uint64 = 4
)

// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
}

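// Illustrative sketch (not part of the upstream file): a cache configuration
// for an archive node. With TrieDirtyDisabled set, trie write caching and
// garbage collection are turned off and every block's state is committed to
// disk. The numeric limits here are assumed values, not geth defaults.
var exampleArchiveCacheConfig = &CacheConfig{
	TrieCleanLimit:    512,
	TrieDirtyLimit:    0,
	TrieDirtyDisabled: true,
	TrieTimeLimit:     5 * time.Minute,
}
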
// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts and chain reorganisations.
//
// Importing blocks into the block chain happens according to the set of rules
// defined by the two stage Validator. Processing of blocks is done using the
// Processor which processes the included transactions. The validation of the state
// is done in the second part of the Validator. Failing results in aborting of
// the import.
//
// The BlockChain also helps in returning blocks from **any** chain included
// in the database as well as blocks that represent the canonical chain. It's
// important to note that GetBlock can return any block and does not need to be
// included in the canonical one whereas GetBlockByNumber always represents the
// canonical chain.
type BlockChain struct {
	chainConfig *params.ChainConfig // Chain & network configuration
	cacheConfig *CacheConfig        // Cache configuration for pruning

	db     ethdb.Database // Low level persistent database to store final content in
	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
	gcproc time.Duration  // Accumulates canonical block processing for trie dumping

	hc            *HeaderChain
	rmLogsFeed    event.Feed
	chainFeed     event.Feed
	chainSideFeed event.Feed
	chainHeadFeed event.Feed
	logsFeed      event.Feed
	blockProcFeed event.Feed
	scope         event.SubscriptionScope
	genesisBlock  *types.Block

	chainmu sync.RWMutex // blockchain insertion lock

	checkpoint       int          // checkpoint counts towards the new checkpoint
	currentBlock     atomic.Value // Current head of the block chain
	currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)

	stateCache    state.Database // State database to reuse between imports (contains state cache)
	bodyCache     *lru.Cache     // Cache for the most recent block bodies
	bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
	receiptsCache *lru.Cache     // Cache for the most recent receipts per block
	blockCache    *lru.Cache     // Cache for the most recent entire blocks
	futureBlocks  *lru.Cache     // future blocks are blocks added for later processing

	quit    chan struct{} // blockchain quit channel
	running int32         // running must be called atomically
	// procInterrupt must be atomically called
	procInterrupt int32          // interrupt signaler for block processing
	wg            sync.WaitGroup // chain processing wait group for shutting down

	engine     consensus.Engine
	validator  Validator  // Block and state validator interface
	prefetcher Prefetcher // Block state prefetcher interface
	processor  Processor  // Block transaction processor interface
	vmConfig   vm.Config

	badBlocks      *lru.Cache              // Bad block cache
	shouldPreserve func(*types.Block) bool // Function used to determine whether to preserve the given block.
}

// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
	if cacheConfig == nil {
		cacheConfig = &CacheConfig{
			TrieCleanLimit: 256,
			TrieDirtyLimit: 256,
			TrieTimeLimit:  5 * time.Minute,
		}
	}
	bodyCache, _ := lru.New(bodyCacheLimit)
	bodyRLPCache, _ := lru.New(bodyCacheLimit)
	receiptsCache, _ := lru.New(receiptsCacheLimit)
	blockCache, _ := lru.New(blockCacheLimit)
	futureBlocks, _ := lru.New(maxFutureBlocks)
	badBlocks, _ := lru.New(badBlockLimit)

	bc := &BlockChain{
		chainConfig:    chainConfig,
		cacheConfig:    cacheConfig,
		db:             db,
		triegc:         prque.New(nil),
		stateCache:     state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
		quit:           make(chan struct{}),
		shouldPreserve: shouldPreserve,
		bodyCache:      bodyCache,
		bodyRLPCache:   bodyRLPCache,
		receiptsCache:  receiptsCache,
		blockCache:     blockCache,
		futureBlocks:   futureBlocks,
		engine:         engine,
		vmConfig:       vmConfig,
		badBlocks:      badBlocks,
	}
	bc.validator = NewBlockValidator(chainConfig, bc, engine)
	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
	bc.processor = NewStateProcessor(chainConfig, bc, engine)

	var err error
	bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	bc.genesisBlock = bc.GetBlockByNumber(0)
	if bc.genesisBlock == nil {
		return nil, ErrNoGenesis
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// get the canonical block corresponding to the offending header's number
			headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
			// make sure the headerByNumber (if present) is in our current canonical chain
			if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
				log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
				bc.SetHead(header.Number.Uint64() - 1)
				log.Error("Chain rewind was successful, resuming normal operation")
			}
		}
	}
	// Take ownership of this particular state
	go bc.update()
	return bc, nil
}

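// Illustrative sketch (not part of the upstream file): wiring up a BlockChain
// over a freshly initialised database. The genesis spec and consensus engine
// are supplied by the caller (tests typically use a fake engine); passing a nil
// CacheConfig selects the defaults shown in NewBlockChain above.
func exampleNewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine) (*BlockChain, error) {
	// Commit the genesis block first so that NewBlockChain can find block #0.
	genesis.MustCommit(db)
	return NewBlockChain(db, nil, genesis.Config, engine, vm.Config{}, nil)
}
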
func (bc *BlockChain) getProcInterrupt() bool {
	return atomic.LoadInt32(&bc.procInterrupt) == 1
}

// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
	return &bc.vmConfig
}

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
	// Restore the last known head block
	head := rawdb.ReadHeadBlockHash(bc.db)
	if head == (common.Hash{}) {
		// Corrupt or empty database, init from scratch
		log.Warn("Empty database, resetting chain")
		return bc.Reset()
	}
	// Make sure the entire head block is available
	currentBlock := bc.GetBlockByHash(head)
	if currentBlock == nil {
		// Corrupt or empty database, init from scratch
		log.Warn("Head block missing, resetting chain", "hash", head)
		return bc.Reset()
	}
	// Make sure the state associated with the block is available
	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
		// Dangling block without a state associated, init from scratch
		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
		if err := bc.repair(&currentBlock); err != nil {
			return err
		}
	}
	// Everything seems to be fine, set as the head block
	bc.currentBlock.Store(currentBlock)

	// Restore the last known head header
	currentHeader := currentBlock.Header()
	if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
		if header := bc.GetHeaderByHash(head); header != nil {
			currentHeader = header
		}
	}
	bc.hc.SetCurrentHeader(currentHeader)

	// Restore the last known head fast block
	bc.currentFastBlock.Store(currentBlock)
	if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
		if block := bc.GetBlockByHash(head); block != nil {
			bc.currentFastBlock.Store(block)
		}
	}
	// Issue a status log for the user
	currentFastBlock := bc.CurrentFastBlock()

	headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
	blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

	log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
	log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
	log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))

	return nil
}

// SetHead rewinds the local chain to a new head. In the case of headers, everything
// above the new head will be deleted and the new one set. In the case of blocks
// though, the head may be further rewound if block bodies are missing (non-archive
// nodes after a fast sync).
func (bc *BlockChain) SetHead(head uint64) error {
	log.Warn("Rewinding blockchain", "target", head)

	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Rewind the header chain, deleting all block bodies until then
	delFn := func(db ethdb.Writer, hash common.Hash, num uint64) {
		rawdb.DeleteBody(db, hash, num)
	}
	bc.hc.SetHead(head, delFn)
	currentHeader := bc.hc.CurrentHeader()

	// Clear out any stale content from the caches
	bc.bodyCache.Purge()
	bc.bodyRLPCache.Purge()
	bc.receiptsCache.Purge()
	bc.blockCache.Purge()
	bc.futureBlocks.Purge()

	// Rewind the block chain, ensuring we don't end up with a stateless head block
	if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
		bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	if currentBlock := bc.CurrentBlock(); currentBlock != nil {
		if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
			// Rewound state missing, rolled back to before pivot, reset to genesis
			bc.currentBlock.Store(bc.genesisBlock)
		}
	}
	// Rewind the fast block in a simpleton way to the target head
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
		bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
	}
	// If either blocks reached nil, reset to the genesis state
	if currentBlock := bc.CurrentBlock(); currentBlock == nil {
		bc.currentBlock.Store(bc.genesisBlock)
	}
	if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
		bc.currentFastBlock.Store(bc.genesisBlock)
	}
	currentBlock := bc.CurrentBlock()
	currentFastBlock := bc.CurrentFastBlock()

	rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
	rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash())

	return bc.loadLastState()
}

// FastSyncCommitHead sets the current head block to the one defined by the hash,
// regardless of what the chain contents were prior.
func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
	// Make sure that both the block as well as its state trie exists
	block := bc.GetBlockByHash(hash)
	if block == nil {
		return fmt.Errorf("non existent block [%x…]", hash[:4])
	}
	if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
		return err
	}
	// If all checks out, manually set the head block
	bc.chainmu.Lock()
	bc.currentBlock.Store(block)
	bc.chainmu.Unlock()

	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
	return nil
}

// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
	return bc.CurrentBlock().GasLimit()
}

// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
	return bc.currentBlock.Load().(*types.Block)
}

// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
	return bc.currentFastBlock.Load().(*types.Block)
}

// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
	return bc.validator
}

// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
	return bc.processor
}

// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
	return bc.StateAt(bc.CurrentBlock().Root())
}

// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
	return state.New(root, bc.stateCache)
}

// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
	return bc.stateCache
}

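// Illustrative sketch (not part of the upstream file): reading an account's
// balance at the current head via State/StateAt. The address is whatever the
// caller is interested in; no particular account is assumed.
func exampleHeadBalance(bc *BlockChain, addr common.Address) (*big.Int, error) {
	statedb, err := bc.State()
	if err != nil {
		return nil, err
	}
	return statedb.GetBalance(addr), nil
}
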
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
	return bc.ResetWithGenesisBlock(bc.genesisBlock)
}

// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
	// Dump the entire block chain and purge the caches
	if err := bc.SetHead(0); err != nil {
		return err
	}
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain
	if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
		log.Crit("Failed to write genesis block TD", "err", err)
	}
	rawdb.WriteBlock(bc.db, genesis)

	bc.genesisBlock = genesis
	bc.insert(bc.genesisBlock)
	bc.currentBlock.Store(bc.genesisBlock)
	bc.hc.SetGenesis(bc.genesisBlock.Header())
	bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
	bc.currentFastBlock.Store(bc.genesisBlock)

	return nil
}

// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
//
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChain) repair(head **types.Block) error {
	for {
		// Abort if we've rewound to a head block that does have associated state
		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
			return nil
		}
		// Otherwise rewind one block and recheck state availability there
		block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
		if block == nil {
			return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
		}
		*head = block
	}
}

// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
	return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}

// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
	bc.chainmu.RLock()
	defer bc.chainmu.RUnlock()

	if first > last {
		return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
	}
	log.Info("Exporting batch of blocks", "count", last-first+1)

	start, reported := time.Now(), time.Now()
	for nr := first; nr <= last; nr++ {
		block := bc.GetBlockByNumber(nr)
		if block == nil {
			return fmt.Errorf("export failed on #%d: not found", nr)
		}
		if err := block.EncodeRLP(w); err != nil {
			return err
		}
		if time.Since(reported) >= statsReportLimit {
			log.Info("Exporting blocks", "exported", block.NumberU64()-first, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}
	return nil
}

// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
	// If the block is on a side chain or an unknown one, force other heads onto it too
	updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

	// Add the block to the canonical chain number scheme and mark as the head
	rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
	rawdb.WriteHeadBlockHash(bc.db, block.Hash())

	bc.currentBlock.Store(block)

	// If the block is better than our head or is on a different chain, force update heads
	if updateHeads {
		bc.hc.SetCurrentHeader(block.Header())
		rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())

		bc.currentFastBlock.Store(block)
	}
}

// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
	return bc.genesisBlock
}

// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyCache.Get(hash); ok {
		body := cached.(*types.Body)
		return body
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBody(bc.db, hash, *number)
	if body == nil {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyCache.Add(hash, body)
	return body
}

// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
	// Short circuit if the body's already in the cache, retrieve otherwise
	if cached, ok := bc.bodyRLPCache.Get(hash); ok {
		return cached.(rlp.RawValue)
	}
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	body := rawdb.ReadBodyRLP(bc.db, hash, *number)
	if len(body) == 0 {
		return nil
	}
	// Cache the found body for next time and return
	bc.bodyRLPCache.Add(hash, body)
	return body
}

// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
	if bc.blockCache.Contains(hash) {
		return true
	}
	return rawdb.HasBody(bc.db, hash, number)
}

// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
	if !bc.HasBlock(hash, number) {
		return false
	}
	if bc.receiptsCache.Contains(hash) {
		return true
	}
	return rawdb.HasReceipts(bc.db, hash, number)
}

// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
	_, err := bc.stateCache.OpenTrie(hash)
	return err == nil
}

// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
	// Check first that the block itself is known
	block := bc.GetBlock(hash, number)
	if block == nil {
		return false
	}
	return bc.HasState(block.Root())
}

// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	// Short circuit if the block's already in the cache, retrieve otherwise
	if block, ok := bc.blockCache.Get(hash); ok {
		return block.(*types.Block)
	}
	block := rawdb.ReadBlock(bc.db, hash, number)
	if block == nil {
		return nil
	}
	// Cache the found block for next time and return
	bc.blockCache.Add(block.Hash(), block)
	return block
}

// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return bc.GetBlock(hash, *number)
}

// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
	hash := rawdb.ReadCanonicalHash(bc.db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return bc.GetBlock(hash, number)
}

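// Illustrative sketch (not part of the upstream file): walking a range of
// canonical blocks by number. A nil result means the number is past the current
// head (or not canonical), so the walk stops there.
func exampleWalkCanonical(bc *BlockChain, from, to uint64, visit func(*types.Block) bool) {
	for n := from; n <= to; n++ {
		block := bc.GetBlockByNumber(n)
		if block == nil || !visit(block) {
			return
		}
	}
}
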
// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	if receipts, ok := bc.receiptsCache.Get(hash); ok {
		return receipts.(types.Receipts)
	}
	number := rawdb.ReadHeaderNumber(bc.db, hash)
	if number == nil {
		return nil
	}
	receipts := rawdb.ReadReceipts(bc.db, hash, *number)
	if receipts == nil {
		return nil
	}
	bc.receiptsCache.Add(hash, receipts)
	return receipts
}

// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
	number := bc.hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	for i := 0; i < n; i++ {
		block := bc.GetBlock(hash, *number)
		if block == nil {
			break
		}
		blocks = append(blocks, block)
		hash = block.ParentHash()
		*number--
	}
	return
}

// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
	uncles := []*types.Header{}
	for i := 0; block != nil && i < length; i++ {
		uncles = append(uncles, block.Uncles()...)
		block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
	}
	return uncles
}

// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
	return bc.stateCache.TrieDB().Node(hash)
}

// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
	if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
		return
	}
	// Unsubscribe all subscriptions registered from blockchain
	bc.scope.Close()
	close(bc.quit)
	atomic.StoreInt32(&bc.procInterrupt, 1)

	bc.wg.Wait()

	// Ensure the state of a recent block is also stored to disk before exiting.
	// We're writing three different states to catch different restart scenarios:
	//  - HEAD:     So we don't need to reprocess any blocks in the general case
	//  - HEAD-1:   So we don't do large reorgs if our HEAD becomes an uncle
	//  - HEAD-127: So we have a hard limit on the number of blocks reexecuted
	if !bc.cacheConfig.TrieDirtyDisabled {
		triedb := bc.stateCache.TrieDB()

		for _, offset := range []uint64{0, 1, triesInMemory - 1} {
			if number := bc.CurrentBlock().NumberU64(); number > offset {
				recent := bc.GetBlockByNumber(number - offset)

				log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
				if err := triedb.Commit(recent.Root(), true); err != nil {
					log.Error("Failed to commit recent state trie", "err", err)
				}
			}
		}
		for !bc.triegc.Empty() {
			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
		}
		if size, _ := triedb.Size(); size != 0 {
			log.Error("Dangling trie nodes after full cleanup")
		}
	}
	log.Info("Blockchain manager stopped")
}

func (bc *BlockChain) procFutureBlocks() {
	blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
	for _, hash := range bc.futureBlocks.Keys() {
		if block, exist := bc.futureBlocks.Peek(hash); exist {
			blocks = append(blocks, block.(*types.Block))
		}
	}
	if len(blocks) > 0 {
		types.BlockBy(types.Number).Sort(blocks)

		// Insert one by one as chain insertion needs contiguous ancestry between blocks
		for i := range blocks {
			bc.InsertChain(blocks[i : i+1])
		}
	}
}

// WriteStatus status of write
type WriteStatus byte

const (
	NonStatTy WriteStatus = iota
	CanonStatTy
	SideStatTy
)

// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (bc *BlockChain) Rollback(chain []common.Hash) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		currentHeader := bc.hc.CurrentHeader()
		if currentHeader.Hash() == hash {
			bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
		}
		if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
			newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
			bc.currentFastBlock.Store(newFastBlock)
			rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
		}
		if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
			newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
			bc.currentBlock.Store(newBlock)
			rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
		}
	}
}

// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
	signer := types.MakeSigner(config, block.Number())

	transactions, logIndex := block.Transactions(), uint(0)
	if len(transactions) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}
	for j := 0; j < len(receipts); j++ {
		// The transaction hash can be retrieved from the transaction itself
		receipts[j].TxHash = transactions[j].Hash()

		// block location fields
		receipts[j].BlockHash = block.Hash()
		receipts[j].BlockNumber = block.Number()
		receipts[j].TransactionIndex = uint(j)

		// The contract address can be derived from the transaction itself
		if transactions[j].To() == nil {
			// Deriving the signer is expensive, only do if it's actually needed
			from, _ := types.Sender(signer, transactions[j])
			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
		}
		// The used gas can be calculated based on previous receipts
		if j == 0 {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed
		} else {
			receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
		}
		// The derived log fields can simply be set from the block and transaction
		for k := 0; k < len(receipts[j].Logs); k++ {
			receipts[j].Logs[k].BlockNumber = block.NumberU64()
			receipts[j].Logs[k].BlockHash = block.Hash()
			receipts[j].Logs[k].TxHash = receipts[j].TxHash
			receipts[j].Logs[k].TxIndex = uint(j)
			receipts[j].Logs[k].Index = logIndex
			logIndex++
		}
	}
	return nil
}

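// Illustrative sketch (not part of the upstream file): recovering the gas used
// by each individual transaction from the cumulative counters, mirroring the
// derivation performed in SetReceiptsData above.
func exampleGasPerTx(receipts types.Receipts) []uint64 {
	used := make([]uint64, len(receipts))
	for j, receipt := range receipts {
		used[j] = receipt.CumulativeGasUsed
		if j > 0 {
			used[j] -= receipts[j-1].CumulativeGasUsed
		}
	}
	return used
}
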
// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(blockChain); i++ {
		if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
			log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
				"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
				blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
		}
	}

	var (
		stats = struct{ processed, ignored int32 }{}
		start = time.Now()
		bytes = 0
		batch = bc.db.NewBatch()
	)
	for i, block := range blockChain {
		receipts := receiptChain[i]
		// Short circuit insertion if shutting down or processing failed
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			return 0, nil
		}
		// Short circuit if the owner header is unknown
		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
		}
		// Skip if the entire data is already known
		if bc.HasBlock(block.Hash(), block.NumberU64()) {
			stats.ignored++
			continue
		}
		// Compute all the non-consensus fields of the receipts
		if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
			return i, fmt.Errorf("failed to set receipts data: %v", err)
		}
		// Write all the data out into the database
		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
		rawdb.WriteTxLookupEntries(batch, block)

		stats.processed++

		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return 0, err
			}
			bytes += batch.ValueSize()
			batch.Reset()
		}
	}
	if batch.ValueSize() > 0 {
		bytes += batch.ValueSize()
		if err := batch.Write(); err != nil {
			return 0, err
		}
	}

	// Update the head fast sync block if better
	bc.chainmu.Lock()
	head := blockChain[len(blockChain)-1]
	if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
		currentFastBlock := bc.CurrentFastBlock()
		if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
			rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
			bc.currentFastBlock.Store(head)
		}
	}
	bc.chainmu.Unlock()

	context := []interface{}{
		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
		"size", common.StorageSize(bytes),
	}
	if stats.ignored > 0 {
		context = append(context, []interface{}{"ignored", stats.ignored}...)
	}
	log.Info("Imported new block receipts", context...)

	return 0, nil
}

var lastWrite uint64

// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
		return err
	}
	rawdb.WriteBlock(bc.db, block)

	return nil
}

// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	return bc.writeBlockWithState(block, receipts, state)
}

// writeBlockWithState writes the block and all associated state to the database,
// but it expects the chain mutex to be held.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
	bc.wg.Add(1)
	defer bc.wg.Done()

	// Calculate the total difficulty of the block
	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
	if ptd == nil {
		return NonStatTy, consensus.ErrUnknownAncestor
	}
	// Make sure no inconsistent state is leaked during insertion
	currentBlock := bc.CurrentBlock()
	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
	externTd := new(big.Int).Add(block.Difficulty(), ptd)

	// Irrelevant of the canonical status, write the block itself to the database
	if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
		return NonStatTy, err
	}
	rawdb.WriteBlock(bc.db, block)

	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
	if err != nil {
		return NonStatTy, err
	}
	triedb := bc.stateCache.TrieDB()

	// If we're running an archive node, always flush
	if bc.cacheConfig.TrieDirtyDisabled {
		if err := triedb.Commit(root, false); err != nil {
			return NonStatTy, err
		}
	} else {
		// Full but not archive node, do proper garbage collection
		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
		bc.triegc.Push(root, -int64(block.NumberU64()))

		if current := block.NumberU64(); current > triesInMemory {
			// If we exceeded our memory allowance, flush matured singleton nodes to disk
			var (
				nodes, imgs = triedb.Size()
				limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
			)
			if nodes > limit || imgs > 4*1024*1024 {
				triedb.Cap(limit - ethdb.IdealBatchSize)
			}
			// Find the next state trie we need to commit
			chosen := current - triesInMemory

			// If we exceeded our time allowance, flush an entire trie to disk
			if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
				// If the header is missing (canonical chain behind), we're reorging a low
				// diff sidechain. Suspend committing until this operation is completed.
				header := bc.GetHeaderByNumber(chosen)
				if header == nil {
					log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
				} else {
					// If we're exceeding limits but haven't reached a large enough memory gap,
					// warn the user that the system is becoming unstable.
					if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
						log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
					}
					// Flush an entire trie and restart the counters
					triedb.Commit(header.Root, true)
					lastWrite = chosen
					bc.gcproc = 0
				}
			}
			// Garbage collect anything below our required write retention
			for !bc.triegc.Empty() {
				root, number := bc.triegc.Pop()
				if uint64(-number) > chosen {
					bc.triegc.Push(root, number)
					break
				}
				triedb.Dereference(root.(common.Hash))
			}
		}
	}

	// Write other block data using a batch.
	batch := bc.db.NewBatch()
	rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)

	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := externTd.Cmp(localTd) > 0
	currentBlock = bc.CurrentBlock()
	if !reorg && externTd.Cmp(localTd) == 0 {
		// Split same-difficulty blocks by number, then preferentially select
		// the block generated by the local miner as the canonical block.
		if block.NumberU64() < currentBlock.NumberU64() {
			reorg = true
		} else if block.NumberU64() == currentBlock.NumberU64() {
			var currentPreserve, blockPreserve bool
			if bc.shouldPreserve != nil {
				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
			}
			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
		}
	}
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
			if err := bc.reorg(currentBlock, block); err != nil {
				return NonStatTy, err
			}
		}
		// Write the positional metadata for transaction/receipt lookups and preimages
		rawdb.WriteTxLookupEntries(batch, block)
		rawdb.WritePreimages(batch, state.Preimages())

		status = CanonStatTy
	} else {
		status = SideStatTy
	}
	if err := batch.Write(); err != nil {
		return NonStatTy, err
	}

	// Set new head.
	if status == CanonStatTy {
		bc.insert(block)
	}
	bc.futureBlocks.Remove(block.Hash())
	return status, nil
}

// addFutureBlock checks if the block is within the max allowed window to get
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
	if block.Time() > max {
		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
	}
	bc.futureBlocks.Add(block.Hash(), block)
	return nil
}

// InsertChain attempts to insert the given batch of blocks into the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well as an error describing what went
// wrong.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return 0, nil
	}
	bc.blockProcFeed.Send(true)
	defer bc.blockProcFeed.Send(false)

	// Remove already known canon-blocks
	var (
		block, prev *types.Block
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		block = chain[i]
		prev = chain[i-1]
		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
		}
	}
	// Pre-checks passed, start the full block imports
	bc.wg.Add(1)
	bc.chainmu.Lock()
	n, events, logs, err := bc.insertChain(chain, true)
	bc.chainmu.Unlock()
	bc.wg.Done()

	bc.PostChainEvents(events, logs)
	return n, err
}

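// Illustrative sketch (not part of the upstream file): importing a batch of
// blocks and reporting which one failed. InsertChain returns the index of the
// offending block alongside the error.
func exampleImport(bc *BlockChain, blocks types.Blocks) error {
	n, err := bc.InsertChain(blocks)
	if err != nil {
		return fmt.Errorf("import failed at index %d (#%d): %v", n, blocks[n].NumberU64(), err)
	}
	return nil
}
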
// insertChain is the internal implementation of InsertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again.
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
	// If the chain is terminating, don't even bother starting up
	if atomic.LoadInt32(&bc.procInterrupt) == 1 {
		return 0, nil, nil, nil
	}
	// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
	senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)

	// A queued approach to delivering events. This is generally
	// faster than direct delivery and requires much less mutex
	// acquiring.
	var (
		stats         = insertStats{startTime: mclock.Now()}
		events        = make([]interface{}, 0, len(chain))
		lastCanon     *types.Block
		coalescedLogs []*types.Log
	)
	// Start the parallel header verifier
	headers := make([]*types.Header, len(chain))
	seals := make([]bool, len(chain))

	for i, block := range chain {
		headers[i] = block.Header()
		seals[i] = verifySeals
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort)

	// Peek the error for the first block to decide the directing import logic
	it := newInsertIterator(chain, results, bc.validator)

	block, err := it.next()

	// Left-trim all the known blocks
	if err == ErrKnownBlock {
		// First block (and state) is known
		//   1. We did a roll-back, and should now do a re-import
		//   2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
		//      from the canonical chain, which has not been verified.
		// Skip all known blocks that are behind us
		current := bc.CurrentBlock().NumberU64()
		for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
			stats.ignored++
			block, err = it.next()
		}
		// Falls through to the block import
	}
	switch {
	// First block is pruned, insert as sidechain and reorg only if TD grows enough
	case err == consensus.ErrPrunedAncestor:
		return bc.insertSidechain(block, it)

	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
		for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
			if err := bc.addFutureBlock(block); err != nil {
				return it.index, events, coalescedLogs, err
			}
			block, err = it.next()
		}
		stats.queued += it.processed()
		stats.ignored += it.remaining()

		// If there are any still remaining, mark as ignored
		return it.index, events, coalescedLogs, err

	// Some other error occurred, abort
	case err != nil:
		stats.ignored += len(it.chain)
		bc.reportBlock(block, nil, err)
		return it.index, events, coalescedLogs, err
	}
	// No validation errors for the first block (or chain prefix skipped)
	for ; block != nil && err == nil; block, err = it.next() {
		// If the chain is terminating, stop processing blocks
		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
			log.Debug("Premature abort during blocks processing")
			break
		}
		// If the header is a banned one, straight out abort
		if BadHashes[block.Hash()] {
			bc.reportBlock(block, nil, ErrBlacklistedHash)
			return it.index, events, coalescedLogs, ErrBlacklistedHash
		}
		// Retrieve the parent block and its state to execute on top
		start := time.Now()

		parent := it.previous()
		if parent == nil {
			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
		}
		statedb, err := state.New(parent.Root, bc.stateCache)
		if err != nil {
			return it.index, events, coalescedLogs, err
		}
		// If we have a followup block, run that against the current state to pre-cache
		// transactions and probabilistically some of the account/storage trie nodes.
		var followupInterrupt uint32

		if !bc.cacheConfig.TrieCleanNoPrefetch {
			if followup, err := it.peek(); followup != nil && err == nil {
				go func(start time.Time) {
					throwaway, _ := state.New(parent.Root, bc.stateCache)
					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)

					blockPrefetchExecuteTimer.Update(time.Since(start))
					if atomic.LoadUint32(&followupInterrupt) == 1 {
						blockPrefetchInterruptMeter.Mark(1)
					}
				}(time.Now())
			}
		}
		// Process block using the parent state as reference point
		substart := time.Now()
		receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
		if err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		// Update the metrics touched during block processing
		accountReadTimer.Update(statedb.AccountReads)     // Account reads are complete, we can mark them
		storageReadTimer.Update(statedb.StorageReads)     // Storage reads are complete, we can mark them
		accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
		storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them

		triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
		trieproc := statedb.AccountReads + statedb.AccountUpdates
		trieproc += statedb.StorageReads + statedb.StorageUpdates

		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)

		// Validate the state using the default validator
		substart = time.Now()
		if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
			bc.reportBlock(block, receipts, err)
			atomic.StoreUint32(&followupInterrupt, 1)
			return it.index, events, coalescedLogs, err
		}
		proctime := time.Since(start)

		// Update the metrics touched during block validation
		accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
		storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them

		blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))

		// Write the block to the chain and get the status.
		substart = time.Now()
		status, err := bc.writeBlockWithState(block, receipts, statedb)
  1142. if err != nil {
  1143. atomic.StoreUint32(&followupInterrupt, 1)
  1144. return it.index, events, coalescedLogs, err
  1145. }
  1146. atomic.StoreUint32(&followupInterrupt, 1)
  1147. // Update the metrics touched during block commit
  1148. accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
  1149. storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
  1150. blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits)
  1151. blockInsertTimer.UpdateSince(start)
  1152. switch status {
  1153. case CanonStatTy:
  1154. log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
  1155. "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
  1156. "elapsed", common.PrettyDuration(time.Since(start)),
  1157. "root", block.Root())
  1158. coalescedLogs = append(coalescedLogs, logs...)
  1159. events = append(events, ChainEvent{block, block.Hash(), logs})
  1160. lastCanon = block
  1161. // Only count canonical blocks for GC processing time
  1162. bc.gcproc += proctime
  1163. case SideStatTy:
  1164. log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
  1165. "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
  1166. "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
  1167. "root", block.Root())
  1168. events = append(events, ChainSideEvent{block})
  1169. }
  1170. stats.processed++
  1171. stats.usedGas += usedGas
  1172. dirty, _ := bc.stateCache.TrieDB().Size()
  1173. stats.report(chain, it.index, dirty)
  1174. }
  1175. // Any blocks remaining here? The only ones we care about are the future ones
  1176. if block != nil && err == consensus.ErrFutureBlock {
  1177. if err := bc.addFutureBlock(block); err != nil {
  1178. return it.index, events, coalescedLogs, err
  1179. }
  1180. block, err = it.next()
  1181. for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
  1182. if err := bc.addFutureBlock(block); err != nil {
  1183. return it.index, events, coalescedLogs, err
  1184. }
  1185. stats.queued++
  1186. }
  1187. }
  1188. stats.ignored += it.remaining()
  1189. // Append a single chain head event if we've progressed the chain
  1190. if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
  1191. events = append(events, ChainHeadEvent{lastCanon})
  1192. }
  1193. return it.index, events, coalescedLogs, err
  1194. }
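
// Illustrative only: a minimal sketch of how a caller inside this package could
// drive insertChain and fan out its results, assuming the exported InsertChain
// wrapper takes the chain mutex, delegates here with seal verification enabled,
// and then publishes the collected events. The helper name below is hypothetical:
//
//	func (bc *BlockChain) insertAndAnnounce(chain types.Blocks) (int, error) {
//		bc.chainmu.Lock()
//		n, events, logs, err := bc.insertChain(chain, true)
//		bc.chainmu.Unlock()
//
//		// Deliver the queued ChainEvent/ChainHeadEvent/ChainSideEvent items in one go.
//		bc.PostChainEvents(events, logs)
//		return n, err
//	}
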
// insertSidechain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeds that of the current chain.
func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
	var (
		externTd *big.Int
		current  = bc.CurrentBlock()
	)
	// The first sidechain block error is already verified to be ErrPrunedAncestor.
	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
	// ones. Any other error means that the block is invalid, and should not be written
	// to disk.
	err := consensus.ErrPrunedAncestor
	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
		// Check the canonical state root for that number
		if number := block.NumberU64(); current.NumberU64() >= number {
			canonical := bc.GetBlockByNumber(number)
			if canonical != nil && canonical.Hash() == block.Hash() {
				// Not a sidechain block, this is a re-import of a canon block which has its state pruned
				continue
			}
			if canonical != nil && canonical.Root() == block.Root() {
				// This is most likely a shadow-state attack. When a fork is imported into the
				// database, and it eventually reaches a block height which is not pruned, we
				// just found that the state already exists! This means that the sidechain block
				// refers to a state which already exists in our canon chain.
				//
				// If left unchecked, we would now proceed importing the blocks, without actually
				// having verified the state of the previous blocks.
				log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())

				// If someone legitimately side-mines blocks, they would still be imported as usual. However,
				// we cannot risk writing unverified blocks to disk when they obviously target the pruning
				// mechanism.
				return it.index, nil, nil, errors.New("sidechain ghost-state attack")
			}
		}
		if externTd == nil {
			externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
		}
		externTd = new(big.Int).Add(externTd, block.Difficulty())

		if !bc.HasBlock(block.Hash(), block.NumberU64()) {
			start := time.Now()
			if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
				return it.index, nil, nil, err
			}
			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
				"root", block.Root())
		}
	}
	// At this point, we've written all sidechain blocks to database. Loop ended
	// either on some other error or all were processed. If there was some other
	// error, we can ignore the rest of those blocks.
	//
	// If the externTd was larger than our local TD, we now need to reimport the previous
	// blocks to regenerate the required state
	localTd := bc.GetTd(current.Hash(), current.NumberU64())
	if localTd.Cmp(externTd) > 0 {
		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
		return it.index, nil, nil, err
	}
	// Gather all the sidechain hashes (full blocks may be memory heavy)
	var (
		hashes  []common.Hash
		numbers []uint64
	)
	parent := it.previous()
	for parent != nil && !bc.HasState(parent.Root) {
		hashes = append(hashes, parent.Hash())
		numbers = append(numbers, parent.Number.Uint64())

		parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
	}
	if parent == nil {
		return it.index, nil, nil, errors.New("missing parent")
	}
	// Import all the pruned blocks to make the state available
	var (
		blocks []*types.Block
		memory common.StorageSize
	)
	for i := len(hashes) - 1; i >= 0; i-- {
		// Append the next block to our batch
		block := bc.GetBlock(hashes[i], numbers[i])

		blocks = append(blocks, block)
		memory += block.Size()

		// If memory use grew too large, import and continue. Sadly we need to discard
		// all raised events and logs from notifications since we're too heavy on the
		// memory here.
		if len(blocks) >= 2048 || memory > 64*1024*1024 {
			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
			if _, _, _, err := bc.insertChain(blocks, false); err != nil {
				return 0, nil, nil, err
			}
			blocks, memory = blocks[:0], 0

			// If the chain is terminating, stop processing blocks
			if atomic.LoadInt32(&bc.procInterrupt) == 1 {
				log.Debug("Premature abort during blocks processing")
				return 0, nil, nil, nil
			}
		}
	}
	if len(blocks) > 0 {
		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
		return bc.insertChain(blocks, false)
	}
	return 0, nil, nil, nil
}
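
// Illustrative only: the reorg decision above boils down to total-difficulty
// arithmetic. A minimal sketch of the bookkeeping, with hypothetical inputs
// (forkParentTd, difficulties and localTd are assumptions for illustration):
//
//	forkParentTd := big.NewInt(1000000) // TD of the sidechain's fork parent
//	localTd := big.NewInt(1003000)      // TD of the current canonical head
//	difficulties := []*big.Int{big.NewInt(2000), big.NewInt(2100)}
//
//	externTd := new(big.Int).Set(forkParentTd)
//	for _, d := range difficulties {
//		externTd.Add(externTd, d) // accumulate per-block difficulty, as the loop above does
//	}
//	// The sidechain only becomes canonical when externTd exceeds the local head's TD;
//	// otherwise the blocks stay on disk without state, as logged above.
//	shouldReorg := externTd.Cmp(localTd) > 0
//	_ = shouldReorg
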
// reorg takes two blocks, an old chain and a new chain, and will reconstruct the
// blocks and insert them to be part of the new canonical chain, accumulating
// potential missing transactions and posting an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
	var (
		newChain    types.Blocks
		oldChain    types.Blocks
		commonBlock *types.Block

		deletedTxs  types.Transactions
		deletedLogs []*types.Log

		// collectLogs collects the logs that were generated during the
		// processing of the block that corresponds with the given hash.
		// These logs are later announced as deleted.
		collectLogs = func(hash common.Hash) {
			// Coalesce logs and set 'Removed'.
			number := bc.hc.GetBlockNumber(hash)
			if number == nil {
				return
			}
			receipts := rawdb.ReadReceipts(bc.db, hash, *number)
			for _, receipt := range receipts {
				for _, log := range receipt.Logs {
					del := *log
					del.Removed = true
					deletedLogs = append(deletedLogs, &del)
				}
			}
		}
	)
	// First reduce whoever is higher bound
	if oldBlock.NumberU64() > newBlock.NumberU64() {
		// Reduce old chain
		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
			oldChain = append(oldChain, oldBlock)
			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
			collectLogs(oldBlock.Hash())
		}
	} else {
		// Reduce new chain and append new chain blocks for inserting later on
		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
			newChain = append(newChain, newBlock)
		}
	}
	if oldBlock == nil {
		return fmt.Errorf("invalid old chain")
	}
	if newBlock == nil {
		return fmt.Errorf("invalid new chain")
	}
	for {
		if oldBlock.Hash() == newBlock.Hash() {
			commonBlock = oldBlock
			break
		}
		oldChain = append(oldChain, oldBlock)
		newChain = append(newChain, newBlock)
		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
		collectLogs(oldBlock.Hash())

		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
		if oldBlock == nil {
			return fmt.Errorf("invalid old chain")
		}
		if newBlock == nil {
			return fmt.Errorf("invalid new chain")
		}
	}
	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
		logFn := log.Debug
		if len(oldChain) > 63 {
			logFn = log.Warn
		}
		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
	} else {
		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
	}
	// Insert the new chain, taking care of the proper incremental order
	var addedTxs types.Transactions
	for i := len(newChain) - 1; i >= 0; i-- {
		// Insert the block in the canonical way, re-writing history
		bc.insert(newChain[i])

		// Write lookup entries for hash based transaction/receipt searches
		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
		addedTxs = append(addedTxs, newChain[i].Transactions()...)
	}
	// Calculate the difference between deleted and added transactions
	diff := types.TxDifference(deletedTxs, addedTxs)

	// When transactions get deleted from the database that means the
	// receipts that were created in the fork must also be deleted
	batch := bc.db.NewBatch()
	for _, tx := range diff {
		rawdb.DeleteTxLookupEntry(batch, tx.Hash())
	}
	batch.Write()

	if len(deletedLogs) > 0 {
		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
	}
	if len(oldChain) > 0 {
		go func() {
			for _, block := range oldChain {
				bc.chainSideFeed.Send(ChainSideEvent{Block: block})
			}
		}()
	}
	return nil
}
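
// Illustrative only: a minimal sketch of the transaction bookkeeping used above,
// assuming types.TxDifference keeps the transactions of the first set that do not
// appear in the second. Transactions that lived only in the dropped chain lose
// their hash->block lookup entries; re-included ones keep theirs. The oldChainTxs,
// newChainTxs and batch names are hypothetical placeholders:
//
//	deleted := oldChainTxs // txs collected from the dropped blocks
//	added := newChainTxs   // txs re-written by the new canonical blocks
//	for _, tx := range types.TxDifference(deleted, added) {
//		rawdb.DeleteTxLookupEntry(batch, tx.Hash()) // only orphaned txs are unindexed
//	}
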
// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
	// Post event logs for further processing
	if logs != nil {
		bc.logsFeed.Send(logs)
	}
	for _, event := range events {
		switch ev := event.(type) {
		case ChainEvent:
			bc.chainFeed.Send(ev)

		case ChainHeadEvent:
			bc.chainHeadFeed.Send(ev)

		case ChainSideEvent:
			bc.chainSideFeed.Send(ev)
		}
	}
}

func (bc *BlockChain) update() {
	futureTimer := time.NewTicker(5 * time.Second)
	defer futureTimer.Stop()
	for {
		select {
		case <-futureTimer.C:
			bc.procFutureBlocks()
		case <-bc.quit:
			return
		}
	}
}

// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
func (bc *BlockChain) BadBlocks() []*types.Block {
	blocks := make([]*types.Block, 0, bc.badBlocks.Len())
	for _, hash := range bc.badBlocks.Keys() {
		if blk, exist := bc.badBlocks.Peek(hash); exist {
			block := blk.(*types.Block)
			blocks = append(blocks, block)
		}
	}
	return blocks
}

// addBadBlock adds a bad block to the bad-block LRU cache
func (bc *BlockChain) addBadBlock(block *types.Block) {
	bc.badBlocks.Add(block.Hash(), block)
}

// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
	bc.addBadBlock(block)

	var receiptString string
	for i, receipt := range receipts {
		receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
			i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
			receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
	}
	log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v
Number: %v
Hash: 0x%x
%v
Error: %v
##############################
`, bc.chainConfig, block.Number(), block.Hash(), receiptString, err))
}

// InsertHeaderChain attempts to insert the given header chain into the local
// chain, possibly creating a reorg. If an error is returned, it will return the
// index number of the failing header as well as an error describing what went wrong.
//
// The verify parameter can be used to fine tune whether nonce verification
// should be done or not. The reason behind the optional check is because some
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	start := time.Now()
	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
		return i, err
	}

	// Make sure only one thread manipulates the chain at once
	bc.chainmu.Lock()
	defer bc.chainmu.Unlock()

	bc.wg.Add(1)
	defer bc.wg.Done()

	whFunc := func(header *types.Header) error {
		_, err := bc.hc.WriteHeader(header)
		return err
	}
	return bc.hc.InsertHeaderChain(chain, whFunc, start)
}
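
// Illustrative only: a minimal sketch of header-first import through this entry
// point. The checkFreq argument is simply forwarded to ValidateHeaderChain, which
// uses it to decide how sparsely seals are fully verified. The headers slice and
// the value 100 are assumptions for illustration:
//
//	var headers []*types.Header // e.g. a batch received from a peer
//	if n, err := bc.InsertHeaderChain(headers, 100); err != nil {
//		log.Warn("Header import failed", "index", n, "err", err)
//	}
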
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
	return bc.hc.CurrentHeader()
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return bc.hc.GetTd(hash, number)
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (bc *BlockChain) GetTdByHash(hash common.Hash) *big.Int {
	return bc.hc.GetTdByHash(hash)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	return bc.hc.GetHeader(hash, number)
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
	return bc.hc.GetHeaderByHash(hash)
}

// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
	return bc.hc.HasHeader(hash, number)
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	return bc.hc.GetBlockHashesFromHash(hash, max)
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	bc.chainmu.RLock()
	defer bc.chainmu.RUnlock()

	return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}
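
// Illustrative only: a minimal sketch of walking back 128 blocks from the current
// head via GetAncestor. The depth of 128 and the budget of 16 non-canonical probes
// are assumptions for illustration:
//
//	head := bc.CurrentHeader()
//	maxNonCanonical := uint64(16)
//	hash, number := bc.GetAncestor(head.Hash(), head.Number.Uint64(), 128, &maxNonCanonical)
//	if hash != (common.Hash{}) {
//		log.Debug("Found ancestor", "hash", hash, "number", number)
//	}
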
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
	return bc.hc.GetHeaderByNumber(number)
}

// Config retrieves the blockchain's chain configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }

// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }

// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}

// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
	return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}

// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
	return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}

// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
	return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}

// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}

// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
	return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}
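
// Illustrative only: the Subscribe* helpers above all follow the event.Feed
// pattern. A minimal consumer sketch for ChainHeadEvent, typically run in its own
// goroutine (the channel size and handling logic are assumptions for illustration):
//
//	heads := make(chan ChainHeadEvent, 16)
//	sub := bc.SubscribeChainHeadEvent(heads)
//	defer sub.Unsubscribe()
//
//	for {
//		select {
//		case ev := <-heads:
//			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
//		case <-sub.Err():
//			// The subscription is closed when the feed scope shuts down.
//			return
//		}
//	}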