- // Copyright 2014 The go-ethereum Authors
- // This file is part of go-ethereum.
- //
- // go-ethereum is free software: you can redistribute it and/or modify
- // it under the terms of the GNU Lesser General Public License as published by
- // the Free Software Foundation, either version 3 of the License, or
- // (at your option) any later version.
- //
- // go-ethereum is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU Lesser General Public License for more details.
- //
- // You should have received a copy of the GNU Lesser General Public License
- // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
- package core
- import (
- "fmt"
- "io"
- "math/big"
- "runtime"
- "sync"
- "sync/atomic"
- "time"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/logger"
- "github.com/ethereum/go-ethereum/logger/glog"
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/pow"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/hashicorp/golang-lru"
- )
- var (
- chainlogger = logger.NewLogger("CHAIN")
- jsonlogger = logger.NewJsonLogger()
- blockHashPre = []byte("block-hash-")
- blockNumPre = []byte("block-num-")
- blockInsertTimer = metrics.NewTimer("chain/inserts")
- )
- const (
- blockCacheLimit = 256
- maxFutureBlocks = 256
- maxTimeFutureBlocks = 30
- checkpointLimit = 200
- )
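- // ChainManager maintains the canonical block chain. It stores blocks in blockDb,
- // keeps track of the current head block and total difficulty, caches recently
- // used blocks and queues blocks that arrive from the future for later processing.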
- type ChainManager struct {
- //eth EthManager
- blockDb common.Database
- stateDb common.Database
- extraDb common.Database
- processor types.BlockProcessor
- eventMux *event.TypeMux
- genesisBlock *types.Block
- mu sync.RWMutex
- chainmu sync.RWMutex
- tsmu sync.RWMutex
- checkpoint int // number of head updates since the last checkpoint was written
- td *big.Int // last known total difficulty of the canonical chain
- currentBlock *types.Block
- lastBlockHash common.Hash
- currentGasLimit *big.Int
- transState *state.StateDB
- txState *state.ManagedState
- cache *lru.Cache // cache is the LRU cache of recently accessed blocks
- futureBlocks *lru.Cache // future blocks are blocks added for later processing
- quit chan struct{}
- // procInterrupt must be accessed atomically
- procInterrupt int32 // interrupt signaler for block processing
- wg sync.WaitGroup
- pow pow.PoW
- }
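- // NewChainManager constructs a chain manager on top of the given block, state
- // and extra databases, verifies that the stored genesis matches the one
- // supplied, restores the last known head (recovering from a checkpoint if the
- // head is missing) and starts the background update loop.
- //
- // A rough construction sketch; the databases, PoW implementation and block
- // processor are assumptions supplied by the caller and are not defined in this file:
- //
- //	mux := new(event.TypeMux)
- //	chainman, err := NewChainManager(genesis, blockDb, stateDb, extraDb, pow, mux)
- //	if err != nil {
- //		return err
- //	}
- //	chainman.SetProcessor(blockProcessor)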
- func NewChainManager(genesis *types.Block, blockDb, stateDb, extraDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
- cache, _ := lru.New(blockCacheLimit)
- bc := &ChainManager{
- blockDb: blockDb,
- stateDb: stateDb,
- extraDb: extraDb,
- genesisBlock: GenesisBlock(42, stateDb),
- eventMux: mux,
- quit: make(chan struct{}),
- cache: cache,
- pow: pow,
- }
- // Check the genesis block given to the chain manager. If the stored block at number 0
- // mismatches the given genesis block, return an error. If no block is stored, or the same block is found, continue.
- if g := bc.GetBlockByNumber(0); g != nil && g.Hash() != genesis.Hash() {
- return nil, fmt.Errorf("Genesis mismatch. Maybe different nonce (%d vs %d)? %x / %x", g.Nonce(), genesis.Nonce(), g.Hash().Bytes()[:4], genesis.Hash().Bytes()[:4])
- }
- bc.genesisBlock = genesis
- bc.setLastState()
- // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
- for hash, _ := range BadHashes {
- if block := bc.GetBlock(hash); block != nil {
- glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4])
- block = bc.GetBlock(block.ParentHash())
- if block == nil {
- glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?")
- }
- bc.SetHead(block)
- glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
- }
- }
- bc.transState = bc.State().Copy()
- // Take ownership of this particular state
- bc.txState = state.ManageState(bc.State().Copy())
- bc.futureBlocks, _ = lru.New(maxFutureBlocks)
- bc.makeCache()
- go bc.update()
- return bc, nil
- }
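- // SetHead rewinds the canonical chain to the given block. Everything above the
- // new head is removed from the block database, the block cache and the
- // transaction/transient states are rebuilt, and the head and total difficulty
- // are reset to the new head.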
- func (bc *ChainManager) SetHead(head *types.Block) {
- bc.mu.Lock()
- defer bc.mu.Unlock()
- for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
- bc.removeBlock(block)
- }
- bc.cache, _ = lru.New(blockCacheLimit)
- bc.currentBlock = head
- bc.makeCache()
- statedb := state.New(head.Root(), bc.stateDb)
- bc.txState = state.ManageState(statedb)
- bc.transState = statedb.Copy()
- bc.setTotalDifficulty(head.Td)
- bc.insert(head)
- bc.setLastState()
- }
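- // Td returns a copy of the total difficulty of the current canonical chain.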
- func (self *ChainManager) Td() *big.Int {
- self.mu.RLock()
- defer self.mu.RUnlock()
- return new(big.Int).Set(self.td)
- }
- func (self *ChainManager) GasLimit() *big.Int {
- self.mu.RLock()
- defer self.mu.RUnlock()
- return self.currentBlock.GasLimit()
- }
- func (self *ChainManager) LastBlockHash() common.Hash {
- self.mu.RLock()
- defer self.mu.RUnlock()
- return self.lastBlockHash
- }
- func (self *ChainManager) CurrentBlock() *types.Block {
- self.mu.RLock()
- defer self.mu.RUnlock()
- return self.currentBlock
- }
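- // Status returns the total difficulty of the chain, the hash of the current
- // head block and the hash of the genesis block.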
- func (self *ChainManager) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) {
- self.mu.RLock()
- defer self.mu.RUnlock()
- return new(big.Int).Set(self.td), self.currentBlock.Hash(), self.genesisBlock.Hash()
- }
- func (self *ChainManager) SetProcessor(proc types.BlockProcessor) {
- self.processor = proc
- }
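- // State returns a new state object based on the state root of the current head block.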
- func (self *ChainManager) State() *state.StateDB {
- return state.New(self.CurrentBlock().Root(), self.stateDb)
- }
- func (self *ChainManager) TransState() *state.StateDB {
- self.tsmu.RLock()
- defer self.tsmu.RUnlock()
- return self.transState
- }
- func (self *ChainManager) setTransState(statedb *state.StateDB) {
- self.transState = statedb
- }
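- // recover attempts to restore the head of the chain from the last written
- // checkpoint. It returns true if a checkpointed block could be loaded and
- // promoted to the new head.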
- func (bc *ChainManager) recover() bool {
- data, _ := bc.blockDb.Get([]byte("checkpoint"))
- if len(data) != 0 {
- block := bc.GetBlock(common.BytesToHash(data))
- if block != nil {
- err := bc.blockDb.Put([]byte("LastBlock"), block.Hash().Bytes())
- if err != nil {
- glog.Fatalln("db write err:", err)
- }
- bc.currentBlock = block
- bc.lastBlockHash = block.Hash()
- return true
- }
- }
- return false
- }
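- // setLastState loads the head block referenced by the "LastBlock" database key.
- // If the referenced block is missing it falls back to the checkpoint, and if no
- // head is stored at all the chain is reset to the genesis block.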
- func (bc *ChainManager) setLastState() {
- data, _ := bc.blockDb.Get([]byte("LastBlock"))
- if len(data) != 0 {
- block := bc.GetBlock(common.BytesToHash(data))
- if block != nil {
- bc.currentBlock = block
- bc.lastBlockHash = block.Hash()
- } else {
- glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
- if bc.recover() {
- glog.Infof("Recover successful")
- } else {
- glog.Fatalf("Recover failed. Please report")
- }
- }
- } else {
- bc.Reset()
- }
- bc.td = bc.currentBlock.Td
- bc.currentGasLimit = CalcGasLimit(bc.currentBlock)
- if glog.V(logger.Info) {
- glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
- }
- }
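- // makeCache recreates the block cache and pre-populates it with the genesis
- // block and the most recent blocks leading up to the current head.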
- func (bc *ChainManager) makeCache() {
- bc.cache, _ = lru.New(blockCacheLimit)
- // load the most recent blocks into the cache: the genesis block plus the current head and up to `blockCacheLimit`-1 of its ancestors.
- bc.cache.Add(bc.genesisBlock.Hash(), bc.genesisBlock)
- for _, block := range bc.GetBlocksFromHash(bc.currentBlock.Hash(), blockCacheLimit) {
- bc.cache.Add(block.Hash(), block)
- }
- }
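- // Reset purges the entire block chain from the database and re-inserts the
- // genesis block as the new head.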
- func (bc *ChainManager) Reset() {
- bc.mu.Lock()
- defer bc.mu.Unlock()
- for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
- bc.removeBlock(block)
- }
- bc.cache, _ = lru.New(blockCacheLimit)
- // Prepare the genesis block
- bc.write(bc.genesisBlock)
- bc.insert(bc.genesisBlock)
- bc.currentBlock = bc.genesisBlock
- bc.makeCache()
- bc.setTotalDifficulty(common.Big("0"))
- }
- func (bc *ChainManager) removeBlock(block *types.Block) {
- bc.blockDb.Delete(append(blockHashPre, block.Hash().Bytes()...))
- }
- func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) {
- bc.mu.Lock()
- defer bc.mu.Unlock()
- for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
- bc.removeBlock(block)
- }
- // Prepare the genesis block
- gb.Td = gb.Difficulty()
- bc.genesisBlock = gb
- bc.write(bc.genesisBlock)
- bc.insert(bc.genesisBlock)
- bc.currentBlock = bc.genesisBlock
- bc.makeCache()
- bc.td = gb.Difficulty()
- }
- // Export writes the active chain to the given writer.
- func (self *ChainManager) Export(w io.Writer) error {
- if err := self.ExportN(w, uint64(0), self.currentBlock.NumberU64()); err != nil {
- return err
- }
- return nil
- }
- // ExportN writes a subset of the active chain to the given writer.
- func (self *ChainManager) ExportN(w io.Writer, first uint64, last uint64) error {
- self.mu.RLock()
- defer self.mu.RUnlock()
- if first > last {
- return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
- }
- glog.V(logger.Info).Infof("exporting %d blocks...\n", last-first+1)
- for nr := first; nr <= last; nr++ {
- block := self.GetBlockByNumber(nr)
- if block == nil {
- return fmt.Errorf("export failed on #%d: not found", nr)
- }
- if err := block.EncodeRLP(w); err != nil {
- return err
- }
- }
- return nil
- }
- // insert injects a block into the current block chain as the new head. Note, this function
- // assumes that the `mu` mutex is held!
- func (bc *ChainManager) insert(block *types.Block) {
- err := WriteHead(bc.blockDb, block)
- if err != nil {
- glog.Fatal("db write fail:", err)
- }
- bc.checkpoint++
- if bc.checkpoint > checkpointLimit {
- err = bc.blockDb.Put([]byte("checkpoint"), block.Hash().Bytes())
- if err != nil {
- glog.Fatal("db write fail:", err)
- }
- bc.checkpoint = 0
- }
- bc.currentBlock = block
- bc.lastBlockHash = block.Hash()
- }
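- // write RLP-encodes the block as a types.StorageBlock and writes it to the
- // block database under its hash key.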
- func (bc *ChainManager) write(block *types.Block) {
- tstart := time.Now()
- enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
- key := append(blockHashPre, block.Hash().Bytes()...)
- err := bc.blockDb.Put(key, enc)
- if err != nil {
- glog.Fatal("db write fail:", err)
- }
- if glog.V(logger.Debug) {
- glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
- }
- }
- // Accessors
- func (bc *ChainManager) Genesis() *types.Block {
- return bc.genesisBlock
- }
- // Block fetching methods
- func (bc *ChainManager) HasBlock(hash common.Hash) bool {
- if bc.cache.Contains(hash) {
- return true
- }
- data, _ := bc.blockDb.Get(append(blockHashPre, hash[:]...))
- return len(data) != 0
- }
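- // GetBlockHashesFromHash returns up to max hashes of the ancestors of the given
- // block hash, starting with the parent and walking towards the genesis block.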
- func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) (chain []common.Hash) {
- block := self.GetBlock(hash)
- if block == nil {
- return
- }
- // XXX Could be optimised by using a different database which only holds hashes (i.e., linked list)
- for i := uint64(0); i < max; i++ {
- block = self.GetBlock(block.ParentHash())
- if block == nil {
- break
- }
- chain = append(chain, block.Hash())
- if block.Number().Cmp(common.Big0) <= 0 {
- break
- }
- }
- return
- }
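- // GetBlock retrieves a block by hash, first checking the block cache and
- // falling back to the block database. Blocks loaded from the database are
- // added to the cache.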
- func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
- if block, ok := self.cache.Get(hash); ok {
- return block.(*types.Block)
- }
- block := GetBlockByHash(self.blockDb, hash)
- if block == nil {
- return nil
- }
- // Add the block to the cache
- self.cache.Add(hash, (*types.Block)(block))
- return (*types.Block)(block)
- }
- func (self *ChainManager) GetBlockByNumber(num uint64) *types.Block {
- self.mu.RLock()
- defer self.mu.RUnlock()
- return self.getBlockByNumber(num)
- }
- // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
- func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
- for i := 0; i < n; i++ {
- block := self.GetBlock(hash)
- if block == nil {
- break
- }
- blocks = append(blocks, block)
- hash = block.ParentHash()
- }
- return
- }
- // getBlockByNumber is the non-locking version of GetBlockByNumber; callers must hold the chain mutex.
- func (self *ChainManager) getBlockByNumber(num uint64) *types.Block {
- return GetBlockByNumber(self.blockDb, num)
- }
- func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
- for i := 0; block != nil && i < length; i++ {
- uncles = append(uncles, block.Uncles()...)
- block = self.GetBlock(block.ParentHash())
- }
- return
- }
- // setTotalDifficulty updates the TD of the chain manager. Note, this function
- // assumes that the `mu` mutex is held!
- func (bc *ChainManager) setTotalDifficulty(td *big.Int) {
- bc.td = new(big.Int).Set(td)
- }
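- // Stop signals the background update loop and any in-flight inserts to
- // terminate and blocks until all pending chain operations have finished.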
- func (bc *ChainManager) Stop() {
- close(bc.quit)
- atomic.StoreInt32(&bc.procInterrupt, 1)
- bc.wg.Wait()
- glog.V(logger.Info).Infoln("Chain manager stopped")
- }
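- // queueEvent bundles the chain events produced by a single InsertChain call so
- // they can be posted to the event mux as one batch.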
- type queueEvent struct {
- queue []interface{}
- canonicalCount int
- sideCount int
- splitCount int
- }
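- // procFutureBlocks retries insertion of blocks that previously arrived too far
- // in the future, in ascending block number order.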
- func (self *ChainManager) procFutureBlocks() {
- blocks := make([]*types.Block, self.futureBlocks.Len())
- for i, hash := range self.futureBlocks.Keys() {
- block, _ := self.futureBlocks.Get(hash)
- blocks[i] = block.(*types.Block)
- }
- if len(blocks) > 0 {
- types.BlockBy(types.Number).Sort(blocks)
- self.InsertChain(blocks)
- }
- }
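- // writeStatus describes the outcome of writing a single block: not written,
- // written as the new canonical head, written as part of a chain split
- // (reorganisation), or written to a side chain.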
- type writeStatus byte
- const (
- NonStatTy writeStatus = iota
- CanonStatTy
- SplitStatTy
- SideStatTy
- )
- // WriteBlock writes the block to the chain (or pending queue)
- func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status writeStatus, err error) {
- self.wg.Add(1)
- defer self.wg.Done()
- cblock := self.currentBlock
- // Compare the TD of the new block against the TD of the current canonical head; only a greater TD makes it the new head.
- // At this point it's possible that a different chain (fork) becomes the new canonical chain.
- if block.Td.Cmp(self.Td()) > 0 {
- // chain fork
- if block.ParentHash() != cblock.Hash() {
- // during split we merge two different chains and create the new canonical chain
- err := self.merge(cblock, block)
- if err != nil {
- return NonStatTy, err
- }
- status = SplitStatTy
- }
- self.mu.Lock()
- self.setTotalDifficulty(block.Td)
- self.insert(block)
- self.mu.Unlock()
- self.setTransState(state.New(block.Root(), self.stateDb))
- self.txState.SetState(state.New(block.Root(), self.stateDb))
- status = CanonStatTy
- } else {
- status = SideStatTy
- }
- self.write(block)
- // Delete from future blocks
- self.futureBlocks.Remove(block.Hash())
- return
- }
- // InsertChain will attempt to insert the given chain into the canonical chain or, otherwise, create a fork. If an error is returned,
- // it will return the index number of the failing block as well as an error describing what went wrong (for possible errors see core/errors.go).
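- //
- // A rough usage sketch, with the block source (`blocks`) assumed to come from a
- // synchronisation component not shown in this file:
- //
- //	if n, err := chainman.InsertChain(blocks); err != nil {
- //		glog.V(logger.Error).Infof("insert failed at block %d: %v", n, err)
- //	}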
- func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
- self.wg.Add(1)
- defer self.wg.Done()
- self.chainmu.Lock()
- defer self.chainmu.Unlock()
- // Events are queued up and delivered in one batch. This is generally
- // faster than direct delivery and requires far fewer mutex
- // acquisitions.
- var (
- queue = make([]interface{}, len(chain))
- queueEvent = queueEvent{queue: queue}
- stats struct{ queued, processed, ignored int }
- tstart = time.Now()
- nonceDone = make(chan nonceResult, len(chain))
- nonceQuit = make(chan struct{})
- nonceChecked = make([]bool, len(chain))
- )
- // Start the parallel nonce verifier.
- go verifyNonces(self.pow, chain, nonceQuit, nonceDone)
- defer close(nonceQuit)
- txcount := 0
- for i, block := range chain {
- if atomic.LoadInt32(&self.procInterrupt) == 1 {
- glog.V(logger.Debug).Infoln("Premature abort during chain processing")
- break
- }
- bstart := time.Now()
- // Wait for block i's nonce to be verified before processing
- // its state transition.
- for !nonceChecked[i] {
- r := <-nonceDone
- nonceChecked[r.i] = true
- if !r.valid {
- block := chain[r.i]
- return r.i, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
- }
- }
- if BadHashes[block.Hash()] {
- err := fmt.Errorf("Found known bad hash in chain %x", block.Hash())
- blockErr(block, err)
- return i, err
- }
- // Set block.Td regardless of a potential error further on (e.g. a known block); this prevents errors down the line
- // in the protocol handler.
- block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash())))
- // Call in to the block processor and check for errors. It's likely that if one block fails
- // all others will fail too (unless a known block is returned).
- logs, receipts, err := self.processor.Process(block)
- if err != nil {
- if IsKnownBlockErr(err) {
- stats.ignored++
- continue
- }
- if err == BlockFutureErr {
- // Allow blocks up to maxTimeFutureBlocks seconds into the future. If this limit
- // is exceeded the insert is aborted with an error; otherwise the block is
- // queued and processed at a later time.
- if max := time.Now().Unix() + maxTimeFutureBlocks; int64(block.Time()) > max {
- return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max)
- }
- self.futureBlocks.Add(block.Hash(), block)
- stats.queued++
- continue
- }
- if IsParentErr(err) && self.futureBlocks.Contains(block.ParentHash()) {
- self.futureBlocks.Add(block.Hash(), block)
- stats.queued++
- continue
- }
- blockErr(block, err)
- go ReportBlock(block, err)
- return i, err
- }
- txcount += len(block.Transactions())
- // write the block to the chain and get the status
- status, err := self.WriteBlock(block, true)
- if err != nil {
- return i, err
- }
- switch status {
- case CanonStatTy:
- if glog.V(logger.Debug) {
- glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
- }
- queue[i] = ChainEvent{block, block.Hash(), logs}
- queueEvent.canonicalCount++
- // This puts transactions in an extra db for rpc access
- PutTransactions(self.extraDb, block, block.Transactions())
- // store the receipts
- PutReceipts(self.extraDb, receipts)
- case SideStatTy:
- if glog.V(logger.Detail) {
- glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
- }
- queue[i] = ChainSideEvent{block, logs}
- queueEvent.sideCount++
- case SplitStatTy:
- queue[i] = ChainSplitEvent{block, logs}
- queueEvent.splitCount++
- }
- stats.processed++
- }
- if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) {
- tend := time.Since(tstart)
- start, end := chain[0], chain[len(chain)-1]
- glog.Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. #%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, txcount, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
- }
- go self.eventMux.Post(queueEvent)
- return 0, nil
- }
- // diff takes the head blocks of an old chain and a new chain, walks both back to their common ancestor
- // and returns the blocks of the new chain that need to be inserted to become part of the new canonical chain.
- func (self *ChainManager) diff(oldBlock, newBlock *types.Block) (types.Blocks, error) {
- var (
- newChain types.Blocks
- commonBlock *types.Block
- oldStart = oldBlock
- newStart = newBlock
- )
- // first reduce whichever chain is longer down to the height of the other
- if oldBlock.NumberU64() > newBlock.NumberU64() {
- // reduce old chain
- for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) {
- }
- } else {
- // reduce new chain and append new chain blocks for inserting later on
- for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
- newChain = append(newChain, newBlock)
- }
- }
- if oldBlock == nil {
- return nil, fmt.Errorf("Invalid old chain")
- }
- if newBlock == nil {
- return nil, fmt.Errorf("Invalid new chain")
- }
- numSplit := newBlock.Number()
- for {
- if oldBlock.Hash() == newBlock.Hash() {
- commonBlock = oldBlock
- break
- }
- newChain = append(newChain, newBlock)
- oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
- if oldBlock == nil {
- return nil, fmt.Errorf("Invalid old chain")
- }
- if newBlock == nil {
- return nil, fmt.Errorf("Invalid new chain")
- }
- }
- if glog.V(logger.Debug) {
- commonHash := commonBlock.Hash()
- glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4])
- }
- return newChain, nil
- }
- // merge merges two different chains into a single new canonical chain
- func (self *ChainManager) merge(oldBlock, newBlock *types.Block) error {
- newChain, err := self.diff(oldBlock, newBlock)
- if err != nil {
- return fmt.Errorf("chain reorg failed: %v", err)
- }
- // insert the blocks. Order does not matter; the winning head block is written afterwards by the caller (WriteBlock), which sets the new head properly
- self.mu.Lock()
- for _, block := range newChain {
- self.insert(block)
- }
- self.mu.Unlock()
- return nil
- }
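- // update runs as a background goroutine. It drains queued chain events from the
- // event mux, posting a ChainHeadEvent whenever the inserted block is the
- // current head, periodically retries future blocks and exits when the quit
- // channel is closed.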
- func (self *ChainManager) update() {
- events := self.eventMux.Subscribe(queueEvent{})
- futureTimer := time.Tick(5 * time.Second)
- out:
- for {
- select {
- case ev := <-events.Chan():
- switch ev := ev.(type) {
- case queueEvent:
- for _, event := range ev.queue {
- switch event := event.(type) {
- case ChainEvent:
- // We need some control over the mining operation. Acquiring locks and waiting for the miner to create a new block takes too long
- // and in most cases isn't even necessary.
- if self.lastBlockHash == event.Hash {
- self.currentGasLimit = CalcGasLimit(event.Block)
- self.eventMux.Post(ChainHeadEvent{event.Block})
- }
- }
- self.eventMux.Post(event)
- }
- }
- case <-futureTimer:
- self.procFutureBlocks()
- case <-self.quit:
- break out
- }
- }
- }
- func blockErr(block *types.Block, err error) {
- h := block.Header()
- glog.V(logger.Error).Infof("Bad block #%v (%x)\n", h.Number, h.Hash().Bytes())
- glog.V(logger.Error).Infoln(err)
- glog.V(logger.Debug).Infoln(block) // dump the offending block at debug level
- }
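- // nonceResult reports whether the proof of work for block i of the batch
- // passed verification.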
- type nonceResult struct {
- i int
- valid bool
- }
- // verifyNonces verifies the nonces of the given blocks in parallel and sends a
- // nonceResult for each block on the done channel until quit is closed.
- func verifyNonces(pow pow.PoW, blocks []*types.Block, quit <-chan struct{}, done chan<- nonceResult) {
- // Spawn a few workers. They listen for blocks on the in channel
- // and send results on done. The workers will exit in the
- // background when in is closed.
- var (
- in = make(chan int)
- nworkers = runtime.GOMAXPROCS(0)
- )
- defer close(in)
- if len(blocks) < nworkers {
- nworkers = len(blocks)
- }
- for i := 0; i < nworkers; i++ {
- go func() {
- for i := range in {
- done <- nonceResult{i: i, valid: pow.Verify(blocks[i])}
- }
- }()
- }
- // Feed block indices to the workers.
- for i := range blocks {
- select {
- case in <- i:
- continue
- case <-quit:
- return
- }
- }
- }