- // Copyright 2015 The go-ethereum Authors
- // This file is part of the go-ethereum library.
- //
- // The go-ethereum library is free software: you can redistribute it and/or modify
- // it under the terms of the GNU Lesser General Public License as published by
- // the Free Software Foundation, either version 3 of the License, or
- // (at your option) any later version.
- //
- // The go-ethereum library is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU Lesser General Public License for more details.
- //
- // You should have received a copy of the GNU Lesser General Public License
- // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- // Package downloader contains the manual full chain synchronisation.
- package downloader
- import (
- "crypto/rand"
- "errors"
- "fmt"
- "math"
- "math/big"
- "strings"
- "sync"
- "sync/atomic"
- "time"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/logger"
- "github.com/ethereum/go-ethereum/logger/glog"
- "github.com/rcrowley/go-metrics"
- )
- var (
- MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request
- MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request
- MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request
- MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request
- MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
- MaxStateFetch = 384 // Amount of node state values to allow fetching per request
- hashTTL = 3 * time.Second // [eth/61] Time it takes for a hash request to time out
- blockTargetRTT = 3 * time.Second / 2 // [eth/61] Target time for completing a block retrieval request
- blockTTL = 3 * blockTargetRTT // [eth/61] Maximum time allowance before a block request is considered expired
- headerTTL = 3 * time.Second // [eth/62] Time it takes for a header request to time out
- bodyTargetRTT = 3 * time.Second / 2 // [eth/62] Target time for completing a block body retrieval request
- bodyTTL = 3 * bodyTargetRTT // [eth/62] Maximum time allowance before a block body request is considered expired
- receiptTargetRTT = 3 * time.Second / 2 // [eth/63] Target time for completing a receipt retrieval request
- receiptTTL = 3 * receiptTargetRTT // [eth/63] Maximum time allowance before a receipt request is considered expired
- stateTargetRTT = 2 * time.Second / 2 // [eth/63] Target time for completing a state trie retrieval request
- stateTTL = 3 * stateTargetRTT // [eth/63] Maximum time allowance before a node data request is considered expired
- maxQueuedHashes = 256 * 1024 // [eth/61] Maximum number of hashes to queue for import (DOS protection)
- maxQueuedHeaders = 256 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
- maxResultsProcess = 256 // Number of download results to import at once into the chain
- fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync
- fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
- fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it
- fsPivotInterval = 512 // Number of headers out of which to randomize the pivot point
- fsMinFullBlocks = 1024 // Number of blocks to retrieve fully even in fast sync
- )
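- // Illustrative arithmetic for the derived timeouts above (an added worked
- // example, not from the original source): with a target round trip of 1.5s,
- //
- //	blockTTL   = 3 * blockTargetRTT   = 4.5s
- //	bodyTTL    = 3 * bodyTargetRTT    = 4.5s
- //	receiptTTL = 3 * receiptTargetRTT = 4.5s
- //	stateTTL   = 3 * stateTargetRTT   = 3s (stateTargetRTT evaluates to 1s)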
- var (
- errBusy = errors.New("busy")
- errUnknownPeer = errors.New("peer is unknown or unhealthy")
- errBadPeer = errors.New("action from bad peer ignored")
- errStallingPeer = errors.New("peer is stalling")
- errNoPeers = errors.New("no peers to keep download active")
- errTimeout = errors.New("timeout")
- errEmptyHashSet = errors.New("empty hash set by peer")
- errEmptyHeaderSet = errors.New("empty header set by peer")
- errPeersUnavailable = errors.New("no peers available or all tried for download")
- errAlreadyInPool = errors.New("hash already in pool")
- errInvalidChain = errors.New("retrieved hash chain is invalid")
- errInvalidBlock = errors.New("retrieved block is invalid")
- errInvalidBody = errors.New("retrieved block body is invalid")
- errInvalidReceipt = errors.New("retrieved receipt is invalid")
- errCancelHashFetch = errors.New("hash download canceled (requested)")
- errCancelBlockFetch = errors.New("block download canceled (requested)")
- errCancelHeaderFetch = errors.New("block header download canceled (requested)")
- errCancelBodyFetch = errors.New("block body download canceled (requested)")
- errCancelReceiptFetch = errors.New("receipt download canceled (requested)")
- errCancelStateFetch = errors.New("state data download canceled (requested)")
- errCancelProcessing = errors.New("processing canceled (requested)")
- errNoSyncActive = errors.New("no sync active")
- )
- type Downloader struct {
- mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle)
- noFast bool // Flag to disable fast syncing in case of a security error
- mux *event.TypeMux // Event multiplexer to announce sync operation events
- queue *queue // Scheduler for selecting the hashes to download
- peers *peerSet // Set of active peers from which download can proceed
- interrupt int32 // Atomic boolean to signal termination
- // Statistics
- syncStatsChainOrigin uint64 // Origin block number where syncing started
- syncStatsChainHeight uint64 // Highest block number known when syncing started
- syncStatsStateTotal uint64 // Total number of node state entries known so far
- syncStatsStateDone uint64 // Number of state trie entries already pulled
- syncStatsLock sync.RWMutex // Lock protecting the sync stats fields
- // Callbacks
- hasHeader headerCheckFn // Checks if a header is present in the chain
- hasBlockAndState blockAndStateCheckFn // Checks if a block and associated state is present in the chain
- getHeader headerRetrievalFn // Retrieves a header from the chain
- getBlock blockRetrievalFn // Retrieves a block from the chain
- headHeader headHeaderRetrievalFn // Retrieves the head header from the chain
- headBlock headBlockRetrievalFn // Retrieves the head block from the chain
- headFastBlock headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain
- commitHeadBlock headBlockCommitterFn // Commits a manually assembled block as the chain head
- getTd tdRetrievalFn // Retrieves the TD of a block from the chain
- insertHeaders headerChainInsertFn // Injects a batch of headers into the chain
- insertBlocks blockChainInsertFn // Injects a batch of blocks into the chain
- insertReceipts receiptChainInsertFn // Injects a batch of blocks and their receipts into the chain
- rollback chainRollbackFn // Removes a batch of recently added chain links
- dropPeer peerDropFn // Drops a peer for misbehaving
- // Status
- synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
- synchronising int32
- notified int32
- // Channels
- newPeerCh chan *peer
- hashCh chan dataPack // [eth/61] Channel receiving inbound hashes
- blockCh chan dataPack // [eth/61] Channel receiving inbound blocks
- headerCh chan dataPack // [eth/62] Channel receiving inbound block headers
- bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies
- receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts
- stateCh chan dataPack // [eth/63] Channel receiving inbound node state data
- blockWakeCh chan bool // [eth/61] Channel to signal the block fetcher of new tasks
- bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks
- receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks
- stateWakeCh chan bool // [eth/63] Channel to signal the state fetcher of new tasks
- cancelCh chan struct{} // Channel to cancel mid-flight syncs
- cancelLock sync.RWMutex // Lock to protect the cancel channel in delivers
- // Testing hooks
- syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run
- bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch
- receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
- chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
- }
- // New creates a new downloader to fetch hashes and blocks from remote peers.
- func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlockAndState blockAndStateCheckFn,
- getHeader headerRetrievalFn, getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn,
- headFastBlock headFastBlockRetrievalFn, commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn,
- insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader {
- return &Downloader{
- mode: FullSync,
- mux: mux,
- queue: newQueue(stateDb),
- peers: newPeerSet(),
- hasHeader: hasHeader,
- hasBlockAndState: hasBlockAndState,
- getHeader: getHeader,
- getBlock: getBlock,
- headHeader: headHeader,
- headBlock: headBlock,
- headFastBlock: headFastBlock,
- commitHeadBlock: commitHeadBlock,
- getTd: getTd,
- insertHeaders: insertHeaders,
- insertBlocks: insertBlocks,
- insertReceipts: insertReceipts,
- rollback: rollback,
- dropPeer: dropPeer,
- newPeerCh: make(chan *peer, 1),
- hashCh: make(chan dataPack, 1),
- blockCh: make(chan dataPack, 1),
- headerCh: make(chan dataPack, 1),
- bodyCh: make(chan dataPack, 1),
- receiptCh: make(chan dataPack, 1),
- stateCh: make(chan dataPack, 1),
- blockWakeCh: make(chan bool, 1),
- bodyWakeCh: make(chan bool, 1),
- receiptWakeCh: make(chan bool, 1),
- stateWakeCh: make(chan bool, 1),
- }
- }
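- // Construction sketch (hedged): a caller such as the eth protocol manager
- // would bind the downloader to its blockchain roughly as below; the chain,
- // db, mux and removePeer identifiers are hypothetical caller-side names:
- //
- //	dl := New(db, mux, chain.HasHeader, chain.HasBlockAndState, chain.GetHeader,
- //		chain.GetBlock, chain.CurrentHeader, chain.CurrentBlock,
- //		chain.CurrentFastBlock, chain.FastSyncCommitHead, chain.GetTd,
- //		chain.InsertHeaderChain, chain.InsertChain, chain.InsertReceiptChain,
- //		chain.Rollback, removePeer)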
- // Progress retrieves the synchronisation boundaries, specifically the origin
- // block where synchronisation started (it may have failed/suspended); the block
- // or header the sync is currently at; and the latest known block which the sync targets.
- //
- // In addition, during the state download phase of fast synchronisation the number
- // of processed and the total number of known states are also returned. Otherwise
- // these are zero.
- func (d *Downloader) Progress() (uint64, uint64, uint64, uint64, uint64) {
- // Fetch the pending state count outside of the lock to prevent unforeseen deadlocks
- pendingStates := uint64(d.queue.PendingNodeData())
- // Lock the current stats and return the progress
- d.syncStatsLock.RLock()
- defer d.syncStatsLock.RUnlock()
- current := uint64(0)
- switch d.mode {
- case FullSync:
- current = d.headBlock().NumberU64()
- case FastSync:
- current = d.headFastBlock().NumberU64()
- case LightSync:
- current = d.headHeader().Number.Uint64()
- }
- return d.syncStatsChainOrigin, current, d.syncStatsChainHeight, d.syncStatsStateDone, d.syncStatsStateDone + pendingStates
- }
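- // Reading the boundaries (hedged sketch of a hypothetical caller):
- //
- //	origin, current, height, pulled, known := dl.Progress()
- //	if height > origin {
- //		pct := 100 * float64(current-origin) / float64(height-origin)
- //		fmt.Printf("sync %.2f%%, state %d/%d\n", pct, pulled, known)
- //	}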
- // Synchronising returns whether the downloader is currently retrieving blocks.
- func (d *Downloader) Synchronising() bool {
- return atomic.LoadInt32(&d.synchronising) > 0
- }
- // RegisterPeer injects a new download peer into the set of block sources to be
- // used for fetching hashes and blocks from.
- func (d *Downloader) RegisterPeer(id string, version int, head common.Hash,
- getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn, // eth/61 callbacks, remove when upgrading
- getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
- getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
- glog.V(logger.Detail).Infoln("Registering peer", id)
- if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
- glog.V(logger.Error).Infoln("Register failed:", err)
- return err
- }
- return nil
- }
- // UnregisterPeer removes a peer from the known list, preventing any action from
- // the specified peer. An effort is also made to return any pending fetches into
- // the queue.
- func (d *Downloader) UnregisterPeer(id string) error {
- glog.V(logger.Detail).Infoln("Unregistering peer", id)
- if err := d.peers.Unregister(id); err != nil {
- glog.V(logger.Error).Infoln("Unregister failed:", err)
- return err
- }
- d.queue.Revoke(id)
- return nil
- }
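- // Registration sketch (hedged; pr is a hypothetical protocol-peer wrapper and
- // the request methods stand in for whatever fetcher callbacks the caller exposes):
- //
- //	err := dl.RegisterPeer(pr.id, pr.version, pr.Head(),
- //		pr.RequestHashes, pr.RequestHashesFromNumber, pr.RequestBlocks,
- //		pr.RequestHeadersByHash, pr.RequestHeadersByNumber,
- //		pr.RequestBodies, pr.RequestReceipts, pr.RequestNodeData)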
- // Synchronise tries to sync up our local block chain with a remote peer, both
- // adding various sanity checks as well as wrapping it with various log entries.
- func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
- glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td)
- err := d.synchronise(id, head, td, mode)
- switch err {
- case nil:
- glog.V(logger.Detail).Infof("Synchronisation completed")
- case errBusy:
- glog.V(logger.Detail).Infof("Synchronisation already in progress")
- case errTimeout, errBadPeer, errStallingPeer, errEmptyHashSet, errEmptyHeaderSet, errPeersUnavailable, errInvalidChain:
- glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
- d.dropPeer(id)
- default:
- glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
- }
- return err
- }
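- // Typical invocation (hedged sketch; bestPeer names the highest-TD peer as
- // selected by a hypothetical caller):
- //
- //	if err := dl.Synchronise(bestPeer.id, bestPeer.head, bestPeer.td, FastSync); err != nil {
- //		// errBusy simply means a sync is already running; misbehaving
- //		// peers have already been dropped and logged above.
- //	}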
- // synchronise will select the peer and use it for synchronising. It aborts if
- // the peer is unknown, or if a sync is already in progress. If any of the
- // checks fail an error will be returned. This method is synchronous.
- func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
- // Mock out the synchronisation if testing
- if d.synchroniseMock != nil {
- return d.synchroniseMock(id, hash)
- }
- // Make sure only one goroutine is ever allowed past this point at once
- if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
- return errBusy
- }
- defer atomic.StoreInt32(&d.synchronising, 0)
- // Post a user notification of the sync (only once per session)
- if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
- glog.V(logger.Info).Infoln("Block synchronisation started")
- }
- // Reset the queue, peer set and wake channels to clean any internal leftover state
- d.queue.Reset()
- d.peers.Reset()
- for _, ch := range []chan bool{d.blockWakeCh, d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
- select {
- case <-ch:
- default:
- }
- }
- // Reset any ephemeral sync statistics
- d.syncStatsLock.Lock()
- d.syncStatsStateTotal = 0
- d.syncStatsStateDone = 0
- d.syncStatsLock.Unlock()
- // Create cancel channel for aborting mid-flight
- d.cancelLock.Lock()
- d.cancelCh = make(chan struct{})
- d.cancelLock.Unlock()
- // Set the requested sync mode, unless it's forbidden
- d.mode = mode
- if d.mode == FastSync && d.noFast {
- d.mode = FullSync
- }
- // Retrieve the origin peer and initiate the downloading process
- p := d.peers.Peer(id)
- if p == nil {
- return errUnknownPeer
- }
- return d.syncWithPeer(p, hash, td)
- }
- // syncWithPeer starts a block synchronisation based on the hash chain from the
- // specified peer and head hash.
- func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err error) {
- d.mux.Post(StartEvent{})
- defer func() {
- // reset on error
- if err != nil {
- d.mux.Post(FailedEvent{err})
- } else {
- d.mux.Post(DoneEvent{})
- }
- }()
- glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
- defer func(start time.Time) {
- glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
- }(time.Now())
- switch {
- case p.version == 61:
- // Look up the sync boundaries: the common ancestor and the target block
- latest, err := d.fetchHeight61(p)
- if err != nil {
- return err
- }
- origin, err := d.findAncestor61(p)
- if err != nil {
- return err
- }
- d.syncStatsLock.Lock()
- if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
- d.syncStatsChainOrigin = origin
- }
- d.syncStatsChainHeight = latest
- d.syncStatsLock.Unlock()
- // Initiate the sync using a concurrent hash and block retrieval algorithm
- d.queue.Prepare(origin+1, d.mode, 0)
- if d.syncInitHook != nil {
- d.syncInitHook(origin, latest)
- }
- return d.spawnSync(
- func() error { return d.fetchHashes61(p, td, origin+1) },
- func() error { return d.fetchBlocks61(origin + 1) },
- )
- case p.version >= 62:
- // Look up the sync boundaries: the common ancestor and the target block
- latest, err := d.fetchHeight(p)
- if err != nil {
- return err
- }
- origin, err := d.findAncestor(p)
- if err != nil {
- return err
- }
- d.syncStatsLock.Lock()
- if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
- d.syncStatsChainOrigin = origin
- }
- d.syncStatsChainHeight = latest
- d.syncStatsLock.Unlock()
- // Initiate the sync using a concurrent header and content retrieval algorithm
- pivot := uint64(0)
- switch d.mode {
- case LightSync:
- pivot = latest
- case FastSync:
- // Calculate the new fast/slow sync pivot point
- pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
- if err != nil {
- panic(fmt.Sprintf("Failed to access crypto random source: %v", err))
- }
- if latest > uint64(fsMinFullBlocks)+pivotOffset.Uint64() {
- pivot = latest - uint64(fsMinFullBlocks) - pivotOffset.Uint64()
- }
- // If the point is below the origin, move origin back to ensure state download
- if pivot < origin {
- if pivot > 0 {
- origin = pivot - 1
- } else {
- origin = 0
- }
- }
- glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot)
- }
- d.queue.Prepare(origin+1, d.mode, pivot)
- if d.syncInitHook != nil {
- d.syncInitHook(origin, latest)
- }
- return d.spawnSync(
- func() error { return d.fetchHeaders(p, td, origin+1) }, // Headers are always retrieved
- func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync
- func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync
- func() error { return d.fetchNodeData() }, // Node state data is retrieved during fast sync
- )
- default:
- // Something very wrong, stop right here
- glog.V(logger.Error).Infof("Unsupported eth protocol: %d", p.version)
- return errBadPeer
- }
- }
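- // Worked pivot example (illustrative numbers, not from the original source):
- // with latest = 100000, fsMinFullBlocks = 1024 and a random pivotOffset of
- // 300, pivot = 100000 - 1024 - 300 = 98676. Everything up to the pivot is
- // fast-synced (headers, bodies, receipts and the pivot state), while the
- // final 1324 blocks are fetched and executed as in a full sync.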
- // spawnSync runs d.process and all given fetcher functions to completion in
- // separate goroutines, returning the first error that appears.
- func (d *Downloader) spawnSync(fetchers ...func() error) error {
- var wg sync.WaitGroup
- errc := make(chan error, len(fetchers)+1)
- wg.Add(len(fetchers) + 1)
- go func() { defer wg.Done(); errc <- d.process() }()
- for _, fn := range fetchers {
- fn := fn
- go func() { defer wg.Done(); errc <- fn() }()
- }
- // Wait for the first error, then terminate the others.
- var err error
- for i := 0; i < len(fetchers)+1; i++ {
- if i == len(fetchers) {
- // Close the queue when all fetchers have exited.
- // This will cause the block processor to end when
- // it has processed the queue.
- d.queue.Close()
- }
- if err = <-errc; err != nil {
- break
- }
- }
- d.queue.Close()
- d.cancel()
- wg.Wait()
- return err
- }
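- // The loop above is a "first error wins" fan-out. A standalone sketch of the
- // same pattern (assumed example, not part of this file):
- //
- //	func firstError(fns ...func() error) error {
- //		errc := make(chan error, len(fns))
- //		for _, fn := range fns {
- //			fn := fn // capture loop variable
- //			go func() { errc <- fn() }()
- //		}
- //		var err error
- //		for range fns {
- //			if e := <-errc; e != nil && err == nil {
- //				err = e
- //			}
- //		}
- //		return err
- //	}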
- // cancel aborts all of the mid-flight operations by closing the cancellation
- // channel, unless it has been closed already.
- func (d *Downloader) cancel() {
- // Close the current cancel channel
- d.cancelLock.Lock()
- if d.cancelCh != nil {
- select {
- case <-d.cancelCh:
- // Channel was already closed
- default:
- close(d.cancelCh)
- }
- }
- d.cancelLock.Unlock()
- }
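- // The select above is the standard idempotent-close idiom: closing a closed
- // channel panics, so a receive first probes whether cancelCh is already
- // closed. Minimal form of the idiom (assumed example):
- //
- //	select {
- //	case <-done: // already closed, nothing to do
- //	default:
- //		close(done)
- //	}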
- // Terminate interrupts the downloader, canceling all pending operations.
- // The downloader cannot be reused after calling Terminate.
- func (d *Downloader) Terminate() {
- atomic.StoreInt32(&d.interrupt, 1)
- d.cancel()
- }
- // fetchHeight61 retrieves the head block of the remote peer to aid in estimating
- // the total time a pending synchronisation would take.
- func (d *Downloader) fetchHeight61(p *peer) (uint64, error) {
- glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
- // Request the advertised remote head block and wait for the response
- go p.getBlocks([]common.Hash{p.head})
- timeout := time.After(hashTTL)
- for {
- select {
- case <-d.cancelCh:
- return 0, errCancelBlockFetch
- case packet := <-d.blockCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received blocks from incorrect peer(%s)", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- blocks := packet.(*blockPack).blocks
- if len(blocks) != 1 {
- glog.V(logger.Debug).Infof("%v: invalid number of head blocks: %d != 1", p, len(blocks))
- return 0, errBadPeer
- }
- return blocks[0].NumberU64(), nil
- case <-timeout:
- glog.V(logger.Debug).Infof("%v: head block timeout", p)
- return 0, errTimeout
- case <-d.hashCh:
- // Out of bounds hashes received, ignore them
- case <-d.headerCh:
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Ignore eth/{62,63} packets because this is eth/61.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- }
- // findAncestor61 tries to locate the common ancestor block of the local chain and
- // a remote peer's blockchain. In the general case when our node was in sync and
- // on the correct chain, checking the top N blocks should already get us a match.
- // In the rare scenario when we ended up on a long reorganisation (i.e. none of
- // the head blocks match), we do a binary search to find the common ancestor.
- func (d *Downloader) findAncestor61(p *peer) (uint64, error) {
- glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
- // Request our head blocks to short-circuit ancestor location
- head := d.headBlock().NumberU64()
- from := int64(head) - int64(MaxHashFetch) + 1
- if from < 0 {
- from = 0
- }
- go p.getAbsHashes(uint64(from), MaxHashFetch)
- // Wait for the remote response to the head fetch
- number, hash := uint64(0), common.Hash{}
- timeout := time.After(hashTTL)
- for finished := false; !finished; {
- select {
- case <-d.cancelCh:
- return 0, errCancelHashFetch
- case packet := <-d.hashCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- hashes := packet.(*hashPack).hashes
- if len(hashes) == 0 {
- glog.V(logger.Debug).Infof("%v: empty head hash set", p)
- return 0, errEmptyHashSet
- }
- // Check if a common ancestor was found
- finished = true
- for i := len(hashes) - 1; i >= 0; i-- {
- // Skip any headers that underflow/overflow our requested set
- header := d.getHeader(hashes[i])
- if header == nil || header.Number.Int64() < from || header.Number.Uint64() > head {
- continue
- }
- // Otherwise check if we already know the header or not
- if d.hasBlockAndState(hashes[i]) {
- number, hash = header.Number.Uint64(), header.Hash()
- break
- }
- }
- case <-timeout:
- glog.V(logger.Debug).Infof("%v: head hash timeout", p)
- return 0, errTimeout
- case <-d.blockCh:
- // Out of bounds blocks received, ignore them
- case <-d.headerCh:
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Ignore eth/{62,63} packets because this is eth/61.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- // If the head fetch already found an ancestor, return
- if !common.EmptyHash(hash) {
- glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
- return number, nil
- }
- // Ancestor not found, we need to binary search over our chain
- start, end := uint64(0), head
- for start+1 < end {
- // Split our chain interval in two, and request the hash to cross check
- check := (start + end) / 2
- timeout := time.After(hashTTL)
- go p.getAbsHashes(uint64(check), 1)
- // Wait until a reply arrives to this request
- for arrived := false; !arrived; {
- select {
- case <-d.cancelCh:
- return 0, errCancelHashFetch
- case packet := <-d.hashCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- hashes := packet.(*hashPack).hashes
- if len(hashes) != 1 {
- glog.V(logger.Debug).Infof("%v: invalid search hash set (%d)", p, len(hashes))
- return 0, errBadPeer
- }
- arrived = true
- // Modify the search interval based on the response
- if !d.hasBlockAndState(hashes[0]) {
- end = check
- break
- }
- block := d.getBlock(hashes[0]) // this doesn't check state, hence the above explicit check
- if block.NumberU64() != check {
- glog.V(logger.Debug).Infof("%v: non requested hash #%d [%x…], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check)
- return 0, errBadPeer
- }
- start = check
- case <-timeout:
- glog.V(logger.Debug).Infof("%v: search hash timeout", p)
- return 0, errTimeout
- case <-d.blockCh:
- // Out of bounds blocks received, ignore them
- case <-d.headerCh:
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Ignore eth/{62,63} packets because this is eth/61.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- }
- return start, nil
- }
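- // Binary search walkthrough (illustrative numbers): with head = 1000 and the
- // true fork point at 700, the probes visit check = 500 (known), 750 (unknown),
- // 625, 687, 718, 702, 694, 698, 700, 701, shrinking the interval until
- // start+1 == end and start == 700 is returned, i.e. O(log head) requests.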
- // fetchHashes61 keeps retrieving hashes from the requested number, until no more
- // are returned, potentially throttling on the way.
- func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error {
- glog.V(logger.Debug).Infof("%v: downloading hashes from #%d", p, from)
- // Create a timeout timer, and the associated hash fetcher
- request := time.Now() // time of the last fetch request
- timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
- <-timeout.C // timeout channel should be initially empty
- defer timeout.Stop()
- getHashes := func(from uint64) {
- glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from)
- go p.getAbsHashes(from, MaxHashFetch)
- request = time.Now()
- timeout.Reset(hashTTL)
- }
- // Start pulling hashes, until all are exhausted
- getHashes(from)
- gotHashes := false
- for {
- select {
- case <-d.cancelCh:
- return errCancelHashFetch
- case packet := <-d.hashCh:
- // Make sure the active peer is giving us the hashes
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", packet.PeerId())
- break
- }
- hashReqTimer.UpdateSince(request)
- timeout.Stop()
- // If no more hashes are inbound, notify the block fetcher and return
- if packet.Items() == 0 {
- glog.V(logger.Debug).Infof("%v: no available hashes", p)
- select {
- case d.blockWakeCh <- false:
- case <-d.cancelCh:
- }
- // If no hashes were retrieved at all, the peer violated its TD promise that it had a
- // better chain compared to ours. The only exception is if its promised blocks were
- // already imported by other means (e.g. fetcher):
- //
- // R <remote peer>, L <local node>: Both at block 10
- // R: Mine block 11, and propagate it to L
- // L: Queue block 11 for import
- // L: Notice that R's head and TD increased compared to ours, start sync
- // L: Import of block 11 finishes
- // L: Sync begins, and finds common ancestor at 11
- // L: Request new hashes up from 11 (R's TD was higher, it must have something)
- // R: Nothing to give
- if !gotHashes && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
- return errStallingPeer
- }
- return nil
- }
- gotHashes = true
- hashes := packet.(*hashPack).hashes
- // Otherwise insert all the new hashes, aborting in case of junk
- glog.V(logger.Detail).Infof("%v: scheduling %d hashes from #%d", p, len(hashes), from)
- inserts := d.queue.Schedule61(hashes, true)
- if len(inserts) != len(hashes) {
- glog.V(logger.Debug).Infof("%v: stale hashes", p)
- return errBadPeer
- }
- // Notify the block fetcher of new hashes, but stop if queue is full
- if d.queue.PendingBlocks() < maxQueuedHashes {
- // We still have hashes to fetch, send continuation wake signal (potential)
- select {
- case d.blockWakeCh <- true:
- default:
- }
- } else {
- // Hash limit reached, send a termination wake signal (enforced)
- select {
- case d.blockWakeCh <- false:
- case <-d.cancelCh:
- }
- return nil
- }
- // Queue not yet full, fetch the next batch
- from += uint64(len(hashes))
- getHashes(from)
- case <-timeout.C:
- glog.V(logger.Debug).Infof("%v: hash request timed out", p)
- hashTimeoutMeter.Mark(1)
- return errTimeout
- case <-d.headerCh:
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Ignore eth/{62,63} packets because this is eth/61.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- }
- // fetchBlocks61 iteratively downloads the scheduled hashes, taking any available
- // peers, reserving a chunk of blocks for each, waiting for delivery and also
- // periodically checking for timeouts.
- func (d *Downloader) fetchBlocks61(from uint64) error {
- glog.V(logger.Debug).Infof("Downloading blocks from #%d", from)
- defer glog.V(logger.Debug).Infof("Block download terminated")
- // Create a timeout timer for scheduling expiration tasks
- ticker := time.NewTicker(100 * time.Millisecond)
- defer ticker.Stop()
- update := make(chan struct{}, 1)
- // Fetch blocks until the hash fetcher's done
- finished := false
- for {
- select {
- case <-d.cancelCh:
- return errCancelBlockFetch
- case packet := <-d.blockCh:
- // If the peer was previously banned and failed to deliver its pack
- // in a reasonable time frame, ignore its message.
- if peer := d.peers.Peer(packet.PeerId()); peer != nil {
- blocks := packet.(*blockPack).blocks
- // Deliver the received chunk of blocks and check chain validity
- accepted, err := d.queue.DeliverBlocks(peer.id, blocks)
- if err == errInvalidChain {
- return err
- }
- // Unless a peer delivered something completely different from what was requested
- // (usually caused by a timed out request which came through in the end), set it
- // to idle. If the delivery's stale, the peer should have already been idled.
- if err != errStaleDelivery {
- peer.SetBlocksIdle(accepted)
- }
- // Issue a log to the user to see what's going on
- switch {
- case err == nil && len(blocks) == 0:
- glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
- case err == nil:
- glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blocks))
- default:
- glog.V(logger.Detail).Infof("%s: delivery failed: %v", peer, err)
- }
- }
- // Blocks arrived, try to update the progress
- select {
- case update <- struct{}{}:
- default:
- }
- case cont := <-d.blockWakeCh:
- // The hash fetcher sent a continuation flag, check if it's done
- if !cont {
- finished = true
- }
- // Hashes arrived, try to update the progress
- select {
- case update <- struct{}{}:
- default:
- }
- case <-ticker.C:
- // Sanity check: update the progress
- select {
- case update <- struct{}{}:
- default:
- }
- case <-update:
- // Short circuit if we lost all our peers
- if d.peers.Len() == 0 {
- return errNoPeers
- }
- // Check for block request timeouts and demote the responsible peers
- for pid, fails := range d.queue.ExpireBlocks(blockTTL) {
- if peer := d.peers.Peer(pid); peer != nil {
- if fails > 1 {
- glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
- peer.SetBlocksIdle(0)
- } else {
- glog.V(logger.Debug).Infof("%s: stalling block delivery, dropping", peer)
- d.dropPeer(pid)
- }
- }
- }
- // If there's nothing more to fetch, wait or terminate
- if d.queue.PendingBlocks() == 0 {
- if !d.queue.InFlightBlocks() && finished {
- glog.V(logger.Debug).Infof("Block fetching completed")
- return nil
- }
- break
- }
- // Send a download request to all idle peers, until throttled
- throttled := false
- idles, total := d.peers.BlockIdlePeers()
- for _, peer := range idles {
- // Short circuit if throttling activated
- if d.queue.ShouldThrottleBlocks() {
- throttled = true
- break
- }
- // Reserve a chunk of hashes for a peer. A nil can mean either that
- // no more hashes are available, or that the peer is known not to
- // have them.
- request := d.queue.ReserveBlocks(peer, peer.BlockCapacity())
- if request == nil {
- continue
- }
- if glog.V(logger.Detail) {
- glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
- }
- // Fetch the chunk and make sure any errors return the hashes to the queue
- if err := peer.Fetch61(request); err != nil {
- // Although we could try and make an attempt to fix this, this error really
- // means that we've double allocated a fetch task to a peer. If that is the
- // case, the internal state of the downloader and the queue is very wrong so
- // better hard crash and note the error instead of silently accumulating into
- // a much bigger issue.
- panic(fmt.Sprintf("%v: fetch assignment failed", peer))
- }
- }
- // Make sure that we have peers available for fetching. If all peers have been tried
- // and all failed, throw an error
- if !throttled && !d.queue.InFlightBlocks() && len(idles) == total {
- return errPeersUnavailable
- }
- case <-d.headerCh:
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Ignore eth/{62,63} packets because this is eth/61.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- }
- // fetchHeight retrieves the head header of the remote peer to aid in estimating
- // the total time a pending synchronisation would take.
- func (d *Downloader) fetchHeight(p *peer) (uint64, error) {
- glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p)
- // Request the advertised remote head block and wait for the response
- go p.getRelHeaders(p.head, 1, 0, false)
- timeout := time.After(headerTTL)
- for {
- select {
- case <-d.cancelCh:
- return 0, errCancelHeaderFetch
- case packet := <-d.headerCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- headers := packet.(*headerPack).headers
- if len(headers) != 1 {
- glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers))
- return 0, errBadPeer
- }
- return headers[0].Number.Uint64(), nil
- case <-timeout:
- glog.V(logger.Debug).Infof("%v: head header timeout", p)
- return 0, errTimeout
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Out of bounds delivery, ignore
- case <-d.hashCh:
- case <-d.blockCh:
- // Ignore eth/61 packets because this is eth/62+.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- }
- // findAncestor tries to locate the common ancestor link of the local chain and
- // a remote peer's blockchain. In the general case when our node was in sync and
- // on the correct chain, checking the top N links should already get us a match.
- // In the rare scenario when we ended up on a long reorganisation (i.e. none of
- // the head links match), we do a binary search to find the common ancestor.
- func (d *Downloader) findAncestor(p *peer) (uint64, error) {
- glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
- // Request our head headers to short circuit ancestor location
- head := d.headHeader().Number.Uint64()
- if d.mode == FullSync {
- head = d.headBlock().NumberU64()
- } else if d.mode == FastSync {
- head = d.headFastBlock().NumberU64()
- }
- from := int64(head) - int64(MaxHeaderFetch) + 1
- if from < 0 {
- from = 0
- }
- go p.getAbsHeaders(uint64(from), MaxHeaderFetch, 0, false)
- // Wait for the remote response to the head fetch
- number, hash := uint64(0), common.Hash{}
- timeout := time.After(hashTTL)
- for finished := false; !finished; {
- select {
- case <-d.cancelCh:
- return 0, errCancelHashFetch
- case packet := <-d.headerCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- headers := packet.(*headerPack).headers
- if len(headers) == 0 {
- glog.V(logger.Warn).Infof("%v: empty head header set", p)
- return 0, errEmptyHeaderSet
- }
- // Make sure the peer's reply conforms to the request
- for i := 0; i < len(headers); i++ {
- if number := headers[i].Number.Int64(); number != from+int64(i) {
- glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i), number)
- return 0, errInvalidChain
- }
- if i > 0 && headers[i-1].Hash() != headers[i].ParentHash {
- glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ancestry: expected [%x], got [%x]", p, i, headers[i-1].Hash().Bytes()[:4], headers[i].ParentHash[:4])
- return 0, errInvalidChain
- }
- }
- // Check if a common ancestor was found
- finished = true
- for i := len(headers) - 1; i >= 0; i-- {
- // Skip any headers that underflow/overflow our requested set
- if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > head {
- continue
- }
- // Otherwise check if we already know the header or not
- if (d.mode != LightSync && d.hasBlockAndState(headers[i].Hash())) || (d.mode == LightSync && d.hasHeader(headers[i].Hash())) {
- number, hash = headers[i].Number.Uint64(), headers[i].Hash()
- break
- }
- }
- case <-timeout:
- glog.V(logger.Debug).Infof("%v: head header timeout", p)
- return 0, errTimeout
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Out of bounds delivery, ignore
- case <-d.hashCh:
- case <-d.blockCh:
- // Ignore eth/61 packets because this is eth/62+.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- // If the head fetch already found an ancestor, return
- if !common.EmptyHash(hash) {
- glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4])
- return number, nil
- }
- // Ancestor not found, we need to binary search over our chain
- start, end := uint64(0), head
- for start+1 < end {
- // Split our chain interval in two, and request the hash to cross check
- check := (start + end) / 2
- timeout := time.After(hashTTL)
- go p.getAbsHeaders(uint64(check), 1, 0, false)
- // Wait until a reply arrives to this request
- for arrived := false; !arrived; {
- select {
- case <-d.cancelCh:
- return 0, errCancelHashFetch
- case packet := <-d.headerCh:
- // Discard anything not from the origin peer
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId())
- break
- }
- // Make sure the peer actually gave something valid
- headers := packet.(*headerPack).headers
- if len(headers) != 1 {
- glog.V(logger.Debug).Infof("%v: invalid search header set (%d)", p, len(headers))
- return 0, errBadPeer
- }
- arrived = true
- // Modify the search interval based on the response
- if (d.mode == FullSync && !d.hasBlockAndState(headers[0].Hash())) || (d.mode != FullSync && !d.hasHeader(headers[0].Hash())) {
- end = check
- break
- }
- header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
- if header.Number.Uint64() != check {
- glog.V(logger.Debug).Infof("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check)
- return 0, errBadPeer
- }
- start = check
- case <-timeout:
- glog.V(logger.Debug).Infof("%v: search header timeout", p)
- return 0, errTimeout
- case <-d.bodyCh:
- case <-d.stateCh:
- case <-d.receiptCh:
- // Out of bounds delivery, ignore
- case <-d.hashCh:
- case <-d.blockCh:
- // Ignore eth/61 packets because this is eth/62+.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- }
- return start, nil
- }
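- // The loop above is a predicate binary search for the highest locally known
- // block number. Generic sketch (assumed example, not part of this file):
- //
- //	func lastKnown(head uint64, known func(uint64) bool) uint64 {
- //		start, end := uint64(0), head
- //		for start+1 < end {
- //			if check := (start + end) / 2; known(check) {
- //				start = check
- //			} else {
- //				end = check
- //			}
- //		}
- //		return start
- //	}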
- // fetchHeaders keeps retrieving headers from the requested number, until no more
- // are returned, potentially throttling on the way.
- //
- // Depending on the sync mode, the retrieved headers are either scheduled for
- // block body and receipt retrieval, or imported directly as a pure header chain.
- func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
- glog.V(logger.Debug).Infof("%v: downloading headers from #%d", p, from)
- defer glog.V(logger.Debug).Infof("%v: header download terminated", p)
- // Calculate the pivoting point for switching from fast to slow sync
- pivot := d.queue.FastSyncPivot()
- // Keep a count of uncertain headers to roll back
- rollback := []*types.Header{}
- defer func() {
- if len(rollback) > 0 {
- // Flatten the headers and roll them back
- hashes := make([]common.Hash, len(rollback))
- for i, header := range rollback {
- hashes[i] = header.Hash()
- }
- lh, lfb, lb := d.headHeader().Number, d.headFastBlock().Number(), d.headBlock().Number()
- d.rollback(hashes)
- glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
- len(hashes), lh, d.headHeader().Number, lfb, d.headFastBlock().Number(), lb, d.headBlock().Number())
- // If we're already past the pivot point, this could be an attack, disable fast sync
- if rollback[len(rollback)-1].Number.Uint64() > pivot {
- d.noFast = true
- }
- }
- }()
- // Create a timeout timer, and the associated hash fetcher
- request := time.Now() // time of the last fetch request
- timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
- <-timeout.C // timeout channel should be initially empty
- defer timeout.Stop()
- getHeaders := func(from uint64) {
- glog.V(logger.Detail).Infof("%v: fetching %d headers from #%d", p, MaxHeaderFetch, from)
- go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
- request = time.Now()
- timeout.Reset(headerTTL)
- }
- // Start pulling headers, until all are exhausted
- getHeaders(from)
- gotHeaders := false
- for {
- select {
- case <-d.cancelCh:
- return errCancelHeaderFetch
- case packet := <-d.headerCh:
- // Make sure the active peer is giving us the headers
- if packet.PeerId() != p.id {
- glog.V(logger.Debug).Infof("Received headers from incorrect peer (%s)", packet.PeerId())
- break
- }
- headerReqTimer.UpdateSince(request)
- timeout.Stop()
- // If no more headers are inbound, notify the content fetchers and return
- if packet.Items() == 0 {
- glog.V(logger.Debug).Infof("%v: no available headers", p)
- for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
- select {
- case ch <- false:
- case <-d.cancelCh:
- }
- }
- // If no headers were retrieved at all, the peer violated its TD promise that it had a
- // better chain compared to ours. The only exception is if its promised blocks were
- // already imported by other means (e.g. fetcher):
- //
- // R <remote peer>, L <local node>: Both at block 10
- // R: Mine block 11, and propagate it to L
- // L: Queue block 11 for import
- // L: Notice that R's head and TD increased compared to ours, start sync
- // L: Import of block 11 finishes
- // L: Sync begins, and finds common ancestor at 11
- // L: Request new headers up from 11 (R's TD was higher, it must have something)
- // R: Nothing to give
- if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 {
- return errStallingPeer
- }
- // If fast or light syncing, ensure promised headers are indeed delivered. This is
- // needed to detect scenarios where an attacker feeds a bad pivot and then bails out
- // of delivering the post-pivot blocks that would flag the invalid content.
- //
- // This check cannot be executed "as is" for full imports, since blocks may still be
- // queued for processing when the header download completes. However, as long as the
- // peer gave us something useful, we're already happy/progressed (above check).
- if d.mode == FastSync || d.mode == LightSync {
- if td.Cmp(d.getTd(d.headHeader().Hash())) > 0 {
- return errStallingPeer
- }
- }
- rollback = nil
- return nil
- }
- gotHeaders = true
- headers := packet.(*headerPack).headers
- // Otherwise insert all the new headers, aborting in case of junk
- glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from)
- if d.mode == FastSync || d.mode == LightSync {
- // Collect the yet unknown headers to mark them as uncertain
- unknown := make([]*types.Header, 0, len(headers))
- for _, header := range headers {
- if !d.hasHeader(header.Hash()) {
- unknown = append(unknown, header)
- }
- }
- // If we're importing pure headers, verify based on their recentness
- frequency := fsHeaderCheckFrequency
- if headers[len(headers)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
- frequency = 1
- }
- if n, err := d.insertHeaders(headers, frequency); err != nil {
- // If some headers were inserted, add them too to the rollback list
- if n > 0 {
- rollback = append(rollback, headers[:n]...)
- }
- glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headers[n].Number, headers[n].Hash().Bytes()[:4], err)
- return errInvalidChain
- }
- // All verifications passed, store newly found uncertain headers
- rollback = append(rollback, unknown...)
- if len(rollback) > fsHeaderSafetyNet {
- rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
- }
- }
- if d.mode == FullSync || d.mode == FastSync {
- inserts := d.queue.Schedule(headers, from)
- if len(inserts) != len(headers) {
- glog.V(logger.Debug).Infof("%v: stale headers", p)
- return errBadPeer
- }
- }
- // Notify the content fetchers of new headers, but stop if queue is full
- cont := d.queue.PendingBlocks() < maxQueuedHeaders && d.queue.PendingReceipts() < maxQueuedHeaders
- for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
- if cont {
- // We still have headers to fetch, send continuation wake signal (potential)
- select {
- case ch <- true:
- default:
- }
- } else {
- // Header limit reached, send a termination wake signal (enforced)
- select {
- case ch <- false:
- case <-d.cancelCh:
- }
- }
- }
- if !cont {
- return nil
- }
- // Queue not yet full, fetch the next batch
- from += uint64(len(headers))
- getHeaders(from)
- case <-timeout.C:
- // Header retrieval timed out, consider the peer bad and drop
- glog.V(logger.Debug).Infof("%v: header request timed out", p)
- headerTimeoutMeter.Mark(1)
- d.dropPeer(p.id)
- // Finish the sync gracefully instead of dumping the gathered data though
- for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh, d.stateWakeCh} {
- select {
- case ch <- false:
- case <-d.cancelCh:
- }
- }
- return nil
- case <-d.hashCh:
- case <-d.blockCh:
- // Ignore eth/61 packets because this is eth/62+.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- }
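- // Wake-channel protocol used above (recap, added for clarity): true ("more
- // headers queued") is sent non-blocking so a still-pending signal is not
- // duplicated, while false ("done, drain and exit") is sent blocking so
- // termination cannot be missed. Consumer-side shape (assumed sketch):
- //
- //	for {
- //		select {
- //		case cont := <-wakeCh:
- //			if !cont {
- //				finished = true // drain remaining work, then return
- //			}
- //		case <-cancelCh:
- //			return
- //		}
- //	}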
- // fetchBodies iteratively downloads the scheduled block bodies, taking any
- // available peers, reserving a chunk of blocks for each, waiting for delivery
- // and also periodically checking for timeouts.
- func (d *Downloader) fetchBodies(from uint64) error {
- glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from)
- var (
- deliver = func(packet dataPack) (int, error) {
- pack := packet.(*bodyPack)
- return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles)
- }
- expire = func() map[string]int { return d.queue.ExpireBodies(bodyTTL) }
- fetch = func(p *peer, req *fetchRequest) error { return p.FetchBodies(req) }
- capacity = func(p *peer) int { return p.BlockCapacity() }
- setIdle = func(p *peer, accepted int) { p.SetBodiesIdle(accepted) }
- )
- err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
- d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
- d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body")
- glog.V(logger.Debug).Infof("Block body download terminated: %v", err)
- return err
- }
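- // fetchBodies, fetchReceipts and fetchNodeData are thin adapters that bind
- // queue and peer callbacks into the generic fetchParts loop below. A new part
- // kind would follow the same shape (hedged sketch; the widget identifiers are
- // hypothetical):
- //
- //	deliver := func(packet dataPack) (int, error) {
- //		pack := packet.(*widgetPack)
- //		return d.queue.DeliverWidgets(pack.peerId, pack.widgets)
- //	}
- //	expire := func() map[string]int { return d.queue.ExpireWidgets(widgetTTL) }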
- // fetchReceipts iteratively downloads the scheduled block receipts, taking any
- // available peers, reserving a chunk of receipts for each, waiting for delivery
- // and also periodically checking for timeouts.
- func (d *Downloader) fetchReceipts(from uint64) error {
- glog.V(logger.Debug).Infof("Downloading receipts from #%d", from)
- var (
- deliver = func(packet dataPack) (int, error) {
- pack := packet.(*receiptPack)
- return d.queue.DeliverReceipts(pack.peerId, pack.receipts)
- }
- expire = func() map[string]int { return d.queue.ExpireReceipts(receiptTTL) }
- fetch = func(p *peer, req *fetchRequest) error { return p.FetchReceipts(req) }
- capacity = func(p *peer) int { return p.ReceiptCapacity() }
- setIdle = func(p *peer, accepted int) { p.SetReceiptsIdle(accepted) }
- )
- err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
- d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
- d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt")
- glog.V(logger.Debug).Infof("Receipt download terminated: %v", err)
- return err
- }
- // fetchNodeData iteratively downloads the scheduled state trie nodes, taking any
- // available peers, reserving a chunk of nodes for each, waiting for delivery and
- // also periodically checking for timeouts.
- func (d *Downloader) fetchNodeData() error {
- glog.V(logger.Debug).Infof("Downloading node state data")
- var (
- deliver = func(packet dataPack) (int, error) {
- start := time.Now()
- return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(err error, delivered int) {
- if err != nil {
- // If the node data processing failed, the root hash is very wrong, abort
- glog.V(logger.Error).Infof("peer %d: state processing failed: %v", packet.PeerId(), err)
- d.cancel()
- return
- }
- // Processing succeeded, notify state fetcher of continuation
- if d.queue.PendingNodeData() > 0 {
- select {
- case d.stateWakeCh <- true:
- default:
- }
- }
- // Log a message to the user and return
- d.syncStatsLock.Lock()
- defer d.syncStatsLock.Unlock()
- d.syncStatsStateDone += uint64(delivered)
- glog.V(logger.Info).Infof("imported %d state entries in %v: processed %d in total", delivered, time.Since(start), d.syncStatsStateDone)
- })
- }
- expire = func() map[string]int { return d.queue.ExpireNodeData(stateTTL) }
- throttle = func() bool { return false }
- reserve = func(p *peer, count int) (*fetchRequest, bool, error) {
- return d.queue.ReserveNodeData(p, count), false, nil
- }
- fetch = func(p *peer, req *fetchRequest) error { return p.FetchNodeData(req) }
- capacity = func(p *peer) int { return p.NodeDataCapacity() }
- setIdle = func(p *peer, accepted int) { p.SetNodeDataIdle(accepted) }
- )
- err := d.fetchParts(errCancelStateFetch, d.stateCh, deliver, d.stateWakeCh, expire,
- d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
- d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State")
- glog.V(logger.Debug).Infof("Node state data download terminated: %v", err)
- return err
- }
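- // Design note (added): the no-op throttle reflects that state entries are
- // committed straight to the database rather than staged in the result cache,
- // so (presumably) there is no in-memory result buffer to protect; the reserve
- // wrapper merely adapts ReserveNodeData to fetchParts' three-value signature.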
- // fetchParts iteratively downloads scheduled block parts, taking any available
- // peers, reserving a chunk of fetch requests for each, waiting for delivery and
- // also periodically checking for timeouts.
- func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
- expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error),
- fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int,
- idle func() ([]*peer, int), setIdle func(*peer, int), kind string) error {
- // Create a ticker to detect expired retrieval tasks
- ticker := time.NewTicker(100 * time.Millisecond)
- defer ticker.Stop()
- update := make(chan struct{}, 1)
- // Prepare the queue and fetch block parts until the block header fetcher's done
- finished := false
- for {
- select {
- case <-d.cancelCh:
- return errCancel
- case packet := <-deliveryCh:
- // If the peer was previously banned and failed to deliver its pack
- // in a reasonable time frame, ignore its message.
- if peer := d.peers.Peer(packet.PeerId()); peer != nil {
- // Deliver the received chunk of data and check chain validity
- accepted, err := deliver(packet)
- if err == errInvalidChain {
- return err
- }
- // Unless the peer delivered something entirely different from what was
- // requested (usually caused by a timed-out request that arrived in the end),
- // set it to idle. If the delivery is stale, the peer should have already
- // been idled.
- if err != errStaleDelivery {
- setIdle(peer, accepted)
- }
- // Issue a log to the user to see what's going on
- switch {
- case err == nil && packet.Items() == 0:
- glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind))
- case err == nil:
- glog.V(logger.Detail).Infof("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind))
- default:
- glog.V(logger.Detail).Infof("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err)
- }
- }
- // Deliveries processed, try to update the progress
- select {
- case update <- struct{}{}:
- default:
- }
- case cont := <-wakeCh:
- // The header fetcher sent a continuation flag, check if it's done
- if !cont {
- finished = true
- }
- // Headers arrive, try to update the progress
- select {
- case update <- struct{}{}:
- default:
- }
- case <-ticker.C:
- // Periodically trigger a progress update as a sanity check
- select {
- case update <- struct{}{}:
- default:
- }
- case <-update:
- // Short circuit if we lost all our peers
- if d.peers.Len() == 0 {
- return errNoPeers
- }
- // Check for fetch request timeouts and demote the responsible peers
- for pid, fails := range expire() {
- if peer := d.peers.Peer(pid); peer != nil {
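- // A peer that timed out with several items in flight is likely just
- // overloaded, so it is only reset to idle; one that could not deliver
- // even a single item is considered stalling and gets dropped.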
- if fails > 1 {
- glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
- setIdle(peer, 0)
- } else {
- glog.V(logger.Debug).Infof("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind))
- d.dropPeer(pid)
- }
- }
- }
- // If there's nothing more to fetch, wait or terminate
- if pending() == 0 {
- if !inFlight() && finished {
- glog.V(logger.Debug).Infof("%s fetching completed", kind)
- return nil
- }
- break
- }
- // Send a download request to all idle peers, until throttled
- progressed, throttled, running := false, false, inFlight()
- idles, total := idle()
- for _, peer := range idles {
- // Short circuit if throttling activated
- if throttle() {
- throttled = true
- break
- }
- // Reserve a chunk of fetches for the peer. A nil request can mean either
- // that no more headers are available, or that the peer is known not to
- // have them.
- request, progress, err := reserve(peer, capacity(peer))
- if err != nil {
- return err
- }
- if progress {
- progressed = true
- }
- if request == nil {
- continue
- }
- if glog.V(logger.Detail) {
- if len(request.Headers) > 0 {
- glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
- } else {
- glog.Infof("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
- }
- }
- // Fetch the chunk and make sure any errors return the hashes to the queue
- if fetchHook != nil {
- fetchHook(request.Headers)
- }
- if err := fetch(peer, request); err != nil {
- // Although we could try to fix this, the error really means that we've
- // double allocated a fetch task to a peer. If that is the case, the internal
- // state of the downloader and the queue is badly corrupted, so it's better
- // to hard crash and surface the error instead of letting it silently grow
- // into a much bigger issue.
- panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, strings.ToLower(kind)))
- }
- running = true
- }
- // Make sure that we have peers available for fetching. If all peers have
- // been tried and all of them failed, throw an error.
- if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
- return errPeersUnavailable
- }
- case <-d.hashCh:
- case <-d.blockCh:
- // Ignore eth/61 packets because this is eth/62+.
- // These can arrive as a late delivery from a previous sync.
- }
- }
- }
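- // For illustration: the one-slot update channel above coalesces bursts of
- // triggers (deliveries, wake signals, ticker ticks) into a single pending
- // scheduling pass, since the non-blocking sends simply drop a signal when one
- // is already queued. A hypothetical standalone demo of the idiom:
- //
- //     package main
- //
- //     import "fmt"
- //
- //     func main() {
- //         update := make(chan struct{}, 1)
- //         // Fire five triggers back to back; only one update stays pending.
- //         for i := 0; i < 5; i++ {
- //             select {
- //             case update <- struct{}{}:
- //             default:
- //             }
- //         }
- //         <-update
- //         fmt.Println("one scheduling pass for five triggers")
- //         select {
- //         case <-update:
- //             fmt.Println("unexpected second pass")
- //         default:
- //             fmt.Println("no further passes pending")
- //         }
- //     }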
- // process takes fetch results from the queue and tries to import them into the
- // chain. The type of import operation will depend on the result contents.
- func (d *Downloader) process() error {
- pivot := d.queue.FastSyncPivot()
- for {
- results := d.queue.WaitResults()
- if len(results) == 0 {
- return nil // queue empty
- }
- if d.chainInsertHook != nil {
- d.chainInsertHook(results)
- }
- // Actually import the blocks
- if glog.V(logger.Debug) {
- first, last := results[0].Header, results[len(results)-1].Header
- glog.Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
- }
- for len(results) != 0 {
- // Check for any termination requests
- if atomic.LoadInt32(&d.interrupt) == 1 {
- return errCancelProcessing
- }
- // Retrieve a batch of results to import
- var (
- blocks = make([]*types.Block, 0, maxResultsProcess)
- receipts = make([]types.Receipts, 0, maxResultsProcess)
- )
- items := int(math.Min(float64(len(results)), float64(maxResultsProcess)))
- for _, result := range results[:items] {
- switch {
- case d.mode == FullSync:
- blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles))
- case d.mode == FastSync:
- blocks = append(blocks, types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles))
- if result.Header.Number.Uint64() <= pivot {
- receipts = append(receipts, result.Receipts)
- }
- }
- }
- // Try to process the results, aborting if there's an error
- var (
- err error
- index int
- )
- switch {
- case len(receipts) > 0:
- index, err = d.insertReceipts(blocks, receipts)
- if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
- glog.V(logger.Debug).Infof("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4])
- index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
- }
- default:
- index, err = d.insertBlocks(blocks)
- }
- if err != nil {
- glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err)
- return err
- }
- // Shift the results to the next batch
- results = results[items:]
- }
- }
- }
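- // For illustration: the batching above walks the results slice in chunks of
- // at most maxResultsProcess, re-slicing the tail after each import so that a
- // cancellation request is honoured between batches. The float-based math.Min
- // dance is just an integer minimum; a hypothetical standalone equivalent
- // (the constant value 256 is chosen only for the demo):
- //
- //     package main
- //
- //     import "fmt"
- //
- //     const maxResultsProcess = 256
- //
- //     func main() {
- //         results := make([]int, 600)
- //         for len(results) != 0 {
- //             items := len(results)
- //             if items > maxResultsProcess {
- //                 items = maxResultsProcess
- //             }
- //             fmt.Println("importing batch of", items) // 256, 256, 88
- //             results = results[items:]
- //         }
- //     }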
- // DeliverHashes injects a new batch of hashes received from a remote node into
- // the download schedule. This is usually invoked through the BlockHashesMsg by
- // the protocol handler.
- func (d *Downloader) DeliverHashes(id string, hashes []common.Hash) (err error) {
- return d.deliver(id, d.hashCh, &hashPack{id, hashes}, hashInMeter, hashDropMeter)
- }
- // DeliverBlocks injects a new batch of blocks received from a remote node.
- // This is usually invoked through the BlocksMsg by the protocol handler.
- func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) (err error) {
- return d.deliver(id, d.blockCh, &blockPack{id, blocks}, blockInMeter, blockDropMeter)
- }
- // DeliverHeaders injects a new batch of block headers received from a remote
- // node into the download schedule.
- func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
- return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
- }
- // DeliverBodies injects a new batch of block bodies received from a remote node.
- func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
- return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
- }
- // DeliverReceipts injects a new batch of receipts received from a remote node.
- func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
- return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
- }
- // DeliverNodeData injects a new batch of node state data received from a remote node.
- func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
- return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
- }
- // deliver injects a new batch of data received from a remote node.
- func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
- // Update the delivery metrics for both good and failed deliveries
- inMeter.Mark(int64(packet.Items()))
- defer func() {
- if err != nil {
- dropMeter.Mark(int64(packet.Items()))
- }
- }()
- // Deliver or abort if the sync is canceled while queuing
- d.cancelLock.RLock()
- cancel := d.cancelCh
- d.cancelLock.RUnlock()
- if cancel == nil {
- return errNoSyncActive
- }
- select {
- case destCh <- packet:
- return nil
- case <-cancel:
- return errNoSyncActive
- }
- }
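- // For illustration: deliver races the queueing of a packet against sync
- // cancellation — the cancel channel is snapshotted under the read lock, then
- // a two-way select ensures a packet is never queued into a torn-down sync. A
- // hypothetical standalone version of the core of that pattern:
- //
- //     package main
- //
- //     import (
- //         "errors"
- //         "fmt"
- //     )
- //
- //     var errNoSyncActive = errors.New("no sync active")
- //
- //     func deliver(dest chan string, cancel chan struct{}, packet string) error {
- //         if cancel == nil {
- //             return errNoSyncActive
- //         }
- //         select {
- //         case dest <- packet:
- //             return nil
- //         case <-cancel:
- //             return errNoSyncActive
- //         }
- //     }
- //
- //     func main() {
- //         dest, cancel := make(chan string, 1), make(chan struct{})
- //         fmt.Println(deliver(dest, cancel, "bodies")) // <nil>
- //         close(cancel)
- //         fmt.Println(deliver(make(chan string), cancel, "receipts")) // no sync active
- //     }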