fetcher.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"math/big"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
)

const (
	blockDelayTimeout    = time.Second * 10 // timeout for a peer to announce a head that has already been confirmed by others
	maxNodeCount         = 20               // maximum number of fetcherTreeNode entries remembered for each peer
	serverStateAvailable = 100              // number of recent blocks where state availability is assumed
)

// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
	pm    *ProtocolManager
	odr   *LesOdr
	chain lightChain

	lock            sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
	maxConfirmedTd  *big.Int
	peers           map[*peer]*fetcherPeerInfo
	lastUpdateStats *updateStatsEntry
	syncing         bool
	syncDone        chan *peer

	reqMu             sync.RWMutex // reqMu protects access to sent header fetch requests
	requested         map[uint64]fetchRequest
	deliverChn        chan fetchResponse
	timeoutChn        chan uint64
	requestTriggered  bool
	requestTrigger    chan struct{}
	lastTrustedHeader *types.Header
}

// lightChain extends the BlockChain interface by locking.
type lightChain interface {
	BlockChain
	LockChain()
	UnlockChain()
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode
	nodeCnt             int
	confirmedTd         *big.Int
	bestConfirmed       *fetcherTreeNode
	nodeByHash          map[common.Hash]*fetcherTreeNode
	firstUpdateStats    *updateStatsEntry
}

// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its hash, number and td
// - intermediate: not known, hash and td are empty, they are filled out when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash             common.Hash
	number           uint64
	td               *big.Int
	known, requested bool
	parent           *fetcherTreeNode
	children         []*fetcherTreeNode
}
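
// Purely as an illustration of the three states above: if we last knew a peer's node
// for block N and the peer then announces head N+2 with reorg depth 0, announce adds
// an intermediate node for N+1 (hash and td still empty) and an announced node for
// N+2 carrying the advertised hash and td. Once the headers have been downloaded from
// any peer and validated, checkAnnouncedHeaders marks the whole branch as known.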

// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash
	amount  uint64
	peer    *peer
	sent    mclock.AbsTime
	timeout bool
}

// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64
	headers []*types.Header
	peer    *peer
}

// newLightFetcher creates a new light fetcher
func newLightFetcher(pm *ProtocolManager) *lightFetcher {
	f := &lightFetcher{
		pm:             pm,
		chain:          pm.blockchain.(*light.LightChain),
		odr:            pm.odr,
		peers:          make(map[*peer]*fetcherPeerInfo),
		deliverChn:     make(chan fetchResponse, 100),
		requested:      make(map[uint64]fetchRequest),
		timeoutChn:     make(chan uint64),
		requestTrigger: make(chan struct{}, 1),
		syncDone:       make(chan *peer),
		maxConfirmedTd: big.NewInt(0),
	}
	pm.peers.notify(f)
	f.pm.wg.Add(1)
	go f.syncLoop()
	return f
}

// syncLoop is the main event loop of the light fetcher
func (f *lightFetcher) syncLoop() {
	defer f.pm.wg.Done()
	for {
		select {
		case <-f.pm.quitSync:
			return
		// request loop keeps running until no further requests are necessary or possible
		case <-f.requestTrigger:
			f.lock.Lock()
			var (
				rq      *distReq
				reqID   uint64
				syncing bool
			)
			if !f.syncing {
				rq, reqID, syncing = f.nextRequest()
			}
			f.requestTriggered = rq != nil
			f.lock.Unlock()

			if rq != nil {
				if _, ok := <-f.pm.reqDist.queue(rq); ok {
					if syncing {
						f.lock.Lock()
						f.syncing = true
						f.lock.Unlock()
					} else {
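						// Soft timeout: after softRequestTimeout the request is only marked
						// as timed out, so the next nextRequest round may pick another peer,
						// and the request loop is retriggered. The hard timeout that actually
						// drops the peer is armed separately via timeoutChn in newFetcherDistReq.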
						go func() {
							time.Sleep(softRequestTimeout)
							f.reqMu.Lock()
							req, ok := f.requested[reqID]
							if ok {
								req.timeout = true
								f.requested[reqID] = req
							}
							f.reqMu.Unlock()
							// keep starting new requests while possible
							f.requestTrigger <- struct{}{}
						}()
					}
				} else {
					f.requestTrigger <- struct{}{}
				}
			}
		case reqID := <-f.timeoutChn:
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.pm.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			if ok && req.peer != resp.peer {
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			if ok {
				f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), req.timeout)
			}
			f.lock.Lock()
			if !ok || !(f.syncing || f.processResponse(req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.pm.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
			f.requestTrigger <- struct{}{} // f.requestTriggered is always true here
		}
	}
}

// registerPeer adds a new peer to the fetcher's peer set
func (f *lightFetcher) registerPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
		return f.peerHasBlock(p, hash, number, hasState)
	}
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()
	f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
}

// unregisterPeer removes a peer from the fetcher's peer set
func (f *lightFetcher) unregisterPeer(p *peer) {
	p.lock.Lock()
	p.hasBlock = nil
	p.lock.Unlock()

	f.lock.Lock()
	defer f.lock.Unlock()
	// check for potential timed out block delay statistics
	f.checkUpdateStats(p, nil)
	delete(f.peers, p)
}

// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *peer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}

	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.pm.removePeer(p.id)
		return
	}

	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	// n is now the reorg common ancestor, add a new branch of nodes
	if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
		// if announced head block height is lower or same as n or too far from it to add
		// intermediate nodes then discard previous announcement info and trigger a resync
		n = nil
		fp.nodeCnt = 0
		fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
	}
	// check if the node count is too high to add new nodes, discard oldest ones if necessary
	if n != nil {
		locked := false
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			if !locked {
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete other branches and root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if rawdb.ReadCanonicalHash(f.pm.chainDb, nn.number) == nn.hash {
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}
			if n == nil {
				break
			}
		}
		if n != nil {
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}

	if n == nil {
		// could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}

	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = head
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
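	// requestTrigger has a buffer of one; the requestTriggered flag avoids queuing
	// redundant wake-ups for the sync loop while a request cycle is already pending.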
	if !f.requestTriggered {
		f.requestTriggered = true
		f.requestTrigger <- struct{}{}
	}
}

// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64, hasState bool) bool {
	f.lock.Lock()
	defer f.lock.Unlock()

	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}

	if hasState {
		if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
			return false
		}
	}
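	// Note: the hasState check above assumes servers only keep state for roughly the
	// most recent serverStateAvailable blocks, so older blocks are reported as
	// unavailable even if the peer has announced them.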
	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}

	if number >= fp.root.number {
		// it is recent enough that if it is known, it should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	//
	// when syncing, just check if it is part of the known chain, there is nothing better we
	// can do since we do not know the most recent block hash yet
	return rawdb.ReadCanonicalHash(f.pm.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.pm.chainDb, number) == hash
}

// requestAmount calculates the amount of headers to be downloaded starting
// from a certain head backwards
func (f *lightFetcher) requestAmount(p *peer, n *fetcherTreeNode) uint64 {
	amount := uint64(0)
	nn := n
	for nn != nil && !f.checkKnownNode(p, nn) {
		nn = nn.parent
		amount++
	}
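	// If we walked off the top of the peer's tree without hitting a known node, fall
	// back to fetching the whole chain below the announced head (n.number headers).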
	if nn == nil {
		amount = n.number
	}
	return amount
}

// requestedID tells if a certain reqID has been requested by the fetcher
func (f *lightFetcher) requestedID(reqID uint64) bool {
	f.reqMu.RLock()
	_, ok := f.requested[reqID]
	f.reqMu.RUnlock()
	return ok
}

// nextRequest selects the peer and announced head to be requested next, amount
// to be downloaded starting from the head backwards is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
	var (
		bestHash    common.Hash
		bestAmount  uint64
		bestTd      *big.Int
		bestSyncing bool
	)
	bestHash, bestAmount, bestTd, bestSyncing = f.findBestRequest()
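	// Pointer identity check: findBestRequest starts from f.maxConfirmedTd and only
	// swaps in a different *big.Int when it finds a better candidate, so an unchanged
	// pointer means there is nothing new worth fetching.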
	if bestTd == f.maxConfirmedTd {
		return nil, 0, false
	}

	var rq *distReq
	reqID := genReqID()
	if bestSyncing {
		rq = f.newFetcherDistReqForSync(bestHash)
	} else {
		rq = f.newFetcherDistReq(bestHash, reqID, bestAmount)
	}
	return rq, reqID, bestSyncing
}

// findBestRequest finds the best head to request that has been announced by but not yet requested from a known peer.
// It also returns the announced Td (which should be verified after fetching the head),
// the necessary amount to request and whether a downloader sync is necessary instead of a normal header request.
func (f *lightFetcher) findBestRequest() (bestHash common.Hash, bestAmount uint64, bestTd *big.Int, bestSyncing bool) {
	bestTd = f.maxConfirmedTd
	bestSyncing = false

	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			if f.checkKnownNode(p, n) || n.requested {
				continue
			}
			// if ulc mode is disabled, isTrustedHash returns true
			amount := f.requestAmount(p, n)
			if (bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount) && (f.isTrustedHash(hash) || f.maxConfirmedTd.Int64() == 0) {
				bestHash = hash
				bestTd = n.td
				bestAmount = amount
				bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
			}
		}
	}
	return
}

// isTrustedHash checks if the block can be trusted by the minimum trusted fraction.
func (f *lightFetcher) isTrustedHash(hash common.Hash) bool {
	// If ultra light client mode is disabled, trust all hashes
	if f.pm.ulc == nil {
		return true
	}
	// Ultra light enabled, only trust after enough confirmations
	var agreed int
	for peer, info := range f.peers {
		if peer.trusted && info.nodeByHash[hash] != nil {
			agreed++
		}
	}
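	// Example: with five configured trusted servers (len(keys) == 5) and a required
	// fraction of 60, at least three of them must have announced the hash, since
	// 100*3/5 == 60 satisfies the threshold below.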
	return 100*agreed/len(f.pm.ulc.keys) >= f.pm.ulc.fraction
}

func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq {
	return &distReq{
		getCost: func(dp distPeer) uint64 {
			return 0
		},
		canSend: func(dp distPeer) bool {
			p := dp.(*peer)
			f.lock.Lock()
			defer f.lock.Unlock()

			if p.onlyAnnounce {
				return false
			}
			fp := f.peers[p]
			return fp != nil && fp.nodeByHash[bestHash] != nil
		},
		request: func(dp distPeer) func() {
			if f.pm.ulc != nil {
				// Keep last trusted header before sync
				f.setLastTrustedHeader(f.chain.CurrentHeader())
			}
			go func() {
				p := dp.(*peer)
				p.Log().Debug("Synchronisation started")
				f.pm.synchronise(p)
				f.syncDone <- p
			}()
			return nil
		},
	}
}
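
// The sync request above and the header request below both build distReq values for
// the request distributor. As used here, a distReq carries three callbacks: getCost
// estimates the flow-control cost of sending the request to a candidate peer, canSend
// filters out peers that cannot serve it, and request performs the bookkeeping and
// returns the closure that actually sends the network message (nil for the sync
// variant, where the downloader takes over instead).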

// newFetcherDistReq creates a new request for the distributor.
func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bestAmount uint64) *distReq {
	return &distReq{
		getCost: func(dp distPeer) uint64 {
			p := dp.(*peer)
			return p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
		},
		canSend: func(dp distPeer) bool {
			p := dp.(*peer)
			f.lock.Lock()
			defer f.lock.Unlock()

			if p.onlyAnnounce {
				return false
			}
			fp := f.peers[p]
			if fp == nil {
				return false
			}
			n := fp.nodeByHash[bestHash]
			return n != nil && !n.requested
		},
		request: func(dp distPeer) func() {
			p := dp.(*peer)
			f.lock.Lock()
			fp := f.peers[p]
			if fp != nil {
				n := fp.nodeByHash[bestHash]
				if n != nil {
					n.requested = true
				}
			}
			f.lock.Unlock()

			cost := p.GetRequestCost(GetBlockHeadersMsg, int(bestAmount))
			p.fcServer.QueuedRequest(reqID, cost)
			f.reqMu.Lock()
			f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
			f.reqMu.Unlock()
			go func() {
				time.Sleep(hardRequestTimeout)
				f.timeoutChn <- reqID
			}()
			return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
		},
	}
}

// deliverHeaders delivers header download request responses for processing
func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types.Header) {
	f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
}

// processResponse processes header download request responses, returns true if successful
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
	if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
		req.peer.Log().Debug("Response content mismatch", "requested", req.amount, "reqfrom", req.hash, "delivered", len(resp.headers), "delfrom", resp.headers[0].Hash())
		return false
	}
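	// Headers were requested by hash with reverse=true, so they arrive newest-first;
	// flip them into ascending order before handing them to InsertHeaderChain.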
	headers := make([]*types.Header, req.amount)
	for i, header := range resp.headers {
		headers[int(req.amount)-1-i] = header
	}

	if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
		if err == consensus.ErrFutureBlock {
			return true
		}
		log.Debug("Failed to insert header chain", "err", err)
		return false
	}
	tds := make([]*big.Int, len(headers))
	for i, header := range headers {
		td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
		if td == nil {
			log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
			return false
		}
		tds[i] = td
	}
	f.newHeaders(headers, tds)
	return true
}

// newHeaders updates the block trees of all active peers according to a newly
// downloaded and validated batch of headers
func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
	var maxTd *big.Int

	for p, fp := range f.peers {
		if !f.checkAnnouncedHeaders(fp, headers, tds) {
			p.Log().Debug("Inconsistent announcement")
			go f.pm.removePeer(p.id)
		}
		if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
			maxTd = fp.confirmedTd
		}
	}

	if maxTd != nil {
		f.updateMaxConfirmedTd(maxTd)
	}
}

// checkAnnouncedHeaders updates peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). The return value shows whether all hashes, numbers and Tds matched
// correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)

	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			hash, number := header.ParentHash, header.Number.Uint64()-1
			td = f.chain.GetTd(hash, number)
			header = f.chain.GetHeader(hash, number)
			if header == nil || td == nil {
				log.Error("Missing parent of validated header", "hash", hash, "number", number)
				return false
			}
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}

// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *peer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	var (
		node = fp.lastAnnounced
		td   *big.Int
	)
	if f.pm.ulc != nil {
		// Roll back untrusted blocks
		h, unapproved := f.lastTrustedTreeNode(p)
		f.chain.Rollback(unapproved)
		node = fp.nodeByHash[h.Hash()]
	}
	// Find last valid block
	for node != nil {
		if td = f.chain.GetTd(node.hash, node.number); td != nil {
			break
		}
		node = node.parent
	}
	// Now node is the latest downloaded/approved header after syncing
	if node == nil {
		p.Log().Debug("Synchronisation failed")
		go f.pm.removePeer(p.id)
		return
	}
	header := f.chain.GetHeader(node.hash, node.number)
	f.newHeaders([]*types.Header{header}, []*big.Int{td})
}

// lastTrustedTreeNode returns the last approved treeNode and a list of unapproved hashes
func (f *lightFetcher) lastTrustedTreeNode(p *peer) (*types.Header, []common.Hash) {
	unapprovedHashes := make([]common.Hash, 0)
	current := f.chain.CurrentHeader()

	if f.lastTrustedHeader == nil {
		return current, unapprovedHashes
	}

	canonical := f.chain.CurrentHeader()
	if canonical.Number.Uint64() > f.lastTrustedHeader.Number.Uint64() {
		canonical = f.chain.GetHeaderByNumber(f.lastTrustedHeader.Number.Uint64())
	}
	commonAncestor := rawdb.FindCommonAncestor(f.pm.chainDb, canonical, f.lastTrustedHeader)
	if commonAncestor == nil {
		log.Error("Common ancestor of last trusted header and canonical header is nil", "canonical hash", canonical.Hash(), "trusted hash", f.lastTrustedHeader.Hash())
		return current, unapprovedHashes
	}
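	// Walk back from the current head towards the common ancestor, collecting hashes
	// that are not yet confirmed by enough trusted peers; checkSyncedHeaders rolls
	// these back before marking the remaining chain as known.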
	for current.Hash() != commonAncestor.Hash() {
		if f.isTrustedHash(current.Hash()) {
			break
		}
		unapprovedHashes = append(unapprovedHashes, current.Hash())
		current = f.chain.GetHeader(current.ParentHash, current.Number.Uint64()-1)
	}
	return current, unapprovedHashes
}

func (f *lightFetcher) setLastTrustedHeader(h *types.Header) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.lastTrustedHeader = h
}

// checkKnownNode checks if a block tree node is known (downloaded and validated)
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// check the availability of both header and td because reads are not protected by chain db mutex
	// Note: returning false is always safe here
	if header == nil {
		return false
	}

	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.pm.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	return n.known
}

// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	for {
		if n.td != nil {
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
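		// Descend iteratively into the first child and recurse only for the remaining
		// siblings, so the recursion depth is bounded by the tree's branching rather
		// than by the length of the deleted chain segment.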
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				fp.deleteNode(nn)
			}
		}
	}
}

// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime
	td   *big.Int
	next *updateStatsEntry
}
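
// As a rough illustration (timings made up for the example): if a head with a new
// highest Td is first confirmed at t0 and a given peer only confirms it at t0+2s,
// checkUpdateStats charges that peer a 2s block delay; if the peer still has not
// confirmed it after blockDelayTimeout (10s), it is charged the full timeout and the
// entry is dropped from its list head.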

// updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
// adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
// already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
// Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
// positive block delay value.
func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
	if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
		f.maxConfirmedTd = td
		newEntry := &updateStatsEntry{
			time: mclock.Now(),
			td:   td,
		}
		if f.lastUpdateStats != nil {
			f.lastUpdateStats.next = newEntry
		}
		f.lastUpdateStats = newEntry
		for p := range f.peers {
			f.checkUpdateStats(p, newEntry)
		}
	}
}

// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
	now := mclock.Now()
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check update stats")
		return
	}

	if newEntry != nil && fp.firstUpdateStats == nil {
		fp.firstUpdateStats = newEntry
	}
	for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
		f.pm.serverPool.adjustBlockDelay(p.poolEntry, blockDelayTimeout)
		fp.firstUpdateStats = fp.firstUpdateStats.next
	}
	if fp.confirmedTd != nil {
		for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
			f.pm.serverPool.adjustBlockDelay(p.poolEntry, time.Duration(now-fp.firstUpdateStats.time))
			fp.firstUpdateStats = fp.firstUpdateStats.next
		}
	}
}