  1. // Copyright 2016 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package les
  17. import (
  18. "math/big"
  19. "sync"
  20. "time"
  21. "github.com/ethereum/go-ethereum/common"
  22. "github.com/ethereum/go-ethereum/common/mclock"
  23. "github.com/ethereum/go-ethereum/consensus"
  24. "github.com/ethereum/go-ethereum/core/rawdb"
  25. "github.com/ethereum/go-ethereum/core/types"
  26. "github.com/ethereum/go-ethereum/light"
  27. "github.com/ethereum/go-ethereum/log"
  28. )
const (
	// blockDelayTimeout is the timeout for a peer to announce a head that has
	// already been confirmed by others; after it elapses the peer's delay
	// statistics are charged with the timeout value instead of an exact delay.
	blockDelayTimeout = time.Second * 10
	// maxNodeCount is the maximum number of fetcherTreeNode entries remembered for each peer.
	maxNodeCount = 20
	// serverStateAvailable is the number of recent blocks where state availability is assumed.
	serverStateAvailable = 100
)
// lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
// ODR system to ensure that we only request data related to a certain block from peers who have already processed
// and announced that block.
type lightFetcher struct {
	handler            *clientHandler       // client handler used for peer removal and sync
	chain              *light.LightChain    // local light header chain
	softRequestTimeout func() time.Duration // soft timeout after which further requests may be started

	lock              sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
	maxConfirmedTd    *big.Int   // highest total difficulty confirmed by a downloaded and validated header
	peers             map[*serverPeer]*fetcherPeerInfo // per-peer announcement trees and statistics
	lastUpdateStats   *updateStatsEntry                // tail of the global confirmed-Td linked list
	syncing           bool                             // true while a downloader sync is in progress
	syncDone          chan *serverPeer                 // signals completion of a downloader sync

	reqMu            sync.RWMutex            // reqMu protects access to sent header fetch requests
	requested        map[uint64]fetchRequest // in-flight header requests keyed by request ID
	deliverChn       chan fetchResponse      // delivered header responses for syncLoop
	timeoutChn       chan uint64             // request IDs that hit the hard timeout
	requestTriggered bool                    // true while a request trigger is pending or being served
	requestTrigger   chan struct{}           // wakes syncLoop to start new requests (buffered, size 1)

	lastTrustedHeader *types.Header // last trusted header before sync in ultra light client mode

	closeCh chan struct{}  // closed by close() to terminate syncLoop
	wg      sync.WaitGroup // tracks the syncLoop goroutine
}
// fetcherPeerInfo holds fetcher-specific information about each active peer
type fetcherPeerInfo struct {
	root, lastAnnounced *fetcherTreeNode // tree root and the most recently announced node
	nodeCnt             int              // number of nodes currently stored in the tree
	confirmedTd         *big.Int         // highest Td announced by this peer that was also validated locally
	bestConfirmed       *fetcherTreeNode // node belonging to confirmedTd
	nodeByHash          map[common.Hash]*fetcherTreeNode // announced (non-intermediate) nodes indexed by hash
	firstUpdateStats    *updateStatsEntry // head of this peer's unconfirmed section of the stats list
}
// fetcherTreeNode is a node of a tree that holds information about blocks recently
// announced and confirmed by a certain peer. Each new announce message from a peer
// adds nodes to the tree, based on the previous announced head and the reorg depth.
// There are three possible states for a tree node:
// - announced: not downloaded (known) yet, but we know its head, number and td
// - intermediate: not known, hash and td are empty, they are filled out when it becomes known
// - known: both announced by this peer and downloaded (from any peer).
// This structure makes it possible to always know which peer has a certain block,
// which is necessary for selecting a suitable peer for ODR requests and also for
// canonizing new heads. It also helps to always download the minimum necessary
// amount of headers with a single request.
type fetcherTreeNode struct {
	hash             common.Hash        // block hash (zero for intermediate nodes)
	number           uint64             // block number
	td               *big.Int           // announced total difficulty (nil for intermediate nodes)
	known, requested bool               // downloaded/validated flag; header request in flight flag
	parent           *fetcherTreeNode   // parent node (nil for the tree root)
	children         []*fetcherTreeNode // child branches
}
// fetchRequest represents a header download request
type fetchRequest struct {
	hash    common.Hash    // hash of the highest requested header
	amount  uint64         // number of headers requested, walking backwards from hash
	peer    *serverPeer    // peer the request was sent to
	sent    mclock.AbsTime // time the request was issued
	timeout bool           // set once the soft request timeout has elapsed
}
// fetchResponse represents a header download response
type fetchResponse struct {
	reqID   uint64          // ID of the request being answered
	headers []*types.Header // delivered headers (highest first, see processResponse)
	peer    *serverPeer     // peer that delivered the response
}
  99. // newLightFetcher creates a new light fetcher
  100. func newLightFetcher(h *clientHandler, softRequestTimeout func() time.Duration) *lightFetcher {
  101. f := &lightFetcher{
  102. handler: h,
  103. chain: h.backend.blockchain,
  104. peers: make(map[*serverPeer]*fetcherPeerInfo),
  105. deliverChn: make(chan fetchResponse, 100),
  106. requested: make(map[uint64]fetchRequest),
  107. timeoutChn: make(chan uint64),
  108. requestTrigger: make(chan struct{}, 1),
  109. syncDone: make(chan *serverPeer),
  110. closeCh: make(chan struct{}),
  111. maxConfirmedTd: big.NewInt(0),
  112. softRequestTimeout: softRequestTimeout,
  113. }
  114. h.backend.peers.subscribe(f)
  115. f.wg.Add(1)
  116. go f.syncLoop()
  117. return f
  118. }
  119. func (f *lightFetcher) close() {
  120. close(f.closeCh)
  121. f.wg.Wait()
  122. }
// syncLoop is the main event loop of the light fetcher. It serializes all
// state transitions: starting new header requests, handling hard timeouts,
// processing delivered headers and finishing downloader syncs.
func (f *lightFetcher) syncLoop() {
	defer f.wg.Done()
	for {
		select {
		case <-f.closeCh:
			return
		// request loop keeps running until no further requests are necessary or possible
		case <-f.requestTrigger:
			f.lock.Lock()
			var (
				rq      *distReq
				reqID   uint64
				syncing bool
			)
			// No new requests are started while a downloader sync is in progress.
			if !f.syncing {
				rq, reqID, syncing = f.nextRequest()
			}
			f.requestTriggered = rq != nil
			f.lock.Unlock()
			if rq != nil {
				// Hand the request to the distributor; the returned channel
				// yields a value only if the request could actually be sent.
				if _, ok := <-f.handler.backend.reqDist.queue(rq); ok {
					if syncing {
						f.lock.Lock()
						f.syncing = true
						f.lock.Unlock()
					} else {
						// Mark the request as soft-timed-out after softRequestTimeout
						// and re-trigger the loop so further requests can start.
						go func() {
							time.Sleep(f.softRequestTimeout())
							f.reqMu.Lock()
							req, ok := f.requested[reqID]
							if ok {
								req.timeout = true
								f.requested[reqID] = req
							}
							f.reqMu.Unlock()
							// keep starting new requests while possible
							f.requestTrigger <- struct{}{}
						}()
					}
				} else {
					// Sending failed; retry on the next trigger.
					f.requestTrigger <- struct{}{}
				}
			}
		case reqID := <-f.timeoutChn:
			// Hard timeout: forget the request and drop the unresponsive peer.
			f.reqMu.Lock()
			req, ok := f.requested[reqID]
			if ok {
				delete(f.requested, reqID)
			}
			f.reqMu.Unlock()
			if ok {
				req.peer.Log().Debug("Fetching data timed out hard")
				go f.handler.removePeer(req.peer.id)
			}
		case resp := <-f.deliverChn:
			// A response must come from the same peer the request was sent to,
			// otherwise it is treated as invalid.
			f.reqMu.Lock()
			req, ok := f.requested[resp.reqID]
			if ok && req.peer != resp.peer {
				ok = false
			}
			if ok {
				delete(f.requested, resp.reqID)
			}
			f.reqMu.Unlock()
			f.lock.Lock()
			// While a downloader sync is running, responses are accepted
			// without further processing.
			if !ok || !(f.syncing || f.processResponse(req, resp)) {
				resp.peer.Log().Debug("Failed processing response")
				go f.handler.removePeer(resp.peer.id)
			}
			f.lock.Unlock()
		case p := <-f.syncDone:
			// Downloader sync finished; validate the synced headers against the
			// peer's announcements and resume normal request processing.
			f.lock.Lock()
			p.Log().Debug("Done synchronising with peer")
			f.checkSyncedHeaders(p)
			f.syncing = false
			f.lock.Unlock()
			f.requestTrigger <- struct{}{} // f.requestTriggered is always true here
		}
	}
}
  204. // registerPeer adds a new peer to the fetcher's peer set
  205. func (f *lightFetcher) registerPeer(p *serverPeer) {
  206. p.lock.Lock()
  207. p.hasBlock = func(hash common.Hash, number uint64, hasState bool) bool {
  208. return f.peerHasBlock(p, hash, number, hasState)
  209. }
  210. p.lock.Unlock()
  211. f.lock.Lock()
  212. defer f.lock.Unlock()
  213. f.peers[p] = &fetcherPeerInfo{nodeByHash: make(map[common.Hash]*fetcherTreeNode)}
  214. }
  215. // unregisterPeer removes a new peer from the fetcher's peer set
  216. func (f *lightFetcher) unregisterPeer(p *serverPeer) {
  217. p.lock.Lock()
  218. p.hasBlock = nil
  219. p.lock.Unlock()
  220. f.lock.Lock()
  221. defer f.lock.Unlock()
  222. // check for potential timed out block delay statistics
  223. f.checkUpdateStats(p, nil)
  224. delete(f.peers, p)
  225. }
// announce processes a new announcement message received from a peer, adding new
// nodes to the peer's block tree and removing old nodes if necessary
func (f *lightFetcher) announce(p *serverPeer, head *announceData) {
	f.lock.Lock()
	defer f.lock.Unlock()
	p.Log().Debug("Received new announcement", "number", head.Number, "hash", head.Hash, "reorg", head.ReorgDepth)
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Announcement from unknown peer")
		return
	}
	if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
		// announced tds should be strictly monotonic
		p.Log().Debug("Received non-monotonic td", "current", head.Td, "previous", fp.lastAnnounced.td)
		go f.handler.removePeer(p.id)
		return
	}
	// Walk back ReorgDepth steps from the last announced node to find the
	// reorg common ancestor of the old and new head.
	n := fp.lastAnnounced
	for i := uint64(0); i < head.ReorgDepth; i++ {
		if n == nil {
			break
		}
		n = n.parent
	}
	// n is now the reorg common ancestor, add a new branch of nodes
	if n != nil && (head.Number >= n.number+maxNodeCount || head.Number <= n.number) {
		// if announced head block height is lower or same as n or too far from it to add
		// intermediate nodes then discard previous announcement info and trigger a resync
		n = nil
		fp.nodeCnt = 0
		fp.nodeByHash = make(map[common.Hash]*fetcherTreeNode)
	}
	// check if the node count is too high to add new nodes, discard oldest ones if necessary
	if n != nil {
		// n is now the reorg common ancestor, add a new branch of nodes
		// check if the node count is too high to add new nodes
		locked := false
		for uint64(fp.nodeCnt)+head.Number-n.number > maxNodeCount && fp.root != nil {
			// Chain is locked once for the whole pruning phase; the deferred
			// unlock runs when announce returns.
			if !locked {
				f.chain.LockChain()
				defer f.chain.UnlockChain()
				locked = true
			}
			// if one of root's children is canonical, keep it, delete other branches and root itself
			var newRoot *fetcherTreeNode
			for i, nn := range fp.root.children {
				if rawdb.ReadCanonicalHash(f.handler.backend.chainDb, nn.number) == nn.hash {
					fp.root.children = append(fp.root.children[:i], fp.root.children[i+1:]...)
					nn.parent = nil
					newRoot = nn
					break
				}
			}
			fp.deleteNode(fp.root)
			if n == fp.root {
				n = newRoot
			}
			fp.root = newRoot
			// Losing (or replacing) the root invalidates the confirmed-best bookkeeping.
			if newRoot == nil || !f.checkKnownNode(p, newRoot) {
				fp.bestConfirmed = nil
				fp.confirmedTd = nil
			}
			if n == nil {
				break
			}
		}
		// Fill the gap between the ancestor and the announced head with
		// intermediate (unknown) nodes, then record the announced head itself.
		if n != nil {
			for n.number < head.Number {
				nn := &fetcherTreeNode{number: n.number + 1, parent: n}
				n.children = append(n.children, nn)
				n = nn
				fp.nodeCnt++
			}
			n.hash = head.Hash
			n.td = head.Td
			fp.nodeByHash[n.hash] = n
		}
	}
	if n == nil {
		// could not find reorg common ancestor or had to delete entire tree, a new root and a resync is needed
		if fp.root != nil {
			fp.deleteNode(fp.root)
		}
		n = &fetcherTreeNode{hash: head.Hash, number: head.Number, td: head.Td}
		fp.root = n
		fp.nodeCnt++
		fp.nodeByHash[n.hash] = n
		fp.bestConfirmed = nil
		fp.confirmedTd = nil
	}
	f.checkKnownNode(p, n)
	p.lock.Lock()
	p.headInfo = blockInfo{Number: head.Number, Hash: head.Hash, Td: head.Td}
	fp.lastAnnounced = n
	p.lock.Unlock()
	f.checkUpdateStats(p, nil)
	// Wake the sync loop unless a trigger is already pending.
	if !f.requestTriggered {
		f.requestTriggered = true
		f.requestTrigger <- struct{}{}
	}
}
// peerHasBlock returns true if we can assume the peer knows the given block
// based on its announcements
func (f *lightFetcher) peerHasBlock(p *serverPeer, hash common.Hash, number uint64, hasState bool) bool {
	f.lock.Lock()
	defer f.lock.Unlock()
	fp := f.peers[p]
	if fp == nil || fp.root == nil {
		return false
	}
	if hasState {
		// State is only assumed available for the most recent
		// serverStateAvailable blocks below the peer's announced head.
		if fp.lastAnnounced == nil || fp.lastAnnounced.number > number+serverStateAvailable {
			return false
		}
	}
	if f.syncing {
		// always return true when syncing
		// false positives are acceptable, a more sophisticated condition can be implemented later
		return true
	}
	if number >= fp.root.number {
		// it is recent enough that if it is known, is should be in the peer's block tree
		return fp.nodeByHash[hash] != nil
	}
	f.chain.LockChain()
	defer f.chain.UnlockChain()
	// if it's older than the peer's block tree root but it's in the same canonical chain
	// as the root, we can still be sure the peer knows it
	//
	// when syncing, just check if it is part of the known chain, there is nothing better we
	// can do since we do not know the most recent block hash yet
	return rawdb.ReadCanonicalHash(f.handler.backend.chainDb, fp.root.number) == fp.root.hash && rawdb.ReadCanonicalHash(f.handler.backend.chainDb, number) == hash
}
  359. // requestAmount calculates the amount of headers to be downloaded starting
  360. // from a certain head backwards
  361. func (f *lightFetcher) requestAmount(p *serverPeer, n *fetcherTreeNode) uint64 {
  362. amount := uint64(0)
  363. nn := n
  364. for nn != nil && !f.checkKnownNode(p, nn) {
  365. nn = nn.parent
  366. amount++
  367. }
  368. if nn == nil {
  369. amount = n.number
  370. }
  371. return amount
  372. }
  373. // requestedID tells if a certain reqID has been requested by the fetcher
  374. func (f *lightFetcher) requestedID(reqID uint64) bool {
  375. f.reqMu.RLock()
  376. _, ok := f.requested[reqID]
  377. f.reqMu.RUnlock()
  378. return ok
  379. }
  380. // nextRequest selects the peer and announced head to be requested next, amount
  381. // to be downloaded starting from the head backwards is also returned
  382. func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
  383. var (
  384. bestHash common.Hash
  385. bestAmount uint64
  386. bestTd *big.Int
  387. bestSyncing bool
  388. )
  389. bestHash, bestAmount, bestTd, bestSyncing = f.findBestRequest()
  390. if bestTd == f.maxConfirmedTd {
  391. return nil, 0, false
  392. }
  393. var rq *distReq
  394. reqID := genReqID()
  395. if bestSyncing {
  396. rq = f.newFetcherDistReqForSync(bestHash)
  397. } else {
  398. rq = f.newFetcherDistReq(bestHash, reqID, bestAmount)
  399. }
  400. return rq, reqID, bestSyncing
  401. }
// findBestRequest finds the best head to request that has been announced by but not yet requested from a known peer.
// It also returns the announced Td (which should be verified after fetching the head),
// the necessary amount to request and whether a downloader sync is necessary instead of a normal header request.
func (f *lightFetcher) findBestRequest() (bestHash common.Hash, bestAmount uint64, bestTd *big.Int, bestSyncing bool) {
	// Start from the current global best; candidates must beat it (or shorten
	// the request) to be selected.
	bestTd = f.maxConfirmedTd
	bestSyncing = false
	for p, fp := range f.peers {
		for hash, n := range fp.nodeByHash {
			// Skip nodes already known locally or already requested.
			if f.checkKnownNode(p, n) || n.requested {
				continue
			}
			// if ulc mode is disabled, isTrustedHash returns true
			amount := f.requestAmount(p, n)
			if (bestTd == nil || n.td.Cmp(bestTd) > 0 || amount < bestAmount) && (f.isTrustedHash(hash) || f.maxConfirmedTd.Int64() == 0) {
				bestHash = hash
				bestTd = n.td
				bestAmount = amount
				// A downloader sync is needed when the peer's tree root itself
				// is not yet confirmed/known locally.
				bestSyncing = fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root)
			}
		}
	}
	return
}
// isTrustedHash checks if the block can be trusted by the minimum trusted fraction.
func (f *lightFetcher) isTrustedHash(hash common.Hash) bool {
	// If ultra light client mode is disabled, trust all hashes
	if f.handler.ulc == nil {
		return true
	}
	// Ultra light enabled, only trust after enough confirmations
	var agreed int
	for peer, info := range f.peers {
		if peer.trusted && info.nodeByHash[hash] != nil {
			agreed++
		}
	}
	// NOTE(review): assumes ulc.keys is non-empty whenever ulc is enabled,
	// otherwise this division panics — confirm against ulc construction.
	return 100*agreed/len(f.handler.ulc.keys) >= f.handler.ulc.fraction
}
// newFetcherDistReqForSync creates a distributor request that triggers a full
// downloader sync with a peer that announced bestHash.
func (f *lightFetcher) newFetcherDistReqForSync(bestHash common.Hash) *distReq {
	return &distReq{
		// A sync request carries no distributor cost.
		getCost: func(dp distPeer) uint64 {
			return 0
		},
		// Only peers that serve requests and have announced bestHash qualify.
		canSend: func(dp distPeer) bool {
			p := dp.(*serverPeer)
			f.lock.Lock()
			defer f.lock.Unlock()
			if p.onlyAnnounce {
				return false
			}
			fp := f.peers[p]
			return fp != nil && fp.nodeByHash[bestHash] != nil
		},
		// request launches the synchronisation in the background and reports
		// completion on syncDone; it returns nil as there is no send function.
		request: func(dp distPeer) func() {
			if f.handler.ulc != nil {
				// Keep last trusted header before sync
				f.setLastTrustedHeader(f.chain.CurrentHeader())
			}
			go func() {
				p := dp.(*serverPeer)
				p.Log().Debug("Synchronisation started")
				f.handler.synchronise(p)
				f.syncDone <- p
			}()
			return nil
		},
	}
}
// newFetcherDistReq creates a new request for the distributor.
func (f *lightFetcher) newFetcherDistReq(bestHash common.Hash, reqID uint64, bestAmount uint64) *distReq {
	return &distReq{
		// getCost returns the flow-control cost of this header request for the given peer.
		getCost: func(dp distPeer) uint64 {
			p := dp.(*serverPeer)
			return p.getRequestCost(GetBlockHeadersMsg, int(bestAmount))
		},
		// canSend checks that the peer serves requests and still has the
		// announced head in its tree, not yet requested.
		canSend: func(dp distPeer) bool {
			p := dp.(*serverPeer)
			f.lock.Lock()
			defer f.lock.Unlock()
			if p.onlyAnnounce {
				return false
			}
			fp := f.peers[p]
			if fp == nil {
				return false
			}
			n := fp.nodeByHash[bestHash]
			return n != nil && !n.requested
		},
		// request marks the tree node as requested, books the flow-control
		// cost, registers the in-flight request and arms the hard timeout,
		// then returns the actual send function.
		request: func(dp distPeer) func() {
			p := dp.(*serverPeer)
			f.lock.Lock()
			fp := f.peers[p]
			if fp != nil {
				n := fp.nodeByHash[bestHash]
				if n != nil {
					n.requested = true
				}
			}
			f.lock.Unlock()
			cost := p.getRequestCost(GetBlockHeadersMsg, int(bestAmount))
			p.fcServer.QueuedRequest(reqID, cost)
			f.reqMu.Lock()
			f.requested[reqID] = fetchRequest{hash: bestHash, amount: bestAmount, peer: p, sent: mclock.Now()}
			f.reqMu.Unlock()
			go func() {
				time.Sleep(hardRequestTimeout)
				f.timeoutChn <- reqID
			}()
			return func() { p.requestHeadersByHash(reqID, bestHash, int(bestAmount), 0, true) }
		},
	}
}
  515. // deliverHeaders delivers header download request responses for processing
  516. func (f *lightFetcher) deliverHeaders(peer *serverPeer, reqID uint64, headers []*types.Header) {
  517. f.deliverChn <- fetchResponse{reqID: reqID, headers: headers, peer: peer}
  518. }
  519. // processResponse processes header download request responses, returns true if successful
  520. func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
  521. if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
  522. req.peer.Log().Debug("Response content mismatch", "requested", len(resp.headers), "reqfrom", resp.headers[0], "delivered", req.amount, "delfrom", req.hash)
  523. return false
  524. }
  525. headers := make([]*types.Header, req.amount)
  526. for i, header := range resp.headers {
  527. headers[int(req.amount)-1-i] = header
  528. }
  529. if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil {
  530. if err == consensus.ErrFutureBlock {
  531. return true
  532. }
  533. log.Debug("Failed to insert header chain", "err", err)
  534. return false
  535. }
  536. tds := make([]*big.Int, len(headers))
  537. for i, header := range headers {
  538. td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
  539. if td == nil {
  540. log.Debug("Total difficulty not found for header", "index", i+1, "number", header.Number, "hash", header.Hash())
  541. return false
  542. }
  543. tds[i] = td
  544. }
  545. f.newHeaders(headers, tds)
  546. return true
  547. }
  548. // newHeaders updates the block trees of all active peers according to a newly
  549. // downloaded and validated batch or headers
  550. func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
  551. var maxTd *big.Int
  552. for p, fp := range f.peers {
  553. if !f.checkAnnouncedHeaders(fp, headers, tds) {
  554. p.Log().Debug("Inconsistent announcement")
  555. go f.handler.removePeer(p.id)
  556. }
  557. if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
  558. maxTd = fp.confirmedTd
  559. }
  560. }
  561. if maxTd != nil {
  562. f.updateMaxConfirmedTd(maxTd)
  563. }
  564. }
// checkAnnouncedHeaders updates peer's block tree if necessary after validating
// a batch of headers. It searches for the latest header in the batch that has a
// matching tree node (if any), and if it has not been marked as known already,
// sets it and its parents to known (even those which are older than the currently
// validated ones). Return value shows if all hashes, numbers and Tds matched
// correctly to the announced values (otherwise the peer should be dropped).
func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*types.Header, tds []*big.Int) bool {
	var (
		n      *fetcherTreeNode
		header *types.Header
		td     *big.Int
	)
	// Iterate from the newest header backwards; once the batch is exhausted
	// (i < 0), keep walking parent headers fetched from the local chain.
	for i := len(headers) - 1; ; i-- {
		if i < 0 {
			if n == nil {
				// no more headers and nothing to match
				return true
			}
			// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
			hash, number := header.ParentHash, header.Number.Uint64()-1
			td = f.chain.GetTd(hash, number)
			header = f.chain.GetHeader(hash, number)
			if header == nil || td == nil {
				log.Error("Missing parent of validated header", "hash", hash, "number", number)
				return false
			}
		} else {
			header = headers[i]
			td = tds[i]
		}
		hash := header.Hash()
		number := header.Number.Uint64()
		// Latch onto the peer's tree at the first header with a matching node.
		if n == nil {
			n = fp.nodeByHash[hash]
		}
		if n != nil {
			if n.td == nil {
				// node was unannounced
				if nn := fp.nodeByHash[hash]; nn != nil {
					// if there was already a node with the same hash, continue there and drop this one
					nn.children = append(nn.children, n.children...)
					n.children = nil
					fp.deleteNode(n)
					n = nn
				} else {
					// Fill in the intermediate node with the now-known hash/td.
					n.hash = hash
					n.td = td
					fp.nodeByHash[hash] = n
				}
			}
			// check if it matches the header
			if n.hash != hash || n.number != number || n.td.Cmp(td) != 0 {
				// peer has previously made an invalid announcement
				return false
			}
			if n.known {
				// we reached a known node that matched our expectations, return with success
				return true
			}
			n.known = true
			// Track the best (highest Td) header confirmed by this peer.
			if fp.confirmedTd == nil || td.Cmp(fp.confirmedTd) > 0 {
				fp.confirmedTd = td
				fp.bestConfirmed = n
			}
			n = n.parent
			if n == nil {
				return true
			}
		}
	}
}
// checkSyncedHeaders updates peer's block tree after synchronisation by marking
// downloaded headers as known. If none of the announced headers are found after
// syncing, the peer is dropped.
func (f *lightFetcher) checkSyncedHeaders(p *serverPeer) {
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check sync headers")
		return
	}
	var (
		node = fp.lastAnnounced
		td   *big.Int
	)
	if f.handler.ulc != nil {
		// Roll back untrusted blocks
		h, unapproved := f.lastTrustedTreeNode(p)
		f.chain.Rollback(unapproved)
		node = fp.nodeByHash[h.Hash()]
	}
	// Find last valid block
	// Walk up the announced branch until a header with a locally stored Td
	// (i.e. actually downloaded and validated) is found.
	for node != nil {
		if td = f.chain.GetTd(node.hash, node.number); td != nil {
			break
		}
		node = node.parent
	}
	// Now node is the latest downloaded/approved header after syncing
	if node == nil {
		p.Log().Debug("Synchronisation failed")
		go f.handler.removePeer(p.id)
		return
	}
	header := f.chain.GetHeader(node.hash, node.number)
	f.newHeaders([]*types.Header{header}, []*big.Int{td})
}
  671. // lastTrustedTreeNode return last approved treeNode and a list of unapproved hashes
  672. func (f *lightFetcher) lastTrustedTreeNode(p *serverPeer) (*types.Header, []common.Hash) {
  673. unapprovedHashes := make([]common.Hash, 0)
  674. current := f.chain.CurrentHeader()
  675. if f.lastTrustedHeader == nil {
  676. return current, unapprovedHashes
  677. }
  678. canonical := f.chain.CurrentHeader()
  679. if canonical.Number.Uint64() > f.lastTrustedHeader.Number.Uint64() {
  680. canonical = f.chain.GetHeaderByNumber(f.lastTrustedHeader.Number.Uint64())
  681. }
  682. commonAncestor := rawdb.FindCommonAncestor(f.handler.backend.chainDb, canonical, f.lastTrustedHeader)
  683. if commonAncestor == nil {
  684. log.Error("Common ancestor of last trusted header and canonical header is nil", "canonical hash", canonical.Hash(), "trusted hash", f.lastTrustedHeader.Hash())
  685. return current, unapprovedHashes
  686. }
  687. for current.Hash() == commonAncestor.Hash() {
  688. if f.isTrustedHash(current.Hash()) {
  689. break
  690. }
  691. unapprovedHashes = append(unapprovedHashes, current.Hash())
  692. current = f.chain.GetHeader(current.ParentHash, current.Number.Uint64()-1)
  693. }
  694. return current, unapprovedHashes
  695. }
  696. func (f *lightFetcher) setLastTrustedHeader(h *types.Header) {
  697. f.lock.Lock()
  698. defer f.lock.Unlock()
  699. f.lastTrustedHeader = h
  700. }
// checkKnownNode checks if a block tree node is known (downloaded and validated)
// If it was not known previously but found in the database, sets its known flag
func (f *lightFetcher) checkKnownNode(p *serverPeer, n *fetcherTreeNode) bool {
	if n.known {
		return true
	}
	td := f.chain.GetTd(n.hash, n.number)
	if td == nil {
		return false
	}
	header := f.chain.GetHeader(n.hash, n.number)
	// check the availability of both header and td because reads are not protected by chain db mutex
	// Note: returning false is always safe here
	if header == nil {
		return false
	}
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check known nodes")
		return false
	}
	// Validate the locally stored header/td against the peer's announcements;
	// a mismatch means the peer lied and gets dropped.
	if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
		p.Log().Debug("Inconsistent announcement")
		go f.handler.removePeer(p.id)
	}
	if fp.confirmedTd != nil {
		f.updateMaxConfirmedTd(fp.confirmedTd)
	}
	// n.known may have been set by checkAnnouncedHeaders above.
	return n.known
}
// deleteNode deletes a node and its child subtrees from a peer's block tree
func (fp *fetcherPeerInfo) deleteNode(n *fetcherTreeNode) {
	// Detach n from its parent's child list.
	if n.parent != nil {
		for i, nn := range n.parent.children {
			if nn == n {
				n.parent.children = append(n.parent.children[:i], n.parent.children[i+1:]...)
				break
			}
		}
	}
	// Walk down the subtree: follow the first child iteratively, delete the
	// remaining siblings recursively.
	for {
		// Only announced nodes (td set) are present in the hash index.
		if n.td != nil {
			delete(fp.nodeByHash, n.hash)
		}
		fp.nodeCnt--
		if len(n.children) == 0 {
			return
		}
		for i, nn := range n.children {
			if i == 0 {
				n = nn
			} else {
				// NOTE(review): this recursive call removes nn from the very
				// slice being ranged over (nn.parent is the node whose children
				// we iterate); behavior depends on range capturing the original
				// slice header — confirm intended.
				fp.deleteNode(nn)
			}
		}
	}
}
// updateStatsEntry items form a linked list that is expanded with a new item every time a new head with a higher Td
// than the previous one has been downloaded and validated. The list contains a series of maximum confirmed Td values
// and the time these values have been confirmed, both increasing monotonically. A maximum confirmed Td is calculated
// both globally for all peers and also for each individual peer (meaning that the given peer has announced the head
// and it has also been downloaded from any peer, either before or after the given announcement).
// The linked list has a global tail where new confirmed Td entries are added and a separate head for each peer,
// pointing to the next Td entry that is higher than the peer's max confirmed Td (nil if it has already confirmed
// the current global head).
type updateStatsEntry struct {
	time mclock.AbsTime    // time this Td value was first confirmed
	td   *big.Int          // confirmed total difficulty
	next *updateStatsEntry // next (higher) confirmed Td entry, nil at the tail
}
  771. // updateMaxConfirmedTd updates the block delay statistics of active peers. Whenever a new highest Td is confirmed,
  772. // adds it to the end of a linked list together with the time it has been confirmed. Then checks which peers have
  773. // already confirmed a head with the same or higher Td (which counts as zero block delay) and updates their statistics.
  774. // Those who have not confirmed such a head by now will be updated by a subsequent checkUpdateStats call with a
  775. // positive block delay value.
  776. func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) {
  777. if f.maxConfirmedTd == nil || td.Cmp(f.maxConfirmedTd) > 0 {
  778. f.maxConfirmedTd = td
  779. newEntry := &updateStatsEntry{
  780. time: mclock.Now(),
  781. td: td,
  782. }
  783. if f.lastUpdateStats != nil {
  784. f.lastUpdateStats.next = newEntry
  785. }
  786. f.lastUpdateStats = newEntry
  787. for p := range f.peers {
  788. f.checkUpdateStats(p, newEntry)
  789. }
  790. }
  791. }
// checkUpdateStats checks those peers who have not confirmed a certain highest Td (or a larger one) by the time it
// has been confirmed by another peer. If they have confirmed such a head by now, their stats are updated with the
// block delay which is (this peer's confirmation time)-(first confirmation time). After blockDelayTimeout has passed,
// the stats are updated with blockDelayTimeout value. In either case, the confirmed or timed out updateStatsEntry
// items are removed from the head of the linked list.
// If a new entry has been added to the global tail, it is passed as a parameter here even though this function
// assumes that it has already been added, so that if the peer's list is empty (all heads confirmed, head is nil),
// it can set the new head to newEntry.
func (f *lightFetcher) checkUpdateStats(p *serverPeer, newEntry *updateStatsEntry) {
	now := mclock.Now()
	fp := f.peers[p]
	if fp == nil {
		p.Log().Debug("Unknown peer to check update stats")
		return
	}
	// An empty per-peer list means every previous head was confirmed; start
	// tracking from the newly added global entry (if any).
	if newEntry != nil && fp.firstUpdateStats == nil {
		fp.firstUpdateStats = newEntry
	}
	// Drop entries that have been pending for longer than blockDelayTimeout.
	for fp.firstUpdateStats != nil && fp.firstUpdateStats.time <= now-mclock.AbsTime(blockDelayTimeout) {
		fp.firstUpdateStats = fp.firstUpdateStats.next
	}
	// Drop entries whose Td this peer has already confirmed.
	if fp.confirmedTd != nil {
		for fp.firstUpdateStats != nil && fp.firstUpdateStats.td.Cmp(fp.confirmedTd) <= 0 {
			fp.firstUpdateStats = fp.firstUpdateStats.next
		}
	}
}