downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
	fullMaxForkAncestry = 10000
	lightMaxForkAncestry = 10000
	blockCacheMaxItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
}

// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis blocks used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data
	peers   map[string]*downloadTesterPeer

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
	ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
	ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
	ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain

	lock sync.RWMutex
}

// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	tester := &downloadTester{
		genesis:     testGenesis,
		peerDb:      testDB,
		peers:       make(map[string]*downloadTesterPeer),
		ownHashes:   []common.Hash{testGenesis.Hash()},
		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},

		// Initialize ancient store with test genesis block
		ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
	}
	tester.stateDb = rawdb.NewMemoryDatabase()
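	// Mark the genesis state root as present in the state database so that
	// state-existence checks against the genesis block succeed.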
	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
	tester.downloader = New(0, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
	return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peers[id].chain.headBlock().Hash()
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = dl.peers[id].chain.td(hash)
	}
	dl.lock.RUnlock()
	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}
// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	if _, ok := dl.ancientReceipts[hash]; ok {
		return true
	}
	_, ok := dl.ownReceipts[hash]
	return ok
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	return dl.getHeaderByHash(hash)
}

// getHeaderByHash returns the header if found either within ancients or own blocks.
// This method assumes that the caller holds at least the read-lock (dl.lock).
func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
	header := dl.ancientHeaders[hash]
	if header != nil {
		return header
	}
	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	block := dl.ancientBlocks[hash]
	if block != nil {
		return block
	}
	return dl.ownBlocks[hash]
}
// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewStateTrie(common.Hash{}, block.Root(), trie.NewDatabase(dl.stateDb))
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	return dl.getTd(hash)
}
// getTd retrieves the block's total difficulty if found either within
// ancients or own blocks.
// This method assumes that the caller holds at least the read-lock (dl.lock)
func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
	if td := dl.ancientChainTd[hash]; td != nil {
		return td
	}
	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if dl.getHeaderByHash(headers[0].ParentHash) == nil {
		return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
	}
	var hashes []common.Hash
	for i := 1; i < len(headers); i++ {
		hash := headers[i-1].Hash()
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, fmt.Errorf("non-contiguous import at position %d", i)
		}
		hashes = append(hashes, hash)
	}
	hashes = append(hashes, headers[len(headers)-1].Hash())
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		hash := hashes[i]
		if dl.getHeaderByHash(hash) != nil {
			continue
		}
		if dl.getHeaderByHash(header.ParentHash) == nil {
			// This _should_ be impossible, due to precheck and induction
			return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
		}
		dl.ownHashes = append(dl.ownHashes, hash)
		dl.ownHeaders[hash] = header
		td := dl.getTd(header.ParentHash)
		dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
		}
		if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
		td := dl.getTd(block.ParentHash())
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
			if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
				return i, errors.New("InsertReceiptChain: unknown parent")
			}
		}
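		// Blocks at or below the ancient limit are migrated into the ancient store;
		// newer blocks stay in the active database.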
		if blocks[i].NumberU64() <= ancientLimit {
			dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
			dl.ancientReceipts[blocks[i].Hash()] = receipts[i]
			// Migrate from active db to ancient db
			dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
			dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())
			delete(dl.ownHeaders, blocks[i].Hash())
			delete(dl.ownChainTd, blocks[i].Hash())
		} else {
			dl.ownBlocks[blocks[i].Hash()] = blocks[i]
			dl.ownReceipts[blocks[i].Hash()] = receipts[i]
		}
	}
	return len(blocks), nil
}

// SetHead rewinds the local chain to a new head.
func (dl *downloadTester) SetHead(head uint64) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	// Find the hash of the head to reset to
	var hash common.Hash
	for h, header := range dl.ownHeaders {
		if header.Number.Uint64() == head {
			hash = h
		}
	}
	for h, header := range dl.ancientHeaders {
		if header.Number.Uint64() == head {
			hash = h
		}
	}
	if hash == (common.Hash{}) {
		return fmt.Errorf("unknown head to set: %d", head)
	}
	// Find the offset in the header chain
	var offset int
	for o, h := range dl.ownHashes {
		if h == hash {
			offset = o
			break
		}
	}
	// Remove all the hashes and associated data afterwards
	for i := offset + 1; i < len(dl.ownHashes); i++ {
		delete(dl.ownChainTd, dl.ownHashes[i])
		delete(dl.ownHeaders, dl.ownHashes[i])
		delete(dl.ownReceipts, dl.ownHashes[i])
		delete(dl.ownBlocks, dl.ownHashes[i])
		delete(dl.ancientChainTd, dl.ownHashes[i])
		delete(dl.ancientHeaders, dl.ownHashes[i])
		delete(dl.ancientReceipts, dl.ownHashes[i])
		delete(dl.ancientBlocks, dl.ownHashes[i])
	}
	dl.ownHashes = dl.ownHashes[:offset+1]
	return nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
	dl.peers[id] = peer
	return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	delete(dl.peers, id)
	dl.downloader.UnregisterPeer(id)
}

// Snapshots implements the BlockChain interface for the downloader, but is a noop.
func (dl *downloadTester) Snapshots() *snapshot.Tree {
	return nil
}

type downloadTesterPeer struct {
	dl            *downloadTester
	id            string
	chain         *testChain
	missingStates map[common.Hash]bool // State entries that fast sync should not return
}

// Head constructs a function to retrieve a peer's current head hash
// and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	b := dlp.chain.headBlock()
	return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	result := dlp.chain.headersByHash(origin, amount, skip, reverse)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	result := dlp.chain.headersByNumber(origin, amount, skip, reverse)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	txs, uncles := dlp.chain.bodies(hashes)
	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
	return nil
}

// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	receipts := dlp.chain.receipts(hashes)
	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
	return nil
}

// RequestNodeData constructs a getNodeData method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of node state data from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()
	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.missingStates[hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()
	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()
	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]
	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common
	}
	if tester.downloader.getMode() == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}

func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
func TestCanonicalSynchronisation66Fast(t *testing.T)  { testCanonSync(t, eth.ETH66, FastSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }

func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	tester.newPeer("peer", protocol, chain)
	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}
// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) }

func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	// Create a long block chain to download and the tester
	targetBlocks := testChainBase.len() - 1
	tester.newPeer("peer", protocol, testChainBase)
	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error, 1)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)
			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			tester.downloader.queue.resultCache.lock.Lock()
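			// Sample the counters while holding all three locks so the snapshot is consistent.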
			{
				cached = tester.downloader.queue.resultCache.countCompleted()
				frozen = int(atomic.LoadUint32(&blocked))
				retrieved = len(tester.ownBlocks)
			}
			tester.downloader.queue.resultCache.lock.Unlock()
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()
			if cached == blockCacheMaxItems ||
				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
				retrieved+cached+frozen == targetBlocks+1 ||
				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
	tester.terminate()
}

// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
func TestForkedSync66Fast(t *testing.T)  { testForkedSync(t, eth.ETH66, FastSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }

func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
	tester.newPeer("fork A", protocol, chainA)
	tester.newPeer("fork B", protocol, chainB)
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())
	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FastSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
	tester.newPeer("light", protocol, chainA)
	tester.newPeer("heavy", protocol, chainB)
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())
	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
func TestBoundedForkedSync66Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FastSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("rewriter", protocol, chainB)
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())
	// Synchronise with the second peer and ensure that the fork is rejected for being too old
  621. if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
  622. t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
  623. }
  624. }
  625. // Tests that chain forks are contained within a certain interval of the current
  626. // chain head for short but heavy forks too. These are a bit special because they
  627. // take different ancestor lookup paths.
  628. func TestBoundedHeavyForkedSync66Full(t *testing.T) {
  629. testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
  630. }
  631. func TestBoundedHeavyForkedSync66Fast(t *testing.T) {
  632. testBoundedHeavyForkedSync(t, eth.ETH66, FastSync)
  633. }
  634. func TestBoundedHeavyForkedSync66Light(t *testing.T) {
  635. testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
  636. }
  637. func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
  638. t.Parallel()
  639. tester := newTester()
  640. // Create a long enough forked chain
  641. chainA := testChainForkLightA
  642. chainB := testChainForkHeavy
  643. tester.newPeer("original", protocol, chainA)
  644. // Synchronise with the peer and make sure all blocks were retrieved
  645. if err := tester.sync("original", nil, mode); err != nil {
  646. t.Fatalf("failed to synchronise blocks: %v", err)
  647. }
  648. assertOwnChain(t, tester, chainA.len())
  649. tester.newPeer("heavy-rewriter", protocol, chainB)
	// Synchronise with the second peer and ensure that the fork is rejected for being too old
  651. if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
  652. t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
  653. }
  654. tester.terminate()
  655. }
  656. // Tests that an inactive downloader will not accept incoming block headers,
  657. // bodies and receipts.
  658. func TestInactiveDownloader63(t *testing.T) {
  659. t.Parallel()
  660. tester := newTester()
  661. defer tester.terminate()
  662. // Check that neither block headers nor bodies are accepted
  663. if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
  664. t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
  665. }
  666. if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
  667. t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
  668. }
  669. if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
  670. t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
  671. }
  672. }
  673. // Tests that a canceled download wipes all previously accumulated state.
  674. func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) }
  675. func TestCancel66Fast(t *testing.T) { testCancel(t, eth.ETH66, FastSync) }
  676. func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
  677. func testCancel(t *testing.T, protocol uint, mode SyncMode) {
  678. t.Parallel()
  679. tester := newTester()
  680. defer tester.terminate()
  681. chain := testChainBase.shorten(MaxHeaderFetch)
  682. tester.newPeer("peer", protocol, chain)
  683. // Make sure canceling works with a pristine downloader
  684. tester.downloader.Cancel()
  685. if !tester.downloader.queue.Idle() {
  686. t.Errorf("download queue not idle")
  687. }
  688. // Synchronise with the peer, but cancel afterwards
  689. if err := tester.sync("peer", nil, mode); err != nil {
  690. t.Fatalf("failed to synchronise blocks: %v", err)
  691. }
  692. tester.downloader.Cancel()
  693. if !tester.downloader.queue.Idle() {
  694. t.Errorf("download queue not idle")
  695. }
  696. }
  697. // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
  698. func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) }
  699. func TestMultiSynchronisation66Fast(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FastSync) }
  700. func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
  701. func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
  702. t.Parallel()
  703. tester := newTester()
  704. defer tester.terminate()
  705. // Create various peers with various parts of the chain
  706. targetPeers := 8
  707. chain := testChainBase.shorten(targetPeers * 100)
  708. for i := 0; i < targetPeers; i++ {
  709. id := fmt.Sprintf("peer #%d", i)
  710. tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
  711. }
  712. if err := tester.sync("peer #0", nil, mode); err != nil {
  713. t.Fatalf("failed to synchronise blocks: %v", err)
  714. }
  715. assertOwnChain(t, tester, chain.len())
  716. }
// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FastSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }

func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	// Create peers of every type
	tester.newPeer("peer 66", eth.ETH66, chain)
	//tester.newPeer("peer 65", eth.ETH67, chain)
	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
	// Check that no peers have been dropped off
	for _, version := range []int{66} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FastSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain)
	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blockm {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range chain.receiptm {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FastSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
	tester.newPeer("attack", protocol, brokenChain)
	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FastSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	// Attempt a full sync with an attacker feeding shifted headers
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[1])
	delete(brokenChain.blockm, brokenChain.chain[1])
	delete(brokenChain.receiptm, brokenChain.chain[1])
	tester.newPeer("attack", protocol, brokenChain)
	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) }

func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)
	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	fastAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
	tester.newPeer("fast-attack", protocol, fastAttackChain)
	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}
	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	blockAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
	tester.newPeer("block-attack", protocol, blockAttackChain)
	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttackChain := chain.shorten(chain.len())
	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < withholdAttackChain.len(); i++ {
			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != chain.len() {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
	}
	if mode != LightSync {
		if bs := len(tester.ownBlocks); bs != chain.len() {
			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
		}
	}
	tester.terminate()
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack66Full(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
}
func TestHighTDStarvationAttack66Fast(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, FastSync)
}
func TestHighTDStarvationAttack66Light(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
}

func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	chain := testChainBase.shorten(1)
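	// The peer advertises a huge total difficulty below but only has a single block,
	// so it can never deliver the headers it promises.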
  933. tester.newPeer("attack", protocol, chain)
  934. if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  935. t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  936. }
  937. tester.terminate()
  938. }
  939. // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
  940. func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
  941. func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
  942. t.Parallel()
  943. // Define the disconnection requirement for individual hash fetch errors
  944. tests := []struct {
  945. result error
  946. drop bool
  947. }{
  948. {nil, false}, // Sync succeeded, all is well
  949. {errBusy, false}, // Sync is already in progress, no problem
  950. {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
  951. {errBadPeer, true}, // Peer was deemed bad for some reason, drop it
  952. {errStallingPeer, true}, // Peer was detected to be stalling, drop it
  953. {errUnsyncedPeer, true}, // Peer was detected to be unsynced, drop it
  954. {errNoPeers, false}, // No peers to download from, soft race, no issue
  955. {errTimeout, true}, // No hashes received in due time, drop the peer
  956. {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
  957. {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
  958. {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
  959. {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
  960. {errInvalidBody, false}, // A bad peer was detected, but not the sync origin
  961. {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
  962. {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
  963. }
  964. // Run the tests and check disconnection status
  965. tester := newTester()
  966. defer tester.terminate()
  967. chain := testChainBase.shorten(1)
  968. for i, tt := range tests {
  969. // Register a new peer and ensure its presence
  970. id := fmt.Sprintf("test %d", i)
  971. if err := tester.newPeer(id, protocol, chain); err != nil {
  972. t.Fatalf("test %d: failed to register new peer: %v", i, err)
  973. }
  974. if _, ok := tester.peers[id]; !ok {
  975. t.Fatalf("test %d: registered peer not found", i)
  976. }
  977. // Simulate a synchronisation and check the required result
  978. tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
  979. tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
  980. if _, ok := tester.peers[id]; !ok != tt.drop {
  981. t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
  982. }
  983. }
  984. }
  985. // Tests that synchronisation progress (origin block number, current block number
  986. // and highest block number) is tracked and updated correctly.
  987. func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) }
  988. func TestSyncProgress66Fast(t *testing.T) { testSyncProgress(t, eth.ETH66, FastSync) }
  989. func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
  990. func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
  991. t.Parallel()
  992. tester := newTester()
  993. defer tester.terminate()
  994. chain := testChainBase.shorten(blockCacheMaxItems - 15)
  995. // Set a sync init hook to catch progress changes
  996. starting := make(chan struct{})
  997. progress := make(chan struct{})
  998. tester.downloader.syncInitHook = func(origin, latest uint64) {
  999. starting <- struct{}{}
  1000. <-progress
  1001. }
  1002. checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
  1003. // Synchronise half the blocks and check initial progress
  1004. tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
  1005. pending := new(sync.WaitGroup)
  1006. pending.Add(1)
  1007. go func() {
  1008. defer pending.Done()
  1009. if err := tester.sync("peer-half", nil, mode); err != nil {
  1010. panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1011. }
  1012. }()
  1013. <-starting
  1014. checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
  1015. HighestBlock: uint64(chain.len()/2 - 1),
  1016. })
  1017. progress <- struct{}{}
  1018. pending.Wait()
  1019. // Synchronise all the blocks and check continuation progress
  1020. tester.newPeer("peer-full", protocol, chain)
  1021. pending.Add(1)
  1022. go func() {
  1023. defer pending.Done()
  1024. if err := tester.sync("peer-full", nil, mode); err != nil {
  1025. panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
  1026. }
  1027. }()
  1028. <-starting
  1029. checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
  1030. StartingBlock: uint64(chain.len()/2 - 1),
  1031. CurrentBlock: uint64(chain.len()/2 - 1),
  1032. HighestBlock: uint64(chain.len() - 1),
  1033. })
  1034. // Check final progress after successful sync
  1035. progress <- struct{}{}
  1036. pending.Wait()
  1037. checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
  1038. StartingBlock: uint64(chain.len()/2 - 1),
  1039. CurrentBlock: uint64(chain.len() - 1),
  1040. HighestBlock: uint64(chain.len() - 1),
  1041. })
  1042. }

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	//p.KnownStates, p.PulledStates = 0, 0
	//want.KnownStates, want.PulledStates = 0, 0
	if p != want {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}

// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversal).
func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
func TestForkedSyncProgress66Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FastSync) }
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch)
	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA)
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chainA.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
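	// Forcing the origin up to the previously synced height means the next sync
	// has to rewind its starting point; the "forking" check below expects it to
	// reset to the forks' common ancestor.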
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainA.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainB.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})
}

// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
func TestFailedSyncProgress66Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FastSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
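	// Drop the middle block's header, body and receipts from the peer's chain so
	// it cannot serve a complete range; the sync below is expected to abort partway.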
	brokenChain := chain.shorten(chain.len())
	missing := brokenChain.len() / 2
	delete(brokenChain.headerm, brokenChain.chain[missing])
	delete(brokenChain.blockm, brokenChain.chain[missing])
	delete(brokenChain.receiptm, brokenChain.chain[missing])
	tester.newPeer("faulty", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains the same
	// after a failure
	tester.newPeer("valid", protocol, chain)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(chain.len() - 1),
		HighestBlock: uint64(chain.len() - 1),
	})
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
func TestFakedSyncProgress66Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FastSync) }
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	brokenChain := chain.shorten(chain.len())
	numMissing := 5
	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
		delete(brokenChain.headerm, brokenChain.chain[i])
	}
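	// The peer still advertises the full chain height, but the last few headers
	// near the head have been withheld, so the promised head can never be delivered.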
	tester.newPeer("attack", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been reduced to
	// the true value.
	validChain := chain.shorten(chain.len() - numMissing)
	tester.newPeer("valid", protocol, validChain)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(validChain.len() - 1),
	})

	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(validChain.len() - 1),
		HighestBlock: uint64(validChain.len() - 1),
	})
}

// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
func TestDeliverHeadersHang66Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FullSync) }
func TestDeliverHeadersHang66Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FastSync) }
func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) }

func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	master := newTester()
	defer master.terminate()
	chain := testChainBase.shorten(15)

	for i := 0; i < 200; i++ {
		tester := newTester()
		tester.peerDb = master.peerDb
		tester.newPeer("peer", protocol, chain)

		// Whenever the downloader requests headers, flood it with
		// a lot of unrequested header deliveries.
		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
			peer:   tester.downloader.peers.peers["peer"].peer,
			tester: tester,
		}
		if err := tester.sync("peer", nil, mode); err != nil {
			t.Errorf("test %d: sync failed: %v", i, err)
		}
		tester.terminate()
	}
}

type floodingTestPeer struct {
	peer   Peer
	tester *downloadTester
}

func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
}
func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
	return ftp.peer.RequestBodies(hashes)
}
func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
	return ftp.peer.RequestReceipts(hashes)
}
func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
	return ftp.peer.RequestNodeData(hashes)
}

func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
	deliveriesDone := make(chan struct{}, 500)
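	// Capacity 500 covers the 499 flooding deliveries below plus the one
	// legitimate header request launched once the first flood response lands.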
	for i := 0; i < cap(deliveriesDone)-1; i++ {
		peer := fmt.Sprintf("fake-peer%d", i)
		go func() {
			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
			deliveriesDone <- struct{}{}
		}()
	}

	// None of the extra deliveries should block.
	timeout := time.After(60 * time.Second)
	launched := false
	for i := 0; i < cap(deliveriesDone); i++ {
		select {
		case <-deliveriesDone:
			if !launched {
				// Start delivering the requested headers
				// after one of the flooding responses has arrived.
				go func() {
					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
					deliveriesDone <- struct{}{}
				}()
				launched = true
			}
		case <-timeout:
			panic("blocked")
		}
	}
	return nil
}

func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edge cases to verify it behaves somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
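	// reqs expands a (from, count, span) triple into the concrete block numbers
	// the downloader would request, for comparison against the expected values above.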
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.ReplaceAll(fmt.Sprint(data), " ", ",")
			exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",")
			t.Logf("got: %v\n", res)
			t.Logf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}

// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) }
func TestCheckpointEnforcement66Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH66, LightSync)
}

func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	// Create a new tester with a particular hard-coded checkpoint block
	tester := newTester()
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
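	// The peer's chain head is deliberately kept below the configured checkpoint,
	// so fast and light sync must refuse it (errUnsyncedPeer) while full sync is
	// still allowed to proceed.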
	// Attempt to sync with the peer and validate the result
	tester.newPeer("peer", protocol, chain)

	var expect error
	if mode == FastSync || mode == LightSync {
		expect = errUnsyncedPeer
	}
	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}
	if mode == FastSync || mode == LightSync {
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, chain.len())
	}
}