// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
	fullMaxForkAncestry = 10000
	lightMaxForkAncestry = 10000
	blockCacheMaxItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
}
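// Shrinking these knobs only reduces the work done per test cycle: the two
// fork-ancestry limits roughly bound how deep a reorg the downloader will
// accept, blockCacheMaxItems caps how many fetched blocks are buffered before
// import, and fsHeaderContCheck is roughly the interval at which fast sync
// re-checks for header continuations. Small values keep the synthetic chains
// and sync cycles in this file fast.
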
// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis blocks used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data
	peers   map[string]*downloadTesterPeer

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
	ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
	ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
	ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain

	lock sync.RWMutex
}
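// The own* maps stand in for the active key-value database, while the ancient*
// maps stand in for the append-only freezer: InsertReceiptChain below migrates
// blocks at or below the ancient limit from the former into the latter, roughly
// mirroring what the real blockchain does with rawdb's ancient store.
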
// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	tester := &downloadTester{
		genesis:     testGenesis,
		peerDb:      testDB,
		peers:       make(map[string]*downloadTesterPeer),
		ownHashes:   []common.Hash{testGenesis.Hash()},
		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},

		// Initialize ancient store with test genesis block
		ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
	}
	tester.stateDb = rawdb.NewMemoryDatabase()
	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
	return tester
}
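// The tester never tracks real state: writing the genesis state root into
// stateDb as a raw key (and doing the same for every block imported through
// InsertChain) simply marks that state as "present", which is all that
// CurrentBlock and the parent-state checks look for.
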
// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peers[id].chain.headBlock().Hash()
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = dl.peers[id].chain.td(hash)
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}
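// A typical test in this file drives the tester through the same flow (see
// testCanonSync further down):
//
//	tester := newTester()
//	defer tester.terminate()
//	chain := testChainBase.shorten(blockCacheMaxItems - 15)
//	tester.newPeer("peer", protocol, chain)
//	if err := tester.sync("peer", nil, mode); err != nil { ... }
//	assertOwnChain(t, tester, chain.len())
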
// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if _, ok := dl.ancientReceipts[hash]; ok {
		return true
	}
	_, ok := dl.ownReceipts[hash]
	return ok
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	return dl.getHeaderByHash(hash)
}

// getHeaderByHash returns the header if found either within ancients or own blocks.
// This method assumes that the caller holds at least the read-lock (dl.lock).
func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
	header := dl.ancientHeaders[hash]
	if header != nil {
		return header
	}
	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	block := dl.ancientBlocks[hash]
	if block != nil {
		return block
	}
	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			// Ancient blocks are treated as having their state available
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.getTd(hash)
}

// getTd retrieves the block's total difficulty if found either within
// ancients or own blocks.
// This method assumes that the caller holds at least the read-lock (dl.lock).
func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
	if td := dl.ancientChainTd[hash]; td != nil {
		return td
	}
	return dl.ownChainTd[hash]
}
// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if dl.getHeaderByHash(headers[0].ParentHash) == nil {
		return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
	}
	var hashes []common.Hash
	for i := 1; i < len(headers); i++ {
		hash := headers[i-1].Hash()
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, fmt.Errorf("non-contiguous import at position %d", i)
		}
		hashes = append(hashes, hash)
	}
	hashes = append(hashes, headers[len(headers)-1].Hash())
	// Do a full insert if pre-checks passed
	for i, header := range headers {
		hash := hashes[i]
		if dl.getHeaderByHash(hash) != nil {
			continue
		}
		if dl.getHeaderByHash(header.ParentHash) == nil {
			// This _should_ be impossible, due to precheck and induction
			return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
		}
		dl.ownHashes = append(dl.ownHashes, hash)
		dl.ownHeaders[hash] = header

		td := dl.getTd(header.ParentHash)
		dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
		}
		if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})

		td := dl.getTd(block.ParentHash())
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
			if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
				return i, errors.New("InsertReceiptChain: unknown parent")
			}
		}
		if blocks[i].NumberU64() <= ancientLimit {
			dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
			dl.ancientReceipts[blocks[i].Hash()] = receipts[i]

			// Migrate from active db to ancient db
			dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
			dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())

			delete(dl.ownHeaders, blocks[i].Hash())
			delete(dl.ownChainTd, blocks[i].Hash())
		} else {
			dl.ownBlocks[blocks[i].Hash()] = blocks[i]
			dl.ownReceipts[blocks[i].Hash()] = receipts[i]
		}
	}
	return len(blocks), nil
}
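// The ancientLimit argument mimics the freezer threshold used during fast sync:
// blocks numbered at or below it are written straight into the ancient* maps and
// removed from the active own* maps, while newer blocks stay in the active set.
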
// SetHead rewinds the local chain to a new head.
func (dl *downloadTester) SetHead(head uint64) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Find the hash of the head to reset to
	var hash common.Hash
	for h, header := range dl.ownHeaders {
		if header.Number.Uint64() == head {
			hash = h
		}
	}
	for h, header := range dl.ancientHeaders {
		if header.Number.Uint64() == head {
			hash = h
		}
	}
	if hash == (common.Hash{}) {
		return fmt.Errorf("unknown head to set: %d", head)
	}
	// Find the offset in the header chain
	var offset int
	for o, h := range dl.ownHashes {
		if h == hash {
			offset = o
			break
		}
	}
	// Remove all the hashes and associated data afterwards
	for i := offset + 1; i < len(dl.ownHashes); i++ {
		delete(dl.ownChainTd, dl.ownHashes[i])
		delete(dl.ownHeaders, dl.ownHashes[i])
		delete(dl.ownReceipts, dl.ownHashes[i])
		delete(dl.ownBlocks, dl.ownHashes[i])

		delete(dl.ancientChainTd, dl.ownHashes[i])
		delete(dl.ancientHeaders, dl.ownHashes[i])
		delete(dl.ancientReceipts, dl.ownHashes[i])
		delete(dl.ancientBlocks, dl.ownHashes[i])
	}
	dl.ownHashes = dl.ownHashes[:offset+1]
	return nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
	dl.peers[id] = peer
	return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.UnregisterPeer(id)
}

// Snapshots implements the BlockChain interface for the downloader, but is a noop.
func (dl *downloadTester) Snapshots() *snapshot.Tree {
	return nil
}
type downloadTesterPeer struct {
	dl            *downloadTester
	id            string
	chain         *testChain
	missingStates map[common.Hash]bool // State entries that fast sync should not return
}

// Head constructs a function to retrieve a peer's current head hash
// and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	b := dlp.chain.headBlock()
	return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	result := dlp.chain.headersByHash(origin, amount, skip, reverse)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	result := dlp.chain.headersByNumber(origin, amount, skip, reverse)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	txs, uncles := dlp.chain.bodies(hashes)
	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
	return nil
}

// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	receipts := dlp.chain.receipts(hashes)
	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
	return nil
}

// RequestNodeData constructs a getNodeData method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of node state data from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.missingStates[hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
	return nil
}
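// Entries flagged in missingStates are silently dropped from the response above,
// which lets a test simulate a peer that withholds parts of the state trie during
// fast sync without failing the request outright.
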
// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]

	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common
	}
	if tester.downloader.getMode() == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}
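// For example, with common = 4 and lengths = []int{10, 12} the expected count is
// 10 + (12 - 4) = 18 headers (and, outside light sync, blocks and receipts). The
// -1 in each check discounts the genesis block, which newTester seeds into both
// the own* and ancient* maps and would otherwise be counted twice.
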
func TestCanonicalSynchronisation65Full(t *testing.T)  { testCanonSync(t, eth.ETH65, FullSync) }
func TestCanonicalSynchronisation65Fast(t *testing.T)  { testCanonSync(t, eth.ETH65, FastSync) }
func TestCanonicalSynchronisation65Light(t *testing.T) { testCanonSync(t, eth.ETH65, LightSync) }
func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
func TestCanonicalSynchronisation66Fast(t *testing.T)  { testCanonSync(t, eth.ETH66, FastSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }

func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	tester.newPeer("peer", protocol, chain)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling65Full(t *testing.T) { testThrottling(t, eth.ETH65, FullSync) }
func TestThrottling65Fast(t *testing.T) { testThrottling(t, eth.ETH65, FastSync) }
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) }

func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()

	// Create a long block chain to download and the tester
	targetBlocks := testChainBase.len() - 1
	tester.newPeer("peer", protocol, testChainBase)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error, 1)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			tester.downloader.queue.resultCache.lock.Lock()
			{
				cached = tester.downloader.queue.resultCache.countCompleted()
				frozen = int(atomic.LoadUint32(&blocked))
				retrieved = len(tester.ownBlocks)
			}
			tester.downloader.queue.resultCache.lock.Unlock()
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheMaxItems ||
				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
				retrieved+cached+frozen == targetBlocks+1 ||
				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up

		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
	tester.terminate()
}
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync65Full(t *testing.T)  { testForkedSync(t, eth.ETH65, FullSync) }
func TestForkedSync65Fast(t *testing.T)  { testForkedSync(t, eth.ETH65, FastSync) }
func TestForkedSync65Light(t *testing.T) { testForkedSync(t, eth.ETH65, LightSync) }
func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
func TestForkedSync66Fast(t *testing.T)  { testForkedSync(t, eth.ETH66, FastSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }

func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
	tester.newPeer("fork A", protocol, chainA)
	tester.newPeer("fork B", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}
// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync65Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH65, FullSync) }
func TestHeavyForkedSync65Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH65, FastSync) }
func TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH65, LightSync) }
func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FastSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
	tester.newPeer("light", protocol, chainA)
	tester.newPeer("heavy", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}
// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync65Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH65, FullSync) }
func TestBoundedForkedSync65Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH65, FastSync) }
func TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH65, LightSync) }
func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
func TestBoundedForkedSync66Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FastSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync65Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH65, FullSync)
}
func TestBoundedHeavyForkedSync65Fast(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH65, FastSync)
}
func TestBoundedHeavyForkedSync65Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH65, LightSync)
}
func TestBoundedHeavyForkedSync66Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
}
func TestBoundedHeavyForkedSync66Fast(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, FastSync)
}
func TestBoundedHeavyForkedSync66Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
}

func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	tester.newPeer("heavy-rewriter", protocol, chainB)
	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
	tester.terminate()
}
// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel65Full(t *testing.T)  { testCancel(t, eth.ETH65, FullSync) }
func TestCancel65Fast(t *testing.T)  { testCancel(t, eth.ETH65, FastSync) }
func TestCancel65Light(t *testing.T) { testCancel(t, eth.ETH65, LightSync) }
func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
func TestCancel66Fast(t *testing.T)  { testCancel(t, eth.ETH66, FastSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }

func testCancel(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation65Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH65, FullSync) }
func TestMultiSynchronisation65Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH65, FastSync) }
func TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH65, LightSync) }
func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
func TestMultiSynchronisation66Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FastSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)
	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}
// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation65Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH65, FullSync) }
func TestMultiProtoSynchronisation65Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH65, FastSync) }
func TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, eth.ETH65, LightSync) }
func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FastSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }

func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Create peers of every type
	tester.newPeer("peer 65", eth.ETH65, chain)
	tester.newPeer("peer 66", eth.ETH66, chain)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Check that no peers have been dropped off
	for _, version := range []int{65, 66} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit65Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH65, FullSync) }
func TestEmptyShortCircuit65Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH65, FastSync) }
func TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH65, LightSync) }
func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FastSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header, _ ...interface{}) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header, _ ...interface{}) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blockm {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range chain.receiptm {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack65Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH65, FullSync) }
func TestMissingHeaderAttack65Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH65, FastSync) }
func TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH65, LightSync) }
func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FastSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
	tester.newPeer("attack", protocol, brokenChain)

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack65Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH65, FullSync) }
func TestShiftedHeaderAttack65Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH65, FastSync) }
func TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH65, LightSync) }
func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FastSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[1])
	delete(brokenChain.blockm, brokenChain.chain[1])
	delete(brokenChain.receiptm, brokenChain.chain[1])
	tester.newPeer("attack", protocol, brokenChain)
	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH65, FastSync) }
func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) }

func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	fastAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
	tester.newPeer("fast-attack", protocol, fastAttackChain)

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}
	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	blockAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
	tester.newPeer("block-attack", protocol, blockAttackChain)

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttackChain := chain.shorten(chain.len())
	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < withholdAttackChain.len(); i++ {
			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != chain.len() {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
	}
	if mode != LightSync {
		if bs := len(tester.ownBlocks); bs != chain.len() {
			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
		}
	}
	tester.terminate()
}
  960. // Tests that a peer advertising a high TD doesn't get to stall the downloader
  961. // afterwards by not sending any useful hashes.
  962. func TestHighTDStarvationAttack65Full(t *testing.T) {
  963. testHighTDStarvationAttack(t, eth.ETH65, FullSync)
  964. }
  965. func TestHighTDStarvationAttack65Fast(t *testing.T) {
  966. testHighTDStarvationAttack(t, eth.ETH65, FastSync)
  967. }
  968. func TestHighTDStarvationAttack65Light(t *testing.T) {
  969. testHighTDStarvationAttack(t, eth.ETH65, LightSync)
  970. }
  971. func TestHighTDStarvationAttack66Full(t *testing.T) {
  972. testHighTDStarvationAttack(t, eth.ETH66, FullSync)
  973. }
  974. func TestHighTDStarvationAttack66Fast(t *testing.T) {
  975. testHighTDStarvationAttack(t, eth.ETH66, FastSync)
  976. }
  977. func TestHighTDStarvationAttack66Light(t *testing.T) {
  978. testHighTDStarvationAttack(t, eth.ETH66, LightSync)
  979. }
  980. func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
  981. t.Parallel()
  982. tester := newTester()
  983. chain := testChainBase.shorten(1)
  984. tester.newPeer("attack", protocol, chain)
  985. if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
  986. t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
  987. }
  988. tester.terminate()
  989. }

// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH65) }
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
	t.Parallel()

	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
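	// Each error above is fed back through synchroniseMock, so Synchronise returns the
	// canned result without doing any real networking; the tester's peer map is then
	// inspected to verify whether the downloader dropped the offending peer.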
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, chain); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress65Full(t *testing.T)  { testSyncProgress(t, eth.ETH65, FullSync) }
func TestSyncProgress65Fast(t *testing.T)  { testSyncProgress(t, eth.ETH65, FastSync) }
func TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, eth.ETH65, LightSync) }
func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
func TestSyncProgress66Fast(t *testing.T)  { testSyncProgress(t, eth.ETH66, FastSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }

func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
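	// The hook above parks each sync right after the origin and target height have been
	// determined: it signals on starting and then blocks on progress, so the assertions
	// below can observe a stable snapshot of the intermediate sync progress.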
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
	pending := new(sync.WaitGroup)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chain.len()/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len()/2 - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len() - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})
}
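
// checkProgress fetches the downloader's current progress report and compares it
// against want, zeroing out the state-trie counters (KnownStates, PulledStates) on
// both sides since these tests make no assertions about state sync.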
func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
	p.KnownStates, p.PulledStates = 0, 0
	want.KnownStates, want.PulledStates = 0, 0

	if p != want {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}

// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// revert).
func TestForkedSyncProgress65Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH65, FullSync) }
func TestForkedSyncProgress65Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH65, FastSync) }
func TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH65, LightSync) }
func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
func TestForkedSyncProgress66Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FastSync) }
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch)
	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA)
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting

	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chainA.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainA.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainB.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})
}

// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress65Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH65, FullSync) }
func TestFailedSyncProgress65Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH65, FastSync) }
func TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH65, LightSync) }
func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
func TestFailedSyncProgress66Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FastSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})

	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
	brokenChain := chain.shorten(chain.len())
	missing := brokenChain.len() / 2
	delete(brokenChain.headerm, brokenChain.chain[missing])
	delete(brokenChain.blockm, brokenChain.chain[missing])
	delete(brokenChain.receiptm, brokenChain.chain[missing])
	tester.newPeer("faulty", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains the same
	// after the failure
	tester.newPeer("valid", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(chain.len() - 1),
		HighestBlock: uint64(chain.len() - 1),
	})
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress65Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH65, FullSync) }
func TestFakedSyncProgress65Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH65, FastSync) }
func TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH65, LightSync) }
func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
func TestFakedSyncProgress66Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FastSync) }
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	brokenChain := chain.shorten(chain.len())
	numMissing := 5
	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
		delete(brokenChain.headerm, brokenChain.chain[i])
	}
	tester.newPeer("attack", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been reduced to
	// the true value.
	validChain := chain.shorten(chain.len() - numMissing)
	tester.newPeer("valid", protocol, validChain)
	pending.Add(1)

	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(validChain.len() - 1),
	})

	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(validChain.len() - 1),
		HighestBlock: uint64(validChain.len() - 1),
	})
}

// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
func TestDeliverHeadersHang65Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH65, FullSync) }
func TestDeliverHeadersHang65Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH65, FastSync) }
func TestDeliverHeadersHang65Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH65, LightSync) }
func TestDeliverHeadersHang66Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FullSync) }
func TestDeliverHeadersHang66Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FastSync) }
func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) }

func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	master := newTester()
	defer master.terminate()
	chain := testChainBase.shorten(15)

	for i := 0; i < 200; i++ {
		tester := newTester()
		tester.peerDb = master.peerDb
		tester.newPeer("peer", protocol, chain)

		// Whenever the downloader requests headers, flood it with
		// a lot of unrequested header deliveries.
		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
			peer:   tester.downloader.peers.peers["peer"].peer,
			tester: tester,
		}
		if err := tester.sync("peer", nil, mode); err != nil {
			t.Errorf("test %d: sync failed: %v", i, err)
		}
		tester.terminate()
	}
}
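
// floodingTestPeer wraps a regular test peer, forwarding every request verbatim
// except RequestHeadersByNumber, which it uses to swamp the downloader with
// unsolicited header deliveries from fake peers before serving the real request.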
type floodingTestPeer struct {
	peer   Peer
	tester *downloadTester
}

func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
}
func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
	return ftp.peer.RequestBodies(hashes)
}
func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
	return ftp.peer.RequestReceipts(hashes)
}
func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
	return ftp.peer.RequestNodeData(hashes)
}
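
// RequestHeadersByNumber first fires off a barrage of unsolicited header deliveries
// from several hundred fake peers, then forwards the genuine request once the first
// flood response has been accepted. If any delivery fails to complete within a
// minute, the test panics, reproducing the original hang.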
func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
	deliveriesDone := make(chan struct{}, 500)
	for i := 0; i < cap(deliveriesDone)-1; i++ {
		peer := fmt.Sprintf("fake-peer%d", i)
		go func() {
			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
			deliveriesDone <- struct{}{}
		}()
	}

	// None of the extra deliveries should block.
	timeout := time.After(60 * time.Second)
	launched := false
	for i := 0; i < cap(deliveriesDone); i++ {
		select {
		case <-deliveriesDone:
			if !launched {
				// Start delivering the requested headers
				// after one of the flooding responses has arrived.
				go func() {
					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
					deliveriesDone <- struct{}{}
				}()
				launched = true
			}
		case <-timeout:
			panic("blocked")
		}
	}
	return nil
}

func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edge cases to make sure they behave somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
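	// reqs expands a (from, count, span) triple into the concrete block numbers that
	// would be requested: count values starting at from, spaced span+1 apart. For
	// example, reqs(1323, 12, 15) produces 1323, 1339, ..., 1499, i.e. the first
	// expected slice above.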
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
			t.Logf("got: %v\n", res)
			t.Logf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}

// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement65Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FullSync) }
func TestCheckpointEnforcement65Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FastSync) }
func TestCheckpointEnforcement65Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH65, LightSync)
}
func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) }
func TestCheckpointEnforcement66Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH66, LightSync)
}

func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	// Create a new tester with a particular hard coded checkpoint block
	tester := newTester()
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)

	// Attempt to sync with the peer and validate the result. Fast and light sync must
	// refuse a peer whose chain is still below the checkpoint, while full sync ignores
	// the checkpoint altogether.
	tester.newPeer("peer", protocol, chain)

	var expect error
	if mode == FastSync || mode == LightSync {
		expect = errUnsyncedPeer
	}
	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}
	if mode == FastSync || mode == LightSync {
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, chain.len())
	}
}