downloader_test.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/trie"
)

// Reduce some of the parameters to make the tester faster.
func init() {
	fullMaxForkAncestry = 10000
	lightMaxForkAncestry = 10000
	blockCacheMaxItems = 1024
	fsHeaderContCheck = 500 * time.Millisecond
}

// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
	downloader *Downloader

	genesis *types.Block   // Genesis blocks used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data
	peers   map[string]*downloadTesterPeer

	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain

	ancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester
	ancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester
	ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
	ancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain

	lock sync.RWMutex
}
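
// A typical test in this file drives the tester roughly as follows (sketch based
// on the tests further down, not an additional test):
//
//	chain := testChainBase.shorten(blockCacheMaxItems - 15)
//	tester := newTester()
//	defer tester.terminate()
//	tester.newPeer("peer", eth.ETH66, chain)
//	if err := tester.sync("peer", nil, FullSync); err != nil { ... }
//	assertOwnChain(t, tester, chain.len())
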
// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	tester := &downloadTester{
		genesis:     testGenesis,
		peerDb:      testDB,
		peers:       make(map[string]*downloadTesterPeer),
		ownHashes:   []common.Hash{testGenesis.Hash()},
		ownHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ownBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ownChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},

		// Initialize ancient store with test genesis block
		ancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
		ancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
		ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
		ancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
	}
	tester.stateDb = rawdb.NewMemoryDatabase()
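	// Seed the genesis state root with a placeholder entry so that state
	// presence checks against stateDb succeed without building a real trie.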
	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})

	tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
	return tester
}

// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
	dl.downloader.Terminate()
}

// sync starts synchronizing with a remote peer, blocking until it completes.
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
	dl.lock.RLock()
	hash := dl.peers[id].chain.headBlock().Hash()
	// If no particular TD was requested, load from the peer's blockchain
	if td == nil {
		td = dl.peers[id].chain.td(hash)
	}
	dl.lock.RUnlock()

	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, hash, td, mode)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
	default:
		// Downloader is still accepting packets, can block a peer up
		panic("downloader active post sync cycle") // panic will be caught by tester
	}
	return err
}

// HasHeader checks if a header is present in the tester's canonical chain.
func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
	return dl.GetHeaderByHash(hash) != nil
}

// HasBlock checks if a block is present in the tester's canonical chain.
func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
	return dl.GetBlockByHash(hash) != nil
}

// HasFastBlock checks if the receipts for a block are present in the tester's
// canonical chain.
func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	if _, ok := dl.ancientReceipts[hash]; ok {
		return true
	}
	_, ok := dl.ownReceipts[hash]
	return ok
}

// GetHeaderByHash retrieves a header from the tester's canonical chain.
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()
	return dl.getHeaderByHash(hash)
}

// getHeaderByHash returns the header if found either within ancients or own blocks.
// This method assumes that the caller holds at least the read-lock (dl.lock).
func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
	header := dl.ancientHeaders[hash]
	if header != nil {
		return header
	}
	return dl.ownHeaders[hash]
}

// GetBlockByHash retrieves a block from the tester's canonical chain.
func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	block := dl.ancientBlocks[hash]
	if block != nil {
		return block
	}
	return dl.ownBlocks[hash]
}

// CurrentHeader retrieves the current head header from the canonical chain.
func (dl *downloadTester) CurrentHeader() *types.Header {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
		if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
			return header
		}
	}
	return dl.genesis.Header()
}

// CurrentBlock retrieves the current head block from the canonical chain.
func (dl *downloadTester) CurrentBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
				return block
			}
		}
	}
	return dl.genesis
}

// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) CurrentFastBlock() *types.Block {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	for i := len(dl.ownHashes) - 1; i >= 0; i-- {
		if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
		if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
			return block
		}
	}
	return dl.genesis
}

// FastSyncCommitHead manually sets the head block to a given hash.
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
	// For now only check that the state trie is correct
	if block := dl.GetBlockByHash(hash); block != nil {
		_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
		return err
	}
	return fmt.Errorf("non existent block: %x", hash[:4])
}

// GetTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.getTd(hash)
}

// getTd retrieves the block's total difficulty if found either within ancients
// or own blocks.
// This method assumes that the caller holds at least the read-lock (dl.lock).
func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
	if td := dl.ancientChainTd[hash]; td != nil {
		return td
	}
	return dl.ownChainTd[hash]
}

// InsertHeaderChain injects a new batch of headers into the simulated chain.
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
	if dl.getHeaderByHash(headers[0].ParentHash) == nil {
		return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
	}
	var hashes []common.Hash
	for i := 1; i < len(headers); i++ {
		hash := headers[i-1].Hash()
		if headers[i].ParentHash != headers[i-1].Hash() {
			return i, fmt.Errorf("non-contiguous import at position %d", i)
		}
		hashes = append(hashes, hash)
	}
	hashes = append(hashes, headers[len(headers)-1].Hash())

	// Do a full insert if pre-checks passed
	for i, header := range headers {
		hash := hashes[i]
		if dl.getHeaderByHash(hash) != nil {
			continue
		}
		if dl.getHeaderByHash(header.ParentHash) == nil {
			// This _should_ be impossible, due to precheck and induction
			return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
		}
		dl.ownHashes = append(dl.ownHashes, hash)
		dl.ownHeaders[hash] = header

		td := dl.getTd(header.ParentHash)
		dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty)
	}
	return len(headers), nil
}

// InsertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
	for i, block := range blocks {
		if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
			return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
		} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
			return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
		}
		if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
			dl.ownHashes = append(dl.ownHashes, block.Hash())
			dl.ownHeaders[block.Hash()] = block.Header()
		}
		dl.ownBlocks[block.Hash()] = block
		dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
		dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})

		td := dl.getTd(block.ParentHash())
		dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
	}
	return len(blocks), nil
}

// InsertReceiptChain injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	for i := 0; i < len(blocks) && i < len(receipts); i++ {
		if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
			return i, errors.New("unknown owner")
		}
		if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
			if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
				return i, errors.New("InsertReceiptChain: unknown parent")
			}
		}
		if blocks[i].NumberU64() <= ancientLimit {
			dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
			dl.ancientReceipts[blocks[i].Hash()] = receipts[i]

			// Migrate from active db to ancient db
			dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
			dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())
			delete(dl.ownHeaders, blocks[i].Hash())
			delete(dl.ownChainTd, blocks[i].Hash())
		} else {
			dl.ownBlocks[blocks[i].Hash()] = blocks[i]
			dl.ownReceipts[blocks[i].Hash()] = receipts[i]
		}
	}
	return len(blocks), nil
}

// SetHead rewinds the local chain to a new head.
func (dl *downloadTester) SetHead(head uint64) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	// Find the hash of the head to reset to
	var hash common.Hash
	for h, header := range dl.ownHeaders {
		if header.Number.Uint64() == head {
			hash = h
		}
	}
	for h, header := range dl.ancientHeaders {
		if header.Number.Uint64() == head {
			hash = h
		}
	}
	if hash == (common.Hash{}) {
		return fmt.Errorf("unknown head to set: %d", head)
	}
	// Find the offset in the header chain
	var offset int
	for o, h := range dl.ownHashes {
		if h == hash {
			offset = o
			break
		}
	}
	// Remove all the hashes and associated data afterwards
	for i := offset + 1; i < len(dl.ownHashes); i++ {
		delete(dl.ownChainTd, dl.ownHashes[i])
		delete(dl.ownHeaders, dl.ownHashes[i])
		delete(dl.ownReceipts, dl.ownHashes[i])
		delete(dl.ownBlocks, dl.ownHashes[i])

		delete(dl.ancientChainTd, dl.ownHashes[i])
		delete(dl.ancientHeaders, dl.ownHashes[i])
		delete(dl.ancientReceipts, dl.ownHashes[i])
		delete(dl.ancientBlocks, dl.ownHashes[i])
	}
	dl.ownHashes = dl.ownHashes[:offset+1]
	return nil
}

// Rollback removes some recently added elements from the chain.
func (dl *downloadTester) Rollback(hashes []common.Hash) {
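	// Intentionally a no-op: the tester does not model rollbacks.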
}

// newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
	dl.peers[id] = peer
	return dl.downloader.RegisterPeer(id, version, peer)
}

// dropPeer simulates a hard peer removal from the connection pool.
func (dl *downloadTester) dropPeer(id string) {
	dl.lock.Lock()
	defer dl.lock.Unlock()

	delete(dl.peers, id)
	dl.downloader.UnregisterPeer(id)
}

type downloadTesterPeer struct {
	dl            *downloadTester
	id            string
	chain         *testChain
	missingStates map[common.Hash]bool // State entries that fast sync should not return
}

// Head constructs a function to retrieve a peer's current head hash
// and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	b := dlp.chain.headBlock()
	return b.Hash(), dlp.chain.td(b.Hash())
}

// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	result := dlp.chain.headersByHash(origin, amount, skip, reverse)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	result := dlp.chain.headersByNumber(origin, amount, skip, reverse)
	go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
	return nil
}

// RequestBodies constructs a getBlockBodies method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block bodies from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
	txs, uncles := dlp.chain.bodies(hashes)
	go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
	return nil
}

// RequestReceipts constructs a getReceipts method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of block receipts from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
	receipts := dlp.chain.receipts(hashes)
	go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
	return nil
}

// RequestNodeData constructs a getNodeData method associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of node state data from the particularly requested peer.
func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
	dlp.dl.lock.RLock()
	defer dlp.dl.lock.RUnlock()

	results := make([][]byte, 0, len(hashes))
	for _, hash := range hashes {
		if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
			if !dlp.missingStates[hash] {
				results = append(results, data)
			}
		}
	}
	go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
	return nil
}

// assertOwnChain checks if the local chain contains the correct number of items
// of the various chain components.
func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	assertOwnForkedChain(t, tester, 1, []int{length})
}

// assertOwnForkedChain checks if the local forked chain contains the correct
// number of items of the various chain components.
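//
// For illustration (the numbers are hypothetical, not taken from any test below):
// with a common prefix of 4 blocks and fork lengths of {8, 10}, the expected
// header count is 8 + (10 - 4) = 14, since only the non-shared tail of each
// additional fork contributes new items.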
func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	// Initialize the counters for the first fork
	headers, blocks, receipts := lengths[0], lengths[0], lengths[0]

	// Update the counters for each subsequent fork
	for _, length := range lengths[1:] {
		headers += length - common
		blocks += length - common
		receipts += length - common
	}
	if tester.downloader.getMode() == LightSync {
		blocks, receipts = 1, 1
	}
	if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
	}
	if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
		t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
	}
	if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
		t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
	}
}

func TestCanonicalSynchronisation65Full(t *testing.T)  { testCanonSync(t, eth.ETH65, FullSync) }
func TestCanonicalSynchronisation65Fast(t *testing.T)  { testCanonSync(t, eth.ETH65, FastSync) }
func TestCanonicalSynchronisation65Light(t *testing.T) { testCanonSync(t, eth.ETH65, LightSync) }
func TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, eth.ETH66, FullSync) }
func TestCanonicalSynchronisation66Fast(t *testing.T)  { testCanonSync(t, eth.ETH66, FastSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }

func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	tester.newPeer("peer", protocol, chain)

	// Synchronise with the peer and make sure all relevant data was retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if a large batch of blocks is being downloaded, it is throttled
// until the cached blocks are retrieved.
func TestThrottling65Full(t *testing.T) { testThrottling(t, eth.ETH65, FullSync) }
func TestThrottling65Fast(t *testing.T) { testThrottling(t, eth.ETH65, FastSync) }
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) }

func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()

	// Create a long block chain to download and the tester
	targetBlocks := testChainBase.len() - 1
	tester.newPeer("peer", protocol, testChainBase)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(results []*fetchResult) {
		atomic.StoreUint32(&blocked, uint32(len(results)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error, 1)
	go func() {
		errc <- tester.sync("peer", nil, mode)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (! reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached, frozen int
		for start := time.Now(); time.Since(start) < 3*time.Second; {
			time.Sleep(25 * time.Millisecond)
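
			// Grab the tester, queue and result-cache locks together so that the
			// cached/frozen/retrieved counters below form one consistent snapshot.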
			tester.lock.Lock()
			tester.downloader.queue.lock.Lock()
			tester.downloader.queue.resultCache.lock.Lock()
			{
				cached = tester.downloader.queue.resultCache.countCompleted()
				frozen = int(atomic.LoadUint32(&blocked))
				retrieved = len(tester.ownBlocks)
			}
			tester.downloader.queue.resultCache.lock.Unlock()
			tester.downloader.queue.lock.Unlock()
			tester.lock.Unlock()

			if cached == blockCacheMaxItems ||
				cached == blockCacheMaxItems-reorgProtHeaderDelay ||
				retrieved+cached+frozen == targetBlocks+1 ||
				retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		tester.lock.RLock()
		retrieved = len(tester.ownBlocks)
		tester.lock.RUnlock()
		if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	assertOwnChain(t, tester, targetBlocks+1)
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
	tester.terminate()
}

// Tests that simple synchronisation against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSync65Full(t *testing.T)  { testForkedSync(t, eth.ETH65, FullSync) }
func TestForkedSync65Fast(t *testing.T)  { testForkedSync(t, eth.ETH65, FastSync) }
func TestForkedSync65Light(t *testing.T) { testForkedSync(t, eth.ETH65, LightSync) }
func TestForkedSync66Full(t *testing.T)  { testForkedSync(t, eth.ETH66, FullSync) }
func TestForkedSync66Fast(t *testing.T)  { testForkedSync(t, eth.ETH66, FastSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }

func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
	tester.newPeer("fork A", protocol, chainA)
	tester.newPeer("fork B", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("fork A", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("fork B", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that synchronising against a much shorter but much heavier fork works
// correctly and is not dropped.
func TestHeavyForkedSync65Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH65, FullSync) }
func TestHeavyForkedSync65Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH65, FastSync) }
func TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH65, LightSync) }
func TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Fast(t *testing.T)  { testHeavyForkedSync(t, eth.ETH66, FastSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }

func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
	chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
	tester.newPeer("light", protocol, chainA)
	tester.newPeer("heavy", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("light", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and make sure that fork is pulled too
	if err := tester.sync("heavy", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
}

// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
func TestBoundedForkedSync65Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH65, FullSync) }
func TestBoundedForkedSync65Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH65, FastSync) }
func TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH65, LightSync) }
func TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FullSync) }
func TestBoundedForkedSync66Fast(t *testing.T)  { testBoundedForkedSync(t, eth.ETH66, FastSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }

func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chainA := testChainForkLightA
	chainB := testChainForkLightB
	tester.newPeer("original", protocol, chainA)
	tester.newPeer("rewriter", protocol, chainB)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
}

// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
func TestBoundedHeavyForkedSync65Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH65, FullSync)
}
func TestBoundedHeavyForkedSync65Fast(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH65, FastSync)
}
func TestBoundedHeavyForkedSync65Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH65, LightSync)
}
func TestBoundedHeavyForkedSync66Full(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
}
func TestBoundedHeavyForkedSync66Fast(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, FastSync)
}
func TestBoundedHeavyForkedSync66Light(t *testing.T) {
	testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
}

func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()

	// Create a long enough forked chain
	chainA := testChainForkLightA
	chainB := testChainForkHeavy
	tester.newPeer("original", protocol, chainA)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("original", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chainA.len())
	tester.newPeer("heavy-rewriter", protocol, chainB)

	// Synchronise with the second peer and ensure that the fork is rejected for being too old
	if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
		t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
	}
	tester.terminate()
}

// Tests that an inactive downloader will not accept incoming block headers,
// bodies and receipts.
func TestInactiveDownloader63(t *testing.T) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Check that neither block headers nor bodies are accepted
	if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
	if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
	}
}

// Tests that a canceled download wipes all previously accumulated state.
func TestCancel65Full(t *testing.T)  { testCancel(t, eth.ETH65, FullSync) }
func TestCancel65Fast(t *testing.T)  { testCancel(t, eth.ETH65, FastSync) }
func TestCancel65Light(t *testing.T) { testCancel(t, eth.ETH65, LightSync) }
func TestCancel66Full(t *testing.T)  { testCancel(t, eth.ETH66, FullSync) }
func TestCancel66Fast(t *testing.T)  { testCancel(t, eth.ETH66, FastSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }

func testCancel(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(MaxHeaderFetch)
	tester.newPeer("peer", protocol, chain)

	// Make sure canceling works with a pristine downloader
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
	// Synchronise with the peer, but cancel afterwards
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	tester.downloader.Cancel()
	if !tester.downloader.queue.Idle() {
		t.Errorf("download queue not idle")
	}
}

// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation65Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH65, FullSync) }
func TestMultiSynchronisation65Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH65, FastSync) }
func TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH65, LightSync) }
func TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FullSync) }
func TestMultiSynchronisation66Fast(t *testing.T)  { testMultiSynchronisation(t, eth.ETH66, FastSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }

func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create various peers with various parts of the chain
	targetPeers := 8
	chain := testChainBase.shorten(targetPeers * 100)
	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
		tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
	}
	if err := tester.sync("peer #0", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that synchronisations behave well in multi-version protocol environments
// and do not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation65Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH65, FullSync) }
func TestMultiProtoSynchronisation65Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH65, FastSync) }
func TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, eth.ETH65, LightSync) }
func TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Fast(t *testing.T)  { testMultiProtoSync(t, eth.ETH66, FastSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }

func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a small enough block chain to download
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Create peers of every type
	tester.newPeer("peer 65", eth.ETH65, chain)
	tester.newPeer("peer 66", eth.ETH66, chain)

	// Synchronise with the requested peer and make sure all blocks were retrieved
	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Check that no peers have been dropped off
	for _, version := range []int{65, 66} {
		peer := fmt.Sprintf("peer %d", version)
		if _, ok := tester.peers[peer]; !ok {
			t.Errorf("%s dropped", peer)
		}
	}
}

// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block by itself.
func TestEmptyShortCircuit65Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH65, FullSync) }
func TestEmptyShortCircuit65Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH65, FastSync) }
func TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH65, LightSync) }
func TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Fast(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH66, FastSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }

func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	// Create a block chain to download
	chain := testChainBase
	tester.newPeer("peer", protocol, chain)

	// Instrument the downloader to signal body requests
	bodiesHave, receiptsHave := int32(0), int32(0)
	tester.downloader.bodyFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&bodiesHave, int32(len(headers)))
	}
	tester.downloader.receiptFetchHook = func(headers []*types.Header) {
		atomic.AddInt32(&receiptsHave, int32(len(headers)))
	}
	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())

	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range chain.blockm {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
	for _, receipt := range chain.receiptm {
		if mode == FastSync && len(receipt) > 0 {
			receiptsNeeded++
		}
	}
	if int(bodiesHave) != bodiesNeeded {
		t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
	}
	if int(receiptsHave) != receiptsNeeded {
		t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
	}
}

// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack65Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH65, FullSync) }
func TestMissingHeaderAttack65Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH65, FastSync) }
func TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH65, LightSync) }
func TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Fast(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH66, FastSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }

func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
	tester.newPeer("attack", protocol, brokenChain)

	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
func TestShiftedHeaderAttack65Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH65, FullSync) }
func TestShiftedHeaderAttack65Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH65, FastSync) }
func TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH65, LightSync) }
func TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Fast(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH66, FastSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }

func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()

	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Attempt a full sync with an attacker feeding shifted headers
	brokenChain := chain.shorten(chain.len())
	delete(brokenChain.headerm, brokenChain.chain[1])
	delete(brokenChain.blockm, brokenChain.chain[1])
	delete(brokenChain.receiptm, brokenChain.chain[1])
	tester.newPeer("attack", protocol, brokenChain)
	if err := tester.sync("attack", nil, mode); err == nil {
		t.Fatalf("succeeded attacker synchronisation")
	}
	// Synchronise with the valid peer and make sure sync succeeds
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	assertOwnChain(t, tester, chain.len())
}

// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH65, FastSync) }
func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) }

func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()
	tester := newTester()

	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
	chain := testChainBase.shorten(targetBlocks)

	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
	fastAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
	tester.newPeer("fast-attack", protocol, fastAttackChain)

	if err := tester.sync("fast-attack", nil, mode); err == nil {
		t.Fatalf("succeeded fast attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
	}
	// Attempt to sync with an attacker that feeds junk during the block import phase.
	// This should result in both the last fsHeaderSafetyNet number of headers being
	// rolled back, and also the pivot point being reverted to a non-block status.
	missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
	blockAttackChain := chain.shorten(chain.len())
	delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
	delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
	tester.newPeer("block-attack", protocol, blockAttackChain)

	if err := tester.sync("block-attack", nil, mode); err == nil {
		t.Fatalf("succeeded block attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Attempt to sync with an attacker that withholds promised blocks after the
	// fast sync pivot point. This could be a trial to leave the node with a bad
	// but already imported pivot block.
	withholdAttackChain := chain.shorten(chain.len())
	tester.newPeer("withhold-attack", protocol, withholdAttackChain)
	tester.downloader.syncInitHook = func(uint64, uint64) {
		for i := missing; i < withholdAttackChain.len(); i++ {
			delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
		}
		tester.downloader.syncInitHook = nil
	}
	if err := tester.sync("withhold-attack", nil, mode); err == nil {
		t.Fatalf("succeeded withholding attacker synchronisation")
	}
	if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
		t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
	}
	if mode == FastSync {
		if head := tester.CurrentBlock().NumberU64(); head != 0 {
			t.Errorf("fast sync pivot block #%d not rolled back", head)
		}
	}
	// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
	// should also disable fast syncing for this process, verify that we did a fresh full
	// sync. Note, we can't assert anything about the receipts since we won't purge the
	// database of them, hence we can't use assertOwnChain.
	tester.newPeer("valid", protocol, chain)
	if err := tester.sync("valid", nil, mode); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
	if hs := len(tester.ownHeaders); hs != chain.len() {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
	}
	if mode != LightSync {
		if bs := len(tester.ownBlocks); bs != chain.len() {
			t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
		}
	}
	tester.terminate()
}

// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack65Full(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH65, FullSync)
}
func TestHighTDStarvationAttack65Fast(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH65, FastSync)
}
func TestHighTDStarvationAttack65Light(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH65, LightSync)
}
func TestHighTDStarvationAttack66Full(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, FullSync)
}
func TestHighTDStarvationAttack66Fast(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, FastSync)
}
func TestHighTDStarvationAttack66Light(t *testing.T) {
	testHighTDStarvationAttack(t, eth.ETH66, LightSync)
}

func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()

	chain := testChainBase.shorten(1)
	tester.newPeer("attack", protocol, chain)
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
	tester.terminate()
}


// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH65) }
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }

func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
	t.Parallel()

	// Define the disconnection requirement for individual hash fetch errors
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},                        // Sync succeeded, all is well
		{errBusy, false},                    // Sync is already in progress, no problem
		{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop
		{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it
		{errStallingPeer, true},             // Peer was detected to be stalling, drop it
		{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it
		{errNoPeers, false},                 // No peers to download from, soft race, no issue
		{errTimeout, true},                  // No hashes received in due time, drop the peer
		{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end
		{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser
		{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter
		{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop
		{errInvalidBody, false},             // A bad peer was detected, but not the sync origin
		{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin
		{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
	}
	// Run the tests and check disconnection status
	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(1)

	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, chain); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peers[id]; !ok {
			t.Fatalf("test %d: registered peer not found", i)
		}
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
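		// The peer should still be registered exactly when the error is considered
		// harmless for the sync origin (drop == false), and gone otherwise.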
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
	}
}

// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
func TestSyncProgress65Full(t *testing.T)  { testSyncProgress(t, eth.ETH65, FullSync) }
func TestSyncProgress65Fast(t *testing.T)  { testSyncProgress(t, eth.ETH65, FastSync) }
func TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, eth.ETH65, LightSync) }
func TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, eth.ETH66, FullSync) }
func TestSyncProgress66Fast(t *testing.T)  { testSyncProgress(t, eth.ETH66, FastSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }

func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
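	// The hook above pauses the downloader right after it has determined the sync
	// boundaries: a send on starting signals the test, and the downloader is only
	// released once the test sends on progress, so progress can be asserted mid-sync.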
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise half the blocks and check initial progress
	tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-half", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chain.len()/2 - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Synchronise all the blocks and check continuation progress
	tester.newPeer("peer-full", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("peer-full", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len()/2 - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(chain.len()/2 - 1),
		CurrentBlock:  uint64(chain.len() - 1),
		HighestBlock:  uint64(chain.len() - 1),
	})
}

func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
	// Mark this method as a helper to report errors at callsite, not in here
	t.Helper()

	p := d.Progress()
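	// The state-sync counters depend on timing and are not what these tests assert,
	// so zero them on both sides before comparing the snapshots.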
	p.KnownStates, p.PulledStates = 0, 0
	want.KnownStates, want.PulledStates = 0, 0
	if p != want {
		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
	}
}

// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// reversal).
func TestForkedSyncProgress65Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH65, FullSync) }
func TestForkedSyncProgress65Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH65, FastSync) }
func TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH65, LightSync) }
func TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FullSync) }
func TestForkedSyncProgress66Fast(t *testing.T)  { testForkedSyncProgress(t, eth.ETH66, FastSync) }
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }

func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch)
	chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Synchronise with one of the forks and check progress
	tester.newPeer("fork A", protocol, chainA)
	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork A", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(chainA.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()

	// Simulate a successful sync above the fork
	tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight

	// Synchronise with the second fork and check progress resets
	tester.newPeer("fork B", protocol, chainB)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("fork B", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainA.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		StartingBlock: uint64(testChainBase.len()) - 1,
		CurrentBlock:  uint64(chainB.len() - 1),
		HighestBlock:  uint64(chainB.len() - 1),
	})
}

// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
func TestFailedSyncProgress65Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH65, FullSync) }
func TestFailedSyncProgress65Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH65, FastSync) }
func TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH65, LightSync) }
func TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FullSync) }
func TestFailedSyncProgress66Fast(t *testing.T)  { testFailedSyncProgress(t, eth.ETH66, FastSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }

func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Attempt a full sync with a faulty peer
	brokenChain := chain.shorten(chain.len())
	missing := brokenChain.len() / 2
	delete(brokenChain.headerm, brokenChain.chain[missing])
	delete(brokenChain.blockm, brokenChain.chain[missing])
	delete(brokenChain.receiptm, brokenChain.chain[missing])
	tester.newPeer("faulty", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("faulty", nil, mode); err == nil {
			panic("succeeded faulty synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress origin remains the
	// same after a failure
	tester.newPeer("valid", protocol, chain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", afterFailedSync)

	// Check final progress after successful sync
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(chain.len() - 1),
		HighestBlock: uint64(chain.len() - 1),
	})
}

// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress65Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH65, FullSync) }
func TestFakedSyncProgress65Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH65, FastSync) }
func TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH65, LightSync) }
func TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FullSync) }
func TestFakedSyncProgress66Fast(t *testing.T)  { testFakedSyncProgress(t, eth.ETH66, FastSync) }
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }

func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	tester := newTester()
	defer tester.terminate()
	chain := testChainBase.shorten(blockCacheMaxItems - 15)

	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
	}
	checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})

	// Create and sync with an attacker that promises a higher chain than available.
	brokenChain := chain.shorten(chain.len())
	numMissing := 5
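	// Drop a few headers just below the advertised head (but keep the head itself),
	// so the peer still claims the full height while the content cannot be served.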
	for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
		delete(brokenChain.headerm, brokenChain.chain[i])
	}
	tester.newPeer("attack", protocol, brokenChain)

	pending := new(sync.WaitGroup)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("attack", nil, mode); err == nil {
			panic("succeeded attacker synchronisation")
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
		HighestBlock: uint64(brokenChain.len() - 1),
	})
	progress <- struct{}{}
	pending.Wait()
	afterFailedSync := tester.downloader.Progress()

	// Synchronise with a good peer and check that the progress height has been reduced to
	// the true value.
	validChain := chain.shorten(chain.len() - numMissing)
	tester.newPeer("valid", protocol, validChain)
	pending.Add(1)
	go func() {
		defer pending.Done()
		if err := tester.sync("valid", nil, mode); err != nil {
			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
		}
	}()
	<-starting
	checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
		CurrentBlock: afterFailedSync.CurrentBlock,
		HighestBlock: uint64(validChain.len() - 1),
	})

	// Check final progress after successful sync.
	progress <- struct{}{}
	pending.Wait()
	checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
		CurrentBlock: uint64(validChain.len() - 1),
		HighestBlock: uint64(validChain.len() - 1),
	})
}

// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
func TestDeliverHeadersHang65Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH65, FullSync) }
func TestDeliverHeadersHang65Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH65, FastSync) }
func TestDeliverHeadersHang65Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH65, LightSync) }
func TestDeliverHeadersHang66Full(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FullSync) }
func TestDeliverHeadersHang66Fast(t *testing.T)  { testDeliverHeadersHang(t, eth.ETH66, FastSync) }
func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) }

func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	master := newTester()
	defer master.terminate()
	chain := testChainBase.shorten(15)

	for i := 0; i < 200; i++ {
		tester := newTester()
		tester.peerDb = master.peerDb
		tester.newPeer("peer", protocol, chain)

		// Whenever the downloader requests headers, flood it with
		// a lot of unrequested header deliveries.
		tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
			peer:   tester.downloader.peers.peers["peer"].peer,
			tester: tester,
		}
		if err := tester.sync("peer", nil, mode); err != nil {
			t.Errorf("test %d: sync failed: %v", i, err)
		}
		tester.terminate()
	}
}
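
// floodingTestPeer wraps a regular test peer, but when headers are requested by
// number it first bombards the downloader with unsolicited header deliveries from
// fake peers before answering the real request.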
type floodingTestPeer struct {
	peer   Peer
	tester *downloadTester
}

func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
	return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
}
func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
	return ftp.peer.RequestBodies(hashes)
}
func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
	return ftp.peer.RequestReceipts(hashes)
}
func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
	return ftp.peer.RequestNodeData(hashes)
}

func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
	deliveriesDone := make(chan struct{}, 500)
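	// Fire off a burst of bogus header deliveries from distinct fake peer ids; each
	// goroutine reports back on deliveriesDone so the loop below can tell that none
	// of them got stuck inside DeliverHeaders.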
	for i := 0; i < cap(deliveriesDone)-1; i++ {
		peer := fmt.Sprintf("fake-peer%d", i)
		go func() {
			ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
			deliveriesDone <- struct{}{}
		}()
	}

	// None of the extra deliveries should block.
	timeout := time.After(60 * time.Second)
	launched := false
	for i := 0; i < cap(deliveriesDone); i++ {
		select {
		case <-deliveriesDone:
			if !launched {
				// Start delivering the requested headers
				// after one of the flooding responses has arrived.
				go func() {
					ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
					deliveriesDone <- struct{}{}
				}()
				launched = true
			}
		case <-timeout:
			panic("blocked")
		}
	}
	return nil
}

func TestRemoteHeaderRequestSpan(t *testing.T) {
	testCases := []struct {
		remoteHeight uint64
		localHeight  uint64
		expected     []int
	}{
		// Remote is way higher. We should ask for the remote head and go backwards
		{1500, 1000,
			[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
		},
		{15000, 13006,
			[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
		},
		// Remote is pretty close to us. We don't have to fetch as many
		{1200, 1150,
			[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
		},
		// Remote is equal to us (so on a fork with higher td)
		// We should get the closest couple of ancestors
		{1500, 1500,
			[]int{1497, 1499},
		},
		// We're higher than the remote! Odd
		{1000, 1500,
			[]int{997, 999},
		},
		// Check some weird edge cases, to verify it behaves somewhat rationally
		{0, 1500,
			[]int{0, 2},
		},
		{6000000, 0,
			[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
		},
		{0, 0,
			[]int{0, 2},
		},
	}
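	// reqs reconstructs the header numbers the downloader would actually request for a
	// given (from, count, span) tuple: count numbers starting at from, spaced span+1 apart.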
	reqs := func(from, count, span int) []int {
		var r []int
		num := from
		for len(r) < count {
			r = append(r, num)
			num += span + 1
		}
		return r
	}
	for i, tt := range testCases {
		from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
		data := reqs(int(from), count, span)

		if max != uint64(data[len(data)-1]) {
			t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
		}
		failed := false
		if len(data) != len(tt.expected) {
			failed = true
			t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
		} else {
			for j, n := range data {
				if n != tt.expected[j] {
					failed = true
					break
				}
			}
		}
		if failed {
			res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
			exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
			t.Logf("got: %v\n", res)
			t.Logf("exp: %v\n", exp)
			t.Errorf("test %d: wrong values", i)
		}
	}
}

// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestCheckpointEnforcement65Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FullSync) }
func TestCheckpointEnforcement65Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FastSync) }
func TestCheckpointEnforcement65Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH65, LightSync)
}
func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) }
func TestCheckpointEnforcement66Light(t *testing.T) {
	testCheckpointEnforcement(t, eth.ETH66, LightSync)
}

func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
	t.Parallel()

	// Create a new tester with a particular hard coded checkpoint block
	tester := newTester()
	defer tester.terminate()

	tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
	chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
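
	// The peer's chain is deliberately cut off just below the configured checkpoint,
	// so fast and light sync must reject it while full sync is still allowed to proceed.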
	// Attempt to sync with the peer and validate the result
	tester.newPeer("peer", protocol, chain)

	var expect error
	if mode == FastSync || mode == LightSync {
		expect = errUnsyncedPeer
	}
	if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
		t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
	}
	if mode == FastSync || mode == LightSync {
		assertOwnChain(t, tester, 1)
	} else {
		assertOwnChain(t, tester, chain.len())
	}
}