skeleton_test.go 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894
  1. // Copyright 2022 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package downloader
  17. import (
  18. "encoding/json"
  19. "errors"
  20. "fmt"
  21. "math/big"
  22. "sync/atomic"
  23. "testing"
  24. "time"
  25. "github.com/ethereum/go-ethereum/common"
  26. "github.com/ethereum/go-ethereum/core/rawdb"
  27. "github.com/ethereum/go-ethereum/core/types"
  28. "github.com/ethereum/go-ethereum/eth/protocols/eth"
  29. "github.com/ethereum/go-ethereum/log"
  30. )
// hookedBackfiller is a tester backfiller with all interface methods mocked and
// hooked so tests can implement only the things they need.
type hookedBackfiller struct {
	// suspendHook is an optional hook to be called when the filler is requested
	// to be suspended.
	suspendHook func()

	// resumeHook is an optional hook to be called when the filler is requested
	// to be resumed.
	resumeHook func()
}
  41. // newHookedBackfiller creates a hooked backfiller with all callbacks disabled,
  42. // essentially acting as a noop.
  43. func newHookedBackfiller() backfiller {
  44. return new(hookedBackfiller)
  45. }
  46. // suspend requests the backfiller to abort any running full or snap sync
  47. // based on the skeleton chain as it might be invalid. The backfiller should
  48. // gracefully handle multiple consecutive suspends without a resume, even
  49. // on initial startup.
  50. func (hf *hookedBackfiller) suspend() *types.Header {
  51. if hf.suspendHook != nil {
  52. hf.suspendHook()
  53. }
  54. return nil // we don't really care about header cleanups for now
  55. }
  56. // resume requests the backfiller to start running fill or snap sync based on
  57. // the skeleton chain as it has successfully been linked. Appending new heads
  58. // to the end of the chain will not result in suspend/resume cycles.
  59. func (hf *hookedBackfiller) resume() {
  60. if hf.resumeHook != nil {
  61. hf.resumeHook()
  62. }
  63. }
// skeletonTestPeer is a mock peer that can only serve header requests from a
// pre-prepared header chain (which may be arbitrarily wrong for testing).
//
// Requesting anything else from these peers will hard panic. Note, do *not*
// implement any other methods. We actually want to make sure that the skeleton
// syncer only depends on - and will only ever do so - on header requests.
type skeletonTestPeer struct {
	id      string          // Unique identifier of the mock peer
	headers []*types.Header // Headers to serve when requested, indexed by block number

	serve func(origin uint64) []*types.Header // Hook to allow custom responses

	served  uint64 // Number of headers served by this peer; accessed atomically
	dropped uint64 // Flag whether the peer was dropped (stop responding); accessed atomically
}
  77. // newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
  78. func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
  79. return &skeletonTestPeer{
  80. id: id,
  81. headers: headers,
  82. }
  83. }
// newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync
// with, and sets an optional serve hook that can return headers for delivery
// instead of the predefined chain. Useful for emulating malicious behavior that
// would otherwise require dedicated peer types.
func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func(origin uint64) []*types.Header) *skeletonTestPeer {
	return &skeletonTestPeer{
		id:      id,
		headers: headers,
		serve:   serve,
	}
}
// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
	// Since skeleton test peers are in-memory mocks, dropping them does not make
	// them inaccessible. As such, check a local `dropped` field to see if the
	// peer has been dropped and should not respond any more.
	if atomic.LoadUint64(&p.dropped) != 0 {
		return nil, errors.New("peer already dropped")
	}
	// Skeleton sync retrieves batches of headers going backward without gaps.
	// This ensures we can follow a clean parent progression without any reorg
	// hiccups. There is no need for any other type of header retrieval, so do
	// panic if there's such a request.
	if !reverse || skip != 0 {
		// Note, if other clients want to do these kinds of requests, it's their
		// problem, it will still work. We just don't want *us* making complicated
		// requests without a very strong reason to.
		panic(fmt.Sprintf("invalid header retrieval: reverse %v, want true; skip %d, want 0", reverse, skip))
	}
	// If the skeleton syncer requests the genesis block, panic. Whilst it could
	// be considered a valid request, our code specifically should not request it
	// ever since we want to link up headers to an existing local chain, which at
	// worst will be the genesis.
	if int64(origin)-int64(amount) < 0 {
		panic(fmt.Sprintf("headers requested before (or at) genesis: origin %d, amount %d", origin, amount))
	}
	// To make concurrency easier, the skeleton syncer always requests fixed size
	// batches of headers. Panic if the peer is requested an amount other than the
	// configured batch size (apart from the request leading to the genesis).
	if amount > requestHeaders || (amount < requestHeaders && origin > uint64(amount)) {
		panic(fmt.Sprintf("non-chunk size header batch requested: requested %d, want %d, origin %d", amount, requestHeaders, origin))
	}
	// Simple reverse header retrieval. Fill from the peer's chain and return.
	// If the tester has a serve hook set, try to use that before falling back
	// to the default behavior.
	var headers []*types.Header
	if p.serve != nil {
		headers = p.serve(origin)
	}
	if headers == nil {
		headers = make([]*types.Header, 0, amount)
		if len(p.headers) > int(origin) { // Don't serve headers if we're missing the origin
			for i := 0; i < amount; i++ {
				// Consider nil headers as a form of attack and withhold them. Nil
				// cannot be decoded from RLP, so it's not possible to produce an
				// attack by sending/receiving those over eth.
				header := p.headers[int(origin)-i]
				if header == nil {
					continue
				}
				headers = append(headers, header)
			}
		}
	}
	atomic.AddUint64(&p.served, uint64(len(headers)))

	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	// Deliver the headers to the downloader
	req := &eth.Request{
		Peer: p.id,
	}
	res := &eth.Response{
		Req:  req,
		Res:  (*eth.BlockHeadersPacket)(&headers),
		Meta: hashes,
		Time: 1,
		Done: make(chan error),
	}
	// Deliver asynchronously and bump the drop counter if the downloader
	// rejects the response, so subsequent requests are refused.
	go func() {
		sink <- res
		if err := <-res.Done; err != nil {
			log.Warn("Skeleton test peer response rejected", "err", err)
			atomic.AddUint64(&p.dropped, 1)
		}
	}()
	return req, nil
}
// Head panics: the skeleton syncer must never ask a peer for its remote head.
func (p *skeletonTestPeer) Head() (common.Hash, *big.Int) {
	panic("skeleton sync must not request the remote head")
}
// RequestHeadersByHash panics: the skeleton syncer must only request headers
// by number, never by hash.
func (p *skeletonTestPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request headers by hash")
}
// RequestBodies panics: the skeleton syncer must never request block bodies.
func (p *skeletonTestPeer) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request block bodies")
}
// RequestReceipts panics: the skeleton syncer must never request receipts.
func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	panic("skeleton sync must not request receipts")
}
  187. // Tests various sync initializations based on previous leftovers in the database
  188. // and announced heads.
  189. func TestSkeletonSyncInit(t *testing.T) {
  190. // Create a few key headers
  191. var (
  192. genesis = &types.Header{Number: big.NewInt(0)}
  193. block49 = &types.Header{Number: big.NewInt(49)}
  194. block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
  195. block50 = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
  196. )
  197. tests := []struct {
  198. headers []*types.Header // Database content (beside the genesis)
  199. oldstate []*subchain // Old sync state with various interrupted subchains
  200. head *types.Header // New head header to announce to reorg to
  201. newstate []*subchain // Expected sync state after the reorg
  202. }{
  203. // Completely empty database with only the genesis set. The sync is expected
  204. // to create a single subchain with the requested head.
  205. {
  206. head: block50,
  207. newstate: []*subchain{{Head: 50, Tail: 50}},
  208. },
  209. // Empty database with only the genesis set with a leftover empty sync
  210. // progress. This is a synthetic case, just for the sake of covering things.
  211. {
  212. oldstate: []*subchain{},
  213. head: block50,
  214. newstate: []*subchain{{Head: 50, Tail: 50}},
  215. },
  216. // A single leftover subchain is present, older than the new head. The
  217. // old subchain should be left as is and a new one appended to the sync
  218. // status.
  219. {
  220. oldstate: []*subchain{{Head: 10, Tail: 5}},
  221. head: block50,
  222. newstate: []*subchain{
  223. {Head: 50, Tail: 50},
  224. {Head: 10, Tail: 5},
  225. },
  226. },
  227. // Multiple leftover subchains are present, older than the new head. The
  228. // old subchains should be left as is and a new one appended to the sync
  229. // status.
  230. {
  231. oldstate: []*subchain{
  232. {Head: 20, Tail: 15},
  233. {Head: 10, Tail: 5},
  234. },
  235. head: block50,
  236. newstate: []*subchain{
  237. {Head: 50, Tail: 50},
  238. {Head: 20, Tail: 15},
  239. {Head: 10, Tail: 5},
  240. },
  241. },
  242. // A single leftover subchain is present, newer than the new head. The
  243. // newer subchain should be deleted and a fresh one created for the head.
  244. {
  245. oldstate: []*subchain{{Head: 65, Tail: 60}},
  246. head: block50,
  247. newstate: []*subchain{{Head: 50, Tail: 50}},
  248. },
  249. // Multiple leftover subchain is present, newer than the new head. The
  250. // newer subchains should be deleted and a fresh one created for the head.
  251. {
  252. oldstate: []*subchain{
  253. {Head: 75, Tail: 70},
  254. {Head: 65, Tail: 60},
  255. },
  256. head: block50,
  257. newstate: []*subchain{{Head: 50, Tail: 50}},
  258. },
  259. // Two leftover subchains are present, one fully older and one fully
  260. // newer than the announced head. The head should delete the newer one,
  261. // keeping the older one.
  262. {
  263. oldstate: []*subchain{
  264. {Head: 65, Tail: 60},
  265. {Head: 10, Tail: 5},
  266. },
  267. head: block50,
  268. newstate: []*subchain{
  269. {Head: 50, Tail: 50},
  270. {Head: 10, Tail: 5},
  271. },
  272. },
  273. // Multiple leftover subchains are present, some fully older and some
  274. // fully newer than the announced head. The head should delete the newer
  275. // ones, keeping the older ones.
  276. {
  277. oldstate: []*subchain{
  278. {Head: 75, Tail: 70},
  279. {Head: 65, Tail: 60},
  280. {Head: 20, Tail: 15},
  281. {Head: 10, Tail: 5},
  282. },
  283. head: block50,
  284. newstate: []*subchain{
  285. {Head: 50, Tail: 50},
  286. {Head: 20, Tail: 15},
  287. {Head: 10, Tail: 5},
  288. },
  289. },
  290. // A single leftover subchain is present and the new head is extending
  291. // it with one more header. We expect the subchain head to be pushed
  292. // forward.
  293. {
  294. headers: []*types.Header{block49},
  295. oldstate: []*subchain{{Head: 49, Tail: 5}},
  296. head: block50,
  297. newstate: []*subchain{{Head: 50, Tail: 5}},
  298. },
  299. // A single leftover subchain is present and although the new head does
  300. // extend it number wise, the hash chain does not link up. We expect a
  301. // new subchain to be created for the dangling head.
  302. {
  303. headers: []*types.Header{block49B},
  304. oldstate: []*subchain{{Head: 49, Tail: 5}},
  305. head: block50,
  306. newstate: []*subchain{
  307. {Head: 50, Tail: 50},
  308. {Head: 49, Tail: 5},
  309. },
  310. },
  311. // A single leftover subchain is present. A new head is announced that
  312. // links into the middle of it, correctly anchoring into an existing
  313. // header. We expect the old subchain to be truncated and extended with
  314. // the new head.
  315. {
  316. headers: []*types.Header{block49},
  317. oldstate: []*subchain{{Head: 100, Tail: 5}},
  318. head: block50,
  319. newstate: []*subchain{{Head: 50, Tail: 5}},
  320. },
  321. // A single leftover subchain is present. A new head is announced that
  322. // links into the middle of it, but does not anchor into an existing
  323. // header. We expect the old subchain to be truncated and a new chain
  324. // be created for the dangling head.
  325. {
  326. headers: []*types.Header{block49B},
  327. oldstate: []*subchain{{Head: 100, Tail: 5}},
  328. head: block50,
  329. newstate: []*subchain{
  330. {Head: 50, Tail: 50},
  331. {Head: 49, Tail: 5},
  332. },
  333. },
  334. }
  335. for i, tt := range tests {
  336. // Create a fresh database and initialize it with the starting state
  337. db := rawdb.NewMemoryDatabase()
  338. rawdb.WriteHeader(db, genesis)
  339. for _, header := range tt.headers {
  340. rawdb.WriteSkeletonHeader(db, header)
  341. }
  342. if tt.oldstate != nil {
  343. blob, _ := json.Marshal(&skeletonProgress{Subchains: tt.oldstate})
  344. rawdb.WriteSkeletonSyncStatus(db, blob)
  345. }
  346. // Create a skeleton sync and run a cycle
  347. wait := make(chan struct{})
  348. skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
  349. skeleton.syncStarting = func() { close(wait) }
  350. skeleton.Sync(tt.head, true)
  351. <-wait
  352. skeleton.Terminate()
  353. // Ensure the correct resulting sync status
  354. var progress skeletonProgress
  355. json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
  356. if len(progress.Subchains) != len(tt.newstate) {
  357. t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
  358. continue
  359. }
  360. for j := 0; j < len(progress.Subchains); j++ {
  361. if progress.Subchains[j].Head != tt.newstate[j].Head {
  362. t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
  363. }
  364. if progress.Subchains[j].Tail != tt.newstate[j].Tail {
  365. t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
  366. }
  367. }
  368. }
  369. }
  370. // Tests that a running skeleton sync can be extended with properly linked up
  371. // headers but not with side chains.
  372. func TestSkeletonSyncExtend(t *testing.T) {
  373. // Create a few key headers
  374. var (
  375. genesis = &types.Header{Number: big.NewInt(0)}
  376. block49 = &types.Header{Number: big.NewInt(49)}
  377. block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
  378. block50 = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
  379. block51 = &types.Header{Number: big.NewInt(51), ParentHash: block50.Hash()}
  380. )
  381. tests := []struct {
  382. head *types.Header // New head header to announce to reorg to
  383. extend *types.Header // New head header to announce to extend with
  384. newstate []*subchain // Expected sync state after the reorg
  385. err error // Whether extension succeeds or not
  386. }{
  387. // Initialize a sync and try to extend it with a subsequent block.
  388. {
  389. head: block49,
  390. extend: block50,
  391. newstate: []*subchain{
  392. {Head: 50, Tail: 49},
  393. },
  394. },
  395. // Initialize a sync and try to extend it with the existing head block.
  396. {
  397. head: block49,
  398. extend: block49,
  399. newstate: []*subchain{
  400. {Head: 49, Tail: 49},
  401. },
  402. },
  403. // Initialize a sync and try to extend it with a sibling block.
  404. {
  405. head: block49,
  406. extend: block49B,
  407. newstate: []*subchain{
  408. {Head: 49, Tail: 49},
  409. },
  410. err: errReorgDenied,
  411. },
  412. // Initialize a sync and try to extend it with a number-wise sequential
  413. // header, but a hash wise non-linking one.
  414. {
  415. head: block49B,
  416. extend: block50,
  417. newstate: []*subchain{
  418. {Head: 49, Tail: 49},
  419. },
  420. err: errReorgDenied,
  421. },
  422. // Initialize a sync and try to extend it with a non-linking future block.
  423. {
  424. head: block49,
  425. extend: block51,
  426. newstate: []*subchain{
  427. {Head: 49, Tail: 49},
  428. },
  429. err: errReorgDenied,
  430. },
  431. // Initialize a sync and try to extend it with a past canonical block.
  432. {
  433. head: block50,
  434. extend: block49,
  435. newstate: []*subchain{
  436. {Head: 50, Tail: 50},
  437. },
  438. err: errReorgDenied,
  439. },
  440. // Initialize a sync and try to extend it with a past sidechain block.
  441. {
  442. head: block50,
  443. extend: block49B,
  444. newstate: []*subchain{
  445. {Head: 50, Tail: 50},
  446. },
  447. err: errReorgDenied,
  448. },
  449. }
  450. for i, tt := range tests {
  451. // Create a fresh database and initialize it with the starting state
  452. db := rawdb.NewMemoryDatabase()
  453. rawdb.WriteHeader(db, genesis)
  454. // Create a skeleton sync and run a cycle
  455. wait := make(chan struct{})
  456. skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
  457. skeleton.syncStarting = func() { close(wait) }
  458. skeleton.Sync(tt.head, true)
  459. <-wait
  460. if err := skeleton.Sync(tt.extend, false); err != tt.err {
  461. t.Errorf("test %d: extension failure mismatch: have %v, want %v", i, err, tt.err)
  462. }
  463. skeleton.Terminate()
  464. // Ensure the correct resulting sync status
  465. var progress skeletonProgress
  466. json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
  467. if len(progress.Subchains) != len(tt.newstate) {
  468. t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
  469. continue
  470. }
  471. for j := 0; j < len(progress.Subchains); j++ {
  472. if progress.Subchains[j].Head != tt.newstate[j].Head {
  473. t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
  474. }
  475. if progress.Subchains[j].Tail != tt.newstate[j].Tail {
  476. t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
  477. }
  478. }
  479. }
  480. }
  481. // Tests that the skeleton sync correctly retrieves headers from one or more
  482. // peers without duplicates or other strange side effects.
  483. func TestSkeletonSyncRetrievals(t *testing.T) {
  484. //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
  485. // Since skeleton headers don't need to be meaningful, beyond a parent hash
  486. // progression, create a long fake chain to test with.
  487. chain := []*types.Header{{Number: big.NewInt(0)}}
  488. for i := 1; i < 10000; i++ {
  489. chain = append(chain, &types.Header{
  490. ParentHash: chain[i-1].Hash(),
  491. Number: big.NewInt(int64(i)),
  492. })
  493. }
  494. tests := []struct {
  495. headers []*types.Header // Database content (beside the genesis)
  496. oldstate []*subchain // Old sync state with various interrupted subchains
  497. head *types.Header // New head header to announce to reorg to
  498. peers []*skeletonTestPeer // Initial peer set to start the sync with
  499. midstate []*subchain // Expected sync state after initial cycle
  500. midserve uint64 // Expected number of header retrievals after initial cycle
  501. middrop uint64 // Expected number of peers dropped after initial cycle
  502. newHead *types.Header // New header to anoint on top of the old one
  503. newPeer *skeletonTestPeer // New peer to join the skeleton syncer
  504. endstate []*subchain // Expected sync state after the post-init event
  505. endserve uint64 // Expected number of header retrievals after the post-init event
  506. enddrop uint64 // Expected number of peers dropped after the post-init event
  507. }{
  508. // Completely empty database with only the genesis set. The sync is expected
  509. // to create a single subchain with the requested head. No peers however, so
  510. // the sync should be stuck without any progression.
  511. //
  512. // When a new peer is added, it should detect the join and fill the headers
  513. // to the genesis block.
  514. {
  515. head: chain[len(chain)-1],
  516. midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}},
  517. newPeer: newSkeletonTestPeer("test-peer", chain),
  518. endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
  519. endserve: uint64(len(chain) - 2), // len - head - genesis
  520. },
  521. // Completely empty database with only the genesis set. The sync is expected
  522. // to create a single subchain with the requested head. With one valid peer,
  523. // the sync is expected to complete already in the initial round.
  524. //
  525. // Adding a second peer should not have any effect.
  526. {
  527. head: chain[len(chain)-1],
  528. peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)},
  529. midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
  530. midserve: uint64(len(chain) - 2), // len - head - genesis
  531. newPeer: newSkeletonTestPeer("test-peer-2", chain),
  532. endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
  533. endserve: uint64(len(chain) - 2), // len - head - genesis
  534. },
  535. // Completely empty database with only the genesis set. The sync is expected
  536. // to create a single subchain with the requested head. With many valid peers,
  537. // the sync is expected to complete already in the initial round.
  538. //
  539. // Adding a new peer should not have any effect.
  540. {
  541. head: chain[len(chain)-1],
  542. peers: []*skeletonTestPeer{
  543. newSkeletonTestPeer("test-peer-1", chain),
  544. newSkeletonTestPeer("test-peer-2", chain),
  545. newSkeletonTestPeer("test-peer-3", chain),
  546. },
  547. midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
  548. midserve: uint64(len(chain) - 2), // len - head - genesis
  549. newPeer: newSkeletonTestPeer("test-peer-4", chain),
  550. endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
  551. endserve: uint64(len(chain) - 2), // len - head - genesis
  552. },
  553. // This test checks if a peer tries to withhold a header - *on* the sync
  554. // boundary - instead of sending the requested amount. The malicious short
  555. // package should not be accepted.
  556. //
  557. // Joining with a new peer should however unblock the sync.
  558. {
  559. head: chain[requestHeaders+100],
  560. peers: []*skeletonTestPeer{
  561. newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)),
  562. },
  563. midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
  564. midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
  565. middrop: 1, // penalize shortened header deliveries
  566. newPeer: newSkeletonTestPeer("good-peer", chain),
  567. endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
  568. endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
  569. enddrop: 1, // no new drops
  570. },
  571. // This test checks if a peer tries to withhold a header - *off* the sync
  572. // boundary - instead of sending the requested amount. The malicious short
  573. // package should not be accepted.
  574. //
  575. // Joining with a new peer should however unblock the sync.
  576. {
  577. head: chain[requestHeaders+100],
  578. peers: []*skeletonTestPeer{
  579. newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)),
  580. },
  581. midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
  582. midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
  583. middrop: 1, // penalize shortened header deliveries
  584. newPeer: newSkeletonTestPeer("good-peer", chain),
  585. endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
  586. endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
  587. enddrop: 1, // no new drops
  588. },
  589. // This test checks if a peer tries to duplicate a header - *on* the sync
  590. // boundary - instead of sending the correct sequence. The malicious duped
  591. // package should not be accepted.
  592. //
  593. // Joining with a new peer should however unblock the sync.
  594. {
  595. head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
  596. peers: []*skeletonTestPeer{
  597. newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)),
  598. },
  599. midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
  600. midserve: requestHeaders + 101 - 2, // len - head - genesis
  601. middrop: 1, // penalize invalid header sequences
  602. newPeer: newSkeletonTestPeer("good-peer", chain),
  603. endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
  604. endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
  605. enddrop: 1, // no new drops
  606. },
  607. // This test checks if a peer tries to duplicate a header - *off* the sync
  608. // boundary - instead of sending the correct sequence. The malicious duped
  609. // package should not be accepted.
  610. //
  611. // Joining with a new peer should however unblock the sync.
  612. {
  613. head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
  614. peers: []*skeletonTestPeer{
  615. newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)),
  616. },
  617. midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
  618. midserve: requestHeaders + 101 - 2, // len - head - genesis
  619. middrop: 1, // penalize invalid header sequences
  620. newPeer: newSkeletonTestPeer("good-peer", chain),
  621. endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
  622. endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
  623. enddrop: 1, // no new drops
  624. },
  625. // This test checks if a peer tries to inject a different header - *on*
  626. // the sync boundary - instead of sending the correct sequence. The bad
  627. // package should not be accepted.
  628. //
  629. // Joining with a new peer should however unblock the sync.
  630. {
  631. head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
  632. peers: []*skeletonTestPeer{
  633. newSkeletonTestPeer("header-changer",
  634. append(
  635. append(
  636. append([]*types.Header{}, chain[:99]...),
  637. &types.Header{
  638. ParentHash: chain[98].Hash(),
  639. Number: big.NewInt(int64(99)),
  640. GasLimit: 1,
  641. },
  642. ), chain[100:]...,
  643. ),
  644. ),
  645. },
  646. midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
  647. midserve: requestHeaders + 101 - 2, // len - head - genesis
  648. middrop: 1, // different set of headers, drop // TODO(karalabe): maybe just diff sync?
  649. newPeer: newSkeletonTestPeer("good-peer", chain),
  650. endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
  651. endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
  652. enddrop: 1, // no new drops
  653. },
  654. // This test checks if a peer tries to inject a different header - *off*
  655. // the sync boundary - instead of sending the correct sequence. The bad
  656. // package should not be accepted.
  657. //
  658. // Joining with a new peer should however unblock the sync.
  659. {
  660. head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
  661. peers: []*skeletonTestPeer{
  662. newSkeletonTestPeer("header-changer",
  663. append(
  664. append(
  665. append([]*types.Header{}, chain[:50]...),
  666. &types.Header{
  667. ParentHash: chain[49].Hash(),
  668. Number: big.NewInt(int64(50)),
  669. GasLimit: 1,
  670. },
  671. ), chain[51:]...,
  672. ),
  673. ),
  674. },
  675. midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
  676. midserve: requestHeaders + 101 - 2, // len - head - genesis
  677. middrop: 1, // different set of headers, drop
  678. newPeer: newSkeletonTestPeer("good-peer", chain),
  679. endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
  680. endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
  681. enddrop: 1, // no new drops
  682. },
  683. // This test reproduces a bug caught during review (kudos to @holiman)
  684. // where a subchain is merged with a previously interrupted one, causing
  685. // pending data in the scratch space to become "invalid" (since we jump
  686. // ahead during subchain merge). In that case it is expected to ignore
  687. // the queued up data instead of trying to process on top of a shifted
  688. // task set.
  689. //
  690. // The test is a bit convoluted since it needs to trigger a concurrency
  691. // issue. First we sync up an initial chain of 2x512 items. Then announce
  692. // 2x512+2 as head and delay delivering the head batch to fill the scratch
  693. // space first. The delivery head should merge with the previous download
  694. // and the scratch space must not be consumed further.
  695. {
  696. head: chain[2*requestHeaders],
  697. peers: []*skeletonTestPeer{
  698. newSkeletonTestPeerWithHook("peer-1", chain, func(origin uint64) []*types.Header {
  699. if origin == chain[2*requestHeaders+1].Number.Uint64() {
  700. time.Sleep(100 * time.Millisecond)
  701. }
  702. return nil // Fallback to default behavior, just delayed
  703. }),
  704. newSkeletonTestPeerWithHook("peer-2", chain, func(origin uint64) []*types.Header {
  705. if origin == chain[2*requestHeaders+1].Number.Uint64() {
  706. time.Sleep(100 * time.Millisecond)
  707. }
  708. return nil // Fallback to default behavior, just delayed
  709. }),
  710. },
  711. midstate: []*subchain{{Head: 2 * requestHeaders, Tail: 1}},
  712. midserve: 2*requestHeaders - 1, // len - head - genesis
  713. newHead: chain[2*requestHeaders+2],
  714. endstate: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}},
  715. endserve: 4 * requestHeaders,
  716. },
  717. }
  718. for i, tt := range tests {
  719. // Create a fresh database and initialize it with the starting state
  720. db := rawdb.NewMemoryDatabase()
  721. rawdb.WriteHeader(db, chain[0])
  722. // Create a peer set to feed headers through
  723. peerset := newPeerSet()
  724. for _, peer := range tt.peers {
  725. peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id)))
  726. }
  727. // Create a peer dropper to track malicious peers
  728. dropped := make(map[string]int)
  729. drop := func(peer string) {
  730. if p := peerset.Peer(peer); p != nil {
  731. atomic.AddUint64(&p.peer.(*skeletonTestPeer).dropped, 1)
  732. }
  733. peerset.Unregister(peer)
  734. dropped[peer]++
  735. }
  736. // Create a skeleton sync and run a cycle
  737. skeleton := newSkeleton(db, peerset, drop, newHookedBackfiller())
  738. skeleton.Sync(tt.head, true)
  739. var progress skeletonProgress
  740. // Wait a bit (bleah) for the initial sync loop to go to idle. This might
  741. // be either a finish or a never-start hence why there's no event to hook.
  742. check := func() error {
  743. if len(progress.Subchains) != len(tt.midstate) {
  744. return fmt.Errorf("test %d, mid state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.midstate))
  745. }
  746. for j := 0; j < len(progress.Subchains); j++ {
  747. if progress.Subchains[j].Head != tt.midstate[j].Head {
  748. return fmt.Errorf("test %d, mid state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.midstate[j].Head)
  749. }
  750. if progress.Subchains[j].Tail != tt.midstate[j].Tail {
  751. return fmt.Errorf("test %d, mid state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.midstate[j].Tail)
  752. }
  753. }
  754. return nil
  755. }
  756. waitStart := time.Now()
  757. for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 {
  758. time.Sleep(waitTime)
  759. // Check the post-init end state if it matches the required results
  760. json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
  761. if err := check(); err == nil {
  762. break
  763. }
  764. }
  765. if err := check(); err != nil {
  766. t.Error(err)
  767. continue
  768. }
  769. var served uint64
  770. for _, peer := range tt.peers {
  771. served += atomic.LoadUint64(&peer.served)
  772. }
  773. if served != tt.midserve {
  774. t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve)
  775. }
  776. var drops uint64
  777. for _, peer := range tt.peers {
  778. drops += atomic.LoadUint64(&peer.dropped)
  779. }
  780. if drops != tt.middrop {
  781. t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
  782. }
  783. // Apply the post-init events if there's any
  784. if tt.newHead != nil {
  785. skeleton.Sync(tt.newHead, true)
  786. }
  787. if tt.newPeer != nil {
  788. if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
  789. t.Errorf("test %d: failed to register new peer: %v", i, err)
  790. }
  791. }
  792. // Wait a bit (bleah) for the second sync loop to go to idle. This might
  793. // be either a finish or a never-start hence why there's no event to hook.
  794. check = func() error {
  795. if len(progress.Subchains) != len(tt.endstate) {
  796. return fmt.Errorf("test %d, end state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.endstate))
  797. }
  798. for j := 0; j < len(progress.Subchains); j++ {
  799. if progress.Subchains[j].Head != tt.endstate[j].Head {
  800. return fmt.Errorf("test %d, end state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.endstate[j].Head)
  801. }
  802. if progress.Subchains[j].Tail != tt.endstate[j].Tail {
  803. return fmt.Errorf("test %d, end state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.endstate[j].Tail)
  804. }
  805. }
  806. return nil
  807. }
  808. waitStart = time.Now()
  809. for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 {
  810. time.Sleep(waitTime)
  811. // Check the post-init end state if it matches the required results
  812. json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
  813. if err := check(); err == nil {
  814. break
  815. }
  816. }
  817. if err := check(); err != nil {
  818. t.Error(err)
  819. continue
  820. }
  821. // Check that the peers served no more headers than we actually needed
  822. served = 0
  823. for _, peer := range tt.peers {
  824. served += atomic.LoadUint64(&peer.served)
  825. }
  826. if tt.newPeer != nil {
  827. served += atomic.LoadUint64(&tt.newPeer.served)
  828. }
  829. if served != tt.endserve {
  830. t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve)
  831. }
  832. drops = 0
  833. for _, peer := range tt.peers {
  834. drops += atomic.LoadUint64(&peer.dropped)
  835. }
  836. if tt.newPeer != nil {
  837. drops += atomic.LoadUint64(&tt.newPeer.dropped)
  838. }
  839. if drops != tt.middrop {
  840. t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
  841. }
  842. // Clean up any leftover skeleton sync resources
  843. skeleton.Terminate()
  844. }
  845. }