// handler_test.go
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les

import (
	"encoding/binary"
	"math/big"
	"math/rand"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)
  37. func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error {
  38. type resp struct {
  39. ReqID, BV uint64
  40. Data interface{}
  41. }
  42. return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
  43. }
// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }
func TestGetBlockHeadersLes3(t *testing.T) { testGetBlockHeaders(t, 3) }
func TestGetBlockHeadersLes4(t *testing.T) { testGetBlockHeaders(t, 4) }

// testGetBlockHeaders drives a LES server with a battery of GetBlockHeaders
// queries (by hash, by number, forward/reverse, with skips, past both chain
// ends) and checks each response against the canonical chain.
func testGetBlockHeaders(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxHeaderFetch+15, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	// Create a "random" unknown hash for testing
	var unknown common.Hash
	for i := range unknown {
		unknown[i] = byte(i)
	}
	// Create a batch of tests for various scenarios
	limit := uint64(MaxHeaderFetch)
	tests := []struct {
		query  *GetBlockHeadersData // The query to execute for header retrieval
		expect []common.Hash        // The hashes of the block whose headers are expected
	}{
		// A single random block should be retrievable by hash and number too
		{
			&GetBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		}, {
			&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
		},
		// Multiple headers should be retrievable in both directions
		{
			&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 1).Hash(),
				bc.GetBlockByNumber(limit/2 + 2).Hash(),
			},
		}, {
			&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 1).Hash(),
				bc.GetBlockByNumber(limit/2 - 2).Hash(),
			},
		},
		// Multiple headers with skip lists should be retrievable
		{
			&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 + 4).Hash(),
				bc.GetBlockByNumber(limit/2 + 8).Hash(),
			},
		}, {
			&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(limit / 2).Hash(),
				bc.GetBlockByNumber(limit/2 - 4).Hash(),
				bc.GetBlockByNumber(limit/2 - 8).Hash(),
			},
		},
		// The chain endpoints should be retrievable
		{
			&GetBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
			[]common.Hash{bc.GetBlockByNumber(0).Hash()},
		}, {
			&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, Amount: 1},
			[]common.Hash{bc.CurrentBlock().Hash()},
		},
		// Ensure protocol limits are honored
		//{
		//	&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},
		//	[]common.Hash{},
		//},
		// Check that requesting more than available is handled gracefully
		{
			&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64()).Hash(),
			},
		}, {
			&GetBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(0).Hash(),
			},
		},
		// Check that requesting more than available is handled gracefully, even if mid skip
		{
			&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},
			[]common.Hash{
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),
				bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1).Hash(),
			},
		}, {
			&GetBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
			[]common.Hash{
				bc.GetBlockByNumber(4).Hash(),
				bc.GetBlockByNumber(1).Hash(),
			},
		},
		// Check that non existing headers aren't returned
		{
			&GetBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
			[]common.Hash{},
		}, {
			&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1},
			[]common.Hash{},
		},
	}
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the headers to expect in the response
		var headers []*types.Header
		for _, hash := range tt.expect {
			headers = append(headers, bc.GetHeaderByHash(hash))
		}
		// Send the hash request and verify the response
		reqID++
		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, tt.query)
		if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
			t.Errorf("test %d: headers mismatch: %v", i, err)
		}
	}
}
// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }
func TestGetBlockBodiesLes3(t *testing.T) { testGetBlockBodies(t, 3) }
func TestGetBlockBodiesLes4(t *testing.T) { testGetBlockBodies(t, 4) }

// testGetBlockBodies requests block bodies both for randomly chosen blocks
// and for explicitly listed hashes (some of which do not exist) and verifies
// the returned bodies against the chain.
func testGetBlockBodies(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, downloader.MaxBlockFetch+15, protocol, nil, false, true, 0)
	defer tearDown()
	bc := server.handler.blockchain

	// Create a batch of tests for various scenarios
	limit := MaxBodyFetch
	tests := []struct {
		random    int           // Number of blocks to fetch randomly from the chain
		explicit  []common.Hash // Explicitly requested blocks
		available []bool        // Availability of explicitly requested blocks
		expected  int           // Total number of existing blocks to expect
	}{
		{1, nil, nil, 1},         // A single random block should be retrievable
		{10, nil, nil, 10},       // Multiple random blocks should be retrievable
		{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
		//{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
		{0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable
		{0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable
		{0, []common.Hash{{}}, []bool{false}, 0},                      // A non existent block should not be returned
		// Existing and non-existing blocks interleaved should not cause problems
		{0, []common.Hash{
			{},
			bc.GetBlockByNumber(1).Hash(),
			{},
			bc.GetBlockByNumber(10).Hash(),
			{},
			bc.GetBlockByNumber(100).Hash(),
			{},
		}, []bool{false, true, false, true, false, true, false}, 3},
	}
	// Run each of the tests and verify the results against the chain
	var reqID uint64
	for i, tt := range tests {
		// Collect the hashes to request, and the response to expect
		var hashes []common.Hash
		seen := make(map[int64]bool)
		var bodies []*types.Body

		for j := 0; j < tt.random; j++ {
			for {
				// Rejection-sample a block number not picked before
				num := rand.Int63n(int64(bc.CurrentBlock().NumberU64()))
				if !seen[num] {
					seen[num] = true
					block := bc.GetBlockByNumber(uint64(num))
					hashes = append(hashes, block.Hash())
					// Only expect bodies up to the per-test cap
					if len(bodies) < tt.expected {
						bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
					}
					break
				}
			}
		}
		for j, hash := range tt.explicit {
			hashes = append(hashes, hash)
			// Unavailable hashes produce no body in the expected response
			if tt.available[j] && len(bodies) < tt.expected {
				block := bc.GetBlockByHash(hash)
				bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
			}
		}
		reqID++
		// Send the hash request and verify the response
		sendRequest(server.peer.app, GetBlockBodiesMsg, reqID, hashes)
		if err := expectResponse(server.peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
			t.Errorf("test %d: bodies mismatch: %v", i, err)
		}
	}
}
  239. // Tests that the contract codes can be retrieved based on account addresses.
  240. func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }
  241. func TestGetCodeLes3(t *testing.T) { testGetCode(t, 3) }
  242. func TestGetCodeLes4(t *testing.T) { testGetCode(t, 4) }
  243. func testGetCode(t *testing.T, protocol int) {
  244. // Assemble the test environment
  245. server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
  246. defer tearDown()
  247. bc := server.handler.blockchain
  248. var codereqs []*CodeReq
  249. var codes [][]byte
  250. for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
  251. header := bc.GetHeaderByNumber(i)
  252. req := &CodeReq{
  253. BHash: header.Hash(),
  254. AccKey: crypto.Keccak256(testContractAddr[:]),
  255. }
  256. codereqs = append(codereqs, req)
  257. if i >= testContractDeployed {
  258. codes = append(codes, testContractCodeDeployed)
  259. }
  260. }
  261. sendRequest(server.peer.app, GetCodeMsg, 42, codereqs)
  262. if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
  263. t.Errorf("codes mismatch: %v", err)
  264. }
  265. }
  266. // Tests that the stale contract codes can't be retrieved based on account addresses.
  267. func TestGetStaleCodeLes2(t *testing.T) { testGetStaleCode(t, 2) }
  268. func TestGetStaleCodeLes3(t *testing.T) { testGetStaleCode(t, 3) }
  269. func TestGetStaleCodeLes4(t *testing.T) { testGetStaleCode(t, 4) }
  270. func testGetStaleCode(t *testing.T, protocol int) {
  271. server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
  272. defer tearDown()
  273. bc := server.handler.blockchain
  274. check := func(number uint64, expected [][]byte) {
  275. req := &CodeReq{
  276. BHash: bc.GetHeaderByNumber(number).Hash(),
  277. AccKey: crypto.Keccak256(testContractAddr[:]),
  278. }
  279. sendRequest(server.peer.app, GetCodeMsg, 42, []*CodeReq{req})
  280. if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, expected); err != nil {
  281. t.Errorf("codes mismatch: %v", err)
  282. }
  283. }
  284. check(0, [][]byte{}) // Non-exist contract
  285. check(testContractDeployed, [][]byte{}) // Stale contract
  286. check(bc.CurrentHeader().Number.Uint64(), [][]byte{testContractCodeDeployed}) // Fresh contract
  287. }
  288. // Tests that the transaction receipts can be retrieved based on hashes.
  289. func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }
  290. func TestGetReceiptLes3(t *testing.T) { testGetReceipt(t, 3) }
  291. func TestGetReceiptLes4(t *testing.T) { testGetReceipt(t, 4) }
  292. func testGetReceipt(t *testing.T, protocol int) {
  293. // Assemble the test environment
  294. server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
  295. defer tearDown()
  296. bc := server.handler.blockchain
  297. // Collect the hashes to request, and the response to expect
  298. var receipts []types.Receipts
  299. var hashes []common.Hash
  300. for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
  301. block := bc.GetBlockByNumber(i)
  302. hashes = append(hashes, block.Hash())
  303. receipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64()))
  304. }
  305. // Send the hash request and verify the response
  306. sendRequest(server.peer.app, GetReceiptsMsg, 42, hashes)
  307. if err := expectResponse(server.peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
  308. t.Errorf("receipts mismatch: %v", err)
  309. }
  310. }
  311. // Tests that trie merkle proofs can be retrieved
  312. func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }
  313. func TestGetProofsLes3(t *testing.T) { testGetProofs(t, 3) }
  314. func TestGetProofsLes4(t *testing.T) { testGetProofs(t, 4) }
  315. func testGetProofs(t *testing.T, protocol int) {
  316. // Assemble the test environment
  317. server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0)
  318. defer tearDown()
  319. bc := server.handler.blockchain
  320. var proofreqs []ProofReq
  321. proofsV2 := light.NewNodeSet()
  322. accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}
  323. for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
  324. header := bc.GetHeaderByNumber(i)
  325. trie, _ := trie.New(header.Root, trie.NewDatabase(server.db))
  326. for _, acc := range accounts {
  327. req := ProofReq{
  328. BHash: header.Hash(),
  329. Key: crypto.Keccak256(acc[:]),
  330. }
  331. proofreqs = append(proofreqs, req)
  332. trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
  333. }
  334. }
  335. // Send the proof request and verify the response
  336. sendRequest(server.peer.app, GetProofsV2Msg, 42, proofreqs)
  337. if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
  338. t.Errorf("proofs mismatch: %v", err)
  339. }
  340. }
  341. // Tests that the stale contract codes can't be retrieved based on account addresses.
  342. func TestGetStaleProofLes2(t *testing.T) { testGetStaleProof(t, 2) }
  343. func TestGetStaleProofLes3(t *testing.T) { testGetStaleProof(t, 3) }
  344. func TestGetStaleProofLes4(t *testing.T) { testGetStaleProof(t, 4) }
  345. func testGetStaleProof(t *testing.T, protocol int) {
  346. server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0)
  347. defer tearDown()
  348. bc := server.handler.blockchain
  349. check := func(number uint64, wantOK bool) {
  350. var (
  351. header = bc.GetHeaderByNumber(number)
  352. account = crypto.Keccak256(userAddr1.Bytes())
  353. )
  354. req := &ProofReq{
  355. BHash: header.Hash(),
  356. Key: account,
  357. }
  358. sendRequest(server.peer.app, GetProofsV2Msg, 42, []*ProofReq{req})
  359. var expected []rlp.RawValue
  360. if wantOK {
  361. proofsV2 := light.NewNodeSet()
  362. t, _ := trie.New(header.Root, trie.NewDatabase(server.db))
  363. t.Prove(account, 0, proofsV2)
  364. expected = proofsV2.NodeList()
  365. }
  366. if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
  367. t.Errorf("codes mismatch: %v", err)
  368. }
  369. }
  370. check(0, false) // Non-exist proof
  371. check(2, false) // Stale proof
  372. check(bc.CurrentHeader().Number.Uint64(), true) // Fresh proof
  373. }
  374. // Tests that CHT proofs can be correctly retrieved.
  375. func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
  376. func TestGetCHTProofsLes3(t *testing.T) { testGetCHTProofs(t, 3) }
  377. func TestGetCHTProofsLes4(t *testing.T) { testGetCHTProofs(t, 4) }
  378. func testGetCHTProofs(t *testing.T, protocol int) {
  379. config := light.TestServerIndexerConfig
  380. waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
  381. for {
  382. cs, _, _ := cIndexer.Sections()
  383. if cs >= 1 {
  384. break
  385. }
  386. time.Sleep(10 * time.Millisecond)
  387. }
  388. }
  389. server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers, false, true, 0)
  390. defer tearDown()
  391. bc := server.handler.blockchain
  392. // Assemble the proofs from the different protocols
  393. header := bc.GetHeaderByNumber(config.ChtSize - 1)
  394. rlp, _ := rlp.EncodeToBytes(header)
  395. key := make([]byte, 8)
  396. binary.BigEndian.PutUint64(key, config.ChtSize-1)
  397. proofsV2 := HelperTrieResps{
  398. AuxData: [][]byte{rlp},
  399. }
  400. root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
  401. trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
  402. trie.Prove(key, 0, &proofsV2.Proofs)
  403. // Assemble the requests for the different protocols
  404. requestsV2 := []HelperTrieReq{{
  405. Type: htCanonical,
  406. TrieIdx: 0,
  407. Key: key,
  408. AuxReq: htAuxHeader,
  409. }}
  410. // Send the proof request and verify the response
  411. sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requestsV2)
  412. if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
  413. t.Errorf("proofs mismatch: %v", err)
  414. }
  415. }
  416. func TestGetBloombitsProofsLes2(t *testing.T) { testGetBloombitsProofs(t, 2) }
  417. func TestGetBloombitsProofsLes3(t *testing.T) { testGetBloombitsProofs(t, 3) }
  418. func TestGetBloombitsProofsLes4(t *testing.T) { testGetBloombitsProofs(t, 4) }
  419. // Tests that bloombits proofs can be correctly retrieved.
  420. func testGetBloombitsProofs(t *testing.T, protocol int) {
  421. config := light.TestServerIndexerConfig
  422. waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
  423. for {
  424. bts, _, _ := btIndexer.Sections()
  425. if bts >= 1 {
  426. break
  427. }
  428. time.Sleep(10 * time.Millisecond)
  429. }
  430. }
  431. server, tearDown := newServerEnv(t, int(config.BloomTrieSize+config.BloomTrieConfirms), protocol, waitIndexers, false, true, 0)
  432. defer tearDown()
  433. bc := server.handler.blockchain
  434. // Request and verify each bit of the bloom bits proofs
  435. for bit := 0; bit < 2048; bit++ {
  436. // Assemble the request and proofs for the bloombits
  437. key := make([]byte, 10)
  438. binary.BigEndian.PutUint16(key[:2], uint16(bit))
  439. // Only the first bloom section has data.
  440. binary.BigEndian.PutUint64(key[2:], 0)
  441. requests := []HelperTrieReq{{
  442. Type: htBloomBits,
  443. TrieIdx: 0,
  444. Key: key,
  445. }}
  446. var proofs HelperTrieResps
  447. root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
  448. trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
  449. trie.Prove(key, 0, &proofs.Proofs)
  450. // Send the proof request and verify the response
  451. sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requests)
  452. if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
  453. t.Errorf("bit %d: proofs mismatch: %v", bit, err)
  454. }
  455. }
  456. }
func TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, 2) }
func TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, 3) }
func TestTransactionStatusLes4(t *testing.T) { testTransactionStatus(t, 4) }

// testTransactionStatus exercises the SendTxV2/GetTxStatus messages: it sends
// transactions to the server, queries their status, and checks the status
// transitions (unknown -> queued/pending -> included -> pending again after a
// reorg) are reported correctly.
func testTransactionStatus(t *testing.T, protocol int) {
	server, tearDown := newServerEnv(t, 0, protocol, nil, false, true, 0)
	defer tearDown()
	server.handler.addTxsSync = true

	chain := server.handler.blockchain

	var reqID uint64

	// test either sends the transaction (send=true) or queries its status
	// (send=false), then expects a TxStatus reply matching expStatus.
	test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {
		reqID++
		if send {
			sendRequest(server.peer.app, SendTxV2Msg, reqID, types.Transactions{tx})
		} else {
			sendRequest(server.peer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()})
		}
		if err := expectResponse(server.peer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
			t.Errorf("transaction status mismatch")
		}
	}
	signer := types.HomesteadSigner{}

	// test error status by sending an underpriced transaction
	tx0, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey)
	test(tx0, true, light.TxStatus{Status: core.TxStatusUnknown, Error: core.ErrUnderpriced.Error()})

	tx1, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	test(tx1, false, light.TxStatus{Status: core.TxStatusUnknown}) // query before sending, should be unknown
	test(tx1, true, light.TxStatus{Status: core.TxStatusPending}) // send valid processable tx, should return pending
	test(tx1, true, light.TxStatus{Status: core.TxStatusPending}) // adding it again should not return an error

	tx2, _ := types.SignTx(types.NewTransaction(1, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	tx3, _ := types.SignTx(types.NewTransaction(2, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
	// send transactions in the wrong order, tx3 should be queued
	test(tx3, true, light.TxStatus{Status: core.TxStatusQueued})
	test(tx2, true, light.TxStatus{Status: core.TxStatusPending})
	// query again, now tx3 should be pending too
	test(tx3, false, light.TxStatus{Status: core.TxStatusPending})

	// generate and add a block with tx1 and tx2 included
	gchain, _ := core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 1, func(i int, block *core.BlockGen) {
		block.AddTx(tx1)
		block.AddTx(tx2)
	})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
	// wait until TxPool processes the inserted block
	for i := 0; i < 10; i++ {
		if pending, _ := server.handler.txpool.Stats(); pending == 1 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if pending, _ := server.handler.txpool.Stats(); pending != 1 {
		t.Fatalf("pending count mismatch: have %d, want 1", pending)
	}
	// Discard new block announcement
	msg, _ := server.peer.app.ReadMsg()
	msg.Discard()

	// check if their status is included now
	block1hash := rawdb.ReadCanonicalHash(server.db, 1)
	test(tx1, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})
	test(tx2, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})

	// create a reorg that rolls them back
	gchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 2, func(i int, block *core.BlockGen) {})
	if _, err := chain.InsertChain(gchain); err != nil {
		panic(err)
	}
	// wait until TxPool processes the reorg
	for i := 0; i < 10; i++ {
		if pending, _ := server.handler.txpool.Stats(); pending == 3 {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if pending, _ := server.handler.txpool.Stats(); pending != 3 {
		t.Fatalf("pending count mismatch: have %d, want 3", pending)
	}
	// Discard new block announcement
	msg, _ = server.peer.app.ReadMsg()
	msg.Discard()

	// check if their status is pending again
	test(tx1, false, light.TxStatus{Status: core.TxStatusPending})
	test(tx2, false, light.TxStatus{Status: core.TxStatusPending})
}
// TestStopResumeLes3 checks the les/3 flow control behaviour: the server must
// keep replying while the client has request buffer left, emit a single
// StopMsg once the buffer is exhausted (even for several excess requests),
// and announce recharged capacity with a ResumeMsg.
func TestStopResumeLes3(t *testing.T) {
	server, tearDown := newServerEnv(t, 0, 3, nil, true, true, testBufLimit/10)
	defer tearDown()

	server.handler.server.costTracker.testing = true

	var (
		reqID    uint64
		expBuf   = testBufLimit
		testCost = testBufLimit / 10
	)
	header := server.handler.blockchain.CurrentHeader()
	// req fires a minimal single-header request, each costing testCost.
	req := func() {
		reqID++
		sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, &GetBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})
	}
	for i := 1; i <= 5; i++ {
		// send requests while we still have enough buffer and expect a response
		for expBuf >= testCost {
			req()
			expBuf -= testCost
			if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil {
				t.Errorf("expected response and failed: %v", err)
			}
		}
		// send some more requests in excess and expect a single StopMsg
		c := i
		for c > 0 {
			req()
			c--
		}
		if err := p2p.ExpectMsg(server.peer.app, StopMsg, nil); err != nil {
			t.Errorf("expected StopMsg and failed: %v", err)
		}
		// wait until the buffer is recharged by half of the limit
		wait := testBufLimit / testBufRecharge / 2
		server.clock.(*mclock.Simulated).Run(time.Millisecond * time.Duration(wait))

		// expect a ResumeMsg with the partially recharged buffer value
		expBuf += testBufRecharge * wait
		if err := p2p.ExpectMsg(server.peer.app, ResumeMsg, expBuf); err != nil {
			t.Errorf("expected ResumeMsg and failed: %v", err)
		}
	}
}