// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

func init() {
	// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
}
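
// testAccount is the private key used to sign the transactions generated by these tests.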
var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")

// Tests that handshake failures are detected and reported correctly.
func TestStatusMsgErrors63(t *testing.T) {
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
	var (
		genesis = pm.blockchain.Genesis()
		head    = pm.blockchain.CurrentHeader()
		td      = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
	)
	defer pm.Stop()

	tests := []struct {
		code      uint64
		data      interface{}
		wantError error
	}{
		{
			code: TransactionMsg, data: []interface{}{},
			wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
		},
		{
			code: StatusMsg, data: statusData63{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash()},
			wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 63),
		},
		{
			code: StatusMsg, data: statusData63{63, 999, td, head.Hash(), genesis.Hash()},
			wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", DefaultConfig.NetworkId),
		},
		{
			code: StatusMsg, data: statusData63{63, DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}},
			wantError: errResp(ErrGenesisMismatch, "0300000000000000 (!= %x)", genesis.Hash().Bytes()[:8]),
		},
	}
	for i, test := range tests {
		p, errc := newTestPeer("peer", 63, pm, false)
		// The send call might hang until reset because
		// the protocol might not read the payload.
		go p2p.Send(p.app, test.code, test.data)

		select {
		case err := <-errc:
			if err == nil {
				t.Errorf("test %d: protocol returned nil error, want %q", i, test.wantError)
			} else if err.Error() != test.wantError.Error() {
				t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError)
			}
		case <-time.After(2 * time.Second):
			t.Errorf("protocol did not shut down within 2 seconds")
		}
		p.close()
	}
}
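
// Tests that eth/64 handshake failures, including fork ID rejections, are
// detected and reported correctly.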
func TestStatusMsgErrors64(t *testing.T) {
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
	var (
		genesis = pm.blockchain.Genesis()
		head    = pm.blockchain.CurrentHeader()
		td      = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
		forkID  = forkid.NewID(pm.blockchain)
	)
	defer pm.Stop()

	tests := []struct {
		code      uint64
		data      interface{}
		wantError error
	}{
		{
			code: TransactionMsg, data: []interface{}{},
			wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
		},
		{
			code: StatusMsg, data: statusData{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash(), forkID},
			wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 64),
		},
		{
			code: StatusMsg, data: statusData{64, 999, td, head.Hash(), genesis.Hash(), forkID},
			wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", DefaultConfig.NetworkId),
		},
		{
			code: StatusMsg, data: statusData{64, DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}, forkID},
			wantError: errResp(ErrGenesisMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis.Hash()),
		},
		{
			code: StatusMsg, data: statusData{64, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}},
			wantError: errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()),
		},
	}
	for i, test := range tests {
		p, errc := newTestPeer("peer", 64, pm, false)
		// The send call might hang until reset because
		// the protocol might not read the payload.
		go p2p.Send(p.app, test.code, test.data)

		select {
		case err := <-errc:
			if err == nil {
				t.Errorf("test %d: protocol returned nil error, want %q", i, test.wantError)
			} else if err.Error() != test.wantError.Error() {
				t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError)
			}
		case <-time.After(2 * time.Second):
			t.Errorf("protocol did not shut down within 2 seconds")
		}
		p.close()
	}
}
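
// Tests that peers with mismatching fork configurations keep accepting each
// other as long as their chains are still compatible, and reject each other
// once the chains actually diverge.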
func TestForkIDSplit(t *testing.T) {
	var (
		engine = ethash.NewFaker()

		configNoFork  = &params.ChainConfig{HomesteadBlock: big.NewInt(1)}
		configProFork = &params.ChainConfig{
			HomesteadBlock: big.NewInt(1),
			EIP150Block:    big.NewInt(2),
			EIP155Block:    big.NewInt(2),
			EIP158Block:    big.NewInt(2),
			ByzantiumBlock: big.NewInt(3),
		}
		dbNoFork  = rawdb.NewMemoryDatabase()
		dbProFork = rawdb.NewMemoryDatabase()

		gspecNoFork  = &core.Genesis{Config: configNoFork}
		gspecProFork = &core.Genesis{Config: configProFork}

		genesisNoFork  = gspecNoFork.MustCommit(dbNoFork)
		genesisProFork = gspecProFork.MustCommit(dbProFork)

		chainNoFork, _  = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil)
		chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil)

		blocksNoFork, _  = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
		blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)

		ethNoFork, _  = NewProtocolManager(configNoFork, nil, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainNoFork, dbNoFork, 1, nil, false)
		ethProFork, _ = NewProtocolManager(configProFork, nil, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainProFork, dbProFork, 1, nil, false)
	)
	ethNoFork.Start(1000)
	ethProFork.Start(1000)

	// Both nodes should allow the other to connect (same genesis, next fork is the same)
	p2pNoFork, p2pProFork := p2p.MsgPipe()
	peerNoFork := newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
	peerProFork := newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)

	errc := make(chan error, 2)
	go func() { errc <- ethNoFork.handle(peerProFork) }()
	go func() { errc <- ethProFork.handle(peerNoFork) }()

	select {
	case err := <-errc:
		t.Fatalf("frontier nofork <-> profork failed: %v", err)
	case <-time.After(250 * time.Millisecond):
		p2pNoFork.Close()
		p2pProFork.Close()
	}
	// Progress into Homestead. Forks match, so we don't care what the future holds
	chainNoFork.InsertChain(blocksNoFork[:1])
	chainProFork.InsertChain(blocksProFork[:1])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
	peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)

	errc = make(chan error, 2)
	go func() { errc <- ethNoFork.handle(peerProFork) }()
	go func() { errc <- ethProFork.handle(peerNoFork) }()

	select {
	case err := <-errc:
		t.Fatalf("homestead nofork <-> profork failed: %v", err)
	case <-time.After(250 * time.Millisecond):
		p2pNoFork.Close()
		p2pProFork.Close()
	}
	// Progress into Spurious. Forks mismatch, signalling differing chains, reject
	chainNoFork.InsertChain(blocksNoFork[1:2])
	chainProFork.InsertChain(blocksProFork[1:2])

	p2pNoFork, p2pProFork = p2p.MsgPipe()
	peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
	peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)

	errc = make(chan error, 2)
	go func() { errc <- ethNoFork.handle(peerProFork) }()
	go func() { errc <- ethProFork.handle(peerNoFork) }()

	select {
	case err := <-errc:
		if want := errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()); err.Error() != want.Error() {
			t.Fatalf("fork ID rejection error mismatch: have %v, want %v", err, want)
		}
	case <-time.After(250 * time.Millisecond):
		t.Fatalf("split peers not rejected")
	}
}

// This test checks that received transactions are added to the local pool.
func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }
func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, 65) }

func testRecvTransactions(t *testing.T, protocol int) {
	txAdded := make(chan []*types.Transaction)
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, txAdded)
	pm.acceptTxs = 1 // mark synced to accept transactions
	p, _ := newTestPeer("peer", protocol, pm, true)
	defer pm.Stop()
	defer p.close()

	tx := newTestTransaction(testAccount, 0, 0)
	if err := p2p.Send(p.app, TransactionMsg, []interface{}{tx}); err != nil {
		t.Fatalf("send error: %v", err)
	}
	select {
	case added := <-txAdded:
		if len(added) != 1 {
			t.Errorf("wrong number of added transactions: got %d, want 1", len(added))
		} else if added[0].Hash() != tx.Hash() {
			t.Errorf("added wrong tx hash: got %v, want %v", added[0].Hash(), tx.Hash())
		}
	case <-time.After(2 * time.Second):
		t.Errorf("no NewTxsEvent received within 2 seconds")
	}
}

// This test checks that pending transactions are sent.
func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }
func TestSendTransactions65(t *testing.T) { testSendTransactions(t, 65) }

func testSendTransactions(t *testing.T, protocol int) {
	pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
	defer pm.Stop()

	// Fill the pool with big transactions (use a subscription to wait until all
	// the transactions are announced to avoid spurious events causing extra
	// broadcasts).
	const txsize = txsyncPackSize / 10
	alltxs := make([]*types.Transaction, 100)
	for nonce := range alltxs {
		alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), txsize)
	}
	pm.txpool.AddRemotes(alltxs)
	time.Sleep(100 * time.Millisecond) // Wait until the new tx event gets out of the system (lame)

	// Connect several peers. They should all receive the pending transactions.
	var wg sync.WaitGroup
	checktxs := func(p *testPeer) {
		defer wg.Done()
		defer p.close()
		seen := make(map[common.Hash]bool)
		for _, tx := range alltxs {
			seen[tx.Hash()] = false
		}
		for n := 0; n < len(alltxs) && !t.Failed(); {
			var forAllHashes func(callback func(hash common.Hash))
			switch protocol {
			case 63:
				fallthrough
			case 64:
				msg, err := p.app.ReadMsg()
				if err != nil {
					t.Errorf("%v: read error: %v", p.Peer, err)
					continue
				} else if msg.Code != TransactionMsg {
					t.Errorf("%v: got code %d, want TxMsg", p.Peer, msg.Code)
					continue
				}
				var txs []*types.Transaction
				if err := msg.Decode(&txs); err != nil {
					t.Errorf("%v: %v", p.Peer, err)
					continue
				}
				forAllHashes = func(callback func(hash common.Hash)) {
					for _, tx := range txs {
						callback(tx.Hash())
					}
				}
			case 65:
				msg, err := p.app.ReadMsg()
				if err != nil {
					t.Errorf("%v: read error: %v", p.Peer, err)
					continue
				} else if msg.Code != NewPooledTransactionHashesMsg {
					t.Errorf("%v: got code %d, want NewPooledTransactionHashesMsg", p.Peer, msg.Code)
					continue
				}
				var hashes []common.Hash
				if err := msg.Decode(&hashes); err != nil {
					t.Errorf("%v: %v", p.Peer, err)
					continue
				}
				forAllHashes = func(callback func(hash common.Hash)) {
					for _, h := range hashes {
						callback(h)
					}
				}
			}
			forAllHashes(func(hash common.Hash) {
				seentx, want := seen[hash]
				if seentx {
					t.Errorf("%v: got tx more than once: %x", p.Peer, hash)
				}
				if !want {
					t.Errorf("%v: got unexpected tx: %x", p.Peer, hash)
				}
				seen[hash] = true
				n++
			})
		}
	}
	for i := 0; i < 3; i++ {
		p, _ := newTestPeer(fmt.Sprintf("peer #%d", i), protocol, pm, true)
		wg.Add(1)
		go checktxs(p)
	}
	wg.Wait()
}
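
// These tests check that transactions reach a freshly synced peer, either by
// full propagation or (for eth/65) by hash announcements that the peer then fetches.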
func TestTransactionPropagation(t *testing.T)  { testSyncTransaction(t, true) }
func TestTransactionAnnouncement(t *testing.T) { testSyncTransaction(t, false) }

func testSyncTransaction(t *testing.T, propagation bool) {
	// Create protocol managers for the transaction fetcher and the sender
	pmFetcher, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil)
	defer pmFetcher.Stop()
	pmSender, _ := newTestProtocolManagerMust(t, downloader.FastSync, 1024, nil, nil)
	pmSender.broadcastTxAnnouncesOnly = !propagation
	defer pmSender.Stop()

	// Sync up the two peers
	io1, io2 := p2p.MsgPipe()

	go pmSender.handle(pmSender.newPeer(65, p2p.NewPeer(enode.ID{}, "sender", nil), io2, pmSender.txpool.Get))
	go pmFetcher.handle(pmFetcher.newPeer(65, p2p.NewPeer(enode.ID{}, "fetcher", nil), io1, pmFetcher.txpool.Get))

	time.Sleep(250 * time.Millisecond)
	pmFetcher.doSync(peerToSyncOp(downloader.FullSync, pmFetcher.peers.BestPeer()))
	atomic.StoreUint32(&pmFetcher.acceptTxs, 1)

	newTxs := make(chan core.NewTxsEvent, 1024)
	sub := pmFetcher.txpool.SubscribeNewTxsEvent(newTxs)
	defer sub.Unsubscribe()

	// Fill the pool with new transactions
	alltxs := make([]*types.Transaction, 1024)
	for nonce := range alltxs {
		alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), 0)
	}
	pmSender.txpool.AddRemotes(alltxs)

	var got int
loop:
	for {
		select {
		case ev := <-newTxs:
			got += len(ev.Txs)
			if got == 1024 {
				break loop
			}
		case <-time.NewTimer(time.Second).C:
			t.Fatal("Failed to retrieve all transactions")
		}
	}
}

// Tests that the custom union field encoder and decoder work correctly.
func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
	// Create a "random" hash for testing
	var hash common.Hash
	for i := range hash {
		hash[i] = byte(i)
	}
	// Assemble some table driven tests
	tests := []struct {
		packet *getBlockHeadersData
		fail   bool
	}{
		// Providing the origin as either a hash or a number should both work
		{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}}},
		{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}}},

		// Providing arbitrary query field should also work
		{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},
		{fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},

		// Providing both the origin hash and origin number must fail
		{fail: true, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash, Number: 314}}},
	}
	// Iterate over each of the tests and try to encode and then decode
	for i, tt := range tests {
		bytes, err := rlp.EncodeToBytes(tt.packet)
		if err != nil && !tt.fail {
			t.Fatalf("test %d: failed to encode packet: %v", i, err)
		} else if err == nil && tt.fail {
			t.Fatalf("test %d: encode should have failed", i)
		}
		if !tt.fail {
			packet := new(getBlockHeadersData)
			if err := rlp.DecodeBytes(bytes, packet); err != nil {
				t.Fatalf("test %d: failed to decode packet: %v", i, err)
			}
			if packet.Origin.Hash != tt.packet.Origin.Hash || packet.Origin.Number != tt.packet.Origin.Number || packet.Amount != tt.packet.Amount ||
				packet.Skip != tt.packet.Skip || packet.Reverse != tt.packet.Reverse {
				t.Fatalf("test %d: encode decode mismatch: have %+v, want %+v", i, packet, tt.packet)
			}
		}
	}
}