handler.go 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567
  1. // Copyright 2020 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package snap
  17. import (
  18. "bytes"
  19. "fmt"
  20. "time"
  21. "github.com/ethereum/go-ethereum/common"
  22. "github.com/ethereum/go-ethereum/core"
  23. "github.com/ethereum/go-ethereum/light"
  24. "github.com/ethereum/go-ethereum/log"
  25. "github.com/ethereum/go-ethereum/metrics"
  26. "github.com/ethereum/go-ethereum/p2p"
  27. "github.com/ethereum/go-ethereum/p2p/enode"
  28. "github.com/ethereum/go-ethereum/p2p/enr"
  29. "github.com/ethereum/go-ethereum/trie"
  30. )
const (
	// softResponseLimit is the target maximum size of replies to data retrievals.
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by how much a state response can exceed
	// the requested limit in order to try and avoid breaking up contracts into
	// multiple packages and proving them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024

	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
	// If we spend too much time, then it's a fairly high chance of timing out
	// at the remote side, which means all the work is in vain.
	maxTrieNodeTimeSpent = 5 * time.Second
)
// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed.
type Handler func(peer *Peer) error
// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `snap` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// is passed, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}
  69. // MakeProtocols constructs the P2P protocol definitions for `snap`.
  70. func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
  71. // Filter the discovery iterator for nodes advertising snap support.
  72. dnsdisc = enode.Filter(dnsdisc, func(n *enode.Node) bool {
  73. var snap enrEntry
  74. return n.Load(&snap) == nil
  75. })
  76. protocols := make([]p2p.Protocol, len(ProtocolVersions))
  77. for i, version := range ProtocolVersions {
  78. version := version // Closure
  79. protocols[i] = p2p.Protocol{
  80. Name: ProtocolName,
  81. Version: version,
  82. Length: protocolLengths[version],
  83. Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
  84. return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error {
  85. return Handle(backend, peer)
  86. })
  87. },
  88. NodeInfo: func() interface{} {
  89. return nodeInfo(backend.Chain())
  90. },
  91. PeerInfo: func(id enode.ID) interface{} {
  92. return backend.PeerInfo(id)
  93. },
  94. Attributes: []enr.Entry{&enrEntry{}},
  95. DialCandidates: dnsdisc,
  96. }
  97. }
  98. return protocols
  99. }
  100. // Handle is the callback invoked to manage the life cycle of a `snap` peer.
  101. // When this function terminates, the peer is disconnected.
  102. func Handle(backend Backend, peer *Peer) error {
  103. for {
  104. if err := HandleMessage(backend, peer); err != nil {
  105. peer.Log().Debug("Message handling failed in `snap`", "err", err)
  106. return err
  107. }
  108. }
  109. }
// HandleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error.
func HandleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()
	start := time.Now()
	// Track the amount of time it takes to serve the request and run the handler
	if metrics.Enabled {
		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
		defer func(start time.Time) {
			sampler := func() metrics.Sample {
				return metrics.ResettingSample(
					metrics.NewExpDecaySample(1028, 0.015),
				)
			}
			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
		}(start)
	}
	// Handle the message depending on its contents
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})

	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
		// Mark the in-flight request fulfilled and forward the reply to the backend
		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		// Mark the in-flight request fulfilled and forward the reply to the backend
		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors
		codes := ServiceGetByteCodesQuery(backend.Chain(), &req)

		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})

	case msg.Code == ByteCodesMsg:
		// A batch of byte codes arrived to one of our previous requests
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Mark the in-flight request fulfilled and forward the reply to the backend
		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Service the request, potentially returning nothing in case of errors.
		// The start time is passed down so serving can bail out before the
		// remote side times out (see maxTrieNodeTimeSpent).
		nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start)
		if err != nil {
			return err
		}
		// Send back anything accumulated (or empty in case of errors)
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})

	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Mark the in-flight request fulfilled and forward the reply to the backend
		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)

		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}
// ServiceGetAccountRangeQuery assembles the response to an account range query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) {
	// Cap the response size at the protocol's soft limit
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Retrieve the requested state and bail out if non existent
	tr, err := trie.New(common.Hash{}, req.Root, chain.StateCache().TrieDB())
	if err != nil {
		return nil, nil
	}
	it, err := chain.Snapshots().AccountIterator(req.Root, req.Origin)
	if err != nil {
		return nil, nil
	}
	// Iterate over the requested range and pile accounts up
	var (
		accounts []*AccountData
		size     uint64
		last     common.Hash
	)
	for it.Next() {
		hash, account := it.Hash(), common.CopyBytes(it.Account())

		// Track the returned interval for the Merkle proofs
		last = hash

		// Assemble the reply item
		size += uint64(common.HashLength + len(account))
		accounts = append(accounts, &AccountData{
			Hash: hash,
			Body: account,
		})
		// If we've exceeded the request threshold, abort. Note that the
		// account at the limit (and the one tipping size over req.Bytes) is
		// still included, so the responder can prove the boundary.
		if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
			break
		}
		if size > req.Bytes {
			break
		}
	}
	it.Release()

	// Generate the Merkle proofs for the first and last account. The proofs
	// let the requester verify that nothing was omitted at the range edges.
	proof := light.NewNodeSet()
	if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
		log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
		return nil, nil
	}
	if last != (common.Hash{}) {
		if err := tr.Prove(last[:], 0, proof); err != nil {
			log.Warn("Failed to prove account range", "last", last, "err", err)
			return nil, nil
		}
	}
	var proofs [][]byte
	for _, blob := range proof.NodeList() {
		proofs = append(proofs, blob)
	}
	return accounts, proofs
}
  303. func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) {
  304. if req.Bytes > softResponseLimit {
  305. req.Bytes = softResponseLimit
  306. }
  307. // TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
  308. // TODO(karalabe): - Logging locally is not ideal as remote faulst annoy the local user
  309. // TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)
  310. // Calculate the hard limit at which to abort, even if mid storage trie
  311. hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))
  312. // Retrieve storage ranges until the packet limit is reached
  313. var (
  314. slots [][]*StorageData
  315. proofs [][]byte
  316. size uint64
  317. )
  318. for _, account := range req.Accounts {
  319. // If we've exceeded the requested data limit, abort without opening
  320. // a new storage range (that we'd need to prove due to exceeded size)
  321. if size >= req.Bytes {
  322. break
  323. }
  324. // The first account might start from a different origin and end sooner
  325. var origin common.Hash
  326. if len(req.Origin) > 0 {
  327. origin, req.Origin = common.BytesToHash(req.Origin), nil
  328. }
  329. var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  330. if len(req.Limit) > 0 {
  331. limit, req.Limit = common.BytesToHash(req.Limit), nil
  332. }
  333. // Retrieve the requested state and bail out if non existent
  334. it, err := chain.Snapshots().StorageIterator(req.Root, account, origin)
  335. if err != nil {
  336. return nil, nil
  337. }
  338. // Iterate over the requested range and pile slots up
  339. var (
  340. storage []*StorageData
  341. last common.Hash
  342. abort bool
  343. )
  344. for it.Next() {
  345. if size >= hardLimit {
  346. abort = true
  347. break
  348. }
  349. hash, slot := it.Hash(), common.CopyBytes(it.Slot())
  350. // Track the returned interval for the Merkle proofs
  351. last = hash
  352. // Assemble the reply item
  353. size += uint64(common.HashLength + len(slot))
  354. storage = append(storage, &StorageData{
  355. Hash: hash,
  356. Body: slot,
  357. })
  358. // If we've exceeded the request threshold, abort
  359. if bytes.Compare(hash[:], limit[:]) >= 0 {
  360. break
  361. }
  362. }
  363. if len(storage) > 0 {
  364. slots = append(slots, storage)
  365. }
  366. it.Release()
  367. // Generate the Merkle proofs for the first and last storage slot, but
  368. // only if the response was capped. If the entire storage trie included
  369. // in the response, no need for any proofs.
  370. if origin != (common.Hash{}) || (abort && len(storage) > 0) {
  371. // Request started at a non-zero hash or was capped prematurely, add
  372. // the endpoint Merkle proofs
  373. accTrie, err := trie.NewStateTrie(common.Hash{}, req.Root, chain.StateCache().TrieDB())
  374. if err != nil {
  375. return nil, nil
  376. }
  377. acc, err := accTrie.TryGetAccountWithPreHashedKey(account[:])
  378. if err != nil || acc == nil {
  379. return nil, nil
  380. }
  381. stTrie, err := trie.NewStateTrie(account, acc.Root, chain.StateCache().TrieDB())
  382. if err != nil {
  383. return nil, nil
  384. }
  385. proof := light.NewNodeSet()
  386. if err := stTrie.Prove(origin[:], 0, proof); err != nil {
  387. log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
  388. return nil, nil
  389. }
  390. if last != (common.Hash{}) {
  391. if err := stTrie.Prove(last[:], 0, proof); err != nil {
  392. log.Warn("Failed to prove storage range", "last", last, "err", err)
  393. return nil, nil
  394. }
  395. }
  396. for _, blob := range proof.NodeList() {
  397. proofs = append(proofs, blob)
  398. }
  399. // Proof terminates the reply as proofs are only added if a node
  400. // refuses to serve more data (exception when a contract fetch is
  401. // finishing, but that's that).
  402. break
  403. }
  404. }
  405. return slots, proofs
  406. }
  407. // ServiceGetByteCodesQuery assembles the response to a byte codes query.
  408. // It is exposed to allow external packages to test protocol behavior.
  409. func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte {
  410. if req.Bytes > softResponseLimit {
  411. req.Bytes = softResponseLimit
  412. }
  413. if len(req.Hashes) > maxCodeLookups {
  414. req.Hashes = req.Hashes[:maxCodeLookups]
  415. }
  416. // Retrieve bytecodes until the packet size limit is reached
  417. var (
  418. codes [][]byte
  419. bytes uint64
  420. )
  421. for _, hash := range req.Hashes {
  422. if hash == emptyCode {
  423. // Peers should not request the empty code, but if they do, at
  424. // least sent them back a correct response without db lookups
  425. codes = append(codes, []byte{})
  426. } else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
  427. codes = append(codes, blob)
  428. bytes += uint64(len(blob))
  429. }
  430. if bytes > req.Bytes {
  431. break
  432. }
  433. }
  434. return codes
  435. }
// ServiceGetTrieNodesQuery assembles the response to a trie nodes query.
// It is exposed to allow external packages to test protocol behavior.
func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) {
	// Cap the response size at the protocol's soft limit
	if req.Bytes > softResponseLimit {
		req.Bytes = softResponseLimit
	}
	// Make sure we have the state associated with the request
	triedb := chain.StateCache().TrieDB()

	accTrie, err := trie.NewStateTrie(common.Hash{}, req.Root, triedb)
	if err != nil {
		// We don't have the requested state available, bail out
		return nil, nil
	}
	snap := chain.Snapshots().Snapshot(req.Root)
	if snap == nil {
		// We don't have the requested state snapshotted yet, bail out.
		// In reality we could still serve using the account and storage
		// tries only, but let's protect the node a bit while it's doing
		// snapshot generation.
		return nil, nil
	}
	// Retrieve trie nodes until the packet size limit is reached
	var (
		nodes [][]byte
		bytes uint64 // NOTE: shadows the `bytes` package within this function
		loads int    // Trie hash expansions to count database reads
	)
	for _, pathset := range req.Paths {
		switch len(pathset) {
		case 0:
			// Ensure we penalize invalid requests
			return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

		case 1:
			// If we're only retrieving an account trie node, fetch it directly
			blob, resolved, err := accTrie.TryGetNode(pathset[0])
			loads += resolved // always account database reads, even for failures
			if err != nil {
				break
			}
			nodes = append(nodes, blob)
			bytes += uint64(len(blob))

		default:
			// Storage slots requested, open the storage trie and retrieve from there
			account, err := snap.Account(common.BytesToHash(pathset[0]))
			loads++ // always account database reads, even for failures
			if err != nil || account == nil {
				break
			}
			stTrie, err := trie.NewStateTrie(common.BytesToHash(pathset[0]), common.BytesToHash(account.Root), triedb)
			loads++ // always account database reads, even for failures
			if err != nil {
				break
			}
			for _, path := range pathset[1:] {
				blob, resolved, err := stTrie.TryGetNode(path)
				loads += resolved // always account database reads, even for failures
				if err != nil {
					// NOTE: this break only exits the inner path loop; the
					// outer pathset loop continues with the next request item
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

				// Sanity check limits to avoid DoS on the store trie loads
				if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
					break
				}
			}
		}
		// Abort request processing if we've exceeded our limits
		if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
			break
		}
	}
	return nodes, nil
}
// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer.
type NodeInfo struct{}
  513. // nodeInfo retrieves some `snap` protocol metadata about the running host node.
  514. func nodeInfo(chain *core.BlockChain) *NodeInfo {
  515. return &NodeInfo{}
  516. }