handler.go 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522
  1. // Copyright 2020 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package snap
  17. import (
  18. "bytes"
  19. "fmt"
  20. "time"
  21. "github.com/ethereum/go-ethereum/common"
  22. "github.com/ethereum/go-ethereum/core"
  23. "github.com/ethereum/go-ethereum/core/state"
  24. "github.com/ethereum/go-ethereum/light"
  25. "github.com/ethereum/go-ethereum/log"
  26. "github.com/ethereum/go-ethereum/metrics"
  27. "github.com/ethereum/go-ethereum/p2p"
  28. "github.com/ethereum/go-ethereum/p2p/enode"
  29. "github.com/ethereum/go-ethereum/p2p/enr"
  30. "github.com/ethereum/go-ethereum/rlp"
  31. "github.com/ethereum/go-ethereum/trie"
  32. )
// Protocol-level serving limits. These bound the amount of work a single
// inbound request may trigger, protecting the serving node from abusive or
// buggy peers.
const (
	// softResponseLimit is the target maximum size of replies to data retrievals.
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by how much a state response can exceed
	// the requested limit in order to try and avoid breaking up contracts into
	// multiple packages and proving them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024

	// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
	// If we spend too much time, then it's a fairly high chance of timing out
	// at the remote side, which means all the work is in vain.
	maxTrieNodeTimeSpent = 5 * time.Second
)
// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed. It is expected to block for the lifetime of the peer,
// returning only when the connection should be torn down.
type Handler func(peer *Peer) error
// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `eth` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// is passed, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}
  71. // MakeProtocols constructs the P2P protocol definitions for `snap`.
  72. func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
  73. protocols := make([]p2p.Protocol, len(ProtocolVersions))
  74. for i, version := range ProtocolVersions {
  75. version := version // Closure
  76. protocols[i] = p2p.Protocol{
  77. Name: ProtocolName,
  78. Version: version,
  79. Length: protocolLengths[version],
  80. Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
  81. return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {
  82. return handle(backend, peer)
  83. })
  84. },
  85. NodeInfo: func() interface{} {
  86. return nodeInfo(backend.Chain())
  87. },
  88. PeerInfo: func(id enode.ID) interface{} {
  89. return backend.PeerInfo(id)
  90. },
  91. Attributes: []enr.Entry{&enrEntry{}},
  92. DialCandidates: dnsdisc,
  93. }
  94. }
  95. return protocols
  96. }
  97. // handle is the callback invoked to manage the life cycle of a `snap` peer.
  98. // When this function terminates, the peer is disconnected.
  99. func handle(backend Backend, peer *Peer) error {
  100. for {
  101. if err := handleMessage(backend, peer); err != nil {
  102. peer.Log().Debug("Message handling failed in `snap`", "err", err)
  103. return err
  104. }
  105. }
  106. }
// handleMessage is invoked whenever an inbound message is received from a
// remote peer on the `snap` protocol. The remote connection is torn down upon
// returning any error: protocol violations (oversized messages, undecodable
// payloads, non-monotonic ranges, unknown codes) are fatal to the peer,
// whereas missing local state is answered with an empty reply instead.
func handleMessage(backend Backend, peer *Peer) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := peer.rw.ReadMsg()
	if err != nil {
		return err
	}
	if msg.Size > maxMessageSize {
		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
	}
	defer msg.Discard()
	start := time.Now()

	// Track the amount of time it takes to serve the request and run the handler
	if metrics.Enabled {
		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
		defer func(start time.Time) {
			sampler := func() metrics.Sample {
				return metrics.ResettingSample(
					metrics.NewExpDecaySample(1028, 0.015),
				)
			}
			metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
		}(start)
	}
	// Handle the message depending on its contents
	switch {
	case msg.Code == GetAccountRangeMsg:
		// Decode the account retrieval request
		var req GetAccountRangePacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Clamp the requested byte allowance to our own soft limit
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// Retrieve the requested state and bail out if non existent.
		// An empty reply (ID only) signals that we cannot serve this root.
		tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
		if err != nil {
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin)
		if err != nil {
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		// Iterate over the requested range and pile accounts up
		var (
			accounts []*AccountData
			size     uint64
			last     common.Hash
		)
		for it.Next() && size < req.Bytes {
			hash, account := it.Hash(), common.CopyBytes(it.Account())

			// Track the returned interval for the Merkle proofs
			last = hash

			// Assemble the reply item
			size += uint64(common.HashLength + len(account))
			accounts = append(accounts, &AccountData{
				Hash: hash,
				Body: account,
			})
			// If we've exceeded the request threshold, abort
			if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
				break
			}
		}
		it.Release()

		// Generate the Merkle proofs for the first and last account
		proof := light.NewNodeSet()
		if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
			log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
		}
		if last != (common.Hash{}) {
			if err := tr.Prove(last[:], 0, proof); err != nil {
				log.Warn("Failed to prove account range", "last", last, "err", err)
				return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
			}
		}
		var proofs [][]byte
		for _, blob := range proof.NodeList() {
			proofs = append(proofs, blob)
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
			ID:       req.ID,
			Accounts: accounts,
			Proof:    proofs,
		})

	case msg.Code == AccountRangeMsg:
		// A range of accounts arrived to one of our previous requests
		res := new(AccountRangePacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the range is monotonically increasing
		for i := 1; i < len(res.Accounts); i++ {
			if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
				return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, AccountRangeMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetStorageRangesMsg:
		// Decode the storage retrieval request
		var req GetStorageRangesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
		// TODO(karalabe):   - Logging locally is not ideal as remote faults annoy the local user
		// TODO(karalabe):   - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)

		// Calculate the hard limit at which to abort, even if mid storage trie.
		// The slack lets us finish a contract's storage rather than cut it off
		// just short of the soft limit and force another round trip with proofs.
		hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))

		// Retrieve storage ranges until the packet limit is reached
		var (
			slots  [][]*StorageData
			proofs [][]byte
			size   uint64
		)
		for _, account := range req.Accounts {
			// If we've exceeded the requested data limit, abort without opening
			// a new storage range (that we'd need to prove due to exceeded size)
			if size >= req.Bytes {
				break
			}
			// The first account might start from a different origin and end sooner.
			// Origin/Limit are consumed (nilled out) so subsequent accounts scan
			// their full storage range.
			var origin common.Hash
			if len(req.Origin) > 0 {
				origin, req.Origin = common.BytesToHash(req.Origin), nil
			}
			var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
			if len(req.Limit) > 0 {
				limit, req.Limit = common.BytesToHash(req.Limit), nil
			}
			// Retrieve the requested state and bail out if non existent
			it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin)
			if err != nil {
				return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
			}
			// Iterate over the requested range and pile slots up
			var (
				storage []*StorageData
				last    common.Hash
				abort   bool
			)
			for it.Next() {
				if size >= hardLimit {
					abort = true
					break
				}
				hash, slot := it.Hash(), common.CopyBytes(it.Slot())

				// Track the returned interval for the Merkle proofs
				last = hash

				// Assemble the reply item
				size += uint64(common.HashLength + len(slot))
				storage = append(storage, &StorageData{
					Hash: hash,
					Body: slot,
				})
				// If we've exceeded the request threshold, abort
				if bytes.Compare(hash[:], limit[:]) >= 0 {
					break
				}
			}
			slots = append(slots, storage)
			it.Release()

			// Generate the Merkle proofs for the first and last storage slot, but
			// only if the response was capped. If the entire storage trie included
			// in the response, no need for any proofs.
			if origin != (common.Hash{}) || abort {
				// Request started at a non-zero hash or was capped prematurely, add
				// the endpoint Merkle proofs
				accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
				if err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				var acc state.Account
				if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB())
				if err != nil {
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				proof := light.NewNodeSet()
				if err := stTrie.Prove(origin[:], 0, proof); err != nil {
					log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
					return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
				}
				if last != (common.Hash{}) {
					if err := stTrie.Prove(last[:], 0, proof); err != nil {
						log.Warn("Failed to prove storage range", "last", last, "err", err)
						return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
					}
				}
				for _, blob := range proof.NodeList() {
					proofs = append(proofs, blob)
				}
				// Proof terminates the reply as proofs are only added if a node
				// refuses to serve more data (exception when a contract fetch is
				// finishing, but that's that).
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
			ID:    req.ID,
			Slots: slots,
			Proof: proofs,
		})

	case msg.Code == StorageRangesMsg:
		// A range of storage slots arrived to one of our previous requests
		res := new(StorageRangesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		// Ensure the ranges are monotonically increasing
		for i, slots := range res.Slots {
			for j := 1; j < len(slots); j++ {
				if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
					return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
				}
			}
		}
		requestTracker.Fulfil(peer.id, peer.version, StorageRangesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetByteCodesMsg:
		// Decode bytecode retrieval request
		var req GetByteCodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		if len(req.Hashes) > maxCodeLookups {
			req.Hashes = req.Hashes[:maxCodeLookups]
		}
		// Retrieve bytecodes until the packet size limit is reached.
		// NOTE: `bytes` shadows the `bytes` package inside this case.
		var (
			codes [][]byte
			bytes uint64
		)
		for _, hash := range req.Hashes {
			if hash == emptyCode {
				// Peers should not request the empty code, but if they do, at
				// least sent them back a correct response without db lookups
				codes = append(codes, []byte{})
			} else if blob, err := backend.Chain().ContractCode(hash); err == nil {
				codes = append(codes, blob)
				bytes += uint64(len(blob))
			}
			if bytes > req.Bytes {
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
			ID:    req.ID,
			Codes: codes,
		})

	case msg.Code == ByteCodesMsg:
		// A batch of byte codes arrived to one of our previous requests
		res := new(ByteCodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, ByteCodesMsg, res.ID)

		return backend.Handle(peer, res)

	case msg.Code == GetTrieNodesMsg:
		// Decode trie node retrieval request
		var req GetTrieNodesPacket
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		if req.Bytes > softResponseLimit {
			req.Bytes = softResponseLimit
		}
		// Make sure we have the state associated with the request
		triedb := backend.Chain().StateCache().TrieDB()

		accTrie, err := trie.NewSecure(req.Root, triedb)
		if err != nil {
			// We don't have the requested state available, bail out
			return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
		}
		snap := backend.Chain().Snapshots().Snapshot(req.Root)
		if snap == nil {
			// We don't have the requested state snapshotted yet, bail out.
			// In reality we could still serve using the account and storage
			// tries only, but let's protect the node a bit while it's doing
			// snapshot generation.
			return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
		}
		// Retrieve trie nodes until the packet size limit is reached
		var (
			nodes [][]byte
			bytes uint64
			loads int // Trie hash expansions to count database reads
		)
		for _, pathset := range req.Paths {
			switch len(pathset) {
			case 0:
				// Ensure we penalize invalid requests
				return fmt.Errorf("%w: zero-item pathset requested", errBadRequest)

			case 1:
				// If we're only retrieving an account trie node, fetch it directly
				blob, resolved, err := accTrie.TryGetNode(pathset[0])
				loads += resolved // always account database reads, even for failures
				if err != nil {
					break
				}
				nodes = append(nodes, blob)
				bytes += uint64(len(blob))

			default:
				// Storage slots requested, open the storage trie and retrieve from there
				account, err := snap.Account(common.BytesToHash(pathset[0]))
				loads++ // always account database reads, even for failures
				if err != nil {
					break
				}
				stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
				loads++ // always account database reads, even for failures
				if err != nil {
					break
				}
				for _, path := range pathset[1:] {
					blob, resolved, err := stTrie.TryGetNode(path)
					loads += resolved // always account database reads, even for failures
					if err != nil {
						break
					}
					nodes = append(nodes, blob)
					bytes += uint64(len(blob))

					// Sanity check limits to avoid DoS on the store trie loads
					if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
						break
					}
				}
			}
			// Abort request processing if we've exceeded our limits
			if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
				break
			}
		}
		// Send back anything accumulated
		return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
			ID:    req.ID,
			Nodes: nodes,
		})

	case msg.Code == TrieNodesMsg:
		// A batch of trie nodes arrived to one of our previous requests
		res := new(TrieNodesPacket)
		if err := msg.Decode(res); err != nil {
			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
		}
		requestTracker.Fulfil(peer.id, peer.version, TrieNodesMsg, res.ID)

		return backend.Handle(peer, res)

	default:
		return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
	}
}
// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer. Currently empty; reserved for future fields.
type NodeInfo struct{}
  476. // nodeInfo retrieves some `snap` protocol metadata about the running host node.
  477. func nodeInfo(chain *core.BlockChain) *NodeInfo {
  478. return &NodeInfo{}
  479. }