handler.go 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490
  1. // Copyright 2020 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package snap
  17. import (
  18. "bytes"
  19. "fmt"
  20. "github.com/ethereum/go-ethereum/common"
  21. "github.com/ethereum/go-ethereum/core"
  22. "github.com/ethereum/go-ethereum/core/state"
  23. "github.com/ethereum/go-ethereum/light"
  24. "github.com/ethereum/go-ethereum/log"
  25. "github.com/ethereum/go-ethereum/p2p"
  26. "github.com/ethereum/go-ethereum/p2p/enode"
  27. "github.com/ethereum/go-ethereum/p2p/enr"
  28. "github.com/ethereum/go-ethereum/rlp"
  29. "github.com/ethereum/go-ethereum/trie"
  30. )
const (
	// softResponseLimit is the target maximum size of replies to data retrievals.
	softResponseLimit = 2 * 1024 * 1024

	// maxCodeLookups is the maximum number of bytecodes to serve. This number is
	// there to limit the number of disk lookups.
	maxCodeLookups = 1024

	// stateLookupSlack defines the ratio by how much a state response can exceed
	// the requested limit in order to try and avoid breaking up contracts into
	// multiple packages and proving them.
	stateLookupSlack = 0.1

	// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
	// number is there to limit the number of disk lookups.
	maxTrieNodeLookups = 1024
)
// Handler is a callback to invoke from an outside runner after the boilerplate
// exchanges have passed.
type Handler func(peer *Peer) error
// Backend defines the data retrieval methods to serve remote requests and the
// callback methods to invoke on remote deliveries.
type Backend interface {
	// Chain retrieves the blockchain object to serve data.
	Chain() *core.BlockChain

	// RunPeer is invoked when a peer joins on the `eth` protocol. The handler
	// should do any peer maintenance work, handshakes and validations. If all
	// is passed, control should be given back to the `handler` to process the
	// inbound messages going forward.
	RunPeer(peer *Peer, handler Handler) error

	// PeerInfo retrieves all known `snap` information about a peer.
	PeerInfo(id enode.ID) interface{}

	// Handle is a callback to be invoked when a data packet is received from
	// the remote peer. Only packets not consumed by the protocol handler will
	// be forwarded to the backend.
	Handle(peer *Peer, packet Packet) error
}
  65. // MakeProtocols constructs the P2P protocol definitions for `snap`.
  66. func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
  67. protocols := make([]p2p.Protocol, len(ProtocolVersions))
  68. for i, version := range ProtocolVersions {
  69. version := version // Closure
  70. protocols[i] = p2p.Protocol{
  71. Name: ProtocolName,
  72. Version: version,
  73. Length: protocolLengths[version],
  74. Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
  75. return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {
  76. return handle(backend, peer)
  77. })
  78. },
  79. NodeInfo: func() interface{} {
  80. return nodeInfo(backend.Chain())
  81. },
  82. PeerInfo: func(id enode.ID) interface{} {
  83. return backend.PeerInfo(id)
  84. },
  85. Attributes: []enr.Entry{&enrEntry{}},
  86. DialCandidates: dnsdisc,
  87. }
  88. }
  89. return protocols
  90. }
  91. // handle is the callback invoked to manage the life cycle of a `snap` peer.
  92. // When this function terminates, the peer is disconnected.
  93. func handle(backend Backend, peer *Peer) error {
  94. for {
  95. if err := handleMessage(backend, peer); err != nil {
  96. peer.Log().Debug("Message handling failed in `snap`", "err", err)
  97. return err
  98. }
  99. }
  100. }
  101. // handleMessage is invoked whenever an inbound message is received from a
  102. // remote peer on the `spap` protocol. The remote connection is torn down upon
  103. // returning any error.
  104. func handleMessage(backend Backend, peer *Peer) error {
  105. // Read the next message from the remote peer, and ensure it's fully consumed
  106. msg, err := peer.rw.ReadMsg()
  107. if err != nil {
  108. return err
  109. }
  110. if msg.Size > maxMessageSize {
  111. return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
  112. }
  113. defer msg.Discard()
  114. // Handle the message depending on its contents
  115. switch {
  116. case msg.Code == GetAccountRangeMsg:
  117. // Decode the account retrieval request
  118. var req GetAccountRangePacket
  119. if err := msg.Decode(&req); err != nil {
  120. return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
  121. }
  122. if req.Bytes > softResponseLimit {
  123. req.Bytes = softResponseLimit
  124. }
  125. // Retrieve the requested state and bail out if non existent
  126. tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
  127. if err != nil {
  128. return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
  129. }
  130. it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin)
  131. if err != nil {
  132. return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
  133. }
  134. // Iterate over the requested range and pile accounts up
  135. var (
  136. accounts []*AccountData
  137. size uint64
  138. last common.Hash
  139. )
  140. for it.Next() && size < req.Bytes {
  141. hash, account := it.Hash(), common.CopyBytes(it.Account())
  142. // Track the returned interval for the Merkle proofs
  143. last = hash
  144. // Assemble the reply item
  145. size += uint64(common.HashLength + len(account))
  146. accounts = append(accounts, &AccountData{
  147. Hash: hash,
  148. Body: account,
  149. })
  150. // If we've exceeded the request threshold, abort
  151. if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
  152. break
  153. }
  154. }
  155. it.Release()
  156. // Generate the Merkle proofs for the first and last account
  157. proof := light.NewNodeSet()
  158. if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
  159. log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
  160. return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
  161. }
  162. if last != (common.Hash{}) {
  163. if err := tr.Prove(last[:], 0, proof); err != nil {
  164. log.Warn("Failed to prove account range", "last", last, "err", err)
  165. return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
  166. }
  167. }
  168. var proofs [][]byte
  169. for _, blob := range proof.NodeList() {
  170. proofs = append(proofs, blob)
  171. }
  172. // Send back anything accumulated
  173. return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{
  174. ID: req.ID,
  175. Accounts: accounts,
  176. Proof: proofs,
  177. })
  178. case msg.Code == AccountRangeMsg:
  179. // A range of accounts arrived to one of our previous requests
  180. res := new(AccountRangePacket)
  181. if err := msg.Decode(res); err != nil {
  182. return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
  183. }
  184. // Ensure the range is monotonically increasing
  185. for i := 1; i < len(res.Accounts); i++ {
  186. if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
  187. return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
  188. }
  189. }
  190. return backend.Handle(peer, res)
  191. case msg.Code == GetStorageRangesMsg:
  192. // Decode the storage retrieval request
  193. var req GetStorageRangesPacket
  194. if err := msg.Decode(&req); err != nil {
  195. return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
  196. }
  197. if req.Bytes > softResponseLimit {
  198. req.Bytes = softResponseLimit
  199. }
  200. // TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
  201. // TODO(karalabe): - Logging locally is not ideal as remote faulst annoy the local user
  202. // TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)
  203. // Calculate the hard limit at which to abort, even if mid storage trie
  204. hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))
  205. // Retrieve storage ranges until the packet limit is reached
  206. var (
  207. slots [][]*StorageData
  208. proofs [][]byte
  209. size uint64
  210. )
  211. for _, account := range req.Accounts {
  212. // If we've exceeded the requested data limit, abort without opening
  213. // a new storage range (that we'd need to prove due to exceeded size)
  214. if size >= req.Bytes {
  215. break
  216. }
  217. // The first account might start from a different origin and end sooner
  218. var origin common.Hash
  219. if len(req.Origin) > 0 {
  220. origin, req.Origin = common.BytesToHash(req.Origin), nil
  221. }
  222. var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
  223. if len(req.Limit) > 0 {
  224. limit, req.Limit = common.BytesToHash(req.Limit), nil
  225. }
  226. // Retrieve the requested state and bail out if non existent
  227. it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin)
  228. if err != nil {
  229. return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
  230. }
  231. // Iterate over the requested range and pile slots up
  232. var (
  233. storage []*StorageData
  234. last common.Hash
  235. )
  236. for it.Next() && size < hardLimit {
  237. hash, slot := it.Hash(), common.CopyBytes(it.Slot())
  238. // Track the returned interval for the Merkle proofs
  239. last = hash
  240. // Assemble the reply item
  241. size += uint64(common.HashLength + len(slot))
  242. storage = append(storage, &StorageData{
  243. Hash: hash,
  244. Body: slot,
  245. })
  246. // If we've exceeded the request threshold, abort
  247. if bytes.Compare(hash[:], limit[:]) >= 0 {
  248. break
  249. }
  250. }
  251. slots = append(slots, storage)
  252. it.Release()
  253. // Generate the Merkle proofs for the first and last storage slot, but
  254. // only if the response was capped. If the entire storage trie included
  255. // in the response, no need for any proofs.
  256. if origin != (common.Hash{}) || size >= hardLimit {
  257. // Request started at a non-zero hash or was capped prematurely, add
  258. // the endpoint Merkle proofs
  259. accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
  260. if err != nil {
  261. return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
  262. }
  263. var acc state.Account
  264. if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
  265. return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
  266. }
  267. stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB())
  268. if err != nil {
  269. return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
  270. }
  271. proof := light.NewNodeSet()
  272. if err := stTrie.Prove(origin[:], 0, proof); err != nil {
  273. log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
  274. return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
  275. }
  276. if last != (common.Hash{}) {
  277. if err := stTrie.Prove(last[:], 0, proof); err != nil {
  278. log.Warn("Failed to prove storage range", "last", last, "err", err)
  279. return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
  280. }
  281. }
  282. for _, blob := range proof.NodeList() {
  283. proofs = append(proofs, blob)
  284. }
  285. // Proof terminates the reply as proofs are only added if a node
  286. // refuses to serve more data (exception when a contract fetch is
  287. // finishing, but that's that).
  288. break
  289. }
  290. }
  291. // Send back anything accumulated
  292. return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{
  293. ID: req.ID,
  294. Slots: slots,
  295. Proof: proofs,
  296. })
  297. case msg.Code == StorageRangesMsg:
  298. // A range of storage slots arrived to one of our previous requests
  299. res := new(StorageRangesPacket)
  300. if err := msg.Decode(res); err != nil {
  301. return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
  302. }
  303. // Ensure the ranges ae monotonically increasing
  304. for i, slots := range res.Slots {
  305. for j := 1; j < len(slots); j++ {
  306. if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
  307. return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
  308. }
  309. }
  310. }
  311. return backend.Handle(peer, res)
  312. case msg.Code == GetByteCodesMsg:
  313. // Decode bytecode retrieval request
  314. var req GetByteCodesPacket
  315. if err := msg.Decode(&req); err != nil {
  316. return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
  317. }
  318. if req.Bytes > softResponseLimit {
  319. req.Bytes = softResponseLimit
  320. }
  321. if len(req.Hashes) > maxCodeLookups {
  322. req.Hashes = req.Hashes[:maxCodeLookups]
  323. }
  324. // Retrieve bytecodes until the packet size limit is reached
  325. var (
  326. codes [][]byte
  327. bytes uint64
  328. )
  329. for _, hash := range req.Hashes {
  330. if hash == emptyCode {
  331. // Peers should not request the empty code, but if they do, at
  332. // least sent them back a correct response without db lookups
  333. codes = append(codes, []byte{})
  334. } else if blob, err := backend.Chain().ContractCode(hash); err == nil {
  335. codes = append(codes, blob)
  336. bytes += uint64(len(blob))
  337. }
  338. if bytes > req.Bytes {
  339. break
  340. }
  341. }
  342. // Send back anything accumulated
  343. return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{
  344. ID: req.ID,
  345. Codes: codes,
  346. })
  347. case msg.Code == ByteCodesMsg:
  348. // A batch of byte codes arrived to one of our previous requests
  349. res := new(ByteCodesPacket)
  350. if err := msg.Decode(res); err != nil {
  351. return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
  352. }
  353. return backend.Handle(peer, res)
  354. case msg.Code == GetTrieNodesMsg:
  355. // Decode trie node retrieval request
  356. var req GetTrieNodesPacket
  357. if err := msg.Decode(&req); err != nil {
  358. return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
  359. }
  360. if req.Bytes > softResponseLimit {
  361. req.Bytes = softResponseLimit
  362. }
  363. // Make sure we have the state associated with the request
  364. triedb := backend.Chain().StateCache().TrieDB()
  365. accTrie, err := trie.NewSecure(req.Root, triedb)
  366. if err != nil {
  367. // We don't have the requested state available, bail out
  368. return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
  369. }
  370. snap := backend.Chain().Snapshots().Snapshot(req.Root)
  371. if snap == nil {
  372. // We don't have the requested state snapshotted yet, bail out.
  373. // In reality we could still serve using the account and storage
  374. // tries only, but let's protect the node a bit while it's doing
  375. // snapshot generation.
  376. return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
  377. }
  378. // Retrieve trie nodes until the packet size limit is reached
  379. var (
  380. nodes [][]byte
  381. bytes uint64
  382. loads int // Trie hash expansions to cound database reads
  383. )
  384. for _, pathset := range req.Paths {
  385. switch len(pathset) {
  386. case 0:
  387. // Ensure we penalize invalid requests
  388. return fmt.Errorf("%w: zero-item pathset requested", errBadRequest)
  389. case 1:
  390. // If we're only retrieving an account trie node, fetch it directly
  391. blob, resolved, err := accTrie.TryGetNode(pathset[0])
  392. loads += resolved // always account database reads, even for failures
  393. if err != nil {
  394. break
  395. }
  396. nodes = append(nodes, blob)
  397. bytes += uint64(len(blob))
  398. default:
  399. // Storage slots requested, open the storage trie and retrieve from there
  400. account, err := snap.Account(common.BytesToHash(pathset[0]))
  401. loads++ // always account database reads, even for failures
  402. if err != nil {
  403. break
  404. }
  405. stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
  406. loads++ // always account database reads, even for failures
  407. if err != nil {
  408. break
  409. }
  410. for _, path := range pathset[1:] {
  411. blob, resolved, err := stTrie.TryGetNode(path)
  412. loads += resolved // always account database reads, even for failures
  413. if err != nil {
  414. break
  415. }
  416. nodes = append(nodes, blob)
  417. bytes += uint64(len(blob))
  418. // Sanity check limits to avoid DoS on the store trie loads
  419. if bytes > req.Bytes || loads > maxTrieNodeLookups {
  420. break
  421. }
  422. }
  423. }
  424. // Abort request processing if we've exceeded our limits
  425. if bytes > req.Bytes || loads > maxTrieNodeLookups {
  426. break
  427. }
  428. }
  429. // Send back anything accumulated
  430. return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{
  431. ID: req.ID,
  432. Nodes: nodes,
  433. })
  434. case msg.Code == TrieNodesMsg:
  435. // A batch of trie nodes arrived to one of our previous requests
  436. res := new(TrieNodesPacket)
  437. if err := msg.Decode(res); err != nil {
  438. return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
  439. }
  440. return backend.Handle(peer, res)
  441. default:
  442. return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code)
  443. }
  444. }
// NodeInfo represents a short summary of the `snap` sub-protocol metadata
// known about the host peer.
type NodeInfo struct{}
  448. // nodeInfo retrieves some `snap` protocol metadata about the running host node.
  449. func nodeInfo(chain *core.BlockChain) *NodeInfo {
  450. return &NodeInfo{}
  451. }