sync.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// ErrNotRequested is returned by the trie sync when it's requested to process a
// node it did not request.
var ErrNotRequested = errors.New("not requested")

// ErrAlreadyProcessed is returned by the trie sync when it's requested to process a
// node it already processed previously.
var ErrAlreadyProcessed = errors.New("already processed")

// maxFetchesPerDepth is the maximum number of pending trie nodes per depth. The
// role of this value is to limit the number of trie nodes that get expanded in
// memory if the node was configured with a significant number of peers.
const maxFetchesPerDepth = 16384

// request represents a scheduled or already in-flight state retrieval request.
type request struct {
	path []byte      // Merkle path leading to this node for prioritization
	hash common.Hash // Hash of the node data content to retrieve
	data []byte      // Data content of the node, cached until all subtrees complete
	code bool        // Whether this is a code entry

	parents []*request // Parent state nodes referencing this entry (notify all upon completion)
	deps    int        // Number of dependencies before allowed to commit this node

	callback LeafCallback // Callback to invoke if a leaf node is reached on this branch
}

// SyncPath is a path tuple identifying a particular trie node either in a single
// trie (account) or a layered trie (account -> storage).
//
// Content wise the tuple either has 1 element if it addresses a node in a single
// trie or 2 elements if it addresses a node in a stacked trie.
//
// To support aiming arbitrary trie nodes, the path needs to support odd nibble
// lengths. To avoid transferring expanded hex form over the network, the last
// part of the tuple (which needs to index into the middle of a trie) is compact
// encoded. In case of a 2-tuple, the first item is always 32 bytes so that is
// simple binary encoded.
//
// Examples:
//   - Path 0x9  -> {0x19}
//   - Path 0x99 -> {0x0099}
//   - Path 0x01234567890123456789012345678901012345678901234567890123456789019 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x19}
//   - Path 0x012345678901234567890123456789010123456789012345678901234567890199 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x0099}
type SyncPath [][]byte

// newSyncPath converts an expanded trie path from nibble form into a compact
// version that can be sent over the network.
func newSyncPath(path []byte) SyncPath {
	// If the hash is from the account trie, append a single item. If it
	// is from a storage trie, append a tuple. Note, the length 64 is
	// clashing between account leaf and storage root. It's fine though
	// because having a trie node at depth 64 means a hash collision was
	// found and we're long dead.
	if len(path) < 64 {
		return SyncPath{hexToCompact(path)}
	}
	return SyncPath{hexToKeybytes(path[:64]), hexToCompact(path[64:])}
}
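
// As a rough sketch of how the encoding behaves (the paths below are made up
// for illustration and are not taken from a real trie): a short path inside
// the account trie stays a single compact element, while a path that crosses
// the 64-nibble account boundary is split into the 32-byte account key plus a
// compact-encoded storage-path tail.
//
//	account := []byte{0x9}                   // odd-length nibble path, depth 1
//	_ = newSyncPath(account)                 // -> SyncPath{{0x19}}
//
//	storage := append(make([]byte, 64), 0x9) // 64 account nibbles + 1 storage nibble
//	_ = newSyncPath(storage)                 // -> SyncPath{<32-byte account key>, {0x19}}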

// SyncResult is a response with requested data along with its hash.
type SyncResult struct {
	Hash common.Hash // Hash of the originally unknown trie node
	Data []byte      // Data content of the retrieved node
}

// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
	nodes map[common.Hash][]byte // In-memory membatch of recently completed nodes
	codes map[common.Hash][]byte // In-memory membatch of recently completed codes
}

// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
	return &syncMemBatch{
		nodes: make(map[common.Hash][]byte),
		codes: make(map[common.Hash][]byte),
	}
}

// hasNode reports whether the trie node with the given hash is already cached.
func (batch *syncMemBatch) hasNode(hash common.Hash) bool {
	_, ok := batch.nodes[hash]
	return ok
}

// hasCode reports whether the contract code with the given hash is already cached.
func (batch *syncMemBatch) hasCode(hash common.Hash) bool {
	_, ok := batch.codes[hash]
	return ok
}

// Sync is the main state trie synchronisation scheduler, which provides yet
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
	database ethdb.KeyValueReader     // Persistent database to check for existing entries
	membatch *syncMemBatch            // Memory buffer to avoid frequent database writes
	nodeReqs map[common.Hash]*request // Pending requests pertaining to a trie node hash
	codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash
	queue    *prque.Prque             // Priority queue with the pending requests
	fetches  map[int]int              // Number of active fetches per trie node depth
	bloom    *SyncBloom               // Bloom filter for fast state existence checks
}

// NewSync creates a new trie data download scheduler.
func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom) *Sync {
	ts := &Sync{
		database: database,
		membatch: newSyncMemBatch(),
		nodeReqs: make(map[common.Hash]*request),
		codeReqs: make(map[common.Hash]*request),
		queue:    prque.New(nil),
		fetches:  make(map[int]int),
		bloom:    bloom,
	}
	ts.AddSubTrie(root, nil, common.Hash{}, callback)
	return ts
}

// AddSubTrie registers a new trie to the sync code, rooted at the designated parent.
func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, callback LeafCallback) {
	// Short circuit if the trie is empty or already known
	if root == emptyRoot {
		return
	}
	if s.membatch.hasNode(root) {
		return
	}
	if s.bloom == nil || s.bloom.Contains(root[:]) {
		// Bloom filter says this might be a duplicate, double check.
		// If database says yes, then at least the trie node is present
		// and we hold the assumption that it's NOT legacy contract code.
		blob := rawdb.ReadTrieNode(s.database, root)
		if len(blob) > 0 {
			return
		}
		// False positive, bump fault meter
		bloomFaultMeter.Mark(1)
	}
	// Assemble the new sub-trie sync request
	req := &request{
		path:     path,
		hash:     root,
		callback: callback,
	}
	// If this sub-trie has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.nodeReqs[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// AddCodeEntry schedules the direct retrieval of a contract code that should not
// be interpreted as a trie node, but rather accepted and stored into the database
// as is.
func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) {
	// Short circuit if the entry is empty or already known
	if hash == emptyState {
		return
	}
	if s.membatch.hasCode(hash) {
		return
	}
	if s.bloom == nil || s.bloom.Contains(hash[:]) {
		// Bloom filter says this might be a duplicate, double check.
		// If database says yes, the blob is present for sure.
		// Note, we only check for existence with the new code scheme, since
		// fast sync is expected to run on a fresh node. Even if the code
		// exists in the legacy format, fetch and store it with the new
		// scheme anyway.
		if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 {
			return
		}
		// False positive, bump fault meter
		bloomFaultMeter.Mark(1)
	}
	// Assemble the new code retrieval request
	req := &request{
		path: path,
		hash: hash,
		code: true,
	}
	// If this code entry has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.nodeReqs[parent] // the parent of a code request can ONLY be a node request
		if ancestor == nil {
			panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// Missing retrieves the known missing nodes from the trie for retrieval. To aid
// both eth/6x style fast sync and snap/1x style state sync, the paths of trie
// nodes are returned too, as well as a separate hash list for codes.
func (s *Sync) Missing(max int) (nodes []common.Hash, paths []SyncPath, codes []common.Hash) {
	var (
		nodeHashes []common.Hash
		nodePaths  []SyncPath
		codeHashes []common.Hash
	)
	for !s.queue.Empty() && (max == 0 || len(nodeHashes)+len(codeHashes) < max) {
		// Retrieve the next item in line
		item, prio := s.queue.Peek()

		// If we have too many already-pending tasks for this depth, throttle
		depth := int(prio >> 56)
		if s.fetches[depth] > maxFetchesPerDepth {
			break
		}
		// Item is allowed to be scheduled, add it to the task list
		s.queue.Pop()
		s.fetches[depth]++

		hash := item.(common.Hash)
		if req, ok := s.nodeReqs[hash]; ok {
			nodeHashes = append(nodeHashes, hash)
			nodePaths = append(nodePaths, newSyncPath(req.path))
		} else {
			codeHashes = append(codeHashes, hash)
		}
	}
	return nodeHashes, nodePaths, codeHashes
}

// Process injects the received data for a requested item. Note it can
// happen that a single response commits two pending requests (e.g.
// there are two requests, one for code and one for a node, but the hash
// is the same). In this case the second response for the same hash will
// be treated as a "non-requested" or "already-processed" item, but
// there is no downside.
func (s *Sync) Process(result SyncResult) error {
	// If the item was not requested either for code or node, bail out
	if s.nodeReqs[result.Hash] == nil && s.codeReqs[result.Hash] == nil {
		return ErrNotRequested
	}
	// There is a pending code request for this data, commit directly
	var filled bool
	if req := s.codeReqs[result.Hash]; req != nil && req.data == nil {
		filled = true
		req.data = result.Data
		s.commit(req)
	}
	// There is a pending node request for this data, fill it.
	if req := s.nodeReqs[result.Hash]; req != nil && req.data == nil {
		filled = true

		// Decode the node data content and update the request
		node, err := decodeNode(result.Hash[:], result.Data)
		if err != nil {
			return err
		}
		req.data = result.Data

		// Create and schedule a request for all the children nodes
		requests, err := s.children(req, node)
		if err != nil {
			return err
		}
		if len(requests) == 0 && req.deps == 0 {
			s.commit(req)
		} else {
			req.deps += len(requests)
			for _, child := range requests {
				s.schedule(child)
			}
		}
	}
	if !filled {
		return ErrAlreadyProcessed
	}
	return nil
}

// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning any error that occurred.
func (s *Sync) Commit(dbw ethdb.Batch) error {
	// Dump the membatch into the database batch
	for key, value := range s.membatch.nodes {
		rawdb.WriteTrieNode(dbw, key, value)
		s.bloom.Add(key[:])
	}
	for key, value := range s.membatch.codes {
		rawdb.WriteCode(dbw, key, value)
		s.bloom.Add(key[:])
	}
	// Drop the membatch data and return
	s.membatch = newSyncMemBatch()
	return nil
}

// Pending returns the number of state entries currently pending for download.
func (s *Sync) Pending() int {
	return len(s.nodeReqs) + len(s.codeReqs)
}
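
// The scheduler above is driven from outside this package: callers repeatedly
// drain Missing, fetch the returned hashes through whatever network protocol
// they use, feed the blobs back via Process and flush completed entries with
// Commit until Pending drops to zero. The function below is only a minimal
// sketch of that loop, not part of the package API; the fetch callback, the
// batch size of 128 and the nil leaf callback (sufficient for a single trie
// with no sub-tries or code) are assumptions made for the example. Real state
// sync would pass a LeafCallback that invokes AddSubTrie and AddCodeEntry for
// each decoded account.
func syncLoopSketch(root common.Hash, db ethdb.Database, bloom *SyncBloom, fetch func(common.Hash) ([]byte, error)) error {
	// The bloom filter is assumed non-nil here, since Commit registers every
	// written key with it.
	sched := NewSync(root, db, nil, bloom)
	for sched.Pending() > 0 {
		// Ask the scheduler which hashes are still missing, nodes and codes alike
		nodes, _, codes := sched.Missing(128)
		for _, hash := range append(append([]common.Hash{}, nodes...), codes...) {
			// Retrieve the blob out of band and hand it back to the scheduler
			data, err := fetch(hash)
			if err != nil {
				return err
			}
			if err := sched.Process(SyncResult{Hash: hash, Data: data}); err != nil {
				return err
			}
		}
		// Persist whatever became committable in this round
		batch := db.NewBatch()
		if err := sched.Commit(batch); err != nil {
			return err
		}
		if err := batch.Write(); err != nil {
			return err
		}
	}
	return nil
}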

// schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) schedule(req *request) {
	var reqset = s.nodeReqs
	if req.code {
		reqset = s.codeReqs
	}
	// If we're already requesting this node, add a new reference and stop
	if old, ok := reqset[req.hash]; ok {
		old.parents = append(old.parents, req.parents...)
		return
	}
	reqset[req.hash] = req

	// Schedule the request for future retrieval. This queue is shared
	// by both node requests and code requests. It can happen that a trie
	// node and a code blob have the same hash. In that case two elements
	// with the same hash and the same or a different depth will be pushed,
	// but that's fine: the worst case is that the second response gets
	// treated as a duplicate.
	prio := int64(len(req.path)) << 56 // depth >= 128 will never happen, storage leaves will be included in their parents
	for i := 0; i < 14 && i < len(req.path); i++ {
		prio |= int64(15-req.path[i]) << (52 - i*4) // 15-nibble => lexicographic order
	}
	s.queue.Push(req.hash, prio)
}
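
// To make the priority packing above concrete (values chosen purely for
// illustration): a request at path nibbles {0x1, 0x2} is pushed with
//
//	prio = 2<<56 | (15-1)<<52 | (15-2)<<48 = 0x02ed000000000000
//
// i.e. the depth of 2 lands in the top byte (which Missing recovers again via
// prio >> 56), while the leading nibbles, complemented against 15, fill the
// following 4-bit slots so that sibling paths compare in lexicographic order.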

// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
func (s *Sync) children(req *request, object node) ([]*request, error) {
	// Gather all the children of the node, irrelevant whether known or not
	type child struct {
		path []byte
		node node
	}
	var children []child

	switch node := (object).(type) {
	case *shortNode:
		key := node.Key
		if hasTerm(key) {
			key = key[:len(key)-1]
		}
		children = []child{{
			node: node.Val,
			path: append(append([]byte(nil), req.path...), key...),
		}}
	case *fullNode:
		for i := 0; i < 17; i++ {
			if node.Children[i] != nil {
				children = append(children, child{
					node: node.Children[i],
					path: append(append([]byte(nil), req.path...), byte(i)),
				})
			}
		}
	default:
		panic(fmt.Sprintf("unknown node: %+v", node))
	}
	// Iterate over the children, and request all unknown ones
	requests := make([]*request, 0, len(children))
	for _, child := range children {
		// Notify any external watcher of a new key/value node
		if req.callback != nil {
			if node, ok := (child.node).(valueNode); ok {
				if err := req.callback(child.path, node, req.hash); err != nil {
					return nil, err
				}
			}
		}
		// If the child references another node, resolve or schedule
		if node, ok := (child.node).(hashNode); ok {
			// Try to resolve the node from the local database
			hash := common.BytesToHash(node)
			if s.membatch.hasNode(hash) {
				continue
			}
			if s.bloom == nil || s.bloom.Contains(node) {
				// Bloom filter says this might be a duplicate, double check.
				// If database says yes, then at least the trie node is present
				// and we hold the assumption that it's NOT legacy contract code.
				if blob := rawdb.ReadTrieNode(s.database, common.BytesToHash(node)); len(blob) > 0 {
					continue
				}
				// False positive, bump fault meter
				bloomFaultMeter.Mark(1)
			}
			// Locally unknown node, schedule for retrieval
			requests = append(requests, &request{
				path:     child.path,
				hash:     hash,
				parents:  []*request{req},
				callback: req.callback,
			})
		}
	}
	return requests, nil
}

// commit finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commit(req *request) (err error) {
	// Write the node content to the membatch
	if req.code {
		s.membatch.codes[req.hash] = req.data
		delete(s.codeReqs, req.hash)
		s.fetches[len(req.path)]--
	} else {
		s.membatch.nodes[req.hash] = req.data
		delete(s.nodeReqs, req.hash)
		s.fetches[len(req.path)]--
	}
	// Check all parents for completion
	for _, parent := range req.parents {
		parent.deps--
		if parent.deps == 0 {
			if err := s.commit(parent); err != nil {
				return err
			}
		}
	}
	return nil
}