// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/ethdb"
)

// ErrNotRequested is returned by the trie sync when it's requested to process a
// node it did not request.
var ErrNotRequested = errors.New("not requested")

// ErrAlreadyProcessed is returned by the trie sync when it's requested to process a
// node it already processed previously.
var ErrAlreadyProcessed = errors.New("already processed")

// request represents a scheduled or already in-flight state retrieval request.
type request struct {
	hash common.Hash // Hash of the node data content to retrieve
	data []byte      // Data content of the node, cached until all subtrees complete
	raw  bool        // Whether this is a raw entry (code) or a trie node

	parents []*request // Parent state nodes referencing this entry (notify all upon completion)
	depth   int        // Depth level within the trie the node is located to prioritise DFS
	deps    int        // Number of dependencies before allowed to commit this node

	callback LeafCallback // Callback to invoke if a leaf node is reached on this branch
}

// SyncResult is a simple struct to return missing nodes along with their request
// hashes.
type SyncResult struct {
	Hash common.Hash // Hash of the originally unknown trie node
	Data []byte      // Data content of the retrieved node
}
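
// For illustration only (not part of this file's API): a response entry is
// typically built by hashing the raw blob fetched from the network, e.g. with
// crypto.Keccak256Hash from github.com/ethereum/go-ethereum/crypto, since trie
// nodes are keyed by the Keccak256 hash of their content:
//
//	results := make([]SyncResult, len(blobs))
//	for i, blob := range blobs {
//		results[i] = SyncResult{Hash: crypto.Keccak256Hash(blob), Data: blob}
//	}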

// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
	batch map[common.Hash][]byte // In-memory membatch of recently completed items
}

// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
	return &syncMemBatch{
		batch: make(map[common.Hash][]byte),
	}
}

// Sync is the main state trie synchronisation scheduler, which provides yet
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
	database ethdb.KeyValueReader     // Persistent database to check for existing entries
	membatch *syncMemBatch            // Memory buffer to avoid frequent database writes
	requests map[common.Hash]*request // Pending requests pertaining to a key hash
	queue    *prque.Prque             // Priority queue with the pending requests
	bloom    *SyncBloom               // Bloom filter for fast node existence checks
}

// NewSync creates a new trie data download scheduler.
func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom) *Sync {
	ts := &Sync{
		database: database,
		membatch: newSyncMemBatch(),
		requests: make(map[common.Hash]*request),
		queue:    prque.New(nil),
		bloom:    bloom,
	}
	ts.AddSubTrie(root, 0, common.Hash{}, callback)
	return ts
}
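
// A typical drive loop for the scheduler is sketched below. This is an
// illustrative example only, not part of this file: fetchNodes is a
// hypothetical network helper returning the data blobs for the requested
// hashes as []SyncResult, and db is assumed to implement ethdb.Database so a
// write batch can be created from it.
//
//	sched := NewSync(root, db, nil, bloom)
//	for sched.Pending() > 0 {
//		hashes := sched.Missing(128)       // grab a chunk of missing node hashes
//		results, err := fetchNodes(hashes) // hypothetical network retrieval
//		if err != nil {
//			return err
//		}
//		if _, index, err := sched.Process(results); err != nil {
//			return fmt.Errorf("failed to process result %d: %v", index, err)
//		}
//		batch := db.NewBatch()
//		if err := sched.Commit(batch); err != nil {
//			return err
//		}
//		if err := batch.Write(); err != nil {
//			return err
//		}
//	}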

// AddSubTrie registers a new trie to the sync code, rooted at the designated parent.
func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback LeafCallback) {
	// Short circuit if the trie is empty or already known
	if root == emptyRoot {
		return
	}
	if _, ok := s.membatch.batch[root]; ok {
		return
	}
	if s.bloom.Contains(root[:]) {
		// Bloom filter says this might be a duplicate, double check
		blob, _ := s.database.Get(root[:])
		if local, err := decodeNode(root[:], blob); local != nil && err == nil {
			return
		}
		// False positive, bump fault meter
		bloomFaultMeter.Mark(1)
	}
	// Assemble the new sub-trie sync request
	req := &request{
		hash:     root,
		depth:    depth,
		callback: callback,
	}
	// If this sub-trie has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.requests[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// AddRawEntry schedules the direct retrieval of a state entry that should not be
// interpreted as a trie node, but rather accepted and stored into the database
// as is. This method's goal is to support misc state metadata retrievals (e.g.
// contract code).
func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
	// Short circuit if the entry is empty or already known
	if hash == emptyState {
		return
	}
	if _, ok := s.membatch.batch[hash]; ok {
		return
	}
	if s.bloom.Contains(hash[:]) {
		// Bloom filter says this might be a duplicate, double check
		if ok, _ := s.database.Has(hash[:]); ok {
			return
		}
		// False positive, bump fault meter
		bloomFaultMeter.Mark(1)
	}
	// Assemble the new sub-trie sync request
	req := &request{
		hash:  hash,
		raw:   true,
		depth: depth,
	}
	// If this sub-trie has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.requests[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// Missing retrieves up to max hashes of known-missing trie nodes for retrieval
// (max == 0 retrieves all currently pending ones).
func (s *Sync) Missing(max int) []common.Hash {
	var requests []common.Hash
	for !s.queue.Empty() && (max == 0 || len(requests) < max) {
		requests = append(requests, s.queue.PopItem().(common.Hash))
	}
	return requests
}

// Process injects a batch of retrieved trie node data, returning whether something
// was committed to the memory batch and also the index of the entry whose
// processing failed, if any.
func (s *Sync) Process(results []SyncResult) (bool, int, error) {
	committed := false

	for i, item := range results {
		// If the item was not requested, bail out
		request := s.requests[item.Hash]
		if request == nil {
			return committed, i, ErrNotRequested
		}
		if request.data != nil {
			return committed, i, ErrAlreadyProcessed
		}
		// If the item is a raw entry request, commit directly
		if request.raw {
			request.data = item.Data
			s.commit(request)
			committed = true
			continue
		}
		// Decode the node data content and update the request
		node, err := decodeNode(item.Hash[:], item.Data)
		if err != nil {
			return committed, i, err
		}
		request.data = item.Data

		// Create and schedule a request for all the children nodes
		requests, err := s.children(request, node)
		if err != nil {
			return committed, i, err
		}
		if len(requests) == 0 && request.deps == 0 {
			s.commit(request)
			committed = true
			continue
		}
		request.deps += len(requests)
		for _, child := range requests {
			s.schedule(child)
		}
	}
	return committed, 0, nil
}

// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning any error that occurred.
func (s *Sync) Commit(dbw ethdb.Batch) error {
	// Dump the membatch into the write batch dbw
	for key, value := range s.membatch.batch {
		if err := dbw.Put(key[:], value); err != nil {
			return err
		}
		s.bloom.Add(key[:])
	}
	// Drop the membatch data and return
	s.membatch = newSyncMemBatch()
	return nil
}
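
// Note that Commit only stages writes into the caller-supplied batch; it never
// touches disk itself, so the caller is responsible for flushing. An illustrative
// sketch, assuming db implements ethdb.Database:
//
//	batch := db.NewBatch()
//	if err := sched.Commit(batch); err != nil {
//		return err
//	}
//	return batch.Write()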

// Pending returns the number of state entries currently pending for download.
func (s *Sync) Pending() int {
	return len(s.requests)
}

// schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) schedule(req *request) {
	// If we're already requesting this node, add a new reference and stop
	if old, ok := s.requests[req.hash]; ok {
		old.parents = append(old.parents, req.parents...)
		return
	}
	// Schedule the request for future retrieval
	s.queue.Push(req.hash, int64(req.depth))
	s.requests[req.hash] = req
}

// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
func (s *Sync) children(req *request, object node) ([]*request, error) {
	// Gather all the children of the node, irrespective of whether they are known or not
	type child struct {
		node  node
		depth int
	}
	var children []child

	switch node := (object).(type) {
	case *shortNode:
		children = []child{{
			node:  node.Val,
			depth: req.depth + len(node.Key),
		}}
	case *fullNode:
		for i := 0; i < 17; i++ {
			if node.Children[i] != nil {
				children = append(children, child{
					node:  node.Children[i],
					depth: req.depth + 1,
				})
			}
		}
	default:
		panic(fmt.Sprintf("unknown node: %+v", node))
	}
	// Iterate over the children, and request all unknown ones
	requests := make([]*request, 0, len(children))
	for _, child := range children {
		// Notify any external watcher of a new key/value node
		if req.callback != nil {
			if node, ok := (child.node).(valueNode); ok {
				if err := req.callback(node, req.hash); err != nil {
					return nil, err
				}
			}
		}
		// If the child references another node, resolve or schedule
		if node, ok := (child.node).(hashNode); ok {
			// Try to resolve the node from the local database
			hash := common.BytesToHash(node)
			if _, ok := s.membatch.batch[hash]; ok {
				continue
			}
			if s.bloom.Contains(node) {
				// Bloom filter says this might be a duplicate, double check
				if ok, _ := s.database.Has(node); ok {
					continue
				}
				// False positive, bump fault meter
				bloomFaultMeter.Mark(1)
			}
			// Locally unknown node, schedule for retrieval
			requests = append(requests, &request{
				hash:     hash,
				parents:  []*request{req},
				depth:    child.depth,
				callback: req.callback,
			})
		}
	}
	return requests, nil
}

// commit finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commit(req *request) (err error) {
	// Write the node content to the membatch
	s.membatch.batch[req.hash] = req.data
	delete(s.requests, req.hash)

	// Check all parents for completion
	for _, parent := range req.parents {
		parent.deps--
		if parent.deps == 0 {
			if err := s.commit(parent); err != nil {
				return err
			}
		}
	}
	return nil
}