chain_indexer.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"encoding/binary"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
)

// ChainIndexerBackend defines the methods needed to process chain segments in
// the background and write the segment results into the database. These can be
// used to create filter blooms or CHTs.
type ChainIndexerBackend interface {
	// Reset initiates the processing of a new chain segment, potentially terminating
	// any partially completed operations (in case of a reorg).
	Reset(section uint64)

	// Process crunches through the next header in the chain segment. The caller
	// will ensure a sequential order of headers.
	Process(header *types.Header)

	// Commit finalizes the section metadata and stores it into the database.
	Commit() error
}
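
// As a rough illustration of the interface above (a sketch only, not part of
// this file: the headerCountBackend name and its fields are made up), a
// backend can be as small as one that just counts the headers of each section:
//
//	type headerCountBackend struct {
//		section uint64 // section currently being processed
//		count   uint64 // headers seen since the last Reset
//	}
//
//	func (b *headerCountBackend) Reset(section uint64)         { b.section, b.count = section, 0 }
//	func (b *headerCountBackend) Process(header *types.Header) { b.count++ }
//	func (b *headerCountBackend) Commit() error                { return nil }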

// ChainIndexer does a post-processing job for equally sized sections of the
// canonical chain (like BloomBits and CHT structures). A ChainIndexer is
// connected to the blockchain through the event system by starting a
// ChainEventLoop in a goroutine.
//
// Further child ChainIndexers can be added which use the output of the parent
// section indexer. These child indexers receive new head notifications only
// after an entire section has been finished or in case of rollbacks that might
// affect already finished sections.
type ChainIndexer struct {
	chainDb  ethdb.Database      // Chain database to index the data from
	indexDb  ethdb.Database      // Prefixed table-view of the db to write index metadata into
	backend  ChainIndexerBackend // Background processor generating the index data content
	children []*ChainIndexer     // Child indexers to cascade chain updates to

	active uint32          // Flag whether the event loop was started
	update chan struct{}   // Notification channel that headers should be processed
	quit   chan chan error // Quit channel to tear down running goroutines

	sectionSize uint64 // Number of blocks in a single chain segment to process
	confirmsReq uint64 // Number of confirmations before processing a completed segment

	storedSections uint64 // Number of sections successfully indexed into the database
	knownSections  uint64 // Number of sections known to be complete (block wise)
	cascadedHead   uint64 // Block number of the last completed section cascaded to subindexers

	throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources

	log  log.Logger
	lock sync.RWMutex
}

// NewChainIndexer creates a new chain indexer to do background processing on
// chain segments of a given size after a certain number of confirmations have
// passed. The throttling parameter might be used to prevent database thrashing.
func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string) *ChainIndexer {
	c := &ChainIndexer{
		chainDb:     chainDb,
		indexDb:     indexDb,
		backend:     backend,
		update:      make(chan struct{}, 1),
		quit:        make(chan chan error),
		sectionSize: section,
		confirmsReq: confirm,
		throttling:  throttling,
		log:         log.New("type", kind),
	}
	// Initialize database dependent fields and start the updater
	c.loadValidSections()
	go c.updateLoop()

	return c
}
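
// A minimal wiring sketch, purely illustrative: chainDb, indexDb, childDb,
// parentBackend, childBackend and chain are placeholders rather than values
// defined in this file, and the 4096/256 section and confirmation counts are
// example parameters only. It assumes chain exposes a SubscribeChainEvent
// method matching the chainEventer signature expected by Start:
//
//	parent := NewChainIndexer(chainDb, indexDb, parentBackend, 4096, 256, 100*time.Millisecond, "parent")
//	child := NewChainIndexer(chainDb, childDb, childBackend, 4096, 256, 100*time.Millisecond, "child")
//	parent.AddChildIndexer(child) // child is driven by the parent's section updates
//	parent.Start(chain.CurrentHeader(), chain.SubscribeChainEvent)
//	defer parent.Close() // also closes the child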

// Start creates a goroutine to feed chain head events into the indexer for
// cascading background processing. Children do not need to be started, they
// are notified about new events by their parents.
func (c *ChainIndexer) Start(currentHeader *types.Header, chainEventer func(ch chan<- ChainEvent) event.Subscription) {
	go c.eventLoop(currentHeader, chainEventer)
}

// Close tears down all goroutines belonging to the indexer and returns any error
// that might have occurred internally.
func (c *ChainIndexer) Close() error {
	var errs []error

	// Tear down the primary update loop
	errc := make(chan error)
	c.quit <- errc
	if err := <-errc; err != nil {
		errs = append(errs, err)
	}
	// If needed, tear down the secondary event loop
	if atomic.LoadUint32(&c.active) != 0 {
		c.quit <- errc
		if err := <-errc; err != nil {
			errs = append(errs, err)
		}
	}
	// Close all children
	for _, child := range c.children {
		if err := child.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	// Return any failures
	switch {
	case len(errs) == 0:
		return nil

	case len(errs) == 1:
		return errs[0]

	default:
		return fmt.Errorf("%v", errs)
	}
}

// eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing
// queue.
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, chainEventer func(ch chan<- ChainEvent) event.Subscription) {
	// Mark the chain indexer as active, requiring an additional teardown
	atomic.StoreUint32(&c.active, 1)

	events := make(chan ChainEvent, 10)
	sub := chainEventer(events)
	defer sub.Unsubscribe()

	// Fire the initial new head event to start any outstanding processing
	c.newHead(currentHeader.Number.Uint64(), false)

	var (
		prevHeader = currentHeader
		prevHash   = currentHeader.Hash()
	)
	for {
		select {
		case errc := <-c.quit:
			// Chain indexer terminating, report no failure and abort
			errc <- nil
			return

		case ev, ok := <-events:
			// Received a new event, ensure it's not nil (closing) and update
			if !ok {
				errc := <-c.quit
				errc <- nil
				return
			}
			header := ev.Block.Header()
			if header.ParentHash != prevHash {
				// The new head does not extend the previous one: a reorg happened,
				// so roll back to the common ancestor before announcing the new head
				c.newHead(FindCommonAncestor(c.chainDb, prevHeader, header).Number.Uint64(), true)
			}
			c.newHead(header.Number.Uint64(), false)

			prevHeader, prevHash = header, header.Hash()
		}
	}
}

// newHead notifies the indexer about new chain heads and/or reorgs.
func (c *ChainIndexer) newHead(head uint64, reorg bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// If a reorg happened, invalidate all sections until that point
	if reorg {
		// Revert the known section number to the reorg point
		changed := head / c.sectionSize
		if changed < c.knownSections {
			c.knownSections = changed
		}
		// Revert the stored sections from the database to the reorg point
		if changed < c.storedSections {
			c.setValidSections(changed)
		}
		// Update the new head number to the finalized section end and notify children
		head = changed * c.sectionSize

		if head < c.cascadedHead {
			c.cascadedHead = head
			for _, child := range c.children {
				child.newHead(c.cascadedHead, true)
			}
		}
		return
	}
	// No reorg, calculate the number of newly known sections and update if high enough
	var sections uint64
	if head >= c.confirmsReq {
		sections = (head + 1 - c.confirmsReq) / c.sectionSize
		if sections > c.knownSections {
			c.knownSections = sections

			select {
			case c.update <- struct{}{}:
			default:
			}
		}
	}
}
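
// To make the confirmation arithmetic above concrete (the numbers are only an
// example, not values used by this file): with sectionSize = 4096 and
// confirmsReq = 256, a new head at block 8447 yields
// (8447 + 1 - 256) / 4096 = 2, i.e. sections 0 and 1 (blocks 0-8191) are both
// complete and buried deep enough to be processed, so knownSections becomes 2.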

// updateLoop is the main event loop of the indexer which pushes chain segments
// down into the processing backend.
func (c *ChainIndexer) updateLoop() {
	var (
		updating bool
		updated  time.Time
	)
	for {
		select {
		case errc := <-c.quit:
			// Chain indexer terminating, report no failure and abort
			errc <- nil
			return

		case <-c.update:
			// Section headers completed (or rolled back), update the index
			c.lock.Lock()
			if c.knownSections > c.storedSections {
				// Periodically print an upgrade log message to the user
				if time.Since(updated) > 8*time.Second {
					if c.knownSections > c.storedSections+1 {
						updating = true
						c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
					}
					updated = time.Now()
				}
				// Cache the current section count and head to allow unlocking the mutex
				section := c.storedSections
				var oldHead common.Hash
				if section > 0 {
					oldHead = c.sectionHead(section - 1)
				}
				// Process the newly defined section in the background
				c.lock.Unlock()
				newHead, err := c.processSection(section, oldHead)
				if err != nil {
					c.log.Error("Section processing failed", "error", err)
				}
				c.lock.Lock()

				// If processing succeeded and no reorgs occurred, mark the section completed
				if err == nil && oldHead == c.sectionHead(section-1) {
					c.setSectionHead(section, newHead)
					c.setValidSections(section + 1)
					if c.storedSections == c.knownSections && updating {
						updating = false
						c.log.Info("Finished upgrading chain index")
					}
					c.cascadedHead = c.storedSections*c.sectionSize - 1
					for _, child := range c.children {
						c.log.Trace("Cascading chain index update", "head", c.cascadedHead)
						child.newHead(c.cascadedHead, false)
					}
				} else {
					// If processing failed, don't retry until further notification
					c.log.Debug("Chain index processing failed", "section", section, "err", err)
					c.knownSections = c.storedSections
				}
			}
			// If there are still further sections to process, reschedule
			if c.knownSections > c.storedSections {
				time.AfterFunc(c.throttling, func() {
					select {
					case c.update <- struct{}{}:
					default:
					}
				})
			}
			c.lock.Unlock()
		}
	}
}

// processSection processes an entire section by calling backend functions while
// ensuring the continuity of the passed headers. Since the chain mutex is not
// held while processing, the continuity can be broken by a long reorg, in which
// case the function returns with an error.
func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (common.Hash, error) {
	c.log.Trace("Processing new chain section", "section", section)

	// Reset and partial processing
	c.backend.Reset(section)

	for number := section * c.sectionSize; number < (section+1)*c.sectionSize; number++ {
		hash := GetCanonicalHash(c.chainDb, number)
		if hash == (common.Hash{}) {
			return common.Hash{}, fmt.Errorf("canonical block #%d unknown", number)
		}
		header := GetHeader(c.chainDb, hash, number)
		if header == nil {
			return common.Hash{}, fmt.Errorf("block #%d [%x…] not found", number, hash[:4])
		} else if header.ParentHash != lastHead {
			return common.Hash{}, fmt.Errorf("chain reorged during section processing")
		}
		c.backend.Process(header)
		lastHead = header.Hash()
	}
	if err := c.backend.Commit(); err != nil {
		c.log.Error("Section commit failed", "error", err)
		return common.Hash{}, err
	}
	return lastHead, nil
}

// Sections returns the number of processed sections maintained by the indexer
// and also information about the last header indexed for potential canonical
// verifications.
func (c *ChainIndexer) Sections() (uint64, uint64, common.Hash) {
	c.lock.Lock()
	defer c.lock.Unlock()

	return c.storedSections, c.storedSections*c.sectionSize - 1, c.sectionHead(c.storedSections - 1)
}

// AddChildIndexer adds a child ChainIndexer that can use the output of this one
func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.children = append(c.children, indexer)

	// Cascade any pending updates to new children too
	if c.storedSections > 0 {
		indexer.newHead(c.storedSections*c.sectionSize-1, false)
	}
}
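
// For reference, the index database layout used by the helpers below is simple:
// the key "count" holds the number of valid sections as an 8-byte big-endian
// integer, and each key "shead" + <8-byte big-endian section number> holds the
// hash of that section's last block.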

// loadValidSections reads the number of valid sections from the index database
// and caches it into the local state.
func (c *ChainIndexer) loadValidSections() {
	data, _ := c.indexDb.Get([]byte("count"))
	if len(data) == 8 {
		c.storedSections = binary.BigEndian.Uint64(data[:])
	}
}

// setValidSections writes the number of valid sections to the index database
func (c *ChainIndexer) setValidSections(sections uint64) {
	// Set the current number of valid sections in the database
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], sections)
	c.indexDb.Put([]byte("count"), data[:])

	// Remove any reorged sections, caching the valid ones in the meantime
	for c.storedSections > sections {
		c.storedSections--
		c.removeSectionHead(c.storedSections)
	}
	c.storedSections = sections // needed if new > old
}

// sectionHead retrieves the last block hash of a processed section from the
// index database.
func (c *ChainIndexer) sectionHead(section uint64) common.Hash {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	hash, _ := c.indexDb.Get(append([]byte("shead"), data[:]...))
	if len(hash) == len(common.Hash{}) {
		return common.BytesToHash(hash)
	}
	return common.Hash{}
}

// setSectionHead writes the last block hash of a processed section to the index
// database.
func (c *ChainIndexer) setSectionHead(section uint64, hash common.Hash) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	c.indexDb.Put(append([]byte("shead"), data[:]...), hash.Bytes())
}

// removeSectionHead removes the reference to a processed section from the index
// database.
func (c *ChainIndexer) removeSectionHead(section uint64) {
	var data [8]byte
	binary.BigEndian.PutUint64(data[:], section)

	c.indexDb.Delete(append([]byte("shead"), data[:]...))
}