ethash.go 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597
  1. // Copyright 2017 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. // Package ethash implements the ethash proof-of-work consensus engine.
  17. package ethash
  18. import (
  19. "errors"
  20. "fmt"
  21. "math"
  22. "math/big"
  23. "math/rand"
  24. "os"
  25. "path/filepath"
  26. "reflect"
  27. "strconv"
  28. "sync"
  29. "time"
  30. "unsafe"
  31. mmap "github.com/edsrzf/mmap-go"
  32. "github.com/ethereum/go-ethereum/consensus"
  33. "github.com/ethereum/go-ethereum/log"
  34. "github.com/ethereum/go-ethereum/rpc"
  35. metrics "github.com/rcrowley/go-metrics"
  36. )
// ErrInvalidDumpMagic is returned when a memory mapped cache/dataset file
// does not start with the expected sanity-check magic words.
var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// maxUint256 is a big integer representing 2^256 (note: Exp ignores a
	// zero modulus, so this is 2^256, not 2^256-1).
	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash = New("", 3, 0, "", 1, 0)

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)
  48. // isLittleEndian returns whether the local system is running in little or big
  49. // endian byte order.
  50. func isLittleEndian() bool {
  51. n := uint32(0x01020304)
  52. return *(*byte)(unsafe.Pointer(&n)) == 0x04
  53. }
  54. // memoryMap tries to memory map a file of uint32s for read only access.
  55. func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
  56. file, err := os.OpenFile(path, os.O_RDONLY, 0644)
  57. if err != nil {
  58. return nil, nil, nil, err
  59. }
  60. mem, buffer, err := memoryMapFile(file, false)
  61. if err != nil {
  62. file.Close()
  63. return nil, nil, nil, err
  64. }
  65. for i, magic := range dumpMagic {
  66. if buffer[i] != magic {
  67. mem.Unmap()
  68. file.Close()
  69. return nil, nil, nil, ErrInvalidDumpMagic
  70. }
  71. }
  72. return file, mem, buffer[len(dumpMagic):], err
  73. }
// memoryMapFile tries to memory map an already opened file descriptor.
// When write is true the mapping is created read-write, otherwise read-only.
// It returns both the raw mapping (needed later for Unmap) and the same
// memory reinterpreted as a []uint32 without copying.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	// Reinterpret the byte slice header as a uint32 slice header: length and
	// capacity shrink by 4 (bytes per uint32) while the data pointer keeps
	// aliasing the mapping, so the two returned views share memory.
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}
  91. // memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
  92. // access, fill it with the data from a generator and then move it into the final
  93. // path requested.
  94. func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
  95. // Ensure the data folder exists
  96. if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
  97. return nil, nil, nil, err
  98. }
  99. // Create a huge temporary empty file to fill with data
  100. temp := path + "." + strconv.Itoa(rand.Int())
  101. dump, err := os.Create(temp)
  102. if err != nil {
  103. return nil, nil, nil, err
  104. }
  105. if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
  106. return nil, nil, nil, err
  107. }
  108. // Memory map the file for writing and fill it with the generator
  109. mem, buffer, err := memoryMapFile(dump, true)
  110. if err != nil {
  111. dump.Close()
  112. return nil, nil, nil, err
  113. }
  114. copy(buffer, dumpMagic)
  115. data := buffer[len(dumpMagic):]
  116. generator(data)
  117. if err := mem.Unmap(); err != nil {
  118. return nil, nil, nil, err
  119. }
  120. if err := dump.Close(); err != nil {
  121. return nil, nil, nil, err
  122. }
  123. if err := os.Rename(temp, path); err != nil {
  124. return nil, nil, nil, err
  125. }
  126. return memoryMap(path)
  127. }
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64 // Epoch for which this cache is relevant

	dump *os.File  // File descriptor of the memory mapped cache
	mmap mmap.MMap // Memory map itself to unmap before releasing

	cache []uint32   // The actual cache data content (may be memory mapped)
	used  time.Time  // Timestamp of the last use for smarter eviction
	once  sync.Once  // Ensures the cache is generated only once
	lock  sync.Mutex // Ensures thread safety for updating the usage time
}
// generate ensures that the cache content is generated before use.
// dir is the folder for mmap-backed caches ("" keeps everything in memory),
// limit bounds how many historical cache files are kept on disk, and test
// requests a tiny cache suitable only for unit tests.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		// If we have a testing cache, generate and return
		if test {
			c.cache = make([]uint32, 1024/4)
			generateCache(c.cache, c.epoch, seedHash(c.epoch*epochLength+1))
			return
		}
		// If we don't store anything on disk, generate and return
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)

		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		// Big endian hosts get a distinct ".be" suffix since the raw dump
		// layout is endianness dependent.
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			// Mapping failed, fall back to a plain in-memory cache
			// (size is in bytes, /4 yields the uint32 element count)
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}
  185. // release closes any file handlers and memory maps open.
  186. func (c *cache) release() {
  187. if c.mmap != nil {
  188. c.mmap.Unmap()
  189. c.mmap = nil
  190. }
  191. if c.dump != nil {
  192. c.dump.Close()
  193. c.dump = nil
  194. }
  195. }
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch uint64 // Epoch for which this dataset is relevant

	dump *os.File  // File descriptor of the memory mapped dataset
	mmap mmap.MMap // Memory map itself to unmap before releasing

	dataset []uint32   // The actual dataset content (may be memory mapped)
	used    time.Time  // Timestamp of the last use for smarter eviction
	once    sync.Once  // Ensures the dataset is generated only once
	lock    sync.Mutex // Ensures thread safety for updating the usage time
}
  206. // generate ensures that the dataset content is generated before use.
  207. func (d *dataset) generate(dir string, limit int, test bool) {
  208. d.once.Do(func() {
  209. // If we have a testing dataset, generate and return
  210. if test {
  211. cache := make([]uint32, 1024/4)
  212. generateCache(cache, d.epoch, seedHash(d.epoch*epochLength+1))
  213. d.dataset = make([]uint32, 32*1024/4)
  214. generateDataset(d.dataset, d.epoch, cache)
  215. return
  216. }
  217. // If we don't store anything on disk, generate and return
  218. csize := cacheSize(d.epoch*epochLength + 1)
  219. dsize := datasetSize(d.epoch*epochLength + 1)
  220. seed := seedHash(d.epoch*epochLength + 1)
  221. if dir == "" {
  222. cache := make([]uint32, csize/4)
  223. generateCache(cache, d.epoch, seed)
  224. d.dataset = make([]uint32, dsize/4)
  225. generateDataset(d.dataset, d.epoch, cache)
  226. }
  227. // Disk storage is needed, this will get fancy
  228. var endian string
  229. if !isLittleEndian() {
  230. endian = ".be"
  231. }
  232. path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
  233. logger := log.New("epoch", d.epoch)
  234. // Try to load the file from disk and memory map it
  235. var err error
  236. d.dump, d.mmap, d.dataset, err = memoryMap(path)
  237. if err == nil {
  238. logger.Debug("Loaded old ethash dataset from disk")
  239. return
  240. }
  241. logger.Debug("Failed to load old ethash dataset", "err", err)
  242. // No previous dataset available, create a new dataset file to fill
  243. cache := make([]uint32, csize/4)
  244. generateCache(cache, d.epoch, seed)
  245. d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
  246. if err != nil {
  247. logger.Error("Failed to generate mapped ethash dataset", "err", err)
  248. d.dataset = make([]uint32, dsize/2)
  249. generateDataset(d.dataset, d.epoch, cache)
  250. }
  251. // Iterate over all previous instances and delete old ones
  252. for ep := int(d.epoch) - limit; ep >= 0; ep-- {
  253. seed := seedHash(uint64(ep)*epochLength + 1)
  254. path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
  255. os.Remove(path)
  256. }
  257. })
  258. }
  259. // release closes any file handlers and memory maps open.
  260. func (d *dataset) release() {
  261. if d.mmap != nil {
  262. d.mmap.Unmap()
  263. d.mmap = nil
  264. }
  265. if d.dump != nil {
  266. d.dump.Close()
  267. d.dump = nil
  268. }
  269. }
  270. // MakeCache generates a new ethash cache and optionally stores it to disk.
  271. func MakeCache(block uint64, dir string) {
  272. c := cache{epoch: block/epochLength + 1}
  273. c.generate(dir, math.MaxInt32, false)
  274. c.release()
  275. }
  276. // MakeDataset generates a new ethash dataset and optionally stores it to disk.
  277. func MakeDataset(block uint64, dir string) {
  278. d := dataset{epoch: block/epochLength + 1}
  279. d.generate(dir, math.MaxInt32, false)
  280. d.release()
  281. }
// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	cachedir     string // Data directory to store the verification caches
	cachesinmem  int    // Number of caches to keep in memory
	cachesondisk int    // Number of caches to keep on disk
	dagdir       string // Data directory to store full mining datasets
	dagsinmem    int    // Number of mining datasets to keep in memory
	dagsondisk   int    // Number of mining datasets to keep on disk

	caches   map[uint64]*cache   // In memory caches to avoid regenerating too often
	fcache   *cache              // Pre-generated cache for the estimated future epoch
	datasets map[uint64]*dataset // In memory datasets to avoid regenerating too often
	fdataset *dataset            // Pre-generated dataset for the estimated future epoch

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// The fields below are hooks for testing
	tester    bool          // Flag whether to use a smaller test dataset
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeMode  bool          // Flag whether to disable PoW checking
	fakeFull  bool          // Flag whether to disable all consensus rules
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
}
  309. // New creates a full sized ethash PoW scheme.
  310. func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) *Ethash {
  311. if cachesinmem <= 0 {
  312. log.Warn("One ethash cache must alwast be in memory", "requested", cachesinmem)
  313. cachesinmem = 1
  314. }
  315. if cachedir != "" && cachesondisk > 0 {
  316. log.Info("Disk storage enabled for ethash caches", "dir", cachedir, "count", cachesondisk)
  317. }
  318. if dagdir != "" && dagsondisk > 0 {
  319. log.Info("Disk storage enabled for ethash DAGs", "dir", dagdir, "count", dagsondisk)
  320. }
  321. return &Ethash{
  322. cachedir: cachedir,
  323. cachesinmem: cachesinmem,
  324. cachesondisk: cachesondisk,
  325. dagdir: dagdir,
  326. dagsinmem: dagsinmem,
  327. dagsondisk: dagsondisk,
  328. caches: make(map[uint64]*cache),
  329. datasets: make(map[uint64]*dataset),
  330. update: make(chan struct{}),
  331. hashrate: metrics.NewMeter(),
  332. }
  333. }
  334. // NewTester creates a small sized ethash PoW scheme useful only for testing
  335. // purposes.
  336. func NewTester() *Ethash {
  337. return &Ethash{
  338. cachesinmem: 1,
  339. caches: make(map[uint64]*cache),
  340. datasets: make(map[uint64]*dataset),
  341. tester: true,
  342. update: make(chan struct{}),
  343. hashrate: metrics.NewMeter(),
  344. }
  345. }
  346. // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts
  347. // all blocks' seal as valid, though they still have to conform to the Ethereum
  348. // consensus rules.
  349. func NewFaker() *Ethash {
  350. return &Ethash{fakeMode: true}
  351. }
  352. // NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that
  353. // accepts all blocks as valid apart from the single one specified, though they
  354. // still have to conform to the Ethereum consensus rules.
  355. func NewFakeFailer(fail uint64) *Ethash {
  356. return &Ethash{fakeMode: true, fakeFail: fail}
  357. }
  358. // NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that
  359. // accepts all blocks as valid, but delays verifications by some time, though
  360. // they still have to conform to the Ethereum consensus rules.
  361. func NewFakeDelayer(delay time.Duration) *Ethash {
  362. return &Ethash{fakeMode: true, fakeDelay: delay}
  363. }
  364. // NewFullFaker creates a ethash consensus engine with a full fake scheme that
  365. // accepts all blocks as valid, without checking any consensus rules whatsoever.
  366. func NewFullFaker() *Ethash {
  367. return &Ethash{fakeMode: true, fakeFull: true}
  368. }
  369. // NewShared creates a full sized ethash PoW shared between all requesters running
  370. // in the same process.
  371. func NewShared() *Ethash {
  372. return &Ethash{shared: sharedEthash}
  373. }
// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) []uint32 {
	epoch := block / epochLength

	// If we have a PoW for that epoch, use that
	ethash.lock.Lock()

	current, future := ethash.caches[epoch], (*cache)(nil)
	if current == nil {
		// No in-memory cache, evict the oldest if the cache limit was reached
		for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.cachesinmem {
			// Find the least recently used cache to evict
			var evict *cache
			for _, cache := range ethash.caches {
				if evict == nil || evict.used.After(cache.used) {
					evict = cache
				}
			}
			delete(ethash.caches, evict.epoch)
			evict.release()

			log.Trace("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
		}
		// If we have the new cache pre-generated, use that, otherwise create a new one
		if ethash.fcache != nil && ethash.fcache.epoch == epoch {
			log.Trace("Using pre-generated cache", "epoch", epoch)
			current, ethash.fcache = ethash.fcache, nil
		} else {
			log.Trace("Requiring new ethash cache", "epoch", epoch)
			current = &cache{epoch: epoch}
		}
		ethash.caches[epoch] = current

		// If we just used up the future cache, or need a refresh, regenerate
		if ethash.fcache == nil || ethash.fcache.epoch <= epoch {
			if ethash.fcache != nil {
				ethash.fcache.release()
			}
			log.Trace("Requiring new future ethash cache", "epoch", epoch+1)
			future = &cache{epoch: epoch + 1}
			ethash.fcache = future
		}
	}
	current.used = time.Now()
	ethash.lock.Unlock()

	// Wait for generation finish, bump the timestamp and finalize the cache
	// (generate is guarded by sync.Once, so concurrent callers block here
	// until the first one completes)
	current.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)

	current.lock.Lock()
	current.used = time.Now()
	current.lock.Unlock()

	// If we exhausted the future cache, now's a good time to regenerate it
	// asynchronously so the current verification isn't delayed
	if future != nil {
		go future.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
	}
	return current.cache
}
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) dataset(block uint64) []uint32 {
	epoch := block / epochLength

	// If we have a PoW for that epoch, use that
	ethash.lock.Lock()

	current, future := ethash.datasets[epoch], (*dataset)(nil)
	if current == nil {
		// No in-memory dataset, evict the oldest if the dataset limit was reached
		for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.dagsinmem {
			// Find the least recently used dataset to evict
			var evict *dataset
			for _, dataset := range ethash.datasets {
				if evict == nil || evict.used.After(dataset.used) {
					evict = dataset
				}
			}
			delete(ethash.datasets, evict.epoch)
			evict.release()

			log.Trace("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
		}
		// If we have the new cache pre-generated, use that, otherwise create a new one
		if ethash.fdataset != nil && ethash.fdataset.epoch == epoch {
			log.Trace("Using pre-generated dataset", "epoch", epoch)
			// Unlike the cache path, a fresh struct is made for the same
			// epoch so the content is reloaded from disk rather than the
			// pre-generated instance being adopted directly
			current = &dataset{epoch: ethash.fdataset.epoch} // Reload from disk
			ethash.fdataset = nil
		} else {
			log.Trace("Requiring new ethash dataset", "epoch", epoch)
			current = &dataset{epoch: epoch}
		}
		ethash.datasets[epoch] = current

		// If we just used up the future dataset, or need a refresh, regenerate
		if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
			if ethash.fdataset != nil {
				ethash.fdataset.release()
			}
			log.Trace("Requiring new future ethash dataset", "epoch", epoch+1)
			future = &dataset{epoch: epoch + 1}
			ethash.fdataset = future
		}
	}
	current.used = time.Now()
	ethash.lock.Unlock()

	// Wait for generation finish, bump the timestamp and finalize the cache
	// (generate is guarded by sync.Once, so concurrent callers block here
	// until the first one completes)
	current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)

	current.lock.Lock()
	current.used = time.Now()
	current.lock.Unlock()

	// If we exhausted the future dataset, now's a good time to regenerate it
	// asynchronously so the current mining round isn't delayed
	if future != nil {
		go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
	}
	return current.dataset
}
  481. // Threads returns the number of mining threads currently enabled. This doesn't
  482. // necessarily mean that mining is running!
  483. func (ethash *Ethash) Threads() int {
  484. ethash.lock.Lock()
  485. defer ethash.lock.Unlock()
  486. return ethash.threads
  487. }
  488. // SetThreads updates the number of mining threads currently enabled. Calling
  489. // this method does not start mining, only sets the thread count. If zero is
  490. // specified, the miner will use all cores of the machine. Setting a thread
  491. // count below zero is allowed and will cause the miner to idle, without any
  492. // work being done.
  493. func (ethash *Ethash) SetThreads(threads int) {
  494. ethash.lock.Lock()
  495. defer ethash.lock.Unlock()
  496. // If we're running a shared PoW, set the thread count on that instead
  497. if ethash.shared != nil {
  498. ethash.shared.SetThreads(threads)
  499. return
  500. }
  501. // Update the threads and ping any running seal to pull in any changes
  502. ethash.threads = threads
  503. select {
  504. case ethash.update <- struct{}{}:
  505. default:
  506. }
  507. }
  508. // Hashrate implements PoW, returning the measured rate of the search invocations
  509. // per second over the last minute.
  510. func (ethash *Ethash) Hashrate() float64 {
  511. return ethash.hashrate.Rate1()
  512. }
  513. // APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
  514. // that is empty.
  515. func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
  516. return nil
  517. }
// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64) []byte {
	// Thin exported wrapper around the internal seedHash implementation.
	return seedHash(block)
}