ethash.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package ethash implements the ethash proof-of-work consensus engine.
package ethash

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"sync"
	"time"
	"unsafe"

	mmap "github.com/edsrzf/mmap-go"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/hashicorp/golang-lru/simplelru"
)

var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// two256 is a big integer representing 2^256
	two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil)

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}
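// Note on the probe above: the constant 0x01020304 is laid out in memory byte
// by byte, so reading its first byte through the unsafe pointer yields 0x04 on
// a little-endian machine and 0x01 on a big-endian one. The result is only
// used to pick the ".be" suffix for the on-disk dump file names further below.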
// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	return file, mem, buffer[len(dumpMagic):], err
}

// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}
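// The reinterpretation above takes the []byte returned by mmap.Map and
// presents it as a []uint32 without copying: the slice header keeps the same
// backing pointer while Len and Cap are divided by four, the size of a uint32.
// The caller must keep the mmap.MMap alive for as long as the []uint32 is in
// use, which is why both values are stored side by side in the cache and
// dataset structs below.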
// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
	// Ensure the data folder exists
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, nil, nil, err
	}
	// Create a huge temporary empty file to fill with data
	temp := path + "." + strconv.Itoa(rand.Int())

	dump, err := os.Create(temp)
	if err != nil {
		return nil, nil, nil, err
	}
	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
		return nil, nil, nil, err
	}
	// Memory map the file for writing and fill it with the generator
	mem, buffer, err := memoryMapFile(dump, true)
	if err != nil {
		dump.Close()
		return nil, nil, nil, err
	}
	copy(buffer, dumpMagic)

	data := buffer[len(dumpMagic):]
	generator(data)

	if err := mem.Unmap(); err != nil {
		return nil, nil, nil, err
	}
	if err := dump.Close(); err != nil {
		return nil, nil, nil, err
	}
	if err := os.Rename(temp, path); err != nil {
		return nil, nil, nil, err
	}
	return memoryMap(path)
}
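// Generation deliberately happens in a randomly named temporary file that is
// only renamed to the final path once the dump has been fully written, unmapped
// and closed. A crash mid-generation therefore leaves at most a stray ".<rand>"
// temp file behind, never a truncated dump that memoryMap would later accept.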
// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
	what string
	new  func(epoch uint64) interface{}
	mu   sync.Mutex
	// Items are kept in an LRU cache, but there is a special case:
	// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
	cache      *simplelru.LRU
	future     uint64
	futureItem interface{}
}

// newlru creates a new least-recently-used cache for either the verification
// caches or the mining datasets.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
	if maxItems <= 0 {
		maxItems = 1
	}
	cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
		log.Trace("Evicted ethash "+what, "epoch", key)
	})
	return &lru{what: what, new: new, cache: cache}
}

// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	// Get or create the item for the requested epoch.
	item, ok := lru.cache.Get(epoch)
	if !ok {
		if lru.future > 0 && lru.future == epoch {
			item = lru.futureItem
		} else {
			log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
			item = lru.new(epoch)
		}
		lru.cache.Add(epoch, item)
	}
	// Update the 'future item' if epoch is larger than previously seen.
	if epoch < maxEpoch-1 && lru.future < epoch+1 {
		log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
		future = lru.new(epoch + 1)
		lru.future = epoch + 1
		lru.futureItem = future
	}
	return item, future
}
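// As an example of the 'future item' behaviour: the first call to get(5)
// returns the freshly created item for epoch 5 and, as the second return
// value, a brand new item for epoch 6 which the caller can start generating in
// the background. A later call to get(6) then finds that pre-built item
// instead of paying the creation cost on the hot path.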
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}

// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}
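// On-disk caches are named "cache-R<revision>-<first 8 bytes of the epoch seed
// in hex>", with an extra ".be" suffix on big-endian machines since the raw
// uint32 dump is not byte-order independent. The cleanup loop at the end keeps
// roughly the most recent `limit` epochs on disk and removes anything older.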
// finalizer unmaps the memory and closes the file.
func (c *cache) finalizer() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}

// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch   uint64    // Epoch for which this dataset is relevant
	dump    *os.File  // File descriptor of the memory mapped dataset
	mmap    mmap.MMap // Memory map itself to unmap before releasing
	dataset []uint32  // The actual dataset content (may be memory mapped)
	once    sync.Once // Ensures the dataset is generated only once
}

// newDataset creates a new ethash mining dataset and returns it as a plain Go
// interface to be usable in an LRU cache.
func newDataset(epoch uint64) interface{} {
	return &dataset{epoch: epoch}
}

// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
	d.once.Do(func() {
		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)
		if test {
			csize = 1024
			dsize = 32 * 1024
		}
		// If we don't store anything on disk, generate and return
		if dir == "" {
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// dataset becomes unused.
		runtime.SetFinalizer(d, (*dataset).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path)
		if err == nil {
			logger.Debug("Loaded old ethash dataset from disk")
			return
		}
		logger.Debug("Failed to load old ethash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash dataset", "err", err)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer closes any open file handles and memory maps.
func (d *dataset) finalizer() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.dump.Close()
		d.mmap, d.dump = nil, nil
	}
}

// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
	c := cache{epoch: block / epochLength}
	c.generate(dir, math.MaxInt32, false)
}

// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
	d := dataset{epoch: block / epochLength}
	d.generate(dir, math.MaxInt32, false)
}
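// Both helpers are thin wrappers intended for ahead-of-time generation, e.g. a
// command-line tool pre-building the DAG for an upcoming epoch. A hypothetical
// caller (dagDir is illustrative only) might look like:
//
//	// Pre-generate the mining DAG covering block 5,000,000 under dagDir.
//	MakeDataset(5000000, dagDir)
//
// Passing an empty dir keeps the result purely in memory and then discards it,
// which is rarely what a pre-generation tool wants.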
// Mode defines the type and amount of PoW verification an ethash engine makes.
type Mode uint

const (
	ModeNormal Mode = iota
	ModeShared
	ModeTest
	ModeFake
	ModeFullFake
)

// Config are the configuration parameters of the ethash.
type Config struct {
	CacheDir       string
	CachesInMem    int
	CachesOnDisk   int
	DatasetDir     string
	DatasetsInMem  int
	DatasetsOnDisk int
	PowMode        Mode
}

// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
	nonce     types.BlockNonce
	mixDigest common.Hash
	hash      common.Hash
	errc      chan error
}

// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
	id   common.Hash
	ping time.Time
	rate uint64
	done chan struct{}
}

// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
	errc chan error
	res  chan [3]string
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// Remote sealer related fields
	workCh       chan *types.Block // Notification channel to push new work to remote sealer
	resultCh     chan *types.Block // Channel used by mining threads to return result
	fetchWorkCh  chan *sealWork    // Channel used for remote sealer to fetch mining work
	submitWorkCh chan *mineResult  // Channel used for remote sealer to submit their mining result
	fetchRateCh  chan chan uint64  // Channel used to gather submitted hash rate for local or remote sealer
	submitRateCh chan *hashrate    // Channel used for remote sealer to submit their mining hashrate

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock      sync.Mutex      // Ensures thread safety for the in-memory caches and mining fields
	closeOnce sync.Once       // Ensures exit channel will not be closed twice.
	exitCh    chan chan error // Notification channel to exiting backend threads
}

// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string) *Ethash {
	if config.CachesInMem <= 0 {
		log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
	}
	if config.CacheDir != "" && config.CachesOnDisk > 0 {
		log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
	}
	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
		log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
	}
	ethash := &Ethash{
		config:       config,
		caches:       newlru("cache", config.CachesInMem, newCache),
		datasets:     newlru("dataset", config.DatasetsInMem, newDataset),
		update:       make(chan struct{}),
		hashrate:     metrics.NewMeter(),
		workCh:       make(chan *types.Block),
		resultCh:     make(chan *types.Block),
		fetchWorkCh:  make(chan *sealWork),
		submitWorkCh: make(chan *mineResult),
		fetchRateCh:  make(chan chan uint64),
		submitRateCh: make(chan *hashrate),
		exitCh:       make(chan chan error),
	}
	go ethash.remote(notify)
	return ethash
}
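// A caller embedding the engine in a full node might construct it roughly as
// follows; the directory names and counts are illustrative only:
//
//	engine := New(Config{
//		CacheDir:       "ethash",  // verification caches on disk
//		CachesInMem:    2,         // keep two epochs of caches in memory
//		CachesOnDisk:   3,         // and three on disk
//		DatasetDir:     ".ethash", // full DAGs on disk
//		DatasetsInMem:  1,         // one DAG in memory (miners only)
//		DatasetsOnDisk: 2,         // and two on disk
//		PowMode:        ModeNormal,
//	}, nil)
//	defer engine.Close()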
// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester(notify []string) *Ethash {
	ethash := &Ethash{
		config:       Config{PowMode: ModeTest},
		caches:       newlru("cache", 1, newCache),
		datasets:     newlru("dataset", 1, newDataset),
		update:       make(chan struct{}),
		hashrate:     metrics.NewMeter(),
		workCh:       make(chan *types.Block),
		resultCh:     make(chan *types.Block),
		fetchWorkCh:  make(chan *sealWork),
		submitWorkCh: make(chan *mineResult),
		fetchRateCh:  make(chan chan uint64),
		submitRateCh: make(chan *hashrate),
		exitCh:       make(chan chan error),
	}
	go ethash.remote(notify)
	return ethash
}

// NewFaker creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks' seal as valid, though they still have to conform to the
// Ethereum consensus rules.
func NewFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
		},
	}
}

// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the Ethereum consensus rules.
func NewFakeFailer(fail uint64) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
		},
		fakeFail: fail,
	}
}

// NewFakeDelayer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the Ethereum consensus rules.
func NewFakeDelayer(delay time.Duration) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
		},
		fakeDelay: delay,
	}
}

// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFullFake,
		},
	}
}

// NewShared creates a full sized ethash PoW shared between all requesters running
// in the same process.
func NewShared() *Ethash {
	return &Ethash{shared: sharedEthash}
}

// Close closes the exit channel to notify all backend threads exiting.
func (ethash *Ethash) Close() error {
	var err error
	ethash.closeOnce.Do(func() {
		// Short circuit if the exit channel is not allocated.
		if ethash.exitCh == nil {
			return
		}
		errc := make(chan error)
		ethash.exitCh <- errc
		err = <-errc
		close(ethash.exitCh)
	})
	return err
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) *cache {
	epoch := block / epochLength
	currentI, futureI := ethash.caches.get(epoch)
	current := currentI.(*cache)

	// Wait for generation to finish.
	current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)

	// If we need a new future cache, now's a good time to regenerate it.
	if futureI != nil {
		future := futureI.(*cache)
		go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
	}
	return current
}

// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) dataset(block uint64) *dataset {
	epoch := block / epochLength
	currentI, futureI := ethash.datasets.get(epoch)
	current := currentI.(*dataset)

	// Wait for generation to finish.
	current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)

	// If we need a new future dataset, now's a good time to regenerate it.
	if futureI != nil {
		future := futureI.(*dataset)
		go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
	}
	return current
}
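// In both accessors the current epoch's item is generated synchronously (the
// caller needs it right now to verify or seal a block), while the next epoch's
// item returned by lru.get is generated in a background goroutine. Because
// generate is guarded by sync.Once, a later call that crosses the epoch
// boundary either finds the work already done or blocks until the in-flight
// generation completes, rather than generating the same data twice.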
// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	return ethash.threads
}

// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if ethash.shared != nil {
		ethash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	ethash.threads = threads
	select {
	case ethash.update <- struct{}{}:
	default:
	}
}
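// The select with an empty default makes the notification non-blocking: if a
// seal loop is currently listening on the update channel it picks up the new
// thread count immediately, otherwise SetThreads returns without waiting and
// the next seal invocation simply reads the updated value under the lock.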
// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
// Note the returned hashrate includes the local hashrate, but also the total
// hashrate of all remote miners.
func (ethash *Ethash) Hashrate() float64 {
	// Short circuit if we are not running in normal or test mode; engines
	// created for the other modes do not run the remote sealer loop, so there
	// is no remote hashrate to collect.
	if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
		return ethash.hashrate.Rate1()
	}
	var res = make(chan uint64, 1)

	select {
	case ethash.fetchRateCh <- res:
	case <-ethash.exitCh:
		// Return local hashrate only if ethash is stopped.
		return ethash.hashrate.Rate1()
	}
	// Gather total submitted hash rate of remote sealers.
	return ethash.hashrate.Rate1() + float64(<-res)
}

// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
	// In order to ensure backward compatibility, we expose ethash RPC APIs
	// to both eth and ethash namespaces.
	return []rpc.API{
		{
			Namespace: "eth",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
		{
			Namespace: "ethash",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
	}
}

// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64) []byte {
	return seedHash(block)
}