ethash.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package ethash implements the ethash proof-of-work consensus engine.
package ethash

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/edsrzf/mmap-go"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/hashicorp/golang-lru/simplelru"
)

// ErrInvalidDumpMagic is returned when a memory mapped cache or dataset file
// does not start with the expected dumpMagic prefix.
var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// two256 is a big integer representing 2^256
	two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash *Ethash

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

func init() {
	sharedConfig := Config{
		PowMode:       ModeNormal,
		CachesInMem:   3,
		DatasetsInMem: 1,
	}
	sharedEthash = New(sharedConfig, nil, false)
}

// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	if lock {
		if err := mem.Lock(); err != nil {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, err
		}
	}
	return file, mem, buffer[len(dumpMagic):], err
}

// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}

// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
	// Ensure the data folder exists
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, nil, nil, err
	}
	// Create a huge temporary empty file to fill with data
	temp := path + "." + strconv.Itoa(rand.Int())

	dump, err := os.Create(temp)
	if err != nil {
		return nil, nil, nil, err
	}
	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
		return nil, nil, nil, err
	}
	// Memory map the file for writing and fill it with the generator
	mem, buffer, err := memoryMapFile(dump, true)
	if err != nil {
		dump.Close()
		return nil, nil, nil, err
	}
	copy(buffer, dumpMagic)

	data := buffer[len(dumpMagic):]
	generator(data)

	if err := mem.Unmap(); err != nil {
		return nil, nil, nil, err
	}
	if err := dump.Close(); err != nil {
		return nil, nil, nil, err
	}
	if err := os.Rename(temp, path); err != nil {
		return nil, nil, nil, err
	}
	return memoryMap(path, lock)
}

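// A minimal usage sketch of the generate-then-rename flow above (illustrative
// only; the "example.cache" path and 1024-byte size are made-up values): the
// generator is handed the uint32 slice that follows the dumpMagic prefix, and
// the file only moves to its final path after the data is flushed and unmapped.
//
//	dump, mem, data, err := memoryMapAndGenerate("example.cache", 1024, false, func(buf []uint32) {
//		for i := range buf {
//			buf[i] = uint32(i) // fill with deterministic content
//		}
//	})
//	if err == nil {
//		_ = data // read-only, memory mapped view of the generated content
//		mem.Unmap()
//		dump.Close()
//	}
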
// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
	what string
	new  func(epoch uint64) interface{}
	mu   sync.Mutex
	// Items are kept in a LRU cache, but there is a special case:
	// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
	cache      *simplelru.LRU
	future     uint64
	futureItem interface{}
}

// newlru creates a new least-recently-used cache for either the verification caches
// or the mining datasets.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
	if maxItems <= 0 {
		maxItems = 1
	}
	cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
		log.Trace("Evicted ethash "+what, "epoch", key)
	})
	return &lru{what: what, new: new, cache: cache}
}

// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	// Get or create the item for the requested epoch.
	item, ok := lru.cache.Get(epoch)
	if !ok {
		if lru.future > 0 && lru.future == epoch {
			item = lru.futureItem
		} else {
			log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
			item = lru.new(epoch)
		}
		lru.cache.Add(epoch, item)
	}
	// Update the 'future item' if epoch is larger than previously seen.
	if epoch < maxEpoch-1 && lru.future < epoch+1 {
		log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
		future = lru.new(epoch + 1)
		lru.future = epoch + 1
		lru.futureItem = future
	}
	return item, future
}

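// Illustrative sketch of the get semantics above (not part of the upstream code;
// epoch 42 is an arbitrary value): asking for an epoch returns that epoch's item
// and, when the epoch is the highest seen so far, also a freshly created item
// for epoch+1 that callers typically generate on a background goroutine.
//
//	caches := newlru("cache", 3, newCache)
//	current, future := caches.get(42) // current holds the epoch-42 item
//	if future != nil {
//		// future holds the epoch-43 item, pre-created as the 'future item'
//	}
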
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}

// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, lock bool, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path, lock)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer unmaps the memory and closes the file.
func (c *cache) finalizer() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}

// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch   uint64    // Epoch for which this dataset is relevant
	dump    *os.File  // File descriptor of the memory mapped dataset
	mmap    mmap.MMap // Memory map itself to unmap before releasing
	dataset []uint32  // The actual dataset content (may be memory mapped)
	once    sync.Once // Ensures the dataset is generated only once
	done    uint32    // Atomic flag to determine generation status
}

// newDataset creates a new ethash mining dataset and returns it as a plain Go
// interface to be usable in an LRU cache.
func newDataset(epoch uint64) interface{} {
	return &dataset{epoch: epoch}
}

// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
	d.once.Do(func() {
		// Mark the dataset generated after we're done. This is needed for remote
		// miners to know whether the DAG generation has finished.
		defer atomic.StoreUint32(&d.done, 1)

		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)
		if test {
			csize = 1024
			dsize = 32 * 1024
		}
		// If we don't store anything on disk, generate and return
		if dir == "" {
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// dataset becomes unused.
		runtime.SetFinalizer(d, (*dataset).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path, lock)
		if err == nil {
			logger.Debug("Loaded old ethash dataset from disk")
			return
		}
		logger.Debug("Failed to load old ethash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash dataset", "err", err)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// generated returns whether this particular dataset finished generating already
// or not (it may not have been started at all). This is useful for remote miners
// to default to verification caches instead of blocking on DAG generations.
func (d *dataset) generated() bool {
	return atomic.LoadUint32(&d.done) == 1
}

// finalizer closes any file handlers and memory maps open.
func (d *dataset) finalizer() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.dump.Close()
		d.mmap, d.dump = nil, nil
	}
}

// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
	c := cache{epoch: block / epochLength}
	c.generate(dir, math.MaxInt32, false, false)
}

// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
	d := dataset{epoch: block / epochLength}
	d.generate(dir, math.MaxInt32, false, false)
}

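// Usage sketch for the two helpers above (illustrative only; the block number
// and directory are arbitrary): both derive the epoch from the block number and
// persist the generated data under dir, which lets command line tooling
// pre-build verification caches and mining DAGs ahead of time.
//
//	MakeCache(12000000, "/tmp/ethash")   // verification cache for block 12M's epoch
//	MakeDataset(12000000, "/tmp/ethash") // full mining DAG for the same epoch
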
// Mode defines the type and amount of PoW verification an ethash engine makes.
type Mode uint

const (
	ModeNormal Mode = iota
	ModeShared
	ModeTest
	ModeFake
	ModeFullFake
)

// Config are the configuration parameters of the ethash.
type Config struct {
	CacheDir         string
	CachesInMem      int
	CachesOnDisk     int
	CachesLockMmap   bool
	DatasetDir       string
	DatasetsInMem    int
	DatasetsOnDisk   int
	DatasetsLockMmap bool
	PowMode          Mode

	// When set, notifications sent by the remote sealer will
	// be block header JSON objects instead of work package arrays.
	NotifyFull bool

	Log log.Logger `toml:"-"`
}

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate
	remote   *remoteSealer

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock      sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
	closeOnce sync.Once  // Ensures exit channel will not be closed twice.
}

// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string, noverify bool) *Ethash {
	if config.Log == nil {
		config.Log = log.Root()
	}
	if config.CachesInMem <= 0 {
		config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
	}
	if config.CacheDir != "" && config.CachesOnDisk > 0 {
		config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
	}
	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
		config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
	}
	ethash := &Ethash{
		config:   config,
		caches:   newlru("cache", config.CachesInMem, newCache),
		datasets: newlru("dataset", config.DatasetsInMem, newDataset),
		update:   make(chan struct{}),
		hashrate: metrics.NewMeterForced(),
	}
	if config.PowMode == ModeShared {
		ethash.shared = sharedEthash
	}
	ethash.remote = startRemoteSealer(ethash, notify, noverify)
	return ethash
}

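// Construction sketch (illustrative only; the directory and counts below are
// made-up values, not recommendations): a normal-mode engine with on-disk
// caches, no remote-work notifications and seal verification enabled.
//
//	engine := New(Config{
//		PowMode:      ModeNormal,
//		CachesInMem:  2,
//		CachesOnDisk: 3,
//		CacheDir:     "/tmp/ethash",
//	}, nil, false)
//	defer engine.Close()
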
// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester(notify []string, noverify bool) *Ethash {
	return New(Config{PowMode: ModeTest}, notify, noverify)
}

// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
// all blocks' seal as valid, though they still have to conform to the Ethereum
// consensus rules.
func NewFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
	}
}

// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the Ethereum consensus rules.
func NewFakeFailer(fail uint64) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
		fakeFail: fail,
	}
}

// NewFakeDelayer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the Ethereum consensus rules.
func NewFakeDelayer(delay time.Duration) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
		fakeDelay: delay,
	}
}

// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFullFake,
			Log:     log.Root(),
		},
	}
}

// NewShared creates a full sized ethash PoW shared between all requesters running
// in the same process.
func NewShared() *Ethash {
	return &Ethash{shared: sharedEthash}
}

// Close closes the exit channel to notify all backend threads exiting.
func (ethash *Ethash) Close() error {
	ethash.closeOnce.Do(func() {
		// Short circuit if the exit channel is not allocated.
		if ethash.remote == nil {
			return
		}
		close(ethash.remote.requestExit)
		<-ethash.remote.exitCh
	})
	return nil
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) *cache {
	epoch := block / epochLength
	currentI, futureI := ethash.caches.get(epoch)
	current := currentI.(*cache)

	// Wait for generation finish.
	current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)

	// If we need a new future cache, now's a good time to regenerate it.
	if futureI != nil {
		future := futureI.(*cache)
		go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)
	}
	return current
}

// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
//
// If async is specified, not only the future but the current DAG is also
// generated on a background thread.
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
	// Retrieve the requested ethash dataset
	epoch := block / epochLength
	currentI, futureI := ethash.datasets.get(epoch)
	current := currentI.(*dataset)

	// If async is specified, generate everything in a background thread
	if async && !current.generated() {
		go func() {
			current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)

			if futureI != nil {
				future := futureI.(*dataset)
				future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
			}
		}()
	} else {
		// Either blocking generation was requested, or already done
		current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)

		if futureI != nil {
			future := futureI.(*dataset)
			go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
		}
	}
	return current
}

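// Behavioural sketch for the method above (illustrative only; blockNumber is a
// hypothetical variable): mining code can request the DAG asynchronously so
// sealing is not blocked while it is built, then poll generated() before using it.
//
//	d := ethash.dataset(blockNumber, true) // returns immediately, DAG builds in background
//	if d.generated() {
//		// safe to mine against d.dataset
//	}
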
// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	return ethash.threads
}

// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if ethash.shared != nil {
		ethash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	ethash.threads = threads
	select {
	case ethash.update <- struct{}{}:
	default:
	}
}

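// Thread-count semantics in a nutshell (illustrative calls only, engine being
// any *Ethash instance):
//
//	engine.SetThreads(0)  // use all cores of the machine
//	engine.SetThreads(4)  // mine on exactly four threads
//	engine.SetThreads(-1) // allowed: leaves the miner idle, doing no work
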
// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
// Note the returned hashrate includes the local hashrate, but also the total
// hashrate of all remote miners.
func (ethash *Ethash) Hashrate() float64 {
	// Short circuit if we are not running ethash in normal/test mode.
	if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
		return ethash.hashrate.Rate1()
	}
	var res = make(chan uint64, 1)

	select {
	case ethash.remote.fetchRateCh <- res:
	case <-ethash.remote.exitCh:
		// Return local hashrate only if ethash is stopped.
		return ethash.hashrate.Rate1()
	}
	// Gather total submitted hash rate of remote sealers.
	return ethash.hashrate.Rate1() + float64(<-res)
}

// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API {
	// In order to ensure backward compatibility, we expose the ethash RPC APIs
	// to both eth and ethash namespaces.
	return []rpc.API{
		{
			Namespace: "eth",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
		{
			Namespace: "ethash",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
	}
}

// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64) []byte {
	return seedHash(block)
}
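
// Usage sketch (illustrative only): the seed depends only on the epoch, so any
// two block numbers inside the same epoch yield the same seed. epochLength is
// the epoch size defined elsewhere in this package.
//
//	seedA := SeedHash(10*epochLength + 1)
//	seedB := SeedHash(10*epochLength + epochLength - 1)
//	// seedA and seedB are identical: both blocks fall in epoch 10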