// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package ethash implements the ethash proof-of-work consensus engine.
package ethash

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/edsrzf/mmap-go"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/hashicorp/golang-lru/simplelru"
)

var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// two256 is a big integer representing 2^256
	two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash = New(Config{"", 3, 0, false, "", 1, 0, false, ModeNormal, nil}, nil, false)

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)

// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, nil, nil, err
	}
	mem, buffer, err := memoryMapFile(file, false)
	if err != nil {
		file.Close()
		return nil, nil, nil, err
	}
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, ErrInvalidDumpMagic
		}
	}
	if lock {
		if err := mem.Lock(); err != nil {
			mem.Unmap()
			file.Close()
			return nil, nil, nil, err
		}
	}
	return file, mem, buffer[len(dumpMagic):], err
}

// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
	// Try to memory map the file
	flag := mmap.RDONLY
	if write {
		flag = mmap.RDWR
	}
	mem, err := mmap.Map(file, flag, 0)
	if err != nil {
		return nil, nil, err
	}
	// Yay, we managed to memory map the file, here be dragons
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
	header.Len /= 4
	header.Cap /= 4

	return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}
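
// The SliceHeader cast above reinterprets the mmapped byte slice as a []uint32
// in place, without copying, which is why Len and Cap are divided by 4. As an
// illustrative sketch (not part of the original file), the word at index i of
// that view corresponds, on a little-endian machine, to assembling four
// consecutive bytes like this. The helper name is hypothetical.
func wordAtLittleEndian(buf []byte, i int) uint32 {
	// Each uint32 spans 4 bytes, least significant byte first.
	off := i * 4
	return uint32(buf[off]) |
		uint32(buf[off+1])<<8 |
		uint32(buf[off+2])<<16 |
		uint32(buf[off+3])<<24
}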

// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
	// Ensure the data folder exists
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, nil, nil, err
	}
	// Create a huge temporary empty file to fill with data
	temp := path + "." + strconv.Itoa(rand.Int())

	dump, err := os.Create(temp)
	if err != nil {
		return nil, nil, nil, err
	}
	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
		return nil, nil, nil, err
	}
	// Memory map the file for writing and fill it with the generator
	mem, buffer, err := memoryMapFile(dump, true)
	if err != nil {
		dump.Close()
		return nil, nil, nil, err
	}
	copy(buffer, dumpMagic)

	data := buffer[len(dumpMagic):]
	generator(data)

	if err := mem.Unmap(); err != nil {
		return nil, nil, nil, err
	}
	if err := dump.Close(); err != nil {
		return nil, nil, nil, err
	}
	if err := os.Rename(temp, path); err != nil {
		return nil, nil, nil, err
	}
	return memoryMap(path, lock)
}

// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
	what string
	new  func(epoch uint64) interface{}
	mu   sync.Mutex
	// Items are kept in a LRU cache, but there is a special case:
	// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
	cache      *simplelru.LRU
	future     uint64
	futureItem interface{}
}

// newlru creates a new least-recently-used cache for either the verification
// caches or the mining datasets.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
	if maxItems <= 0 {
		maxItems = 1
	}
	cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
		log.Trace("Evicted ethash "+what, "epoch", key)
	})
	return &lru{what: what, new: new, cache: cache}
}

// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	// Get or create the item for the requested epoch.
	item, ok := lru.cache.Get(epoch)
	if !ok {
		if lru.future > 0 && lru.future == epoch {
			item = lru.futureItem
		} else {
			log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
			item = lru.new(epoch)
		}
		lru.cache.Add(epoch, item)
	}
	// Update the 'future item' if epoch is larger than previously seen.
	if epoch < maxEpoch-1 && lru.future < epoch+1 {
		log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
		future = lru.new(epoch + 1)
		lru.future = epoch + 1
		lru.futureItem = future
	}
	return item, future
}
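
// Illustrative sketch (not part of the original file): how the cache machinery
// below consumes the lru. Retrieving epoch N returns the item for N and, when N
// advances past previously seen epochs, a speculatively created item for N+1
// that callers generate in the background. The function name and the in-memory
// count of 3 are hypothetical; lru, newlru, newCache and cache are the
// identifiers defined in this package.
func exampleLRUUsage(epoch uint64) (*cache, *cache) {
	caches := newlru("cache", 3, newCache)
	currentI, futureI := caches.get(epoch)
	current := currentI.(*cache)

	var future *cache
	if futureI != nil {
		future = futureI.(*cache)
	}
	return current, future
}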

// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64    // Epoch for which this cache is relevant
	dump  *os.File  // File descriptor of the memory mapped cache
	mmap  mmap.MMap // Memory map itself to unmap before releasing
	cache []uint32  // The actual cache data content (may be memory mapped)
	once  sync.Once // Ensures the cache is generated only once
}

// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
	return &cache{epoch: epoch}
}

// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, lock bool, test bool) {
	c.once.Do(func() {
		size := cacheSize(c.epoch*epochLength + 1)
		seed := seedHash(c.epoch*epochLength + 1)
		if test {
			size = 1024
		}
		// If we don't store anything on disk, generate and return.
		if dir == "" {
			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", c.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(c, (*cache).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		c.dump, c.mmap, c.cache, err = memoryMap(path, lock)
		if err == nil {
			logger.Debug("Loaded old ethash cache from disk")
			return
		}
		logger.Debug("Failed to load old ethash cache", "err", err)

		// No previous cache available, create a new cache file to fill
		c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash cache", "err", err)

			c.cache = make([]uint32, size/4)
			generateCache(c.cache, c.epoch, seed)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// finalizer unmaps the memory and closes the file.
func (c *cache) finalizer() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}

// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch   uint64    // Epoch for which this dataset is relevant
	dump    *os.File  // File descriptor of the memory mapped dataset
	mmap    mmap.MMap // Memory map itself to unmap before releasing
	dataset []uint32  // The actual dataset content (may be memory mapped)
	once    sync.Once // Ensures the dataset is generated only once
	done    uint32    // Atomic flag to determine generation status
}

// newDataset creates a new ethash mining dataset and returns it as a plain Go
// interface to be usable in an LRU cache.
func newDataset(epoch uint64) interface{} {
	return &dataset{epoch: epoch}
}

// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
	d.once.Do(func() {
		// Mark the dataset generated after we're done. This is needed for remote
		// miners, which check generated() instead of blocking on DAG generation.
		defer atomic.StoreUint32(&d.done, 1)

		csize := cacheSize(d.epoch*epochLength + 1)
		dsize := datasetSize(d.epoch*epochLength + 1)
		seed := seedHash(d.epoch*epochLength + 1)
		if test {
			csize = 1024
			dsize = 32 * 1024
		}
		// If we don't store anything on disk, generate and return
		if dir == "" {
			cache := make([]uint32, csize/4)
			generateCache(cache, d.epoch, seed)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)

			return
		}
		// Disk storage is needed, this will get fancy
		var endian string
		if !isLittleEndian() {
			endian = ".be"
		}
		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
		logger := log.New("epoch", d.epoch)

		// We're about to mmap the file, ensure that the mapping is cleaned up when the
		// cache becomes unused.
		runtime.SetFinalizer(d, (*dataset).finalizer)

		// Try to load the file from disk and memory map it
		var err error
		d.dump, d.mmap, d.dataset, err = memoryMap(path, lock)
		if err == nil {
			logger.Debug("Loaded old ethash dataset from disk")
			return
		}
		logger.Debug("Failed to load old ethash dataset", "err", err)

		// No previous dataset available, create a new dataset file to fill
		cache := make([]uint32, csize/4)
		generateCache(cache, d.epoch, seed)

		d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
		if err != nil {
			logger.Error("Failed to generate mapped ethash dataset", "err", err)

			d.dataset = make([]uint32, dsize/4)
			generateDataset(d.dataset, d.epoch, cache)
		}
		// Iterate over all previous instances and delete old ones
		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
			seed := seedHash(uint64(ep)*epochLength + 1)
			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
			os.Remove(path)
		}
	})
}

// generated returns whether this particular dataset finished generating already
// or not (it may not have been started at all). This is useful for remote miners
// to default to verification caches instead of blocking on DAG generations.
func (d *dataset) generated() bool {
	return atomic.LoadUint32(&d.done) == 1
}

// finalizer closes any file handlers and memory maps open.
func (d *dataset) finalizer() {
	if d.mmap != nil {
		d.mmap.Unmap()
		d.dump.Close()
		d.mmap, d.dump = nil, nil
	}
}

// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
	c := cache{epoch: block / epochLength}
	c.generate(dir, math.MaxInt32, false, false)
}

// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
	d := dataset{epoch: block / epochLength}
	d.generate(dir, math.MaxInt32, false, false)
}
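
// Illustrative sketch (not part of the original file): pre-building the on-disk
// verification cache and mining DAG for a block before mining starts. The
// directory names and the function name are hypothetical; the files are written
// using the "cache-R<rev>-<seed>" and "full-R<rev>-<seed>" naming scheme used
// by generate above.
func examplePrebuild(block uint64) {
	MakeCache(block, "/tmp/ethash-caches") // small verification cache
	MakeDataset(block, "/tmp/ethash-dags") // full mining DAG, much larger and slower to build
}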

// Mode defines the type and amount of PoW verification an ethash engine makes.
type Mode uint

const (
	ModeNormal Mode = iota
	ModeShared
	ModeTest
	ModeFake
	ModeFullFake
)

// Config are the configuration parameters of the ethash.
type Config struct {
	CacheDir         string
	CachesInMem      int
	CachesOnDisk     int
	CachesLockMmap   bool
	DatasetDir       string
	DatasetsInMem    int
	DatasetsOnDisk   int
	DatasetsLockMmap bool
	PowMode          Mode

	Log log.Logger `toml:"-"`
}
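
// Illustrative sketch (not part of the original file): a Config spelled out with
// named fields, equivalent in shape to the positional literal used for
// sharedEthash above. The variable name, directory paths and counts are
// hypothetical placeholders.
var exampleConfig = Config{
	CacheDir:       "/tmp/ethash", // where verification caches are dumped
	CachesInMem:    2,             // keep two epochs of caches in memory
	CachesOnDisk:   3,             // keep three epochs of caches on disk
	DatasetDir:     "/tmp/ethash", // where full DAGs are dumped
	DatasetsInMem:  1,             // keep one full DAG in memory
	DatasetsOnDisk: 2,             // keep two full DAGs on disk
	PowMode:        ModeNormal,    // full proof-of-work verification
}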

// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate
	remote   *remoteSealer

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock      sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
	closeOnce sync.Once  // Ensures exit channel will not be closed twice.
}

// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string, noverify bool) *Ethash {
	if config.Log == nil {
		config.Log = log.Root()
	}
	if config.CachesInMem <= 0 {
		config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
	}
	if config.CacheDir != "" && config.CachesOnDisk > 0 {
		config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
	}
	if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
		config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
	}
	ethash := &Ethash{
		config:   config,
		caches:   newlru("cache", config.CachesInMem, newCache),
		datasets: newlru("dataset", config.DatasetsInMem, newDataset),
		update:   make(chan struct{}),
		hashrate: metrics.NewMeterForced(),
	}
	ethash.remote = startRemoteSealer(ethash, notify, noverify)
	return ethash
}
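
// Illustrative sketch (not part of the original file): constructing a full
// engine from a Config, wiring an optional work-notification endpoint, and
// shutting it down again. The function name and the notification URL are
// hypothetical placeholders.
func exampleNewEngine(cfg Config) error {
	notify := []string{"http://127.0.0.1:8550"} // hypothetical remote work receiver
	engine := New(cfg, notify, false)           // noverify=false: verify submitted remote work
	// ... use engine as a consensus engine ...
	return engine.Close() // stops the remote sealer goroutine
}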

// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester(notify []string, noverify bool) *Ethash {
	ethash := &Ethash{
		config:   Config{PowMode: ModeTest, Log: log.Root()},
		caches:   newlru("cache", 1, newCache),
		datasets: newlru("dataset", 1, newDataset),
		update:   make(chan struct{}),
		hashrate: metrics.NewMeterForced(),
	}
	ethash.remote = startRemoteSealer(ethash, notify, noverify)
	return ethash
}

// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
// all blocks' seal as valid, though they still have to conform to the Ethereum
// consensus rules.
func NewFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
	}
}

// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the Ethereum consensus rules.
func NewFakeFailer(fail uint64) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
		fakeFail: fail,
	}
}

// NewFakeDelayer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the Ethereum consensus rules.
func NewFakeDelayer(delay time.Duration) *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFake,
			Log:     log.Root(),
		},
		fakeDelay: delay,
	}
}

// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
	return &Ethash{
		config: Config{
			PowMode: ModeFullFake,
			Log:     log.Root(),
		},
	}
}

// NewShared creates a full sized ethash PoW shared between all requesters running
// in the same process.
func NewShared() *Ethash {
	return &Ethash{shared: sharedEthash}
}

// Close closes the exit channel to notify all backend threads exiting.
func (ethash *Ethash) Close() error {
	var err error
	ethash.closeOnce.Do(func() {
		// Short circuit if the exit channel is not allocated.
		if ethash.remote == nil {
			return
		}
		close(ethash.remote.requestExit)
		<-ethash.remote.exitCh
	})
	return err
}

// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) *cache {
	epoch := block / epochLength
	currentI, futureI := ethash.caches.get(epoch)
	current := currentI.(*cache)

	// Wait for generation finish.
	current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)

	// If we need a new future cache, now's a good time to regenerate it.
	if futureI != nil {
		future := futureI.(*cache)
		go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)
	}
	return current
}

// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
//
// If async is specified, not only the future but the current DAG is also
// generated on a background thread.
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
	// Retrieve the requested ethash dataset
	epoch := block / epochLength
	currentI, futureI := ethash.datasets.get(epoch)
	current := currentI.(*dataset)

	// If async is specified, generate everything in a background thread
	if async && !current.generated() {
		go func() {
			current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)

			if futureI != nil {
				future := futureI.(*dataset)
				future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
			}
		}()
	} else {
		// Either blocking generation was requested, or already done
		current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)

		if futureI != nil {
			future := futureI.(*dataset)
			go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
		}
	}
	return current
}
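
// Illustrative sketch (not part of the original file): warming the DAG for an
// upcoming block without blocking. With async=true the current (and future)
// dataset generation runs in a goroutine, and generated() tells remote miners
// whether full-DAG work can be handed out yet or whether they should keep
// falling back to the verification cache. The function name is hypothetical.
func exampleWarmDAG(e *Ethash, block uint64) bool {
	d := e.dataset(block, true) // returns immediately; generation continues in the background
	return d.generated()        // true once the DAG is fully built
}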

// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	return ethash.threads
}

// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
	ethash.lock.Lock()
	defer ethash.lock.Unlock()

	// If we're running a shared PoW, set the thread count on that instead
	if ethash.shared != nil {
		ethash.shared.SetThreads(threads)
		return
	}
	// Update the threads and ping any running seal to pull in any changes
	ethash.threads = threads
	select {
	case ethash.update <- struct{}{}:
	default:
	}
}
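
// Illustrative sketch (not part of the original file): the three thread-count
// regimes documented on SetThreads, shown as calls. The function name is
// hypothetical.
func exampleThreadControl(e *Ethash) {
	e.SetThreads(4)  // mine on exactly four threads
	e.SetThreads(0)  // mine on all CPU cores
	e.SetThreads(-1) // keep the miner idle without doing any work
}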

// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
// Note the returned hashrate includes the local hashrate, but also the total
// hashrate of all remote miners.
func (ethash *Ethash) Hashrate() float64 {
	// Short circuit if we are not running the ethash in normal/test mode.
	if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
		return ethash.hashrate.Rate1()
	}
	var res = make(chan uint64, 1)

	select {
	case ethash.remote.fetchRateCh <- res:
	case <-ethash.remote.exitCh:
		// Return local hashrate only if ethash is stopped.
		return ethash.hashrate.Rate1()
	}
	// Gather total submitted hash rate of remote sealers.
	return ethash.hashrate.Rate1() + float64(<-res)
}

// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API {
	// In order to ensure backward compatibility, we expose ethash RPC APIs
	// to both eth and ethash namespaces.
	return []rpc.API{
		{
			Namespace: "eth",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
		{
			Namespace: "ethash",
			Version:   "1.0",
			Service:   &API{ethash},
			Public:    true,
		},
	}
}

// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64) []byte {
	return seedHash(block)
}