  1. // Copyright 2017 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. // Package ethash implements the ethash proof-of-work consensus engine.
  17. package ethash
  18. import (
  19. "errors"
  20. "fmt"
  21. "math"
  22. "math/big"
  23. "math/rand"
  24. "os"
  25. "path/filepath"
  26. "reflect"
  27. "runtime"
  28. "strconv"
  29. "sync"
  30. "time"
  31. "unsafe"
  32. mmap "github.com/edsrzf/mmap-go"
  33. "github.com/ethereum/go-ethereum/consensus"
  34. "github.com/ethereum/go-ethereum/log"
  35. "github.com/ethereum/go-ethereum/metrics"
  36. "github.com/ethereum/go-ethereum/rpc"
  37. "github.com/hashicorp/golang-lru/simplelru"
  38. )
// ErrInvalidDumpMagic is returned when a memory-mapped cache/dataset dump does
// not begin with the expected magic words (see dumpMagic below).
var ErrInvalidDumpMagic = errors.New("invalid dump magic")

var (
	// maxUint256 is a big integer representing 2^256. Note: Exp with a
	// non-positive modulus performs plain exponentiation, so despite the
	// name this is 2^256, not 2^256-1.
	maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))

	// sharedEthash is a full instance that can be shared between multiple users.
	sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})

	// algorithmRevision is the data structure version used for file naming.
	algorithmRevision = 23

	// dumpMagic is a dataset dump header to sanity check a data dump.
	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)
  50. // isLittleEndian returns whether the local system is running in little or big
  51. // endian byte order.
  52. func isLittleEndian() bool {
  53. n := uint32(0x01020304)
  54. return *(*byte)(unsafe.Pointer(&n)) == 0x04
  55. }
  56. // memoryMap tries to memory map a file of uint32s for read only access.
  57. func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
  58. file, err := os.OpenFile(path, os.O_RDONLY, 0644)
  59. if err != nil {
  60. return nil, nil, nil, err
  61. }
  62. mem, buffer, err := memoryMapFile(file, false)
  63. if err != nil {
  64. file.Close()
  65. return nil, nil, nil, err
  66. }
  67. for i, magic := range dumpMagic {
  68. if buffer[i] != magic {
  69. mem.Unmap()
  70. file.Close()
  71. return nil, nil, nil, ErrInvalidDumpMagic
  72. }
  73. }
  74. return file, mem, buffer[len(dumpMagic):], err
  75. }
// memoryMapFile tries to memory map an already opened file descriptor.
// When write is true the mapping is created read-write, otherwise read-only.
// It returns both the raw byte mapping (required for a later Unmap) and the
// same memory reinterpreted as a []uint32 view.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
// Try to memory map the file
flag := mmap.RDONLY
if write {
flag = mmap.RDWR
}
mem, err := mmap.Map(file, flag, 0)
if err != nil {
return nil, nil, err
}
// Yay, we managed to memory map the file, here be dragons
// Reinterpret the []byte slice header as a []uint32 header: same backing
// memory, but length and capacity are counted in 4-byte words, hence /4.
header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
header.Len /= 4
header.Cap /= 4
return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}
  93. // memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
  94. // access, fill it with the data from a generator and then move it into the final
  95. // path requested.
  96. func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
  97. // Ensure the data folder exists
  98. if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
  99. return nil, nil, nil, err
  100. }
  101. // Create a huge temporary empty file to fill with data
  102. temp := path + "." + strconv.Itoa(rand.Int())
  103. dump, err := os.Create(temp)
  104. if err != nil {
  105. return nil, nil, nil, err
  106. }
  107. if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
  108. return nil, nil, nil, err
  109. }
  110. // Memory map the file for writing and fill it with the generator
  111. mem, buffer, err := memoryMapFile(dump, true)
  112. if err != nil {
  113. dump.Close()
  114. return nil, nil, nil, err
  115. }
  116. copy(buffer, dumpMagic)
  117. data := buffer[len(dumpMagic):]
  118. generator(data)
  119. if err := mem.Unmap(); err != nil {
  120. return nil, nil, nil, err
  121. }
  122. if err := dump.Close(); err != nil {
  123. return nil, nil, nil, err
  124. }
  125. if err := os.Rename(temp, path); err != nil {
  126. return nil, nil, nil, err
  127. }
  128. return memoryMap(path)
  129. }
// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
	what string                      // Human-readable label ("cache"/"dataset") used in log messages
	new  func(epoch uint64) interface{} // Factory invoked to build the item for a missing epoch
	mu   sync.Mutex                  // Guards cache, future and futureItem below
	// Items are kept in a LRU cache, but there is a special case:
	// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
	cache      *simplelru.LRU
	future     uint64      // Epoch of the pre-generated future item (0 = none yet)
	futureItem interface{} // The pre-generated item for epoch 'future'
}
// newlru creates a new least-recently-used cache for either the verification
// caches or the mining datasets. maxItems is clamped to at least one entry so
// an item can always be cached.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
if maxItems <= 0 {
maxItems = 1
}
// NewLRU only errors on a non-positive size, which the clamp above rules out.
cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
log.Trace("Evicted ethash "+what, "epoch", key)
})
return &lru{what: what, new: new, cache: cache}
}
// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
lru.mu.Lock()
defer lru.mu.Unlock()
// Get or create the item for the requested epoch.
item, ok := lru.cache.Get(epoch)
if !ok {
// Reuse the pre-generated 'future item' when it matches, avoiding a
// second expensive generation for the same epoch.
if lru.future > 0 && lru.future == epoch {
item = lru.futureItem
} else {
log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
item = lru.new(epoch)
}
lru.cache.Add(epoch, item)
}
// Update the 'future item' if epoch is larger than previously seen.
// The epoch < maxEpoch-1 guard keeps epoch+1 inside the supported range
// (maxEpoch is declared elsewhere in the package).
if epoch < maxEpoch-1 && lru.future < epoch+1 {
log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
future = lru.new(epoch + 1)
lru.future = epoch + 1
lru.futureItem = future
}
return item, future
}
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
	epoch uint64     // Epoch for which this cache is relevant
	dump  *os.File   // File descriptor of the memory mapped cache
	mmap  mmap.MMap  // Memory map itself to unmap before releasing
	cache []uint32   // The actual cache data content (may be memory mapped)
	once  sync.Once  // Ensures the cache is generated only once
}
  186. // newCache creates a new ethash verification cache and returns it as a plain Go
  187. // interface to be usable in an LRU cache.
  188. func newCache(epoch uint64) interface{} {
  189. return &cache{epoch: epoch}
  190. }
// generate ensures that the cache content is generated before use. dir is the
// directory for memory-mapped dumps ("" keeps the cache purely in memory),
// limit bounds how many older epoch dumps are kept on disk, and test shrinks
// the cache for fast unit tests. Safe for concurrent callers via sync.Once.
func (c *cache) generate(dir string, limit int, test bool) {
c.once.Do(func() {
size := cacheSize(c.epoch*epochLength + 1)
seed := seedHash(c.epoch*epochLength + 1)
if test {
size = 1024
}
// If we don't store anything on disk, generate and return.
if dir == "" {
// size is in bytes; the cache is stored as uint32 words, hence /4.
c.cache = make([]uint32, size/4)
generateCache(c.cache, c.epoch, seed)
return
}
// Disk storage is needed, this will get fancy
var endian string
if !isLittleEndian() {
endian = ".be"
}
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
logger := log.New("epoch", c.epoch)
// We're about to mmap the file, ensure that the mapping is cleaned up when the
// cache becomes unused.
runtime.SetFinalizer(c, (*cache).finalizer)
// Try to load the file from disk and memory map it
var err error
c.dump, c.mmap, c.cache, err = memoryMap(path)
if err == nil {
logger.Debug("Loaded old ethash cache from disk")
return
}
logger.Debug("Failed to load old ethash cache", "err", err)
// No previous cache available, create a new cache file to fill
c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
if err != nil {
logger.Error("Failed to generate mapped ethash cache", "err", err)
// Fall back to a plain in-memory cache so verification can proceed.
c.cache = make([]uint32, size/4)
generateCache(c.cache, c.epoch, seed)
}
// Iterate over all previous instances and delete old ones
for ep := int(c.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
os.Remove(path)
}
})
}
  238. // finalizer unmaps the memory and closes the file.
  239. func (c *cache) finalizer() {
  240. if c.mmap != nil {
  241. c.mmap.Unmap()
  242. c.dump.Close()
  243. c.mmap, c.dump = nil, nil
  244. }
  245. }
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
	epoch   uint64     // Epoch for which this dataset is relevant
	dump    *os.File   // File descriptor of the memory mapped dataset
	mmap    mmap.MMap  // Memory map itself to unmap before releasing
	dataset []uint32   // The actual dataset content (may be memory mapped)
	once    sync.Once  // Ensures the dataset is generated only once
}
  254. // newDataset creates a new ethash mining dataset and returns it as a plain Go
  255. // interface to be usable in an LRU cache.
  256. func newDataset(epoch uint64) interface{} {
  257. return &dataset{epoch: epoch}
  258. }
  259. // generate ensures that the dataset content is generated before use.
  260. func (d *dataset) generate(dir string, limit int, test bool) {
  261. d.once.Do(func() {
  262. csize := cacheSize(d.epoch*epochLength + 1)
  263. dsize := datasetSize(d.epoch*epochLength + 1)
  264. seed := seedHash(d.epoch*epochLength + 1)
  265. if test {
  266. csize = 1024
  267. dsize = 32 * 1024
  268. }
  269. // If we don't store anything on disk, generate and return
  270. if dir == "" {
  271. cache := make([]uint32, csize/4)
  272. generateCache(cache, d.epoch, seed)
  273. d.dataset = make([]uint32, dsize/4)
  274. generateDataset(d.dataset, d.epoch, cache)
  275. }
  276. // Disk storage is needed, this will get fancy
  277. var endian string
  278. if !isLittleEndian() {
  279. endian = ".be"
  280. }
  281. path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
  282. logger := log.New("epoch", d.epoch)
  283. // We're about to mmap the file, ensure that the mapping is cleaned up when the
  284. // cache becomes unused.
  285. runtime.SetFinalizer(d, (*dataset).finalizer)
  286. // Try to load the file from disk and memory map it
  287. var err error
  288. d.dump, d.mmap, d.dataset, err = memoryMap(path)
  289. if err == nil {
  290. logger.Debug("Loaded old ethash dataset from disk")
  291. return
  292. }
  293. logger.Debug("Failed to load old ethash dataset", "err", err)
  294. // No previous dataset available, create a new dataset file to fill
  295. cache := make([]uint32, csize/4)
  296. generateCache(cache, d.epoch, seed)
  297. d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
  298. if err != nil {
  299. logger.Error("Failed to generate mapped ethash dataset", "err", err)
  300. d.dataset = make([]uint32, dsize/2)
  301. generateDataset(d.dataset, d.epoch, cache)
  302. }
  303. // Iterate over all previous instances and delete old ones
  304. for ep := int(d.epoch) - limit; ep >= 0; ep-- {
  305. seed := seedHash(uint64(ep)*epochLength + 1)
  306. path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
  307. os.Remove(path)
  308. }
  309. })
  310. }
  311. // finalizer closes any file handlers and memory maps open.
  312. func (d *dataset) finalizer() {
  313. if d.mmap != nil {
  314. d.mmap.Unmap()
  315. d.dump.Close()
  316. d.mmap, d.dump = nil, nil
  317. }
  318. }
  319. // MakeCache generates a new ethash cache and optionally stores it to disk.
  320. func MakeCache(block uint64, dir string) {
  321. c := cache{epoch: block / epochLength}
  322. c.generate(dir, math.MaxInt32, false)
  323. }
  324. // MakeDataset generates a new ethash dataset and optionally stores it to disk.
  325. func MakeDataset(block uint64, dir string) {
  326. d := dataset{epoch: block / epochLength}
  327. d.generate(dir, math.MaxInt32, false)
  328. }
// Mode defines the type and amount of PoW verification an ethash engine makes.
type Mode uint

const (
	ModeNormal   Mode = iota // Full verification, the default
	ModeShared               // Shared verifier between instances (cf. NewShared)
	ModeTest                 // Reduced cache/dataset sizes, used by NewTester
	ModeFake                 // Seals accepted without PoW checks (cf. NewFaker)
	ModeFullFake             // No consensus checks whatsoever (cf. NewFullFaker)
)
// Config are the configuration parameters of the ethash.
type Config struct {
	CacheDir       string // Directory to store verification caches in ("" = memory only)
	CachesInMem    int    // Number of recent epoch caches kept in memory (min 1, see New)
	CachesOnDisk   int    // Number of recent epoch caches retained on disk; older dumps are deleted
	DatasetDir     string // Directory to store mining datasets in ("" = memory only)
	DatasetsInMem  int    // Number of recent mining datasets kept in memory
	DatasetsOnDisk int    // Number of recent mining datasets retained on disk; older dumps are deleted
	PowMode        Mode   // Type and amount of PoW verification to perform
}
// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
	config Config

	caches   *lru // In memory caches to avoid regenerating too often
	datasets *lru // In memory datasets to avoid regenerating too often

	// Mining related fields
	rand     *rand.Rand    // Properly seeded random source for nonces
	threads  int           // Number of threads to mine on if mining
	update   chan struct{} // Notification channel to update mining parameters
	hashrate metrics.Meter // Meter tracking the average hashrate

	// The fields below are hooks for testing
	shared    *Ethash       // Shared PoW verifier to avoid cache regeneration
	fakeFail  uint64        // Block number which fails PoW check even in fake mode
	fakeDelay time.Duration // Time delay to sleep for before returning from verify

	lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
}
// New creates a full sized ethash PoW scheme.
func New(config Config) *Ethash {
// At least one verification cache must stay in memory, so clamp the setting.
if config.CachesInMem <= 0 {
log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
config.CachesInMem = 1
}
if config.CacheDir != "" && config.CachesOnDisk > 0 {
log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
}
if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
}
return &Ethash{
config: config,
caches: newlru("cache", config.CachesInMem, newCache),
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeter(),
}
}
  385. // NewTester creates a small sized ethash PoW scheme useful only for testing
  386. // purposes.
  387. func NewTester() *Ethash {
  388. return New(Config{CachesInMem: 1, PowMode: ModeTest})
  389. }
  390. // NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts
  391. // all blocks' seal as valid, though they still have to conform to the Ethereum
  392. // consensus rules.
  393. func NewFaker() *Ethash {
  394. return &Ethash{
  395. config: Config{
  396. PowMode: ModeFake,
  397. },
  398. }
  399. }
  400. // NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that
  401. // accepts all blocks as valid apart from the single one specified, though they
  402. // still have to conform to the Ethereum consensus rules.
  403. func NewFakeFailer(fail uint64) *Ethash {
  404. return &Ethash{
  405. config: Config{
  406. PowMode: ModeFake,
  407. },
  408. fakeFail: fail,
  409. }
  410. }
  411. // NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that
  412. // accepts all blocks as valid, but delays verifications by some time, though
  413. // they still have to conform to the Ethereum consensus rules.
  414. func NewFakeDelayer(delay time.Duration) *Ethash {
  415. return &Ethash{
  416. config: Config{
  417. PowMode: ModeFake,
  418. },
  419. fakeDelay: delay,
  420. }
  421. }
  422. // NewFullFaker creates an ethash consensus engine with a full fake scheme that
  423. // accepts all blocks as valid, without checking any consensus rules whatsoever.
  424. func NewFullFaker() *Ethash {
  425. return &Ethash{
  426. config: Config{
  427. PowMode: ModeFullFake,
  428. },
  429. }
  430. }
  431. // NewShared creates a full sized ethash PoW shared between all requesters running
  432. // in the same process.
  433. func NewShared() *Ethash {
  434. return &Ethash{shared: sharedEthash}
  435. }
  436. // cache tries to retrieve a verification cache for the specified block number
  437. // by first checking against a list of in-memory caches, then against caches
  438. // stored on disk, and finally generating one if none can be found.
  439. func (ethash *Ethash) cache(block uint64) *cache {
  440. epoch := block / epochLength
  441. currentI, futureI := ethash.caches.get(epoch)
  442. current := currentI.(*cache)
  443. // Wait for generation finish.
  444. current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
  445. // If we need a new future cache, now's a good time to regenerate it.
  446. if futureI != nil {
  447. future := futureI.(*cache)
  448. go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
  449. }
  450. return current
  451. }
  452. // dataset tries to retrieve a mining dataset for the specified block number
  453. // by first checking against a list of in-memory datasets, then against DAGs
  454. // stored on disk, and finally generating one if none can be found.
  455. func (ethash *Ethash) dataset(block uint64) *dataset {
  456. epoch := block / epochLength
  457. currentI, futureI := ethash.datasets.get(epoch)
  458. current := currentI.(*dataset)
  459. // Wait for generation finish.
  460. current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
  461. // If we need a new future dataset, now's a good time to regenerate it.
  462. if futureI != nil {
  463. future := futureI.(*dataset)
  464. go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
  465. }
  466. return current
  467. }
  468. // Threads returns the number of mining threads currently enabled. This doesn't
  469. // necessarily mean that mining is running!
  470. func (ethash *Ethash) Threads() int {
  471. ethash.lock.Lock()
  472. defer ethash.lock.Unlock()
  473. return ethash.threads
  474. }
  475. // SetThreads updates the number of mining threads currently enabled. Calling
  476. // this method does not start mining, only sets the thread count. If zero is
  477. // specified, the miner will use all cores of the machine. Setting a thread
  478. // count below zero is allowed and will cause the miner to idle, without any
  479. // work being done.
  480. func (ethash *Ethash) SetThreads(threads int) {
  481. ethash.lock.Lock()
  482. defer ethash.lock.Unlock()
  483. // If we're running a shared PoW, set the thread count on that instead
  484. if ethash.shared != nil {
  485. ethash.shared.SetThreads(threads)
  486. return
  487. }
  488. // Update the threads and ping any running seal to pull in any changes
  489. ethash.threads = threads
  490. select {
  491. case ethash.update <- struct{}{}:
  492. default:
  493. }
  494. }
  495. // Hashrate implements PoW, returning the measured rate of the search invocations
  496. // per second over the last minute.
  497. func (ethash *Ethash) Hashrate() float64 {
  498. return ethash.hashrate.Rate1()
  499. }
  500. // APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
  501. // that is empty.
  502. func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
  503. return nil
  504. }
// SeedHash is the seed to use for generating a verification cache and the mining
// dataset. It is the exported wrapper around the package-internal seedHash.
func SeedHash(block uint64) []byte {
return seedHash(block)
}