database.go

package ethdb

import (
	"strconv"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/compression/rle"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/rcrowley/go-metrics"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

var OpenFileLimit = 64

type LDBDatabase struct {
	fn string      // filename for reporting
	db *leveldb.DB // LevelDB instance

	GetTimer       metrics.Timer // Timer for measuring the database get request counts and latencies
	PutTimer       metrics.Timer // Timer for measuring the database put request counts and latencies
	DelTimer       metrics.Timer // Timer for measuring the database delete request counts and latencies
	MissMeter      metrics.Meter // Meter for measuring the missed database get requests
	ReadMeter      metrics.Meter // Meter for measuring the database get request data usage
	WriteMeter     metrics.Meter // Meter for measuring the database put request data usage
	CompTimeMeter  metrics.Meter // Meter for measuring the total time spent in database compaction
	CompReadMeter  metrics.Meter // Meter for measuring the data read during compaction
	CompWriteMeter metrics.Meter // Meter for measuring the data written during compaction
}

// NewLDBDatabase returns a LevelDB wrapped object. LDBDatabase does not persist data by
// itself, but requires a background poller which syncs every X. `Flush` should be called
// when data needs to be stored and written to disk.
func NewLDBDatabase(file string) (*LDBDatabase, error) {
	// Open the db
	db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: OpenFileLimit})
	// Check for corruption and attempt to recover
	if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
		db, err = leveldb.RecoverFile(file, nil)
	}
	// (Re)check for errors and abort if opening of the db failed
	if err != nil {
		return nil, err
	}
	database := &LDBDatabase{
		fn: file,
		db: db,
	}
	go database.meter(3 * time.Second)

	return database, nil
}
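
// Usage sketch (not part of the original file): a minimal illustration of how a
// caller might open the database, write and read a value, and shut down cleanly.
// The path and the exampleUsage name are hypothetical, for illustration only.
func exampleUsage() {
	db, err := NewLDBDatabase("/tmp/ethdb-example")
	if err != nil {
		glog.V(logger.Error).Infof("open failed: %v", err)
		return
	}
	defer db.Close()

	// Values are RLE-compressed transparently by Put and decompressed by Get.
	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		glog.V(logger.Error).Infof("put failed: %v", err)
		return
	}
	if _, err := db.Get([]byte("key")); err != nil {
		glog.V(logger.Error).Infof("get failed: %v", err)
	}
}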

// Put puts the given key / value to the queue
func (self *LDBDatabase) Put(key []byte, value []byte) error {
	// Measure the database put latency, if requested
	if self.PutTimer != nil {
		defer self.PutTimer.UpdateSince(time.Now())
	}
	// Generate the data to write to disk, update the meter and write
	dat := rle.Compress(value)

	if self.WriteMeter != nil {
		self.WriteMeter.Mark(int64(len(dat)))
	}
	return self.db.Put(key, dat, nil)
}

// Get returns the given key if it's present.
func (self *LDBDatabase) Get(key []byte) ([]byte, error) {
	// Measure the database get latency, if requested
	if self.GetTimer != nil {
		defer self.GetTimer.UpdateSince(time.Now())
	}
	// Retrieve the key and increment the miss counter if not found
	dat, err := self.db.Get(key, nil)
	if err != nil {
		if self.MissMeter != nil {
			self.MissMeter.Mark(1)
		}
		return nil, err
	}
	// Otherwise update the actually retrieved amount of data
	if self.ReadMeter != nil {
		self.ReadMeter.Mark(int64(len(dat)))
	}
	return rle.Decompress(dat)
}
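
// Metering sketch (not part of the original file): the metric fields are plain
// exported struct members, so a caller enables instrumentation by assigning
// go-metrics timers and meters after construction. The metric names below are
// hypothetical; any registry naming scheme works.
func enableMetricsExample(db *LDBDatabase) {
	db.GetTimer = metrics.GetOrRegisterTimer("ethdb/get", metrics.DefaultRegistry)
	db.PutTimer = metrics.GetOrRegisterTimer("ethdb/put", metrics.DefaultRegistry)
	db.DelTimer = metrics.GetOrRegisterTimer("ethdb/delete", metrics.DefaultRegistry)
	db.MissMeter = metrics.GetOrRegisterMeter("ethdb/misses", metrics.DefaultRegistry)
	db.ReadMeter = metrics.GetOrRegisterMeter("ethdb/read", metrics.DefaultRegistry)
	db.WriteMeter = metrics.GetOrRegisterMeter("ethdb/write", metrics.DefaultRegistry)
	db.CompTimeMeter = metrics.GetOrRegisterMeter("ethdb/compact/time", metrics.DefaultRegistry)
	db.CompReadMeter = metrics.GetOrRegisterMeter("ethdb/compact/read", metrics.DefaultRegistry)
	db.CompWriteMeter = metrics.GetOrRegisterMeter("ethdb/compact/write", metrics.DefaultRegistry)
}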

// Delete deletes the key from the queue and database
func (self *LDBDatabase) Delete(key []byte) error {
	// Measure the database delete latency, if requested
	if self.DelTimer != nil {
		defer self.DelTimer.UpdateSince(time.Now())
	}
	// Execute the actual operation
	return self.db.Delete(key, nil)
}

// NewIterator returns an iterator over the entire keyspace of the database.
func (self *LDBDatabase) NewIterator() iterator.Iterator {
	return self.db.NewIterator(nil, nil)
}

// Flush flushes out the queue to leveldb
func (self *LDBDatabase) Flush() error {
	return nil
}

// Close flushes any pending data and closes the underlying LevelDB instance.
func (self *LDBDatabase) Close() {
	if err := self.Flush(); err != nil {
		glog.V(logger.Error).Infof("error: flush '%s': %v\n", self.fn, err)
	}
	self.db.Close()
	glog.V(logger.Error).Infoln("flushed and closed db:", self.fn)
}

// LDB returns the underlying LevelDB database handle.
func (self *LDBDatabase) LDB() *leveldb.DB {
	return self.db
}

// meter periodically retrieves internal leveldb counters and reports them to
// the metrics subsystem.
//
// This is how a stats table looks like (currently):
//   Compactions
//    Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)
//   -------+------------+---------------+---------------+---------------+---------------
//      0   |          0 |       0.00000 |       1.27969 |       0.00000 |      12.31098
//      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
//      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
//      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
func (self *LDBDatabase) meter(refresh time.Duration) {
	// Create the counters to store current and previous values
	counters := make([][]float64, 2)
	for i := 0; i < 2; i++ {
		counters[i] = make([]float64, 3)
	}
	// Iterate ad infinitum and collect the stats
	for i := 1; ; i++ {
		// Retrieve the database stats
		stats, err := self.db.GetProperty("leveldb.stats")
		if err != nil {
			glog.V(logger.Error).Infof("failed to read database stats: %v", err)
			return
		}
		// Find the compaction table, skip the header
		lines := strings.Split(stats, "\n")
		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
			lines = lines[1:]
		}
		if len(lines) <= 3 {
			glog.V(logger.Error).Infof("compaction table not found")
			return
		}
		lines = lines[3:]

		// Iterate over all the table rows, and accumulate the entries
		for j := 0; j < len(counters[i%2]); j++ {
			counters[i%2][j] = 0
		}
		for _, line := range lines {
			parts := strings.Split(line, "|")
			if len(parts) != 6 {
				break
			}
			for idx, counter := range parts[3:] {
				if value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64); err != nil {
					glog.V(logger.Error).Infof("compaction entry parsing failed: %v", err)
					return
				} else {
					counters[i%2][idx] += value
				}
			}
		}
		// Update all the requested meters
		if self.CompTimeMeter != nil {
			self.CompTimeMeter.Mark(int64((counters[i%2][0] - counters[(i-1)%2][0]) * 1000 * 1000 * 1000))
		}
		if self.CompReadMeter != nil {
			self.CompReadMeter.Mark(int64((counters[i%2][1] - counters[(i-1)%2][1]) * 1024 * 1024))
		}
		if self.CompWriteMeter != nil {
			self.CompWriteMeter.Mark(int64((counters[i%2][2] - counters[(i-1)%2][2]) * 1024 * 1024))
		}
		// Sleep a bit, then repeat the stats collection
		time.Sleep(refresh)
	}
}
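
// Parsing sketch (not part of the original file): how a single compaction-table
// row decomposes under the "|" split used by meter above. The helper name is
// hypothetical; real rows come from the "leveldb.stats" property, e.g.
//   "   1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294"
// Columns 3, 4 and 5 hold Time(sec), Read(MB) and Write(MB) respectively.
func parseCompactionRowExample(row string) ([]float64, bool) {
	parts := strings.Split(row, "|")
	if len(parts) != 6 {
		return nil, false
	}
	values := make([]float64, 0, 3)
	for _, counter := range parts[3:] {
		value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
		if err != nil {
			return nil, false
		}
		values = append(values, value)
	}
	return values, true
}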