freezer_batch.go 7.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248
  1. // Copyright 2021 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package rawdb
  17. import (
  18. "fmt"
  19. "sync/atomic"
  20. "github.com/ethereum/go-ethereum/common/math"
  21. "github.com/ethereum/go-ethereum/rlp"
  22. "github.com/golang/snappy"
  23. )
// freezerBatchBufferLimit is the maximum amount of data (in bytes) that will
// be buffered in memory for a single freezer table batch before the batch
// flushes itself to the backing table.
const freezerBatchBufferLimit = 2 * 1024 * 1024
// freezerBatch is a write operation of multiple items on a freezer.
type freezerBatch struct {
	// tables holds one per-table batch for every table of the freezer,
	// keyed by the table kind.
	tables map[string]*freezerTableBatch
}
  31. func newFreezerBatch(f *Freezer) *freezerBatch {
  32. batch := &freezerBatch{tables: make(map[string]*freezerTableBatch, len(f.tables))}
  33. for kind, table := range f.tables {
  34. batch.tables[kind] = table.newBatch()
  35. }
  36. return batch
  37. }
  38. // Append adds an RLP-encoded item of the given kind.
  39. func (batch *freezerBatch) Append(kind string, num uint64, item interface{}) error {
  40. return batch.tables[kind].Append(num, item)
  41. }
  42. // AppendRaw adds an item of the given kind.
  43. func (batch *freezerBatch) AppendRaw(kind string, num uint64, item []byte) error {
  44. return batch.tables[kind].AppendRaw(num, item)
  45. }
  46. // reset initializes the batch.
  47. func (batch *freezerBatch) reset() {
  48. for _, tb := range batch.tables {
  49. tb.reset()
  50. }
  51. }
  52. // commit is called at the end of a write operation and
  53. // writes all remaining data to tables.
  54. func (batch *freezerBatch) commit() (item uint64, writeSize int64, err error) {
  55. // Check that count agrees on all batches.
  56. item = uint64(math.MaxUint64)
  57. for name, tb := range batch.tables {
  58. if item < math.MaxUint64 && tb.curItem != item {
  59. return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, tb.curItem, item)
  60. }
  61. item = tb.curItem
  62. }
  63. // Commit all table batches.
  64. for _, tb := range batch.tables {
  65. if err := tb.commit(); err != nil {
  66. return 0, 0, err
  67. }
  68. writeSize += tb.totalBytes
  69. }
  70. return item, writeSize, nil
  71. }
// freezerTableBatch is a batch for a freezer table.
type freezerTableBatch struct {
	t *freezerTable // backing table the batch eventually writes into

	sb          *snappyBuffer // compression scratch space; nil when the table is uncompressed
	encBuffer   writeBuffer   // reusable buffer for RLP encoding
	dataBuffer  []byte        // buffered (possibly compressed) item payloads awaiting commit
	indexBuffer []byte        // buffered index entries awaiting commit
	curItem     uint64        // expected index of next append
	totalBytes  int64         // counts written bytes since reset
}
  82. // newBatch creates a new batch for the freezer table.
  83. func (t *freezerTable) newBatch() *freezerTableBatch {
  84. batch := &freezerTableBatch{t: t}
  85. if !t.noCompression {
  86. batch.sb = new(snappyBuffer)
  87. }
  88. batch.reset()
  89. return batch
  90. }
  91. // reset clears the batch for reuse.
  92. func (batch *freezerTableBatch) reset() {
  93. batch.dataBuffer = batch.dataBuffer[:0]
  94. batch.indexBuffer = batch.indexBuffer[:0]
  95. batch.curItem = atomic.LoadUint64(&batch.t.items)
  96. batch.totalBytes = 0
  97. }
  98. // Append rlp-encodes and adds data at the end of the freezer table. The item number is a
  99. // precautionary parameter to ensure data correctness, but the table will reject already
  100. // existing data.
  101. func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
  102. if item != batch.curItem {
  103. return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
  104. }
  105. // Encode the item.
  106. batch.encBuffer.Reset()
  107. if err := rlp.Encode(&batch.encBuffer, data); err != nil {
  108. return err
  109. }
  110. encItem := batch.encBuffer.data
  111. if batch.sb != nil {
  112. encItem = batch.sb.compress(encItem)
  113. }
  114. return batch.appendItem(encItem)
  115. }
  116. // AppendRaw injects a binary blob at the end of the freezer table. The item number is a
  117. // precautionary parameter to ensure data correctness, but the table will reject already
  118. // existing data.
  119. func (batch *freezerTableBatch) AppendRaw(item uint64, blob []byte) error {
  120. if item != batch.curItem {
  121. return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
  122. }
  123. encItem := blob
  124. if batch.sb != nil {
  125. encItem = batch.sb.compress(blob)
  126. }
  127. return batch.appendItem(encItem)
  128. }
  129. func (batch *freezerTableBatch) appendItem(data []byte) error {
  130. // Check if item fits into current data file.
  131. itemSize := int64(len(data))
  132. itemOffset := batch.t.headBytes + int64(len(batch.dataBuffer))
  133. if itemOffset+itemSize > int64(batch.t.maxFileSize) {
  134. // It doesn't fit, go to next file first.
  135. if err := batch.commit(); err != nil {
  136. return err
  137. }
  138. if err := batch.t.advanceHead(); err != nil {
  139. return err
  140. }
  141. itemOffset = 0
  142. }
  143. // Put data to buffer.
  144. batch.dataBuffer = append(batch.dataBuffer, data...)
  145. batch.totalBytes += itemSize
  146. // Put index entry to buffer.
  147. entry := indexEntry{filenum: batch.t.headId, offset: uint32(itemOffset + itemSize)}
  148. batch.indexBuffer = entry.append(batch.indexBuffer)
  149. batch.curItem++
  150. return batch.maybeCommit()
  151. }
  152. // maybeCommit writes the buffered data if the buffer is full enough.
  153. func (batch *freezerTableBatch) maybeCommit() error {
  154. if len(batch.dataBuffer) > freezerBatchBufferLimit {
  155. return batch.commit()
  156. }
  157. return nil
  158. }
  159. // commit writes the batched items to the backing freezerTable.
  160. func (batch *freezerTableBatch) commit() error {
  161. // Write data.
  162. _, err := batch.t.head.Write(batch.dataBuffer)
  163. if err != nil {
  164. return err
  165. }
  166. dataSize := int64(len(batch.dataBuffer))
  167. batch.dataBuffer = batch.dataBuffer[:0]
  168. // Write indices.
  169. _, err = batch.t.index.Write(batch.indexBuffer)
  170. if err != nil {
  171. return err
  172. }
  173. indexSize := int64(len(batch.indexBuffer))
  174. batch.indexBuffer = batch.indexBuffer[:0]
  175. // Update headBytes of table.
  176. batch.t.headBytes += dataSize
  177. atomic.StoreUint64(&batch.t.items, batch.curItem)
  178. // Update metrics.
  179. batch.t.sizeGauge.Inc(dataSize + indexSize)
  180. batch.t.writeMeter.Mark(dataSize + indexSize)
  181. return nil
  182. }
  183. // snappyBuffer writes snappy in block format, and can be reused. It is
  184. // reset when WriteTo is called.
  185. type snappyBuffer struct {
  186. dst []byte
  187. }
  188. // compress snappy-compresses the data.
  189. func (s *snappyBuffer) compress(data []byte) []byte {
  190. // The snappy library does not care what the capacity of the buffer is,
  191. // but only checks the length. If the length is too small, it will
  192. // allocate a brand new buffer.
  193. // To avoid that, we check the required size here, and grow the size of the
  194. // buffer to utilize the full capacity.
  195. if n := snappy.MaxEncodedLen(len(data)); len(s.dst) < n {
  196. if cap(s.dst) < n {
  197. s.dst = make([]byte, n)
  198. }
  199. s.dst = s.dst[:n]
  200. }
  201. s.dst = snappy.Encode(s.dst, data)
  202. return s.dst
  203. }
  204. // writeBuffer implements io.Writer for a byte slice.
  205. type writeBuffer struct {
  206. data []byte
  207. }
  208. func (wb *writeBuffer) Write(data []byte) (int, error) {
  209. wb.data = append(wb.data, data...)
  210. return len(data), nil
  211. }
  212. func (wb *writeBuffer) Reset() {
  213. wb.data = wb.data[:0]
  214. }