trie_fuzzer.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stacktrie
import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"hash"
	"io"
	"sort"

	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
	"golang.org/x/crypto/sha3"
)
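// fuzzer wraps the raw fuzz input and tracks whether that input has been
// exhausted while reading.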
type fuzzer struct {
	input     io.Reader
	exhausted bool
	debugging bool
}
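// read returns 'size' bytes from the fuzz input, marking the fuzzer as
// exhausted if the input runs out.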
func (f *fuzzer) read(size int) []byte {
	out := make([]byte, size)
	if _, err := f.input.Read(out); err != nil {
		f.exhausted = true
	}
	return out
}
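// readSlice returns a byte slice whose length is derived from the fuzz input
// and lies in the range [min, max).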
func (f *fuzzer) readSlice(min, max int) []byte {
	var a uint16
	binary.Read(f.input, binary.LittleEndian, &a)
	size := min + int(a)%(max-min)
	out := make([]byte, size)
	if _, err := f.input.Read(out); err != nil {
		f.exhausted = true
	}
	return out
}
// spongeDb is a dummy db backend which accumulates writes in a sponge
type spongeDb struct {
	sponge hash.Hash
	debug  bool
}

func (s *spongeDb) Has(key []byte) (bool, error)             { panic("implement me") }
func (s *spongeDb) Get(key []byte) ([]byte, error)           { return nil, errors.New("no such elem") }
func (s *spongeDb) Delete(key []byte) error                  { panic("implement me") }
func (s *spongeDb) NewBatch() ethdb.Batch                    { return &spongeBatch{s} }
func (s *spongeDb) NewBatchWithSize(size int) ethdb.Batch    { return &spongeBatch{s} }
func (s *spongeDb) NewSnapshot() (ethdb.Snapshot, error)     { panic("implement me") }
func (s *spongeDb) Stat(property string) (string, error)     { panic("implement me") }
func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
func (s *spongeDb) Close() error                             { return nil }
func (s *spongeDb) Put(key []byte, value []byte) error {
	if s.debug {
		fmt.Printf("db.Put %x : %x\n", key, value)
	}
	s.sponge.Write(key)
	s.sponge.Write(value)
	return nil
}
func (s *spongeDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { panic("implement me") }
// spongeBatch is a dummy batch which immediately writes to the underlying spongedb
type spongeBatch struct {
	db *spongeDb
}

func (b *spongeBatch) Put(key, value []byte) error {
	b.db.Put(key, value)
	return nil
}
func (b *spongeBatch) Delete(key []byte) error             { panic("implement me") }
func (b *spongeBatch) ValueSize() int                      { return 100 }
func (b *spongeBatch) Write() error                        { return nil }
func (b *spongeBatch) Reset()                              {}
func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil }
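// kv is a key/value pair, and kvs implements sort.Interface so the pairs can
// be ordered by key before insertion into the stacktrie.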
type kv struct {
	k, v []byte
}
type kvs []kv

func (k kvs) Len() int {
	return len(k)
}
func (k kvs) Less(i, j int) bool {
	return bytes.Compare(k[i].k, k[j].k) < 0
}
func (k kvs) Swap(i, j int) {
	k[j], k[i] = k[i], k[j]
}
// Fuzz is the fuzzing entry point. The function must return
//
//   - 1 if the fuzzer should increase priority of the given input during
//     subsequent fuzzing (for example, the input is lexically correct and was
//     parsed successfully);
//   - -1 if the input must not be added to corpus even if it gives new coverage; and
//   - 0 otherwise;
//
// other values are reserved for future use.
func Fuzz(data []byte) int {
	f := fuzzer{
		input:     bytes.NewReader(data),
		exhausted: false,
	}
	return f.fuzz()
}
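// Debug is like Fuzz, but additionally prints every key/value pair inserted
// into the stacktrie, which helps reproduce failing inputs.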
func Debug(data []byte) int {
	f := fuzzer{
		input:     bytes.NewReader(data),
		exhausted: false,
		debugging: true,
	}
	return f.fuzz()
}
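// fuzz builds a regular trie and a stacktrie from the same key/value pairs and
// cross-checks that both produce the same root hash and the same sequence of
// database writes.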
func (f *fuzzer) fuzz() int {
	// This spongeDb is used to check the sequence of disk-db-writes
	var (
		spongeA     = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
		dbA         = trie.NewDatabase(spongeA)
		trieA       = trie.NewEmpty(dbA)
		spongeB     = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
		trieB       = trie.NewStackTrie(spongeB)
		vals        kvs
		useful      bool
		maxElements = 10000
		// operate on unique keys only
		keys = make(map[string]struct{})
	)
	// Fill the trie with elements
	for i := 0; !f.exhausted && i < maxElements; i++ {
		k := f.read(32)
		v := f.readSlice(1, 500)
		if f.exhausted {
			// If it was exhausted while reading, the value may be all zeroes,
			// thus 'deletion' which is not supported on stacktrie
			break
		}
		if _, present := keys[string(k)]; present {
			// This key is a duplicate, ignore it
			continue
		}
		keys[string(k)] = struct{}{}
		vals = append(vals, kv{k: k, v: v})
		trieA.Update(k, v)
		useful = true
	}
	if !useful {
		return 0
	}
	// Flush trie -> database
	rootA, nodes, err := trieA.Commit(false)
	if err != nil {
		panic(err)
	}
	if nodes != nil {
		dbA.Update(trie.NewWithNodeSet(nodes))
	}
	// Flush memdb -> disk (sponge)
	dbA.Commit(rootA, false, nil)
	// Stacktrie requires sorted insertion
	sort.Sort(vals)
	for _, kv := range vals {
		if f.debugging {
			fmt.Printf("{\"%#x\" , \"%#x\"} // stacktrie.Update\n", kv.k, kv.v)
		}
		trieB.Update(kv.k, kv.v)
	}
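	// The two tries must agree on both the root hash and the sequence of
	// writes that reached the backing database.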
	rootB := trieB.Hash()
	if _, err := trieB.Commit(); err != nil {
		panic(err)
	}
	if rootA != rootB {
		panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootB))
	}
	sumA := spongeA.sponge.Sum(nil)
	sumB := spongeB.sponge.Sum(nil)
	if !bytes.Equal(sumA, sumB) {
		panic(fmt.Sprintf("sequence differ: (trie) %x != %x (stacktrie)", sumA, sumB))
	}
	return 1
}