difflayer_test.go 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399
  1. // Copyright 2019 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package snapshot
  17. import (
  18. "bytes"
  19. "math/rand"
  20. "testing"
  21. "github.com/VictoriaMetrics/fastcache"
  22. "github.com/ethereum/go-ethereum/common"
  23. "github.com/ethereum/go-ethereum/crypto"
  24. "github.com/ethereum/go-ethereum/ethdb/memorydb"
  25. )
  26. func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} {
  27. copy := make(map[common.Hash]struct{})
  28. for hash := range destructs {
  29. copy[hash] = struct{}{}
  30. }
  31. return copy
  32. }
  33. func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte {
  34. copy := make(map[common.Hash][]byte)
  35. for hash, blob := range accounts {
  36. copy[hash] = blob
  37. }
  38. return copy
  39. }
  40. func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
  41. copy := make(map[common.Hash]map[common.Hash][]byte)
  42. for accHash, slots := range storage {
  43. copy[accHash] = make(map[common.Hash][]byte)
  44. for slotHash, blob := range slots {
  45. copy[accHash][slotHash] = blob
  46. }
  47. }
  48. return copy
  49. }
// TestMergeBasics tests some simple merges
func TestMergeBasics(t *testing.T) {
	var (
		destructs = make(map[common.Hash]struct{})
		accounts  = make(map[common.Hash][]byte)
		storage   = make(map[common.Hash]map[common.Hash][]byte)
	)
	// Fill up a parent: 100 random accounts, ~1/4 also destructed, ~1/2 with
	// a single random storage slot.
	for i := 0; i < 100; i++ {
		h := randomHash()
		data := randomAccount()
		accounts[h] = data
		if rand.Intn(4) == 0 {
			destructs[h] = struct{}{}
		}
		if rand.Intn(2) == 0 {
			accStorage := make(map[common.Hash][]byte)
			value := make([]byte, 32)
			rand.Read(value)
			accStorage[randomHash()] = value
			storage[h] = accStorage
		}
	}
	// Add some (identical) layers on top. Each layer gets its own copies of
	// the maps, since diff layers take ownership of their inputs.
	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage))
	// And flatten
	merged := (child.flatten()).(*diffLayer)
	{ // Check account lists
		// NOTE: these three checks are order-sensitive. The cached accountList
		// is built lazily, so it must still be empty right after flattening...
		if have, want := len(merged.accountList), 0; have != want {
			t.Errorf("accountList wrong: have %v, want %v", have, want)
		}
		// ...calling AccountList() generates it and returns every account...
		if have, want := len(merged.AccountList()), len(accounts); have != want {
			t.Errorf("AccountList() wrong: have %v, want %v", have, want)
		}
		// ...and afterwards the cached list must be fully populated.
		if have, want := len(merged.accountList), len(accounts); have != want {
			t.Errorf("accountList [2] wrong: have %v, want %v", have, want)
		}
	}
	{ // Check account drops
		if have, want := len(merged.destructSet), len(destructs); have != want {
			t.Errorf("accountDrop wrong: have %v, want %v", have, want)
		}
	}
	{ // Check storage lists
		// The per-account storage lists are likewise generated lazily: before
		// StorageList(aHash) runs for an account, merged.storageList only
		// holds entries for the i accounts queried so far.
		i := 0
		for aHash, sMap := range storage {
			if have, want := len(merged.storageList), i; have != want {
				t.Errorf("[1] storageList wrong: have %v, want %v", have, want)
			}
			if have, want := len(merged.StorageList(aHash)), len(sMap); have != want {
				t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want)
			}
			if have, want := len(merged.storageList[aHash]), len(sMap); have != want {
				t.Errorf("storageList wrong: have %v, want %v", have, want)
			}
			i++
		}
	}
}
  113. // TestMergeDelete tests some deletion
  114. func TestMergeDelete(t *testing.T) {
  115. var (
  116. storage = make(map[common.Hash]map[common.Hash][]byte)
  117. )
  118. // Fill up a parent
  119. h1 := common.HexToHash("0x01")
  120. h2 := common.HexToHash("0x02")
  121. flipDrops := func() map[common.Hash]struct{} {
  122. return map[common.Hash]struct{}{
  123. h2: struct{}{},
  124. }
  125. }
  126. flipAccs := func() map[common.Hash][]byte {
  127. return map[common.Hash][]byte{
  128. h1: randomAccount(),
  129. }
  130. }
  131. flopDrops := func() map[common.Hash]struct{} {
  132. return map[common.Hash]struct{}{
  133. h1: struct{}{},
  134. }
  135. }
  136. flopAccs := func() map[common.Hash][]byte {
  137. return map[common.Hash][]byte{
  138. h2: randomAccount(),
  139. }
  140. }
  141. // Add some flipAccs-flopping layers on top
  142. parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage)
  143. child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
  144. child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
  145. child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
  146. child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
  147. child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage)
  148. child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage)
  149. if data, _ := child.Account(h1); data == nil {
  150. t.Errorf("last diff layer: expected %x account to be non-nil", h1)
  151. }
  152. if data, _ := child.Account(h2); data != nil {
  153. t.Errorf("last diff layer: expected %x account to be nil", h2)
  154. }
  155. if _, ok := child.destructSet[h1]; ok {
  156. t.Errorf("last diff layer: expected %x drop to be missing", h1)
  157. }
  158. if _, ok := child.destructSet[h2]; !ok {
  159. t.Errorf("last diff layer: expected %x drop to be present", h1)
  160. }
  161. // And flatten
  162. merged := (child.flatten()).(*diffLayer)
  163. if data, _ := merged.Account(h1); data == nil {
  164. t.Errorf("merged layer: expected %x account to be non-nil", h1)
  165. }
  166. if data, _ := merged.Account(h2); data != nil {
  167. t.Errorf("merged layer: expected %x account to be nil", h2)
  168. }
  169. if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
  170. t.Errorf("merged diff layer: expected %x drop to be present", h1)
  171. }
  172. if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
  173. t.Errorf("merged diff layer: expected %x drop to be present", h1)
  174. }
  175. // If we add more granular metering of memory, we can enable this again,
  176. // but it's not implemented for now
  177. //if have, want := merged.memory, child.memory; have != want {
  178. // t.Errorf("mem wrong: have %d, want %d", have, want)
  179. //}
  180. }
  181. // This tests that if we create a new account, and set a slot, and then merge
  182. // it, the lists will be correct.
  183. func TestInsertAndMerge(t *testing.T) {
  184. // Fill up a parent
  185. var (
  186. acc = common.HexToHash("0x01")
  187. slot = common.HexToHash("0x02")
  188. parent *diffLayer
  189. child *diffLayer
  190. )
  191. {
  192. var (
  193. destructs = make(map[common.Hash]struct{})
  194. accounts = make(map[common.Hash][]byte)
  195. storage = make(map[common.Hash]map[common.Hash][]byte)
  196. )
  197. parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage)
  198. }
  199. {
  200. var (
  201. destructs = make(map[common.Hash]struct{})
  202. accounts = make(map[common.Hash][]byte)
  203. storage = make(map[common.Hash]map[common.Hash][]byte)
  204. )
  205. accounts[acc] = randomAccount()
  206. storage[acc] = make(map[common.Hash][]byte)
  207. storage[acc][slot] = []byte{0x01}
  208. child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
  209. }
  210. // And flatten
  211. merged := (child.flatten()).(*diffLayer)
  212. { // Check that slot value is present
  213. have, _ := merged.Storage(acc, slot)
  214. if want := []byte{0x01}; !bytes.Equal(have, want) {
  215. t.Errorf("merged slot value wrong: have %x, want %x", have, want)
  216. }
  217. }
  218. }
  219. func emptyLayer() *diskLayer {
  220. return &diskLayer{
  221. diskdb: memorydb.New(),
  222. cache: fastcache.New(500 * 1024),
  223. }
  224. }
  225. // BenchmarkSearch checks how long it takes to find a non-existing key
  226. // BenchmarkSearch-6 200000 10481 ns/op (1K per layer)
  227. // BenchmarkSearch-6 200000 10760 ns/op (10K per layer)
  228. // BenchmarkSearch-6 100000 17866 ns/op
  229. //
  230. // BenchmarkSearch-6 500000 3723 ns/op (10k per layer, only top-level RLock()
  231. func BenchmarkSearch(b *testing.B) {
  232. // First, we set up 128 diff layers, with 1K items each
  233. fill := func(parent snapshot) *diffLayer {
  234. var (
  235. destructs = make(map[common.Hash]struct{})
  236. accounts = make(map[common.Hash][]byte)
  237. storage = make(map[common.Hash]map[common.Hash][]byte)
  238. )
  239. for i := 0; i < 10000; i++ {
  240. accounts[randomHash()] = randomAccount()
  241. }
  242. return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
  243. }
  244. var layer snapshot
  245. layer = emptyLayer()
  246. for i := 0; i < 128; i++ {
  247. layer = fill(layer)
  248. }
  249. key := crypto.Keccak256Hash([]byte{0x13, 0x38})
  250. b.ResetTimer()
  251. for i := 0; i < b.N; i++ {
  252. layer.AccountRLP(key)
  253. }
  254. }
  255. // BenchmarkSearchSlot checks how long it takes to find a non-existing key
  256. // - Number of layers: 128
  257. // - Each layers contains the account, with a couple of storage slots
  258. // BenchmarkSearchSlot-6 100000 14554 ns/op
  259. // BenchmarkSearchSlot-6 100000 22254 ns/op (when checking parent root using mutex)
  260. // BenchmarkSearchSlot-6 100000 14551 ns/op (when checking parent number using atomic)
  261. // With bloom filter:
  262. // BenchmarkSearchSlot-6 3467835 351 ns/op
  263. func BenchmarkSearchSlot(b *testing.B) {
  264. // First, we set up 128 diff layers, with 1K items each
  265. accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
  266. storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37})
  267. accountRLP := randomAccount()
  268. fill := func(parent snapshot) *diffLayer {
  269. var (
  270. destructs = make(map[common.Hash]struct{})
  271. accounts = make(map[common.Hash][]byte)
  272. storage = make(map[common.Hash]map[common.Hash][]byte)
  273. )
  274. accounts[accountKey] = accountRLP
  275. accStorage := make(map[common.Hash][]byte)
  276. for i := 0; i < 5; i++ {
  277. value := make([]byte, 32)
  278. rand.Read(value)
  279. accStorage[randomHash()] = value
  280. storage[accountKey] = accStorage
  281. }
  282. return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
  283. }
  284. var layer snapshot
  285. layer = emptyLayer()
  286. for i := 0; i < 128; i++ {
  287. layer = fill(layer)
  288. }
  289. b.ResetTimer()
  290. for i := 0; i < b.N; i++ {
  291. layer.Storage(accountKey, storageKey)
  292. }
  293. }
  294. // With accountList and sorting
  295. // BenchmarkFlatten-6 50 29890856 ns/op
  296. //
  297. // Without sorting and tracking accountlist
  298. // BenchmarkFlatten-6 300 5511511 ns/op
  299. func BenchmarkFlatten(b *testing.B) {
  300. fill := func(parent snapshot) *diffLayer {
  301. var (
  302. destructs = make(map[common.Hash]struct{})
  303. accounts = make(map[common.Hash][]byte)
  304. storage = make(map[common.Hash]map[common.Hash][]byte)
  305. )
  306. for i := 0; i < 100; i++ {
  307. accountKey := randomHash()
  308. accounts[accountKey] = randomAccount()
  309. accStorage := make(map[common.Hash][]byte)
  310. for i := 0; i < 20; i++ {
  311. value := make([]byte, 32)
  312. rand.Read(value)
  313. accStorage[randomHash()] = value
  314. }
  315. storage[accountKey] = accStorage
  316. }
  317. return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
  318. }
  319. b.ResetTimer()
  320. for i := 0; i < b.N; i++ {
  321. b.StopTimer()
  322. var layer snapshot
  323. layer = emptyLayer()
  324. for i := 1; i < 128; i++ {
  325. layer = fill(layer)
  326. }
  327. b.StartTimer()
  328. for i := 1; i < 128; i++ {
  329. dl, ok := layer.(*diffLayer)
  330. if !ok {
  331. break
  332. }
  333. layer = dl.flatten()
  334. }
  335. b.StopTimer()
  336. }
  337. }
  338. // This test writes ~324M of diff layers to disk, spread over
  339. // - 128 individual layers,
  340. // - each with 200 accounts
  341. // - containing 200 slots
  342. //
  343. // BenchmarkJournal-6 1 1471373923 ns/ops
  344. // BenchmarkJournal-6 1 1208083335 ns/op // bufio writer
  345. func BenchmarkJournal(b *testing.B) {
  346. fill := func(parent snapshot) *diffLayer {
  347. var (
  348. destructs = make(map[common.Hash]struct{})
  349. accounts = make(map[common.Hash][]byte)
  350. storage = make(map[common.Hash]map[common.Hash][]byte)
  351. )
  352. for i := 0; i < 200; i++ {
  353. accountKey := randomHash()
  354. accounts[accountKey] = randomAccount()
  355. accStorage := make(map[common.Hash][]byte)
  356. for i := 0; i < 200; i++ {
  357. value := make([]byte, 32)
  358. rand.Read(value)
  359. accStorage[randomHash()] = value
  360. }
  361. storage[accountKey] = accStorage
  362. }
  363. return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
  364. }
  365. layer := snapshot(new(diskLayer))
  366. for i := 1; i < 128; i++ {
  367. layer = fill(layer)
  368. }
  369. b.ResetTimer()
  370. for i := 0; i < b.N; i++ {
  371. layer.Journal(new(bytes.Buffer))
  372. }
  373. }