// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"

	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)
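
// testDbStore wraps an LDBStore together with the temporary directory that backs
// it, so tests can remove the directory once they are done with the store.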
type testDbStore struct {
	*LDBStore
	dir string
}
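
// newTestDbStore creates an LDBStore rooted in a fresh temporary directory and
// returns it together with a cleanup function that closes the store and removes
// the directory. When mock is set, the store is constructed with NewMockDbStore
// on top of an in-memory mock global store instead of a plain NewLDBStore.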
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		return nil, func() {}, err
	}

	var db *LDBStore
	storeparams := NewDefaultStoreParams()
	params := NewLDBStoreParams(storeparams, dir)
	params.Po = testPoFunc

	if mock {
		globalStore := mem.NewGlobalStore()
		addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
		mockStore := globalStore.NewNodeStore(addr)

		db, err = NewMockDbStore(params, mockStore)
	} else {
		db, err = NewLDBStore(params)
	}

	cleanup := func() {
		if db != nil {
			db.Close()
		}
		err = os.RemoveAll(dir)
		if err != nil {
			panic(fmt.Sprintf("db cleanup failed: %v", err))
		}
	}

	return &testDbStore{db, dir}, cleanup, err
}
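
// testPoFunc is the proximity order function used by the test store: it returns
// the proximity of an address to an all-zero base key.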
func testPoFunc(k Address) (ret uint8) {
	basekey := make([]byte, 32)
	return uint8(Proximity(basekey[:], k[:]))
}

func (db *testDbStore) close() {
	db.Close()
	err := os.RemoveAll(db.dir)
	if err != nil {
		panic(err)
	}
}
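
// testDbStoreRandom and testDbStoreCorrect set up a (possibly mock-backed) test
// store and pass their n, processors and chunksize arguments on to the shared
// testStoreRandom and testStoreCorrect helpers.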
func testDbStoreRandom(n int, processors int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreRandom(db, processors, n, chunksize, t)
}

func testDbStoreCorrect(n int, processors int, chunksize int64, mock bool, t *testing.T) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}
	testStoreCorrect(db, processors, n, chunksize, t)
}
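
// The numeric suffixes on the test names mirror the first two arguments of each
// call, e.g. _8_5k passes 8 and 5000 (5k).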
func TestDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 1, 0, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 1, 4096, false, t)
}

func TestDbStoreRandom_1_5k(t *testing.T) {
	testDbStoreRandom(1, 5000, 0, false, t)
}

func TestDbStoreRandom_8_5k(t *testing.T) {
	testDbStoreRandom(8, 5000, 0, false, t)
}

func TestDbStoreCorrect_1_5k(t *testing.T) {
	testDbStoreCorrect(1, 5000, 4096, false, t)
}

func TestDbStoreCorrect_8_5k(t *testing.T) {
	testDbStoreCorrect(8, 5000, 4096, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
	testDbStoreRandom(1, 1, 0, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
	testDbStoreCorrect(1, 1, 4096, true, t)
}

func TestMockDbStoreRandom_1_5k(t *testing.T) {
	testDbStoreRandom(1, 5000, 0, true, t)
}

func TestMockDbStoreRandom_8_5k(t *testing.T) {
	testDbStoreRandom(8, 5000, 0, true, t)
}

func TestMockDbStoreCorrect_1_5k(t *testing.T) {
	testDbStoreCorrect(1, 5000, 4096, true, t)
}

func TestMockDbStoreCorrect_8_5k(t *testing.T) {
	testDbStoreCorrect(8, 5000, 4096, true, t)
}
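
// testDbStoreNotFound checks that asking the store for an address that was never
// stored (here ZeroAddr) returns ErrChunkNotFound.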
func testDbStoreNotFound(t *testing.T, mock bool) {
	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	_, err = db.Get(ZeroAddr)
	if err != ErrChunkNotFound {
		t.Errorf("Expected ErrChunkNotFound, got %v", err)
	}
}

func TestDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, false)
}

func TestMockDbStoreNotFound(t *testing.T) {
	testDbStoreNotFound(t, true)
}
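
// testIterator stores a batch of random chunks and then walks every proximity
// order bin with SyncIterator, checking that the keys reported by the iterator
// match the keys that were stored.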
func testIterator(t *testing.T, mock bool) {
	var chunkcount int = 32
	var i int
	var poc uint
	chunkkeys := NewAddressCollection(chunkcount)
	chunkkeys_results := NewAddressCollection(chunkcount)

	db, cleanup, err := newTestDbStore(mock, false)
	defer cleanup()
	if err != nil {
		t.Fatalf("init dbStore failed: %v", err)
	}

	chunks := GenerateRandomChunks(DefaultChunkSize, chunkcount)

	wg := &sync.WaitGroup{}
	wg.Add(len(chunks))
	for i = 0; i < len(chunks); i++ {
		db.Put(chunks[i])
		chunkkeys[i] = chunks[i].Addr
		j := i
		go func() {
			defer wg.Done()
			<-chunks[j].dbStoredC
		}()
	}

	//testSplit(m, l, 128, chunkkeys, t)

	for i = 0; i < len(chunkkeys); i++ {
		log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
	}

	wg.Wait()

	// walk every proximity order bin and collect the keys the iterator yields
	i = 0
	for poc = 0; poc <= 255; poc++ {
		err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
			log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
			// n is the chunk's data index in the store, which starts at 1, hence n-1
			chunkkeys_results[n-1] = k
			i++
			return true
		})
		if err != nil {
			t.Fatalf("Iterator call failed: %v", err)
		}
	}

	for i = 0; i < chunkcount; i++ {
		if !bytes.Equal(chunkkeys[i], chunkkeys_results[i]) {
			t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeys_results[i])
		}
	}
}

func TestIterator(t *testing.T) {
	testIterator(t, false)
}

func TestMockIterator(t *testing.T) {
	testIterator(t, true)
}
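
// benchmarkDbStorePut and benchmarkDbStoreGet create a fresh store (optionally
// mock backed) and delegate to the shared benchmarkStorePut and benchmarkStoreGet
// helpers; the benchmarks below exercise them with 1 and 8 processors over 500
// chunks of 4096 bytes.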
func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStorePut(db, processors, n, chunksize, b)
}

func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
	db, cleanup, err := newTestDbStore(mock, true)
	defer cleanup()
	if err != nil {
		b.Fatalf("init dbStore failed: %v", err)
	}
	benchmarkStoreGet(db, processors, n, chunksize, b)
}

func BenchmarkDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, false, b)
}

func BenchmarkDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, false, b)
}

func BenchmarkDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, false, b)
}

func BenchmarkDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, false, b)
}

func BenchmarkMockDbStorePut_1_500(b *testing.B) {
	benchmarkDbStorePut(500, 1, 4096, true, b)
}

func BenchmarkMockDbStorePut_8_500(b *testing.B) {
	benchmarkDbStorePut(500, 8, 4096, true, b)
}

func BenchmarkMockDbStoreGet_1_500(b *testing.B) {
	benchmarkDbStoreGet(500, 1, 4096, true, b)
}

func BenchmarkMockDbStoreGet_8_500(b *testing.B) {
	benchmarkDbStoreGet(500, 8, 4096, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
// retrieve them, provided we don't hit the garbage collection
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
	capacity := 50
	n := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		go ldb.Put(chunks[i])
	}

	// wait for all chunks to be stored
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(chunks[i].Addr)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(ret.SData, chunks[i].SData) {
			t.Fatal("expected to get the same data back, but got something else")
		}
		log.Info("got back chunk", "chunk", ret)
	}

	if ldb.entryCnt != uint64(n+1) {
		t.Fatalf("expected entryCnt to be equal to %v, but got %v", n+1, ldb.entryCnt)
	}

	if ldb.accessCnt != uint64(2*n+1) {
		t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n+1, ldb.accessCnt)
	}
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than the store's capacity, and
// can retrieve only some of them afterwards, because garbage collection must have cleared at least the excess
func TestLDBStoreCollectGarbage(t *testing.T) {
	capacity := 500
	n := 2000

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		ldb.Put(chunks[i])
	}

	// wait for all chunks to be stored
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	// wait for garbage collection to kick in on the responsible actor
	time.Sleep(5 * time.Second)

	var missing int
	for i := 0; i < n; i++ {
		ret, err := ldb.Get(chunks[i].Addr)
		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
			missing++
			continue
		}
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ret.SData, chunks[i].SData) {
			t.Fatal("expected to get the same data back, but got something else")
		}

		log.Trace("got back chunk", "chunk", ret)
	}

	if missing < n-capacity {
		t.Fatalf("gc failure: expected to miss at least %v chunks, but only %v are actually missing", n-capacity, missing)
	}

	log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(200)
	defer cleanup()

	n := 100

	chunks := []*Chunk{}
	for i := 0; i < n; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		go ldb.Put(chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	for i := 0; i < n; i++ {
		// delete all even index chunks
		if i%2 == 0 {
			key := chunks[i].Addr
			ikey := getIndexKey(key)

			var indx dpaDBIndex
			ldb.tryAccessIdx(ikey, &indx)

			ldb.delete(indx.Idx, ikey, ldb.po(key))
		}
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	for i := 0; i < n; i++ {
		ret, err := ldb.Get(chunks[i].Addr)

		if i%2 == 0 {
			// expect even chunks to be missing
			if err == nil || ret != nil {
				t.Fatal("expected deleted chunk to be missing, but it was still returned")
			}
		} else {
			// expect odd chunks to be retrieved successfully
			if err != nil {
				t.Fatalf("expected no error, but got %s", err)
			}

			if !bytes.Equal(ret.SData, chunks[i].SData) {
				t.Fatal("expected to get the same data back, but got something else")
			}
		}
	}
}

// TestLDBStoreRemoveThenCollectGarbage tests that we can delete chunks and that we can trigger garbage collection
func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
	capacity := 10

	ldb, cleanup := newLDBStore(t)
	ldb.setCapacity(uint64(capacity))

	n := 7

	chunks := []*Chunk{}
	for i := 0; i < capacity; i++ {
		c := GenerateRandomChunk(DefaultChunkSize)
		chunks = append(chunks, c)
		log.Trace("generate random chunk", "idx", i, "chunk", c)
	}

	for i := 0; i < n; i++ {
		ldb.Put(chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	// delete all chunks
	for i := 0; i < n; i++ {
		key := chunks[i].Addr
		ikey := getIndexKey(key)

		var indx dpaDBIndex
		ldb.tryAccessIdx(ikey, &indx)

		ldb.delete(indx.Idx, ikey, ldb.po(key))
	}

	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

	cleanup()

	ldb, cleanup = newLDBStore(t)
	ldb.setCapacity(uint64(capacity))
	defer cleanup()

	n = 10

	for i := 0; i < n; i++ {
		ldb.Put(chunks[i])
	}

	// wait for all chunks to be stored before continuing
	for i := 0; i < n; i++ {
		<-chunks[i].dbStoredC
	}

	// expect the first chunk to be missing, because it has the smallest access value
	idx := 0
	ret, err := ldb.Get(chunks[idx].Addr)
	if err == nil || ret != nil {
		t.Fatal("expected first chunk to be missing, but it was still returned")
	}

	// expect the last chunk to be present, as it has the largest access value
	idx = 9
	ret, err = ldb.Get(chunks[idx].Addr)
	if err != nil {
		t.Fatalf("expected no error, but got %s", err)
	}

	if !bytes.Equal(ret.SData, chunks[idx].SData) {
		t.Fatal("expected to get the same data back, but got something else")
	}
}