ldbstore_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
    "bytes"
    "context"
    "encoding/binary"
    "fmt"
    "io/ioutil"
    "os"
    "strconv"
    "strings"
    "testing"

    "github.com/ethereum/go-ethereum/common"
    ch "github.com/ethereum/go-ethereum/swarm/chunk"
    "github.com/ethereum/go-ethereum/swarm/log"
    "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"

    ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
)

type testDbStore struct {
    *LDBStore
    dir string
}

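// newTestDbStore creates an LDBStore in a fresh temporary directory, using
// testPoFunc as the proximity function. If mock is true, the store is backed
// by an in-memory mock global store. It returns the store, a cleanup function
// that closes the store and removes the directory, and any initialization error.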
func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
    dir, err := ioutil.TempDir("", "bzz-storage-test")
    if err != nil {
        return nil, func() {}, err
    }

    var db *LDBStore
    storeparams := NewDefaultStoreParams()
    params := NewLDBStoreParams(storeparams, dir)
    params.Po = testPoFunc

    if mock {
        globalStore := mem.NewGlobalStore()
        addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
        mockStore := globalStore.NewNodeStore(addr)

        db, err = NewMockDbStore(params, mockStore)
    } else {
        db, err = NewLDBStore(params)
    }

    cleanup := func() {
        if db != nil {
            db.Close()
        }
        err = os.RemoveAll(dir)
        if err != nil {
            panic(fmt.Sprintf("db cleanup failed: %v", err))
        }
    }

    return &testDbStore{db, dir}, cleanup, err
}

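// testPoFunc computes the proximity order of key k relative to an all-zero base key.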
func testPoFunc(k Address) (ret uint8) {
    basekey := make([]byte, 32)
    return uint8(Proximity(basekey, k[:]))
}

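// testDbStoreRandom and testDbStoreCorrect run the shared store tests against a
// freshly created test LDBStore, optionally backed by the mock store.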
func testDbStoreRandom(n int, mock bool, t *testing.T) {
    db, cleanup, err := newTestDbStore(mock, true)
    defer cleanup()
    if err != nil {
        t.Fatalf("init dbStore failed: %v", err)
    }
    testStoreRandom(db, n, t)
}

func testDbStoreCorrect(n int, mock bool, t *testing.T) {
    db, cleanup, err := newTestDbStore(mock, false)
    defer cleanup()
    if err != nil {
        t.Fatalf("init dbStore failed: %v", err)
    }
    testStoreCorrect(db, n, t)
}

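// TestMarkAccessed stores a single chunk and verifies that MarkAccessed
// increments the chunk's access counter in its index entry.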
func TestMarkAccessed(t *testing.T) {
    db, cleanup, err := newTestDbStore(false, true)
    defer cleanup()
    if err != nil {
        t.Fatalf("init dbStore failed: %v", err)
    }

    h := GenerateRandomChunk(ch.DefaultSize)

    db.Put(context.Background(), h)

    var index dpaDBIndex
    addr := h.Address()
    idxk := getIndexKey(addr)

    idata, err := db.db.Get(idxk)
    if err != nil {
        t.Fatal(err)
    }
    decodeIndex(idata, &index)

    if index.Access != 0 {
        t.Fatalf("Expected the access index to be %d, but it is %d", 0, index.Access)
    }

    db.MarkAccessed(addr)
    db.writeCurrentBatch()

    idata, err = db.db.Get(idxk)
    if err != nil {
        t.Fatal(err)
    }
    decodeIndex(idata, &index)

    if index.Access != 1 {
        t.Fatalf("Expected the access index to be %d, but it is %d", 1, index.Access)
    }
}

func TestDbStoreRandom_1(t *testing.T) {
    testDbStoreRandom(1, false, t)
}

func TestDbStoreCorrect_1(t *testing.T) {
    testDbStoreCorrect(1, false, t)
}

func TestDbStoreRandom_1k(t *testing.T) {
    testDbStoreRandom(1000, false, t)
}

func TestDbStoreCorrect_1k(t *testing.T) {
    testDbStoreCorrect(1000, false, t)
}

func TestMockDbStoreRandom_1(t *testing.T) {
    testDbStoreRandom(1, true, t)
}

func TestMockDbStoreCorrect_1(t *testing.T) {
    testDbStoreCorrect(1, true, t)
}

func TestMockDbStoreRandom_1k(t *testing.T) {
    testDbStoreRandom(1000, true, t)
}

func TestMockDbStoreCorrect_1k(t *testing.T) {
    testDbStoreCorrect(1000, true, t)
}

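// testDbStoreNotFound verifies that getting a non-existent key returns ErrChunkNotFound.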
func testDbStoreNotFound(t *testing.T, mock bool) {
    db, cleanup, err := newTestDbStore(mock, false)
    defer cleanup()
    if err != nil {
        t.Fatalf("init dbStore failed: %v", err)
    }

    _, err = db.Get(context.TODO(), ZeroAddr)
    if err != ErrChunkNotFound {
        t.Errorf("Expected ErrChunkNotFound, got %v", err)
    }
}

func TestDbStoreNotFound(t *testing.T) {
    testDbStoreNotFound(t, false)
}

func TestMockDbStoreNotFound(t *testing.T) {
    testDbStoreNotFound(t, true)
}

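// testIterator stores chunkcount random chunks, then runs SyncIterator over every
// proximity order bin and verifies that the keys it reports match the keys that were put.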
func testIterator(t *testing.T, mock bool) {
    var chunkcount int = 32
    var i int
    var poc uint
    chunkkeys := NewAddressCollection(chunkcount)
    chunkkeysResults := NewAddressCollection(chunkcount)

    db, cleanup, err := newTestDbStore(mock, false)
    defer cleanup()
    if err != nil {
        t.Fatalf("init dbStore failed: %v", err)
    }

    chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount)

    for i = 0; i < len(chunks); i++ {
        chunkkeys[i] = chunks[i].Address()
        err := db.Put(context.TODO(), chunks[i])
        if err != nil {
            t.Fatalf("dbStore.Put failed: %v", err)
        }
    }

    for i = 0; i < len(chunkkeys); i++ {
        log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
    }

    i = 0
    for poc = 0; poc <= 255; poc++ {
        err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
            log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
            chunkkeysResults[n] = k
            i++
            return true
        })
        if err != nil {
            t.Fatalf("Iterator call failed: %v", err)
        }
    }

    for i = 0; i < chunkcount; i++ {
        if !bytes.Equal(chunkkeys[i], chunkkeysResults[i]) {
            t.Fatalf("Chunk put #%d key '%v' does not match iterator's key '%v'", i, chunkkeys[i], chunkkeysResults[i])
        }
    }
}

func TestIterator(t *testing.T) {
    testIterator(t, false)
}

func TestMockIterator(t *testing.T) {
    testIterator(t, true)
}

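// benchmarkDbStorePut and benchmarkDbStoreGet run the shared store benchmarks
// against a fresh test LDBStore, optionally backed by the mock store.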
func benchmarkDbStorePut(n int, mock bool, b *testing.B) {
    db, cleanup, err := newTestDbStore(mock, true)
    defer cleanup()
    if err != nil {
        b.Fatalf("init dbStore failed: %v", err)
    }
    benchmarkStorePut(db, n, b)
}

func benchmarkDbStoreGet(n int, mock bool, b *testing.B) {
    db, cleanup, err := newTestDbStore(mock, true)
    defer cleanup()
    if err != nil {
        b.Fatalf("init dbStore failed: %v", err)
    }
    benchmarkStoreGet(db, n, b)
}

func BenchmarkDbStorePut_500(b *testing.B) {
    benchmarkDbStorePut(500, false, b)
}

func BenchmarkDbStoreGet_500(b *testing.B) {
    benchmarkDbStoreGet(500, false, b)
}

func BenchmarkMockDbStorePut_500(b *testing.B) {
    benchmarkDbStorePut(500, true, b)
}

func BenchmarkMockDbStoreGet_500(b *testing.B) {
    benchmarkDbStoreGet(500, true, b)
}

// TestLDBStoreWithoutCollectGarbage tests that we can put a number of random chunks in the LevelDB store, and
// retrieve them, provided we don't hit the garbage collection
func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
    capacity := 50
    n := 10

    ldb, cleanup := newLDBStore(t)
    ldb.setCapacity(uint64(capacity))
    defer cleanup()

    chunks, err := mputRandomChunks(ldb, n)
    if err != nil {
        t.Fatal(err.Error())
    }

    log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

    for _, ch := range chunks {
        ret, err := ldb.Get(context.TODO(), ch.Address())
        if err != nil {
            t.Fatal(err)
        }

        if !bytes.Equal(ret.Data(), ch.Data()) {
            t.Fatal("expected to get the same data back, but got smth else")
        }
    }

    if ldb.entryCnt != uint64(n) {
        t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
    }

    if ldb.accessCnt != uint64(2*n) {
        t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
    }
}

// TestLDBStoreCollectGarbage tests that we can put more chunks than the LDBStore's capacity, and
// retrieve only some of them, because garbage collection must have partially cleared the store.
// It also tests that we can delete chunks and that we can trigger garbage collection.
func TestLDBStoreCollectGarbage(t *testing.T) {
    // below max round
    initialCap := defaultMaxGCRound / 100
    cap := initialCap / 2
    t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
    t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

    // at max round
    cap = initialCap
    t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
    t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)

    // more than max round, not on threshold
    cap = initialCap + 500
    t.Run(fmt.Sprintf("A/%d/%d", cap, cap*4), testLDBStoreCollectGarbage)
    t.Run(fmt.Sprintf("B/%d/%d", cap, cap*4), testLDBStoreRemoveThenCollectGarbage)
}

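// testLDBStoreCollectGarbage is run as a subtest; it parses the store capacity and the
// number of chunks to put from the subtest name (see TestLDBStoreCollectGarbage), puts
// the chunks in batches of at most one GC round, and checks that the expected number
// of surplus chunks were garbage collected.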
func testLDBStoreCollectGarbage(t *testing.T) {
    params := strings.Split(t.Name(), "/")
    capacity, err := strconv.Atoi(params[2])
    if err != nil {
        t.Fatal(err)
    }
    n, err := strconv.Atoi(params[3])
    if err != nil {
        t.Fatal(err)
    }

    ldb, cleanup := newLDBStore(t)
    ldb.setCapacity(uint64(capacity))
    defer cleanup()

    // retrieve the gc round target count for the db capacity
    ldb.startGC(capacity)
    roundTarget := ldb.gc.target

    // split put counts to gc target count threshold, and wait for gc to finish in between
    var allChunks []Chunk
    remaining := n
    for remaining > 0 {
        var putCount int
        if remaining < roundTarget {
            putCount = remaining
        } else {
            putCount = roundTarget
        }
        remaining -= putCount
        chunks, err := mputRandomChunks(ldb, putCount)
        if err != nil {
            t.Fatal(err.Error())
        }
        allChunks = append(allChunks, chunks...)
        ldb.lock.RLock()
        log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
        ldb.lock.RUnlock()

        waitGc(ldb)
    }

    // attempt gets on all put chunks
    var missing int
    for _, ch := range allChunks {
        ret, err := ldb.Get(context.TODO(), ch.Address())
        if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
            missing++
            continue
        }
        if err != nil {
            t.Fatal(err)
        }

        if !bytes.Equal(ret.Data(), ch.Data()) {
            t.Fatal("expected to get the same data back, but got smth else")
        }

        log.Trace("got back chunk", "chunk", ret)
    }

    // all surplus chunks should be missing
    expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
    if missing != expectMissing {
        t.Fatalf("gc failure: expected to miss %v chunks, but only %v are actually missing", expectMissing, missing)
    }

    log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

// TestLDBStoreAddRemove tests that we can put and then delete a given chunk
func TestLDBStoreAddRemove(t *testing.T) {
    ldb, cleanup := newLDBStore(t)
    ldb.setCapacity(200)
    defer cleanup()

    n := 100
    chunks, err := mputRandomChunks(ldb, n)
    if err != nil {
        t.Fatal(err)
    }

    for i := 0; i < n; i++ {
        // delete all even index chunks
        if i%2 == 0 {
            ldb.Delete(chunks[i].Address())
        }
    }

    log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

    for i := 0; i < n; i++ {
        ret, err := ldb.Get(context.TODO(), chunks[i].Address())

        if i%2 == 0 {
            // expect even chunks to be missing
            if err == nil {
                t.Fatal("expected chunk to be missing, but got no error")
            }
        } else {
            // expect odd chunks to be retrieved successfully
            if err != nil {
                t.Fatalf("expected no error, but got %s", err)
            }

            if !bytes.Equal(ret.Data(), chunks[i].Data()) {
                t.Fatal("expected to get the same data back, but got smth else")
            }
        }
    }
}

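// testLDBStoreRemoveThenCollectGarbage is run as a subtest; it parses the capacity
// and chunk count from the subtest name, puts and then deletes all chunks, re-puts
// them in GC-round-sized batches, and verifies that the chunks with the lowest
// access counts are the ones garbage collection removed.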
func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
    t.Skip("flaky with -race flag")

    params := strings.Split(t.Name(), "/")
    capacity, err := strconv.Atoi(params[2])
    if err != nil {
        t.Fatal(err)
    }
    n, err := strconv.Atoi(params[3])
    if err != nil {
        t.Fatal(err)
    }

    ldb, cleanup := newLDBStore(t)
    defer cleanup()
    ldb.setCapacity(uint64(capacity))

    // put capacity count number of chunks
    chunks := make([]Chunk, n)
    for i := 0; i < n; i++ {
        c := GenerateRandomChunk(ch.DefaultSize)
        chunks[i] = c
        log.Trace("generate random chunk", "idx", i, "chunk", c)
    }

    for i := 0; i < n; i++ {
        err := ldb.Put(context.TODO(), chunks[i])
        if err != nil {
            t.Fatal(err)
        }
    }

    waitGc(ldb)

    // delete all chunks
    // (only count the ones actually deleted, the rest will have been gc'd)
    deletes := 0
    for i := 0; i < n; i++ {
        if ldb.Delete(chunks[i].Address()) == nil {
            deletes++
        }
    }

    log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

    if ldb.entryCnt != 0 {
        t.Fatalf("ldb.entryCnt expected 0, got %v", ldb.entryCnt)
    }

    // the manual deletes will have increased accesscnt, so we need to add this when we verify the current count
    expAccessCnt := uint64(n)
    if ldb.accessCnt != expAccessCnt {
        t.Fatalf("ldb.accessCnt expected %v, got %v", expAccessCnt, ldb.accessCnt)
    }

    // retrieve the gc round target count for the db capacity
    ldb.startGC(capacity)
    roundTarget := ldb.gc.target

    remaining := n
    var puts int
    for remaining > 0 {
        var putCount int
        if remaining < roundTarget {
            putCount = remaining
        } else {
            putCount = roundTarget
        }
        remaining -= putCount
        for putCount > 0 {
            ldb.Put(context.TODO(), chunks[puts])
            ldb.lock.RLock()
            log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
            ldb.lock.RUnlock()
            puts++
            putCount--
        }

        waitGc(ldb)
    }

    // expect the first surplus chunks to be missing, because they have the smallest access value
    expectMissing := roundTarget + (((n - capacity) / roundTarget) * roundTarget)
    for i := 0; i < expectMissing; i++ {
        _, err := ldb.Get(context.TODO(), chunks[i].Address())
        if err == nil {
            t.Fatalf("expected surplus chunk %d to be missing, but got no error", i)
        }
    }

    // expect the last chunks to be present, as they have the largest access value
    for i := expectMissing; i < n; i++ {
        ret, err := ldb.Get(context.TODO(), chunks[i].Address())
        if err != nil {
            t.Fatalf("chunk %v: expected no error, but got %s", i, err)
        }

        if !bytes.Equal(ret.Data(), chunks[i].Data()) {
            t.Fatal("expected to get the same data back, but got smth else")
        }
    }
}

// TestLDBStoreCollectGarbageAccessUnlikeIndex tests garbage collection where the access count differs from the index count
func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
    capacity := defaultMaxGCRound / 100 * 2
    n := capacity - 1

    ldb, cleanup := newLDBStore(t)
    ldb.setCapacity(uint64(capacity))
    defer cleanup()

    chunks, err := mputRandomChunks(ldb, n)
    if err != nil {
        t.Fatal(err.Error())
    }
    log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)

    // set the first added capacity/2 chunks to the highest access count
    for i := 0; i < capacity/2; i++ {
        _, err := ldb.Get(context.TODO(), chunks[i].Address())
        if err != nil {
            t.Fatalf("failed to get chunk #%d - %s: %v", i, chunks[i].Address(), err)
        }
    }
    _, err = mputRandomChunks(ldb, 2)
    if err != nil {
        t.Fatal(err.Error())
    }

    // wait for garbage collection to kick in on the responsible actor
    waitGc(ldb)

    var missing int
    for i, ch := range chunks[2 : capacity/2] {
        ret, err := ldb.Get(context.TODO(), ch.Address())
        if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
            t.Fatalf("failed to find chunk #%d - %s: %v", i, ch.Address(), err)
        }

        if !bytes.Equal(ret.Data(), ch.Data()) {
            t.Fatal("expected to get the same data back, but got smth else")
        }

        log.Trace("got back chunk", "chunk", ret)
    }

    log.Info("ldbstore", "total", n, "missing", missing, "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
}

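// TestCleanIndex puts a few chunks, removes the data and the GC index row of the
// first chunk and corrupts the GC index value of the second, then verifies that
// CleanGCIndex prunes the orphaned chunk index, repairs the GC index, and restores
// the entry count and per-bin counters to the correct values.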
func TestCleanIndex(t *testing.T) {
    capacity := 5000
    n := 3

    ldb, cleanup := newLDBStore(t)
    ldb.setCapacity(uint64(capacity))
    defer cleanup()

    chunks, err := mputRandomChunks(ldb, n)
    if err != nil {
        t.Fatal(err)
    }

    // remove the data of the first chunk
    po := ldb.po(chunks[0].Address()[:])
    dataKey := make([]byte, 10)
    dataKey[0] = keyData
    dataKey[1] = byte(po)
    // dataKey[2:10] = first chunk has storageIdx 0 on [2:10]
    if _, err := ldb.db.Get(dataKey); err != nil {
        t.Fatal(err)
    }
    if err := ldb.db.Delete(dataKey); err != nil {
        t.Fatal(err)
    }

    // remove the gc index row for the first chunk
    gcFirstCorrectKey := make([]byte, 9)
    gcFirstCorrectKey[0] = keyGCIdx
    if err := ldb.db.Delete(gcFirstCorrectKey); err != nil {
        t.Fatal(err)
    }

    // warp the gc data of the second chunk
    // this data should be correct again after the clean
    gcSecondCorrectKey := make([]byte, 9)
    gcSecondCorrectKey[0] = keyGCIdx
    binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(1))
    gcSecondCorrectVal, err := ldb.db.Get(gcSecondCorrectKey)
    if err != nil {
        t.Fatal(err)
    }
    warpedGCVal := make([]byte, len(gcSecondCorrectVal)+1)
    copy(warpedGCVal[1:], gcSecondCorrectVal)
    if err := ldb.db.Delete(gcSecondCorrectKey); err != nil {
        t.Fatal(err)
    }
    if err := ldb.db.Put(gcSecondCorrectKey, warpedGCVal); err != nil {
        t.Fatal(err)
    }

    if err := ldb.CleanGCIndex(); err != nil {
        t.Fatal(err)
    }

    // the index without corresponding data should have been deleted
    idxKey := make([]byte, 33)
    idxKey[0] = keyIndex
    copy(idxKey[1:], chunks[0].Address())
    if _, err := ldb.db.Get(idxKey); err == nil {
        t.Fatalf("expected chunk 0 idx to be pruned: %v", idxKey)
    }

    // the two other indices should be present
    copy(idxKey[1:], chunks[1].Address())
    if _, err := ldb.db.Get(idxKey); err != nil {
        t.Fatalf("expected chunk 1 idx to be present: %v", idxKey)
    }

    copy(idxKey[1:], chunks[2].Address())
    if _, err := ldb.db.Get(idxKey); err != nil {
        t.Fatalf("expected chunk 2 idx to be present: %v", idxKey)
    }

    // first gc index should still be gone
    if _, err := ldb.db.Get(gcFirstCorrectKey); err == nil {
        t.Fatalf("expected gc 0 idx to be pruned: %v", idxKey)
    }

    // second gc index should be fixed
    if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
        t.Fatalf("expected gc 1 idx to be present: %v", idxKey)
    }

    // third gc index should be unchanged
    binary.BigEndian.PutUint64(gcSecondCorrectKey[1:], uint64(2))
    if _, err := ldb.db.Get(gcSecondCorrectKey); err != nil {
        t.Fatalf("expected gc 2 idx to be present: %v", idxKey)
    }

    c, err := ldb.db.Get(keyEntryCnt)
    if err != nil {
        t.Fatalf("expected to get entry count: %v", err)
    }

    // entrycount should now be one less
    entryCount := binary.BigEndian.Uint64(c)
    if entryCount != 2 {
        t.Fatalf("expected entrycnt to be 2, was %d", entryCount)
    }

    // the chunks might accidentally be in the same bin
    // if so that bin counter will now be 2 - the highest added index.
    // if not, the total of them will be 3
    poBins := []uint8{ldb.po(chunks[1].Address()), ldb.po(chunks[2].Address())}
    if poBins[0] == poBins[1] {
        poBins = poBins[:1]
    }

    var binTotal uint64
    var currentBin [2]byte
    currentBin[0] = keyDistanceCnt
    if len(poBins) == 1 {
        currentBin[1] = poBins[0]
        c, err := ldb.db.Get(currentBin[:])
        if err != nil {
            t.Fatalf("expected to get bin count: %v", err)
        }
        binCount := binary.BigEndian.Uint64(c)
        if binCount != 2 {
            t.Fatalf("expected bin count to be 2, was %d", binCount)
        }
    } else {
        for _, bin := range poBins {
            currentBin[1] = bin
            c, err := ldb.db.Get(currentBin[:])
            if err != nil {
                t.Fatalf("expected to get bin count: %v", err)
            }
            binCount := binary.BigEndian.Uint64(c)
            binTotal += binCount
        }
        if binTotal != 3 {
            t.Fatalf("expected sum of bin indices to be 3, was %d", binTotal)
        }
    }

    // check that the iterator quits properly
    chunks, err = mputRandomChunks(ldb, 4100)
    if err != nil {
        t.Fatal(err)
    }

    po = ldb.po(chunks[4099].Address()[:])
    dataKey = make([]byte, 10)
    dataKey[0] = keyData
    dataKey[1] = byte(po)
    binary.BigEndian.PutUint64(dataKey[2:], 4099+3)
    if _, err := ldb.db.Get(dataKey); err != nil {
        t.Fatal(err)
    }
    if err := ldb.db.Delete(dataKey); err != nil {
        t.Fatal(err)
    }

    if err := ldb.CleanGCIndex(); err != nil {
        t.Fatal(err)
    }

    // entrycount should now be one less than the number of added chunks
    c, err = ldb.db.Get(keyEntryCnt)
    if err != nil {
        t.Fatalf("expected to get entry count: %v", err)
    }
    entryCount = binary.BigEndian.Uint64(c)
    if entryCount != 4099+2 {
        t.Fatalf("expected entrycnt to be %d, was %d", 4099+2, entryCount)
    }
}

// Note: waitGc does not guarantee that we wait 1 GC round; it only
// guarantees that if the GC is running we wait for that run to finish
// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
func waitGc(ldb *LDBStore) {
    <-ldb.gc.runC
    ldb.gc.runC <- struct{}{}
}