freezer_table_test.go 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296
  1. // Copyright 2019 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package rawdb
  17. import (
  18. "bytes"
  19. "encoding/binary"
  20. "fmt"
  21. "math/rand"
  22. "os"
  23. "path/filepath"
  24. "reflect"
  25. "sync/atomic"
  26. "testing"
  27. "testing/quick"
  28. "time"
  29. "github.com/davecgh/go-spew/spew"
  30. "github.com/ethereum/go-ethereum/metrics"
  31. "github.com/stretchr/testify/require"
  32. )
// init seeds the shared math/rand source so that each test run generates
// different random table names under os.TempDir().
// NOTE(review): rand.Seed is deprecated since Go 1.20 (the global source is
// auto-seeded); consider removing this once the minimum Go version allows.
func init() {
	rand.Seed(time.Now().Unix())
}
  36. // TestFreezerBasics test initializing a freezertable from scratch, writing to the table,
  37. // and reading it back.
  38. func TestFreezerBasics(t *testing.T) {
  39. t.Parallel()
  40. // set cutoff at 50 bytes
  41. f, err := newTable(os.TempDir(),
  42. fmt.Sprintf("unittest-%d", rand.Uint64()),
  43. metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
  44. if err != nil {
  45. t.Fatal(err)
  46. }
  47. defer f.Close()
  48. // Write 15 bytes 255 times, results in 85 files
  49. writeChunks(t, f, 255, 15)
  50. //print(t, f, 0)
  51. //print(t, f, 1)
  52. //print(t, f, 2)
  53. //
  54. //db[0] = 000000000000000000000000000000
  55. //db[1] = 010101010101010101010101010101
  56. //db[2] = 020202020202020202020202020202
  57. for y := 0; y < 255; y++ {
  58. exp := getChunk(15, y)
  59. got, err := f.Retrieve(uint64(y))
  60. if err != nil {
  61. t.Fatalf("reading item %d: %v", y, err)
  62. }
  63. if !bytes.Equal(got, exp) {
  64. t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
  65. }
  66. }
  67. // Check that we cannot read too far
  68. _, err = f.Retrieve(uint64(255))
  69. if err != errOutOfBounds {
  70. t.Fatal(err)
  71. }
  72. }
// TestFreezerBasicsClosing tests same as TestFreezerBasics, but also closes and reopens the freezer between
// every operation, verifying that all state needed for reads and appends is
// persisted on Close and restored on reopen.
func TestFreezerBasicsClosing(t *testing.T) {
	t.Parallel()
	// set cutoff at 50 bytes
	var (
		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
		f          *freezerTable
		err        error
	)
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
	if err != nil {
		t.Fatal(err)
	}
	// Write 15 bytes 255 times, results in 85 files.
	// In-between writes, the table is closed and re-opened.
	for x := 0; x < 255; x++ {
		data := getChunk(15, x)
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(uint64(x), data))
		require.NoError(t, batch.commit())
		f.Close()

		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
	}
	defer f.Close()

	// Read every item back, again closing and reopening the table after each
	// retrieval to exercise the reopen path on the read side too.
	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
		f.Close()
		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
	}
}
  118. // TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
  119. func TestFreezerRepairDanglingHead(t *testing.T) {
  120. t.Parallel()
  121. rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
  122. fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
  123. // Fill table
  124. {
  125. f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
  126. if err != nil {
  127. t.Fatal(err)
  128. }
  129. // Write 15 bytes 255 times
  130. writeChunks(t, f, 255, 15)
  131. // The last item should be there
  132. if _, err = f.Retrieve(0xfe); err != nil {
  133. t.Fatal(err)
  134. }
  135. f.Close()
  136. }
  137. // open the index
  138. idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
  139. if err != nil {
  140. t.Fatalf("Failed to open index file: %v", err)
  141. }
  142. // Remove 4 bytes
  143. stat, err := idxFile.Stat()
  144. if err != nil {
  145. t.Fatalf("Failed to stat index file: %v", err)
  146. }
  147. idxFile.Truncate(stat.Size() - 4)
  148. idxFile.Close()
  149. // Now open it again
  150. {
  151. f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
  152. if err != nil {
  153. t.Fatal(err)
  154. }
  155. // The last item should be missing
  156. if _, err = f.Retrieve(0xff); err == nil {
  157. t.Errorf("Expected error for missing index entry")
  158. }
  159. // The one before should still be there
  160. if _, err = f.Retrieve(0xfd); err != nil {
  161. t.Fatalf("Expected no error, got %v", err)
  162. }
  163. }
  164. }
// TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed:
// the index is truncated to an unaligned size covering only item 0, and the
// reopened table must rewind to that point and accept fresh appends.
func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)
		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}
	// open the index
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove everything but the first item, and leave data unaligned
	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
	idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
	idxFile.Close()

	// Now open it again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// The first item should be there
		if _, err = f.Retrieve(0); err != nil {
			t.Fatal(err)
		}
		// The second item should be missing
		if _, err = f.Retrieve(1); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// We should now be able to store items again, from item = 1.
		// Use ^x so the new payloads differ from the originally written ones.
		batch := f.newBatch()
		for x := 1; x < 0xff; x++ {
			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
		}
		require.NoError(t, batch.commit())
		f.Close()
	}
	// And if we open it, we should now be able to read all of them (new values)
	{
		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		for y := 1; y < 255; y++ {
			exp := getChunk(15, ^y)
			got, err := f.Retrieve(uint64(y))
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, exp) {
				t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
			}
		}
	}
}
// TestSnappyDetection tests that we fail to open a snappy database and vice versa:
// a table written with compression enabled must not yield readable items when
// reopened without compression, while reopening with the original setting works.
func TestSnappyDetection(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())

	// Open with snappy
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)
		f.Close()
	}
	// Open without snappy
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
		if err != nil {
			t.Fatal(err)
		}
		// Reads must fail: the on-disk data is snappy-compressed.
		if _, err = f.Retrieve(0); err == nil {
			f.Close()
			t.Fatalf("expected empty table")
		}
	}
	// Open with snappy
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// There should be 255 items
		if _, err = f.Retrieve(0xfe); err != nil {
			f.Close()
			t.Fatalf("expected no error, got %v", err)
		}
	}
}
  269. func assertFileSize(f string, size int64) error {
  270. stat, err := os.Stat(f)
  271. if err != nil {
  272. return err
  273. }
  274. if stat.Size() != size {
  275. return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
  276. }
  277. return nil
  278. }
// TestFreezerRepairDanglingIndex checks that if the index has more entries than there are data,
// the index is repaired: the head data file is cropped mid-item and the reopened
// table must truncate both the index and the data file back to the last whole item.
func TestFreezerRepairDanglingIndex(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 9 times : 150 bytes
		writeChunks(t, f, 9, 15)
		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			f.Close()
			t.Fatal(err)
		}
		f.Close()
		// File sizes should be 45, 45, 45 : items[3, 3, 3)
	}
	// Crop third file
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
	// Truncate third file: 45 ,45, 20
	// (20 bytes = one whole 15-byte item plus 5 dangling bytes)
	{
		if err := assertFileSize(fileToCrop, 45); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}
	// Open db it again
	// It should restore the file(s) to
	// 45, 45, 15
	// with 3+3+1 items
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		if f.items != 7 {
			t.Fatalf("expected %d items, got %d", 7, f.items)
		}
		if err := assertFileSize(fileToCrop, 15); err != nil {
			t.Fatal(err)
		}
	}
}
// TestFreezerTruncate checks that truncateHead drops items beyond the new
// head and updates both the item count and the head-file byte counter.
func TestFreezerTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncation-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)
		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}
	// Reopen, truncate
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		f.truncateHead(10) // 150 bytes
		if f.items != 10 {
			t.Fatalf("expected %d items, got %d", 10, f.items)
		}
		// 45, 45, 45, 15 -- bytes should be 15
		if f.headBytes != 15 {
			t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
		}
	}
}
  368. // TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
  369. // That will rewind the index, and _should_ truncate the head file
  370. func TestFreezerRepairFirstFile(t *testing.T) {
  371. t.Parallel()
  372. rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
  373. fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
  374. // Fill table
  375. {
  376. f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
  377. if err != nil {
  378. t.Fatal(err)
  379. }
  380. // Write 80 bytes, splitting out into two files
  381. batch := f.newBatch()
  382. require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
  383. require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
  384. require.NoError(t, batch.commit())
  385. // The last item should be there
  386. if _, err = f.Retrieve(1); err != nil {
  387. t.Fatal(err)
  388. }
  389. f.Close()
  390. }
  391. // Truncate the file in half
  392. fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
  393. {
  394. if err := assertFileSize(fileToCrop, 40); err != nil {
  395. t.Fatal(err)
  396. }
  397. file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
  398. if err != nil {
  399. t.Fatal(err)
  400. }
  401. file.Truncate(20)
  402. file.Close()
  403. }
  404. // Reopen
  405. {
  406. f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
  407. if err != nil {
  408. t.Fatal(err)
  409. }
  410. if f.items != 1 {
  411. f.Close()
  412. t.Fatalf("expected %d items, got %d", 0, f.items)
  413. }
  414. // Write 40 bytes
  415. batch := f.newBatch()
  416. require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
  417. require.NoError(t, batch.commit())
  418. f.Close()
  419. // Should have been truncated down to zero and then 40 written
  420. if err := assertFileSize(fileToCrop, 40); err != nil {
  421. t.Fatal(err)
  422. }
  423. }
  424. }
  425. // TestFreezerReadAndTruncate tests:
  426. // - we have a table open
  427. // - do some reads, so files are open in readonly
  428. // - truncate so those files are 'removed'
  429. // - check that we did not keep the rdonly file descriptors
  430. func TestFreezerReadAndTruncate(t *testing.T) {
  431. t.Parallel()
  432. rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
  433. fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
  434. // Fill table
  435. {
  436. f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
  437. if err != nil {
  438. t.Fatal(err)
  439. }
  440. // Write 15 bytes 30 times
  441. writeChunks(t, f, 30, 15)
  442. // The last item should be there
  443. if _, err = f.Retrieve(f.items - 1); err != nil {
  444. t.Fatal(err)
  445. }
  446. f.Close()
  447. }
  448. // Reopen and read all files
  449. {
  450. f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
  451. if err != nil {
  452. t.Fatal(err)
  453. }
  454. if f.items != 30 {
  455. f.Close()
  456. t.Fatalf("expected %d items, got %d", 0, f.items)
  457. }
  458. for y := byte(0); y < 30; y++ {
  459. f.Retrieve(uint64(y))
  460. }
  461. // Now, truncate back to zero
  462. f.truncateHead(0)
  463. // Write the data again
  464. batch := f.newBatch()
  465. for x := 0; x < 30; x++ {
  466. require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
  467. }
  468. require.NoError(t, batch.commit())
  469. f.Close()
  470. }
  471. }
// TestFreezerOffset tests a table whose tail files have been deleted out of
// band: index entry zero is rewritten to encode a deletion offset, and the
// reopened table must serve reads/writes at the shifted item numbers.
func TestFreezerOffset(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("offset-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 6 x 20 bytes, splitting out into three files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
		require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
		require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
		require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
		require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
		require.NoError(t, batch.commit())

		t.Log(f.dumpIndexString(0, 100))
		f.Close()
	}
	// Now crop it.
	{
		// delete files 0 and 1
		for i := 0; i < 2; i++ {
			p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i))
			if err := os.Remove(p); err != nil {
				t.Fatal(err)
			}
		}
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 7*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 4 ] at index zero
		zeroIndex := indexEntry{
			filenum: uint32(2), // First file is 2
			offset:  uint32(4), // We have removed four items
		}
		buf := zeroIndex.append(nil)

		// Overwrite index zero
		copy(indexBuf, buf)

		// Remove the four next indices by overwriting
		copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
		indexFile.WriteAt(indexBuf, 0)

		// Need to truncate the moved index items
		indexFile.Truncate(indexEntrySize * (1 + 2))
		indexFile.Close()
	}
	// Now open again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		// It should allow writing item 6.
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
		require.NoError(t, batch.commit())

		// Items 0-3 were deleted out of band; 4-6 remain readable.
		checkRetrieveError(t, f, map[uint64]error{
			0: errOutOfBounds,
			1: errOutOfBounds,
			2: errOutOfBounds,
			3: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			4: getChunk(20, 0xbb),
			5: getChunk(20, 0xaa),
			6: getChunk(20, 0x99),
		})
	}
	// Edit the index again, with a much larger initial offset of 1M.
	{
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 3*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 1M ] at index zero
		zeroIndex := indexEntry{
			offset:  uint32(1000000), // We have removed 1M items
			filenum: uint32(2),       // First file is 2
		}
		buf := zeroIndex.append(nil)

		// Overwrite index zero
		copy(indexBuf, buf)
		indexFile.WriteAt(indexBuf, 0)
		indexFile.Close()
	}
	// Check that existing items have been moved to index 1M.
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		checkRetrieveError(t, f, map[uint64]error{
			0:      errOutOfBounds,
			1:      errOutOfBounds,
			2:      errOutOfBounds,
			3:      errOutOfBounds,
			999999: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			1000000: getChunk(20, 0xbb),
			1000001: getChunk(20, 0xaa),
		})
	}
}
  594. func TestTruncateTail(t *testing.T) {
  595. t.Parallel()
  596. rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
  597. fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())
  598. // Fill table
  599. f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
  600. if err != nil {
  601. t.Fatal(err)
  602. }
  603. // Write 7 x 20 bytes, splitting out into four files
  604. batch := f.newBatch()
  605. require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
  606. require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
  607. require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
  608. require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
  609. require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
  610. require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
  611. require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
  612. require.NoError(t, batch.commit())
  613. // nothing to do, all the items should still be there.
  614. f.truncateTail(0)
  615. fmt.Println(f.dumpIndexString(0, 1000))
  616. checkRetrieve(t, f, map[uint64][]byte{
  617. 0: getChunk(20, 0xFF),
  618. 1: getChunk(20, 0xEE),
  619. 2: getChunk(20, 0xdd),
  620. 3: getChunk(20, 0xcc),
  621. 4: getChunk(20, 0xbb),
  622. 5: getChunk(20, 0xaa),
  623. 6: getChunk(20, 0x11),
  624. })
  625. // truncate single element( item 0 ), deletion is only supported at file level
  626. f.truncateTail(1)
  627. fmt.Println(f.dumpIndexString(0, 1000))
  628. checkRetrieveError(t, f, map[uint64]error{
  629. 0: errOutOfBounds,
  630. })
  631. checkRetrieve(t, f, map[uint64][]byte{
  632. 1: getChunk(20, 0xEE),
  633. 2: getChunk(20, 0xdd),
  634. 3: getChunk(20, 0xcc),
  635. 4: getChunk(20, 0xbb),
  636. 5: getChunk(20, 0xaa),
  637. 6: getChunk(20, 0x11),
  638. })
  639. // Reopen the table, the deletion information should be persisted as well
  640. f.Close()
  641. f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
  642. if err != nil {
  643. t.Fatal(err)
  644. }
  645. checkRetrieveError(t, f, map[uint64]error{
  646. 0: errOutOfBounds,
  647. })
  648. checkRetrieve(t, f, map[uint64][]byte{
  649. 1: getChunk(20, 0xEE),
  650. 2: getChunk(20, 0xdd),
  651. 3: getChunk(20, 0xcc),
  652. 4: getChunk(20, 0xbb),
  653. 5: getChunk(20, 0xaa),
  654. 6: getChunk(20, 0x11),
  655. })
  656. // truncate two elements( item 0, item 1 ), the file 0 should be deleted
  657. f.truncateTail(2)
  658. checkRetrieveError(t, f, map[uint64]error{
  659. 0: errOutOfBounds,
  660. 1: errOutOfBounds,
  661. })
  662. checkRetrieve(t, f, map[uint64][]byte{
  663. 2: getChunk(20, 0xdd),
  664. 3: getChunk(20, 0xcc),
  665. 4: getChunk(20, 0xbb),
  666. 5: getChunk(20, 0xaa),
  667. 6: getChunk(20, 0x11),
  668. })
  669. // Reopen the table, the above testing should still pass
  670. f.Close()
  671. f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
  672. if err != nil {
  673. t.Fatal(err)
  674. }
  675. defer f.Close()
  676. checkRetrieveError(t, f, map[uint64]error{
  677. 0: errOutOfBounds,
  678. 1: errOutOfBounds,
  679. })
  680. checkRetrieve(t, f, map[uint64][]byte{
  681. 2: getChunk(20, 0xdd),
  682. 3: getChunk(20, 0xcc),
  683. 4: getChunk(20, 0xbb),
  684. 5: getChunk(20, 0xaa),
  685. 6: getChunk(20, 0x11),
  686. })
  687. // truncate all, the entire freezer should be deleted
  688. f.truncateTail(7)
  689. checkRetrieveError(t, f, map[uint64]error{
  690. 0: errOutOfBounds,
  691. 1: errOutOfBounds,
  692. 2: errOutOfBounds,
  693. 3: errOutOfBounds,
  694. 4: errOutOfBounds,
  695. 5: errOutOfBounds,
  696. 6: errOutOfBounds,
  697. })
  698. }
// TestTruncateHead checks that truncating the head down to the tail position
// empties the table, and that new items can then be appended starting from
// the tail item number.
func TestTruncateHead(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())

	// Fill table
	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}
	// Write 7 x 20 bytes, splitting out into four files
	batch := f.newBatch()
	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())

	f.truncateTail(4) // Tail = 4

	// Truncate the head to 4 as well (head == tail): no items remain,
	// so the entire table content should be gone.
	f.truncateHead(4)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds, // Deleted by tail
		1: errOutOfBounds, // Deleted by tail
		2: errOutOfBounds, // Deleted by tail
		3: errOutOfBounds, // Deleted by tail
		4: errOutOfBounds, // Deleted by Head
		5: errOutOfBounds, // Deleted by Head
		6: errOutOfBounds, // Deleted by Head
	})
	// Append new items, resuming at the tail item number
	batch = f.newBatch()
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())

	checkRetrieve(t, f, map[uint64][]byte{
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})
}
  742. func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
  743. t.Helper()
  744. for item, wantBytes := range items {
  745. value, err := f.Retrieve(item)
  746. if err != nil {
  747. t.Fatalf("can't get expected item %d: %v", item, err)
  748. }
  749. if !bytes.Equal(value, wantBytes) {
  750. t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
  751. }
  752. }
  753. }
  754. func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
  755. t.Helper()
  756. for item, wantError := range items {
  757. value, err := f.Retrieve(item)
  758. if err == nil {
  759. t.Fatalf("unexpected value %x for item %d, want error %v", item, value, wantError)
  760. }
  761. if err != wantError {
  762. t.Fatalf("wrong error for item %d: %v", item, err)
  763. }
  764. }
  765. }
  766. // Gets a chunk of data, filled with 'b'
  767. func getChunk(size int, b int) []byte {
  768. data := make([]byte, size)
  769. for i := range data {
  770. data[i] = byte(b)
  771. }
  772. return data
  773. }
  774. // TODO (?)
// - test that if we remove several head-files, as well as the last data-file,
// the index is truncated accordingly
  777. // Right now, the freezer would fail on these conditions:
  778. // 1. have data files d0, d1, d2, d3
  779. // 2. remove d2,d3
  780. //
  781. // However, all 'normal' failure modes arising due to failing to sync() or save a file
  782. // should be handled already, and the case described above can only (?) happen if an
  783. // external process/user deletes files from the filesystem.
  784. func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
  785. t.Helper()
  786. batch := ft.newBatch()
  787. for i := 0; i < n; i++ {
  788. if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
  789. t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
  790. }
  791. }
  792. if err := batch.commit(); err != nil {
  793. t.Fatalf("Commit returned error: %v", err)
  794. }
  795. }
// TestSequentialRead does some basic tests on the RetrieveItems.
func TestSequentialRead(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-%d", rand.Uint64())
	{ // Fill table
		// NOTE(review): 50 is presumably the max data-file size, so the
		// 30 items of 15 bytes span multiple data files — confirm against
		// newTable's signature.
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)
		f.dumpIndexStdout(0, 30)
		f.Close()
	}
	{ // Open it, iterate, verify iteration
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Ask for far more items and bytes than exist: all 30 items
		// should come back.
		items, err := f.RetrieveItems(0, 10000, 100000)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 30; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		// Item i was written filled with byte value i; verify each.
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
	{ // Open it, iterate, verify byte limit. The byte limit is less than item
		// size, so each lookup should only return one item
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// 10-byte limit vs 15-byte items: only the first item is returned.
		items, err := f.RetrieveItems(0, 10000, 10)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 1; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
}
// TestSequentialReadByteLimit does some more advanced tests on batch reads.
// These tests check that when the byte limit hits, we correctly abort in time,
// but also properly do all the deferred reads for the previous data, regardless
// of whether the data crosses a file boundary or not.
func TestSequentialReadByteLimit(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 10 bytes 30 times,
		// Splitting it at every 100 bytes (10 items)
		writeChunks(t, f, 30, 10)
		f.Close()
	}
	// Each case requests `items` entries under a byte budget of `limit`.
	// With 10-byte items, a limit one byte short of n*10 yields n-1 items.
	for i, tc := range []struct {
		items uint64 // number of items requested
		limit uint64 // byte limit for the batch read
		want  int    // expected number of items returned
	}{
		{9, 89, 8},
		{10, 99, 9},
		{11, 109, 10}, // spans the 100-byte data-file boundary
		{100, 89, 8},
		{100, 99, 9},
		{100, 109, 10},
	} {
		{
			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
			if err != nil {
				t.Fatal(err)
			}
			items, err := f.RetrieveItems(0, tc.items, tc.limit)
			if err != nil {
				t.Fatal(err)
			}
			if have, want := len(items), tc.want; have != want {
				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
			}
			// Item ii was written filled with byte value ii; verify each.
			for ii, have := range items {
				want := getChunk(10, ii)
				if !bytes.Equal(want, have) {
					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
				}
			}
			f.Close()
		}
	}
}
  903. func TestFreezerReadonly(t *testing.T) {
  904. tmpdir := os.TempDir()
  905. // Case 1: Check it fails on non-existent file.
  906. _, err := newTable(tmpdir,
  907. fmt.Sprintf("readonlytest-%d", rand.Uint64()),
  908. metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
  909. if err == nil {
  910. t.Fatal("readonly table instantiation should fail for non-existent table")
  911. }
  912. // Case 2: Check that it fails on invalid index length.
  913. fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
  914. idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
  915. if err != nil {
  916. t.Errorf("Failed to open index file: %v\n", err)
  917. }
  918. // size should not be a multiple of indexEntrySize.
  919. idxFile.Write(make([]byte, 17))
  920. idxFile.Close()
  921. _, err = newTable(tmpdir, fname,
  922. metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
  923. if err == nil {
  924. t.Errorf("readonly table instantiation should fail for invalid index size")
  925. }
  926. // Case 3: Open table non-readonly table to write some data.
  927. // Then corrupt the head file and make sure opening the table
  928. // again in readonly triggers an error.
  929. fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
  930. f, err := newTable(tmpdir, fname,
  931. metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
  932. if err != nil {
  933. t.Fatalf("failed to instantiate table: %v", err)
  934. }
  935. writeChunks(t, f, 8, 32)
  936. // Corrupt table file
  937. if _, err := f.head.Write([]byte{1, 1}); err != nil {
  938. t.Fatal(err)
  939. }
  940. if err := f.Close(); err != nil {
  941. t.Fatal(err)
  942. }
  943. _, err = newTable(tmpdir, fname,
  944. metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
  945. if err == nil {
  946. t.Errorf("readonly table instantiation should fail for corrupt table file")
  947. }
  948. // Case 4: Write some data to a table and later re-open it as readonly.
  949. // Should be successful.
  950. fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
  951. f, err = newTable(tmpdir, fname,
  952. metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
  953. if err != nil {
  954. t.Fatalf("failed to instantiate table: %v\n", err)
  955. }
  956. writeChunks(t, f, 32, 128)
  957. if err := f.Close(); err != nil {
  958. t.Fatal(err)
  959. }
  960. f, err = newTable(tmpdir, fname,
  961. metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
  962. if err != nil {
  963. t.Fatal(err)
  964. }
  965. v, err := f.Retrieve(10)
  966. if err != nil {
  967. t.Fatal(err)
  968. }
  969. exp := getChunk(128, 10)
  970. if !bytes.Equal(v, exp) {
  971. t.Errorf("retrieved value is incorrect")
  972. }
  973. // Case 5: Now write some data via a batch.
  974. // This should fail either during AppendRaw or Commit
  975. batch := f.newBatch()
  976. writeErr := batch.AppendRaw(32, make([]byte, 1))
  977. if writeErr == nil {
  978. writeErr = batch.commit()
  979. }
  980. if writeErr == nil {
  981. t.Fatalf("Writing to readonly table should fail")
  982. }
  983. }
// randTest performs random freezer table operations.
// Instances of this test are created by Generate.
type randTest []randTestStep

// randTestStep describes one operation in a randTest sequence, together
// with its arguments and the error (if any) recorded while executing it.
type randTestStep struct {
	op     int
	items  []uint64 // for append and retrieve
	blobs  [][]byte // for append
	target uint64   // for truncate(head/tail)
	err    error    // for debugging
}
// Operation codes for randTestStep.op.
const (
	opReload = iota
	opAppend
	opRetrieve
	opTruncateHead
	opTruncateHeadAll
	opTruncateTail
	opTruncateTailAll
	opCheckAll
	opMax // boundary value, not an actual op
)
  1005. func getVals(first uint64, n int) [][]byte {
  1006. var ret [][]byte
  1007. for i := 0; i < n; i++ {
  1008. val := make([]byte, 8)
  1009. binary.BigEndian.PutUint64(val, first+uint64(i))
  1010. ret = append(ret, val)
  1011. }
  1012. return ret
  1013. }
  1014. func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
  1015. var (
  1016. deleted uint64 // The number of deleted items from tail
  1017. items []uint64 // The index of entries in table
  1018. // getItems retrieves the indexes for items in table.
  1019. getItems = func(n int) []uint64 {
  1020. length := len(items)
  1021. if length == 0 {
  1022. return nil
  1023. }
  1024. var ret []uint64
  1025. index := rand.Intn(length)
  1026. for i := index; len(ret) < n && i < length; i++ {
  1027. ret = append(ret, items[i])
  1028. }
  1029. return ret
  1030. }
  1031. // addItems appends the given length items into the table.
  1032. addItems = func(n int) []uint64 {
  1033. var first = deleted
  1034. if len(items) != 0 {
  1035. first = items[len(items)-1] + 1
  1036. }
  1037. var ret []uint64
  1038. for i := 0; i < n; i++ {
  1039. ret = append(ret, first+uint64(i))
  1040. }
  1041. items = append(items, ret...)
  1042. return ret
  1043. }
  1044. )
  1045. var steps randTest
  1046. for i := 0; i < size; i++ {
  1047. step := randTestStep{op: r.Intn(opMax)}
  1048. switch step.op {
  1049. case opReload, opCheckAll:
  1050. case opAppend:
  1051. num := r.Intn(3)
  1052. step.items = addItems(num)
  1053. if len(step.items) == 0 {
  1054. step.blobs = nil
  1055. } else {
  1056. step.blobs = getVals(step.items[0], num)
  1057. }
  1058. case opRetrieve:
  1059. step.items = getItems(r.Intn(3))
  1060. case opTruncateHead:
  1061. if len(items) == 0 {
  1062. step.target = deleted
  1063. } else {
  1064. index := r.Intn(len(items))
  1065. items = items[:index]
  1066. step.target = deleted + uint64(index)
  1067. }
  1068. case opTruncateHeadAll:
  1069. step.target = deleted
  1070. items = items[:0]
  1071. case opTruncateTail:
  1072. if len(items) == 0 {
  1073. step.target = deleted
  1074. } else {
  1075. index := r.Intn(len(items))
  1076. items = items[index:]
  1077. deleted += uint64(index)
  1078. step.target = deleted
  1079. }
  1080. case opTruncateTailAll:
  1081. step.target = deleted + uint64(len(items))
  1082. items = items[:0]
  1083. deleted = step.target
  1084. }
  1085. steps = append(steps, step)
  1086. }
  1087. return reflect.ValueOf(steps)
  1088. }
// runRandTest executes a generated sequence of freezer table operations,
// mirroring the expected live contents in a plain slice (values) and
// cross-checking every retrieval against it. It records the first failure
// in the corresponding step and returns false (for testing/quick).
func runRandTest(rt randTest) bool {
	fname := fmt.Sprintf("randtest-%d", rand.Uint64())
	f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		panic("failed to initialize table")
	}
	var values [][]byte
	for i, step := range rt {
		switch step.op {
		case opReload:
			// Close and reopen: all state must survive a restart.
			f.Close()
			f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
			if err != nil {
				rt[i].err = fmt.Errorf("failed to reload table %v", err)
			}
		case opCheckAll:
			// Retrieve every live item (tail..head) and compare with the mirror.
			tail := atomic.LoadUint64(&f.itemHidden)
			head := atomic.LoadUint64(&f.items)

			if tail == head {
				continue
			}
			got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
			if err != nil {
				rt[i].err = err
			} else {
				if !reflect.DeepEqual(got, values) {
					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
				}
			}
		case opAppend:
			batch := f.newBatch()
			for i := 0; i < len(step.items); i++ {
				// NOTE(review): AppendRaw/commit errors are ignored here;
				// a failed write would only surface later as a retrieval
				// mismatch — consider recording them in rt[i].err.
				batch.AppendRaw(step.items[i], step.blobs[i])
			}
			batch.commit()
			values = append(values, step.blobs...)
		case opRetrieve:
			var blobs [][]byte
			if len(step.items) == 0 {
				continue
			}
			// Map absolute item numbers to mirror offsets via the hidden count.
			tail := atomic.LoadUint64(&f.itemHidden)
			for i := 0; i < len(step.items); i++ {
				blobs = append(blobs, values[step.items[i]-tail])
			}
			got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
			if err != nil {
				rt[i].err = err
			} else {
				if !reflect.DeepEqual(got, blobs) {
					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
				}
			}
		case opTruncateHead:
			f.truncateHead(step.target)

			// Shrink the mirror to the new live length.
			length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
			values = values[:length]
		case opTruncateHeadAll:
			f.truncateHead(step.target)
			values = nil
		case opTruncateTail:
			// Drop from the mirror exactly as many items as the table hid.
			prev := atomic.LoadUint64(&f.itemHidden)
			f.truncateTail(step.target)

			truncated := atomic.LoadUint64(&f.itemHidden) - prev
			values = values[truncated:]
		case opTruncateTailAll:
			f.truncateTail(step.target)
			values = nil
		}
		// Abort the test on error.
		if rt[i].err != nil {
			return false
		}
	}
	f.Close()
	return true
}
  1166. func TestRandom(t *testing.T) {
  1167. if err := quick.Check(runRandTest, nil); err != nil {
  1168. if cerr, ok := err.(*quick.CheckError); ok {
  1169. t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
  1170. }
  1171. t.Fatal(err)
  1172. }
  1173. }