chunker_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"testing"

	"github.com/ethereum/go-ethereum/crypto/sha3"
)

/*
Tests TreeChunker by splitting and joining a random byte slice
*/
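
// exampleSplitJoin is a minimal sketch of the round trip exercised by these tests and is
// not used by them: split a payload with the pyramid chunker, wait until all chunks are
// stored, then read the data back through a TreeJoin lazy reader. The helper name is
// ours; the calls mirror the usage in the tests below.
func exampleSplitJoin(payload []byte) ([]byte, error) {
	ctx := context.TODO()
	putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)

	// Split: returns the root address and a wait function that flushes pending stores.
	addr, wait, err := PyramidSplit(ctx, bytes.NewReader(payload), putGetter, putGetter)
	if err != nil {
		return nil, err
	}
	if err := wait(ctx); err != nil {
		return nil, err
	}

	// Join: TreeJoin returns a lazy reader over the reassembled data.
	reader := TreeJoin(ctx, addr, putGetter, 0)
	out := make([]byte, len(payload))
	if _, err := reader.Read(out); err != nil && err != io.EOF {
		return nil, err
	}
	return out, nil
}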

type test interface {
	Fatalf(string, ...interface{})
	Logf(string, ...interface{})
}

type chunkerTester struct {
	inputs map[uint64][]byte
	t      test
}

// fakeChunkStore doesn't store anything; it just implements the ChunkStore interface.
// It can be injected into a hasherStore when you only want to do the hashing without
// actually storing the data.
type fakeChunkStore struct {
}

// Put doesn't store anything; it is just here to implement ChunkStore.
func (f *fakeChunkStore) Put(context.Context, *Chunk) {
}

// Get doesn't retrieve anything; it is just here to implement ChunkStore.
func (f *fakeChunkStore) Get(context.Context, Address) (*Chunk, error) {
	return nil, errors.New("FakeChunkStore doesn't support Get")
}

// Close does nothing; it is just here to implement ChunkStore.
func (f *fakeChunkStore) Close() {
}
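
// newTestHasherStore wraps the given ChunkStore in a hasherStore using the named hash
// function, with the encryption flag set to false.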
func newTestHasherStore(chunkStore ChunkStore, hash string) *hasherStore {
	return NewHasherStore(chunkStore, MakeHashFunc(hash), false)
}
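
// testRandomBrokenData feeds n bytes of random data through a reader that breaks halfway
// and checks that both a direct Read and TreeSplit surface the "Broken reader" error.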
func testRandomBrokenData(n int, tester *chunkerTester) {
	data := io.LimitReader(rand.Reader, int64(n))
	brokendata := brokenLimitReader(data, n, n/2)

	buf := make([]byte, n)
	_, err := brokendata.Read(buf)
	if err == nil || err.Error() != "Broken reader" {
		tester.t.Fatalf("Broken reader is not broken, hence broken. Returns: %v", err)
	}

	data = io.LimitReader(rand.Reader, int64(n))
	brokendata = brokenLimitReader(data, n, n/2)

	putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)

	expectedError := fmt.Errorf("Broken reader")
	addr, _, err := TreeSplit(context.TODO(), brokendata, int64(n), putGetter)
	if err == nil || err.Error() != expectedError.Error() {
		tester.t.Fatalf("Not receiving the correct error! Expected %v, received %v", expectedError, err)
	}
	tester.t.Logf(" Key = %v\n", addr)
}
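
// testRandomData splits n bytes of (cached) random input with either the tree or the
// pyramid chunker, joins the result back with TreeJoin, verifies full and partial reads
// against the input, and returns the root address.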
func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester) Address {
	if tester.inputs == nil {
		tester.inputs = make(map[uint64][]byte)
	}
	input, found := tester.inputs[uint64(n)]
	var data io.Reader
	if !found {
		data, input = generateRandomData(n)
		tester.inputs[uint64(n)] = input
	} else {
		data = io.LimitReader(bytes.NewReader(input), int64(n))
	}

	putGetter := newTestHasherStore(NewMapChunkStore(), hash)

	var addr Address
	var wait func(context.Context) error
	var err error
	ctx := context.TODO()
	if usePyramid {
		addr, wait, err = PyramidSplit(ctx, data, putGetter, putGetter)
	} else {
		addr, wait, err = TreeSplit(ctx, data, int64(n), putGetter)
	}
	if err != nil {
		tester.t.Fatalf(err.Error())
	}
	tester.t.Logf(" Key = %v\n", addr)
	err = wait(ctx)
	if err != nil {
		tester.t.Fatalf(err.Error())
	}

	reader := TreeJoin(context.TODO(), addr, putGetter, 0)
	output := make([]byte, n)
	r, err := reader.Read(output)
	if r != n || err != io.EOF {
		tester.t.Fatalf("read error read: %v n = %v err = %v\n", r, n, err)
	}
	if input != nil {
		if !bytes.Equal(output, input) {
			tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", input, output)
		}
	}

	// testing partial read
	for i := 1; i < n; i += 10000 {
		readableLength := n - i
		output := make([]byte, readableLength)
		r, err := reader.ReadAt(output, int64(i))
		if r != readableLength || err != io.EOF {
			tester.t.Fatalf("readAt error with offset %v read: %v n = %v err = %v\n", i, r, readableLength, err)
		}
		if input != nil {
			if !bytes.Equal(output, input[i:]) {
				tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", input[i:], output)
			}
		}
	}

	return addr
}
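
// TestSha3ForCorrectness checks that the hasher returned by MakeHashFunc(SHA3Hash),
// seeded with the 8-byte length prefix via ResetWithLength, produces the same digest as
// a raw Keccak-256 over the full length-prefixed input.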
func TestSha3ForCorrectness(t *testing.T) {
	tester := &chunkerTester{t: t}

	size := 4096
	input := make([]byte, size+8)
	binary.LittleEndian.PutUint64(input[:8], uint64(size))

	io.LimitReader(bytes.NewReader(input[8:]), int64(size))

	rawSha3 := sha3.NewKeccak256()
	rawSha3.Reset()
	rawSha3.Write(input)
	rawSha3Output := rawSha3.Sum(nil)

	sha3FromMakeFunc := MakeHashFunc(SHA3Hash)()
	sha3FromMakeFunc.ResetWithLength(input[:8])
	sha3FromMakeFunc.Write(input[8:])
	sha3FromMakeFuncOutput := sha3FromMakeFunc.Sum(nil)

	if len(rawSha3Output) != len(sha3FromMakeFuncOutput) {
		tester.t.Fatalf("Original SHA3 and abstracted SHA3 have different lengths %v:%v\n", len(rawSha3Output), len(sha3FromMakeFuncOutput))
	}

	if !bytes.Equal(rawSha3Output, sha3FromMakeFuncOutput) {
		tester.t.Fatalf("Original SHA3 and abstracted SHA3 mismatch %v:%v\n", rawSha3Output, sha3FromMakeFuncOutput)
	}
}
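
// TestDataAppend splits an initial payload with the pyramid chunker, appends a second
// payload with PyramidAppend, and verifies that joining the new root returns the
// concatenation of both inputs.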
func TestDataAppend(t *testing.T) {
	sizes := []int{1, 1, 1, 4095, 4096, 4097, 1, 1, 1, 123456, 2345678, 2345678}
	appendSizes := []int{4095, 4096, 4097, 1, 1, 1, 8191, 8192, 8193, 9000, 3000, 5000}

	tester := &chunkerTester{t: t}
	for i := range sizes {
		n := sizes[i]
		m := appendSizes[i]

		if tester.inputs == nil {
			tester.inputs = make(map[uint64][]byte)
		}
		input, found := tester.inputs[uint64(n)]
		var data io.Reader
		if !found {
			data, input = generateRandomData(n)
			tester.inputs[uint64(n)] = input
		} else {
			data = io.LimitReader(bytes.NewReader(input), int64(n))
		}

		chunkStore := NewMapChunkStore()
		putGetter := newTestHasherStore(chunkStore, SHA3Hash)

		ctx := context.TODO()
		addr, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}
		err = wait(ctx)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}

		// create an append data stream
		appendInput, found := tester.inputs[uint64(m)]
		var appendData io.Reader
		if !found {
			appendData, appendInput = generateRandomData(m)
			tester.inputs[uint64(m)] = appendInput
		} else {
			appendData = io.LimitReader(bytes.NewReader(appendInput), int64(m))
		}

		putGetter = newTestHasherStore(chunkStore, SHA3Hash)
		newAddr, wait, err := PyramidAppend(ctx, addr, appendData, putGetter, putGetter)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}
		err = wait(ctx)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}

		reader := TreeJoin(ctx, newAddr, putGetter, 0)
		newOutput := make([]byte, n+m)
		r, err := reader.Read(newOutput)
		if r != (n + m) {
			tester.t.Fatalf("read error read: %v n = %v m = %v err = %v\n", r, n, m, err)
		}

		newInput := append(input, appendInput...)
		if !bytes.Equal(newOutput, newInput) {
			tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", newInput, newOutput)
		}
	}
}

// TestRandomData checks that the tree chunker and the pyramid chunker produce the same
// root key for the same input, for both the SHA3 and the BMT hash.
func TestRandomData(t *testing.T) {
	// This test can validate files up to a relatively short length, as the tree chunker slows down drastically.
	// Validation of longer files is done by TestLocalStoreAndRetrieve in the swarm package.
	sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 8191, 8192, 8193, 12287, 12288, 12289, 524288, 524288 + 1, 524288 + 4097, 7 * 524288, 7*524288 + 1, 7*524288 + 4097}
	tester := &chunkerTester{t: t}

	for _, s := range sizes {
		treeChunkerKey := testRandomData(false, SHA3Hash, s, tester)
		pyramidChunkerKey := testRandomData(true, SHA3Hash, s, tester)
		if treeChunkerKey.String() != pyramidChunkerKey.String() {
			tester.t.Fatalf("tree chunker and pyramid chunker key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerKey.String(), pyramidChunkerKey.String())
		}
	}

	for _, s := range sizes {
		treeChunkerKey := testRandomData(false, BMTHash, s, tester)
		pyramidChunkerKey := testRandomData(true, BMTHash, s, tester)
		if treeChunkerKey.String() != pyramidChunkerKey.String() {
			tester.t.Fatalf("tree chunker and pyramid chunker key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerKey.String(), pyramidChunkerKey.String())
		}
	}
}
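
// TestRandomBrokenData runs testRandomBrokenData for a range of input sizes.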
func TestRandomBrokenData(t *testing.T) {
	sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 8191, 8192, 8193, 12287, 12288, 12289, 123456, 2345678}
	tester := &chunkerTester{t: t}
	for _, s := range sizes {
		testRandomBrokenData(s, tester)
	}
}
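
// benchReadAll reads the whole lazy section reader in 1000-byte steps, ignoring read
// errors; the split/join benchmarks use it to force the join side to do real work.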
func benchReadAll(reader LazySectionReader) {
	size, _ := reader.Size(context.TODO(), nil)
	output := make([]byte, 1000)
	for pos := int64(0); pos < size; pos += 1000 {
		reader.ReadAt(output, pos)
	}
}
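
// benchmarkSplitJoin splits n bytes of test data with the pyramid chunker and then joins
// and reads everything back through TreeJoin.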
func benchmarkSplitJoin(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		data := testDataReader(n)
		putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)

		ctx := context.TODO()
		key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
		if err != nil {
			t.Fatalf(err.Error())
		}
		err = wait(ctx)
		if err != nil {
			t.Fatalf(err.Error())
		}
		reader := TreeJoin(ctx, key, putGetter, 0)
		benchReadAll(reader)
	}
}
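
// benchmarkSplitTreeSHA3 and the other benchmarkSplit* helpers below measure splitting
// only: they hash n bytes of test data with the tree or pyramid chunker through a
// fakeChunkStore, so nothing is actually stored.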
func benchmarkSplitTreeSHA3(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		data := testDataReader(n)
		putGetter := newTestHasherStore(&fakeChunkStore{}, SHA3Hash)

		_, _, err := TreeSplit(context.TODO(), data, int64(n), putGetter)
		if err != nil {
			t.Fatalf(err.Error())
		}
	}
}

func benchmarkSplitTreeBMT(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		data := testDataReader(n)
		putGetter := newTestHasherStore(&fakeChunkStore{}, BMTHash)

		_, _, err := TreeSplit(context.TODO(), data, int64(n), putGetter)
		if err != nil {
			t.Fatalf(err.Error())
		}
	}
}

func benchmarkSplitPyramidSHA3(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		data := testDataReader(n)
		putGetter := newTestHasherStore(&fakeChunkStore{}, SHA3Hash)

		_, _, err := PyramidSplit(context.TODO(), data, putGetter, putGetter)
		if err != nil {
			t.Fatalf(err.Error())
		}
	}
}

func benchmarkSplitPyramidBMT(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		data := testDataReader(n)
		putGetter := newTestHasherStore(&fakeChunkStore{}, BMTHash)

		_, _, err := PyramidSplit(context.TODO(), data, putGetter, putGetter)
		if err != nil {
			t.Fatalf(err.Error())
		}
	}
}
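
// benchmarkSplitAppendPyramid splits n bytes with the pyramid chunker and then appends
// another m bytes to the resulting root with PyramidAppend.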
func benchmarkSplitAppendPyramid(n, m int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		data := testDataReader(n)
		data1 := testDataReader(m)

		chunkStore := NewMapChunkStore()
		putGetter := newTestHasherStore(chunkStore, SHA3Hash)

		ctx := context.TODO()
		key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
		if err != nil {
			t.Fatalf(err.Error())
		}
		err = wait(ctx)
		if err != nil {
			t.Fatalf(err.Error())
		}

		putGetter = newTestHasherStore(chunkStore, SHA3Hash)
		_, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter)
		if err != nil {
			t.Fatalf(err.Error())
		}
		err = wait(ctx)
		if err != nil {
			t.Fatalf(err.Error())
		}
	}
}

func BenchmarkSplitJoin_2(t *testing.B) { benchmarkSplitJoin(100, t) }
func BenchmarkSplitJoin_3(t *testing.B) { benchmarkSplitJoin(1000, t) }
func BenchmarkSplitJoin_4(t *testing.B) { benchmarkSplitJoin(10000, t) }
func BenchmarkSplitJoin_5(t *testing.B) { benchmarkSplitJoin(100000, t) }
func BenchmarkSplitJoin_6(t *testing.B) { benchmarkSplitJoin(1000000, t) }
func BenchmarkSplitJoin_7(t *testing.B) { benchmarkSplitJoin(10000000, t) }

// func BenchmarkSplitJoin_8(t *testing.B) { benchmarkJoin(100000000, t) }

func BenchmarkSplitTreeSHA3_2(t *testing.B) { benchmarkSplitTreeSHA3(100, t) }
func BenchmarkSplitTreeSHA3_2h(t *testing.B) { benchmarkSplitTreeSHA3(500, t) }
func BenchmarkSplitTreeSHA3_3(t *testing.B) { benchmarkSplitTreeSHA3(1000, t) }
func BenchmarkSplitTreeSHA3_3h(t *testing.B) { benchmarkSplitTreeSHA3(5000, t) }
func BenchmarkSplitTreeSHA3_4(t *testing.B) { benchmarkSplitTreeSHA3(10000, t) }
func BenchmarkSplitTreeSHA3_4h(t *testing.B) { benchmarkSplitTreeSHA3(50000, t) }
func BenchmarkSplitTreeSHA3_5(t *testing.B) { benchmarkSplitTreeSHA3(100000, t) }
func BenchmarkSplitTreeSHA3_6(t *testing.B) { benchmarkSplitTreeSHA3(1000000, t) }
func BenchmarkSplitTreeSHA3_7(t *testing.B) { benchmarkSplitTreeSHA3(10000000, t) }

// func BenchmarkSplitTreeSHA3_8(t *testing.B) { benchmarkSplitTreeSHA3(100000000, t) }

func BenchmarkSplitTreeBMT_2(t *testing.B) { benchmarkSplitTreeBMT(100, t) }
func BenchmarkSplitTreeBMT_2h(t *testing.B) { benchmarkSplitTreeBMT(500, t) }
func BenchmarkSplitTreeBMT_3(t *testing.B) { benchmarkSplitTreeBMT(1000, t) }
func BenchmarkSplitTreeBMT_3h(t *testing.B) { benchmarkSplitTreeBMT(5000, t) }
func BenchmarkSplitTreeBMT_4(t *testing.B) { benchmarkSplitTreeBMT(10000, t) }
func BenchmarkSplitTreeBMT_4h(t *testing.B) { benchmarkSplitTreeBMT(50000, t) }
func BenchmarkSplitTreeBMT_5(t *testing.B) { benchmarkSplitTreeBMT(100000, t) }
func BenchmarkSplitTreeBMT_6(t *testing.B) { benchmarkSplitTreeBMT(1000000, t) }
func BenchmarkSplitTreeBMT_7(t *testing.B) { benchmarkSplitTreeBMT(10000000, t) }

// func BenchmarkSplitTreeBMT_8(t *testing.B) { benchmarkSplitTreeBMT(100000000, t) }

func BenchmarkSplitPyramidSHA3_2(t *testing.B) { benchmarkSplitPyramidSHA3(100, t) }
func BenchmarkSplitPyramidSHA3_2h(t *testing.B) { benchmarkSplitPyramidSHA3(500, t) }
func BenchmarkSplitPyramidSHA3_3(t *testing.B) { benchmarkSplitPyramidSHA3(1000, t) }
func BenchmarkSplitPyramidSHA3_3h(t *testing.B) { benchmarkSplitPyramidSHA3(5000, t) }
func BenchmarkSplitPyramidSHA3_4(t *testing.B) { benchmarkSplitPyramidSHA3(10000, t) }
func BenchmarkSplitPyramidSHA3_4h(t *testing.B) { benchmarkSplitPyramidSHA3(50000, t) }
func BenchmarkSplitPyramidSHA3_5(t *testing.B) { benchmarkSplitPyramidSHA3(100000, t) }
func BenchmarkSplitPyramidSHA3_6(t *testing.B) { benchmarkSplitPyramidSHA3(1000000, t) }
func BenchmarkSplitPyramidSHA3_7(t *testing.B) { benchmarkSplitPyramidSHA3(10000000, t) }

// func BenchmarkSplitPyramidSHA3_8(t *testing.B) { benchmarkSplitPyramidSHA3(100000000, t) }

func BenchmarkSplitPyramidBMT_2(t *testing.B) { benchmarkSplitPyramidBMT(100, t) }
func BenchmarkSplitPyramidBMT_2h(t *testing.B) { benchmarkSplitPyramidBMT(500, t) }
func BenchmarkSplitPyramidBMT_3(t *testing.B) { benchmarkSplitPyramidBMT(1000, t) }
func BenchmarkSplitPyramidBMT_3h(t *testing.B) { benchmarkSplitPyramidBMT(5000, t) }
func BenchmarkSplitPyramidBMT_4(t *testing.B) { benchmarkSplitPyramidBMT(10000, t) }
func BenchmarkSplitPyramidBMT_4h(t *testing.B) { benchmarkSplitPyramidBMT(50000, t) }
func BenchmarkSplitPyramidBMT_5(t *testing.B) { benchmarkSplitPyramidBMT(100000, t) }
func BenchmarkSplitPyramidBMT_6(t *testing.B) { benchmarkSplitPyramidBMT(1000000, t) }
func BenchmarkSplitPyramidBMT_7(t *testing.B) { benchmarkSplitPyramidBMT(10000000, t) }

// func BenchmarkSplitPyramidBMT_8(t *testing.B) { benchmarkSplitPyramidBMT(100000000, t) }

func BenchmarkSplitAppendPyramid_2(t *testing.B) { benchmarkSplitAppendPyramid(100, 1000, t) }
func BenchmarkSplitAppendPyramid_2h(t *testing.B) { benchmarkSplitAppendPyramid(500, 1000, t) }
func BenchmarkSplitAppendPyramid_3(t *testing.B) { benchmarkSplitAppendPyramid(1000, 1000, t) }
func BenchmarkSplitAppendPyramid_4(t *testing.B) { benchmarkSplitAppendPyramid(10000, 1000, t) }
func BenchmarkSplitAppendPyramid_4h(t *testing.B) { benchmarkSplitAppendPyramid(50000, 1000, t) }
func BenchmarkSplitAppendPyramid_5(t *testing.B) { benchmarkSplitAppendPyramid(1000000, 1000, t) }
func BenchmarkSplitAppendPyramid_6(t *testing.B) { benchmarkSplitAppendPyramid(1000000, 1000, t) }
func BenchmarkSplitAppendPyramid_7(t *testing.B) { benchmarkSplitAppendPyramid(10000000, 1000, t) }

// func BenchmarkAppendPyramid_8(t *testing.B) { benchmarkAppendPyramid(100000000, 1000, t) }

// To run the benchmarks: go test -timeout 20m -cpu 4 -bench=./swarm/storage -run no
// If you don't add the timeout argument above, the benchmark will hit the default test
// timeout and dump a goroutine trace.