export_test.go 7.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287
  1. // Copyright 2018 The go-ethereum Authors
  2. // This file is part of go-ethereum.
  3. //
  4. // go-ethereum is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // go-ethereum is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU General Public License
  15. // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
  16. package main
  17. import (
  18. "archive/tar"
  19. "bytes"
  20. "compress/gzip"
  21. "crypto/md5"
  22. "encoding/base64"
  23. "encoding/hex"
  24. "io"
  25. "io/ioutil"
  26. "net/http"
  27. "os"
  28. "path"
  29. "runtime"
  30. "strings"
  31. "testing"
  32. "github.com/ethereum/go-ethereum/cmd/swarm/testdata"
  33. "github.com/ethereum/go-ethereum/log"
  34. "github.com/ethereum/go-ethereum/swarm"
  35. "github.com/ethereum/go-ethereum/swarm/testutil"
  36. )
const (
	// DATABASE_FIXTURE_BZZ_ACCOUNT is the bzz account the legacy database
	// fixture was created with; passed as --bzzaccount when opening it.
	DATABASE_FIXTURE_BZZ_ACCOUNT = "0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
	// DATABASE_FIXTURE_PASSWORD unlocks the fixture account.
	DATABASE_FIXTURE_PASSWORD = "pass"
	// FIXTURE_DATADIR_PREFIX is the node data directory inside the unpacked
	// fixture tarball (relative to the temp dir it is inflated into).
	FIXTURE_DATADIR_PREFIX = "swarm/bzz-0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
	// FixtureBaseKey is the base key of the fixture node, handed to
	// `swarm db export` when exporting the legacy store.
	FixtureBaseKey = "a9f22b3d77b4bdf5f3eefce995d6c8e7cecf2636f20956f08a0d1ed95adb52ad"
)
  43. // TestCLISwarmExportImport perform the following test:
  44. // 1. runs swarm node
  45. // 2. uploads a random file
  46. // 3. runs an export of the local datastore
  47. // 4. runs a second swarm node
  48. // 5. imports the exported datastore
  49. // 6. fetches the uploaded random file from the second node
  50. func TestCLISwarmExportImport(t *testing.T) {
  51. if runtime.GOOS == "windows" {
  52. t.Skip()
  53. }
  54. cluster := newTestCluster(t, 1)
  55. // generate random 1mb file
  56. content := testutil.RandomBytes(1, 1000000)
  57. fileName := testutil.TempFileWithContent(t, string(content))
  58. defer os.Remove(fileName)
  59. // upload the file with 'swarm up' and expect a hash
  60. up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", fileName)
  61. _, matches := up.ExpectRegexp(`[a-f\d]{64}`)
  62. up.ExpectExit()
  63. hash := matches[0]
  64. var info swarm.Info
  65. if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
  66. t.Fatal(err)
  67. }
  68. cluster.Stop()
  69. defer cluster.Cleanup()
  70. // generate an export.tar
  71. exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
  72. exportCmd.ExpectExit()
  73. // start second cluster
  74. cluster2 := newTestCluster(t, 1)
  75. var info2 swarm.Info
  76. if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
  77. t.Fatal(err)
  78. }
  79. // stop second cluster, so that we close LevelDB
  80. cluster2.Stop()
  81. defer cluster2.Cleanup()
  82. // import the export.tar
  83. importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
  84. importCmd.ExpectExit()
  85. // spin second cluster back up
  86. cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
  87. // try to fetch imported file
  88. res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
  89. if err != nil {
  90. t.Fatal(err)
  91. }
  92. if res.StatusCode != 200 {
  93. t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
  94. }
  95. // compare downloaded file with the generated random file
  96. mustEqualFiles(t, bytes.NewReader(content), res.Body)
  97. }
  98. // TestExportLegacyToNew checks that an old database gets imported correctly into the new localstore structure
  99. // The test sequence is as follows:
  100. // 1. unpack database fixture to tmp dir
  101. // 2. try to open with new swarm binary that should complain about old database
  102. // 3. export from old database
  103. // 4. remove the chunks folder
  104. // 5. import the dump
  105. // 6. file should be accessible
  106. func TestExportLegacyToNew(t *testing.T) {
  107. if runtime.GOOS == "windows" {
  108. t.Skip() // this should be reenabled once the appveyor tests underlying issue is fixed
  109. }
  110. /*
  111. fixture bzz account 0aa159029fa13ffa8fa1c6fff6ebceface99d6a4
  112. */
  113. const UPLOADED_FILE_MD5_HASH = "a001fdae53ba50cae584b8b02b06f821"
  114. const UPLOADED_HASH = "67a86082ee0ea1bc7dd8d955bb1e14d04f61d55ae6a4b37b3d0296a3a95e454a"
  115. tmpdir, err := ioutil.TempDir("", "swarm-test")
  116. log.Trace("running legacy datastore migration test", "temp dir", tmpdir)
  117. defer os.RemoveAll(tmpdir)
  118. if err != nil {
  119. t.Fatal(err)
  120. }
  121. inflateBase64Gzip(t, testdata.DATADIR_MIGRATION_FIXTURE, tmpdir)
  122. tmpPassword := testutil.TempFileWithContent(t, DATABASE_FIXTURE_PASSWORD)
  123. defer os.Remove(tmpPassword)
  124. flags := []string{
  125. "--datadir", tmpdir,
  126. "--bzzaccount", DATABASE_FIXTURE_BZZ_ACCOUNT,
  127. "--password", tmpPassword,
  128. }
  129. newSwarmOldDb := runSwarm(t, flags...)
  130. _, matches := newSwarmOldDb.ExpectRegexp(".+")
  131. newSwarmOldDb.ExpectExit()
  132. if len(matches) == 0 {
  133. t.Fatalf("stdout not matched")
  134. }
  135. if newSwarmOldDb.ExitStatus() == 0 {
  136. t.Fatal("should error")
  137. }
  138. t.Log("exporting legacy database")
  139. actualDataDir := path.Join(tmpdir, FIXTURE_DATADIR_PREFIX)
  140. exportCmd := runSwarm(t, "--verbosity", "5", "db", "export", actualDataDir+"/chunks", tmpdir+"/export.tar", FixtureBaseKey)
  141. exportCmd.ExpectExit()
  142. stat, err := os.Stat(tmpdir + "/export.tar")
  143. if err != nil {
  144. t.Fatal(err)
  145. }
  146. // make some silly size assumption
  147. if stat.Size() < 90000 {
  148. t.Fatal("export size too small")
  149. }
  150. log.Info("removing chunk datadir")
  151. err = os.RemoveAll(path.Join(actualDataDir, "chunks"))
  152. if err != nil {
  153. t.Fatal(err)
  154. }
  155. // start second cluster
  156. cluster2 := newTestCluster(t, 1)
  157. var info2 swarm.Info
  158. if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
  159. t.Fatal(err)
  160. }
  161. // stop second cluster, so that we close LevelDB
  162. cluster2.Stop()
  163. defer cluster2.Cleanup()
  164. // import the export.tar
  165. importCmd := runSwarm(t, "db", "import", "--legacy", info2.Path+"/chunks", tmpdir+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
  166. importCmd.ExpectExit()
  167. // spin second cluster back up
  168. cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
  169. t.Log("trying to http get the file")
  170. // try to fetch imported file
  171. res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + UPLOADED_HASH)
  172. if err != nil {
  173. t.Fatal(err)
  174. }
  175. if res.StatusCode != 200 {
  176. t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
  177. }
  178. h := md5.New()
  179. if _, err := io.Copy(h, res.Body); err != nil {
  180. t.Fatal(err)
  181. }
  182. sum := h.Sum(nil)
  183. b, err := hex.DecodeString(UPLOADED_FILE_MD5_HASH)
  184. if err != nil {
  185. t.Fatal(err)
  186. }
  187. if !bytes.Equal(sum, b) {
  188. t.Fatal("should be equal")
  189. }
  190. }
  191. func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
  192. h := md5.New()
  193. upLen, err := io.Copy(h, up)
  194. if err != nil {
  195. t.Fatal(err)
  196. }
  197. upHash := h.Sum(nil)
  198. h.Reset()
  199. downLen, err := io.Copy(h, down)
  200. if err != nil {
  201. t.Fatal(err)
  202. }
  203. downHash := h.Sum(nil)
  204. if !bytes.Equal(upHash, downHash) || upLen != downLen {
  205. t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
  206. }
  207. }
  208. func inflateBase64Gzip(t *testing.T, base64File, directory string) {
  209. t.Helper()
  210. f := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64File))
  211. gzf, err := gzip.NewReader(f)
  212. if err != nil {
  213. t.Fatal(err)
  214. }
  215. tarReader := tar.NewReader(gzf)
  216. for {
  217. header, err := tarReader.Next()
  218. if err == io.EOF {
  219. break
  220. }
  221. if err != nil {
  222. t.Fatal(err)
  223. }
  224. name := header.Name
  225. switch header.Typeflag {
  226. case tar.TypeDir:
  227. err := os.Mkdir(path.Join(directory, name), os.ModePerm)
  228. if err != nil {
  229. t.Fatal(err)
  230. }
  231. case tar.TypeReg:
  232. file, err := os.Create(path.Join(directory, name))
  233. if err != nil {
  234. t.Fatal(err)
  235. }
  236. if _, err := io.Copy(file, tarReader); err != nil {
  237. t.Fatal(err)
  238. }
  239. file.Close()
  240. default:
  241. t.Fatal("shouldn't happen")
  242. }
  243. }
  244. }