  1. // Copyright 2018 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package stream
  17. import (
  18. "context"
  19. "fmt"
  20. "os"
  21. "sync"
  22. "testing"
  23. "time"
  24. "github.com/ethereum/go-ethereum/node"
  25. "github.com/ethereum/go-ethereum/p2p/enode"
  26. "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
  27. "github.com/ethereum/go-ethereum/swarm/log"
  28. "github.com/ethereum/go-ethereum/swarm/network"
  29. "github.com/ethereum/go-ethereum/swarm/network/simulation"
  30. "github.com/ethereum/go-ethereum/swarm/state"
  31. "github.com/ethereum/go-ethereum/swarm/storage"
  32. )
// Constants bounding the size of randomly generated test files
// (used by the file-generation helpers elsewhere in this package).
const (
	minFileSize = 2  // smallest random file size to generate
	maxFileSize = 40 // largest random file size to generate
)
  38. //This test is a retrieval test for nodes.
  39. //A configurable number of nodes can be
  40. //provided to the test.
  41. //Files are uploaded to nodes, other nodes try to retrieve the file
  42. //Number of nodes can be provided via commandline too.
  43. func TestFileRetrieval(t *testing.T) {
  44. if *nodes != 0 {
  45. err := runFileRetrievalTest(*nodes)
  46. if err != nil {
  47. t.Fatal(err)
  48. }
  49. } else {
  50. nodeCnt := []int{16}
  51. //if the `longrunning` flag has been provided
  52. //run more test combinations
  53. if *longrunning {
  54. nodeCnt = append(nodeCnt, 32, 64, 128)
  55. }
  56. for _, n := range nodeCnt {
  57. err := runFileRetrievalTest(n)
  58. if err != nil {
  59. t.Fatal(err)
  60. }
  61. }
  62. }
  63. }
  64. //This test is a retrieval test for nodes.
  65. //One node is randomly selected to be the pivot node.
  66. //A configurable number of chunks and nodes can be
  67. //provided to the test, the number of chunks is uploaded
  68. //to the pivot node and other nodes try to retrieve the chunk(s).
  69. //Number of chunks and nodes can be provided via commandline too.
  70. func TestRetrieval(t *testing.T) {
  71. //if nodes/chunks have been provided via commandline,
  72. //run the tests with these values
  73. if *nodes != 0 && *chunks != 0 {
  74. err := runRetrievalTest(*chunks, *nodes)
  75. if err != nil {
  76. t.Fatal(err)
  77. }
  78. } else {
  79. var nodeCnt []int
  80. var chnkCnt []int
  81. //if the `longrunning` flag has been provided
  82. //run more test combinations
  83. if *longrunning {
  84. nodeCnt = []int{16, 32, 128}
  85. chnkCnt = []int{4, 32, 256}
  86. } else {
  87. //default test
  88. nodeCnt = []int{16}
  89. chnkCnt = []int{32}
  90. }
  91. for _, n := range nodeCnt {
  92. for _, c := range chnkCnt {
  93. err := runRetrievalTest(c, n)
  94. if err != nil {
  95. t.Fatal(err)
  96. }
  97. }
  98. }
  99. }
  100. }
// retrievalSimServiceMap declares the services each simulation node runs:
// a single "streamer" service constructed by retrievalStreamerFunc.
// The snapshot files loaded by the tests must list this service name.
var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
	"streamer": retrievalStreamerFunc,
}
  104. func retrievalStreamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
  105. n := ctx.Config.Node()
  106. addr := network.NewAddr(n)
  107. store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
  108. if err != nil {
  109. return nil, nil, err
  110. }
  111. bucket.Store(bucketKeyStore, store)
  112. localStore := store.(*storage.LocalStore)
  113. netStore, err := storage.NewNetStore(localStore, nil)
  114. if err != nil {
  115. return nil, nil, err
  116. }
  117. kad := network.NewKademlia(addr.Over(), network.NewKadParams())
  118. delivery := NewDelivery(kad, netStore)
  119. netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
  120. r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
  121. DoSync: true,
  122. SyncUpdateDelay: 3 * time.Second,
  123. DoRetrieve: true,
  124. })
  125. fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
  126. bucket.Store(bucketKeyFileStore, fileStore)
  127. cleanup = func() {
  128. os.RemoveAll(datadir)
  129. netStore.Close()
  130. r.Close()
  131. }
  132. return r, cleanup, nil
  133. }
/*
runFileRetrievalTest loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	// Snapshot file name is keyed by the node count, e.g. testing/snapshot_16.json.
	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	// The entire simulation (upload, sync, retrieval retry loop) must finish
	// within one minute; the retry loop below relies on this timeout to end.
	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//an array for the random files
		var randomFiles []string
		//channel to signal when the upload has finished
		//uploadFinished := make(chan struct{})
		//channel to trigger new node checks

		// NOTE: this assigns to the outer `err` declared above, not a new
		// closure-local variable.
		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected file, check if it is in the local store
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("No filestore")
				}
				fileStore := item.(*storage.FileStore)
				//check all chunks
				for i, hash := range conf.hashes {
					// The discarded second return value is presumably the
					// `isEncrypted` flag, not an error — TODO confirm against
					// storage.FileStore.Retrieve.
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that we can read the file size and that it corresponds to the generated file size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
						// Not everything is synced yet: back off briefly and
						// restart the whole check over all nodes.
						log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
				}
			}
			// every file was retrievable from every node
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}
/*
runRetrievalTest generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(chunkCount int, nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	// Snapshot file name is keyed by the node count, e.g. testing/snapshot_16.json.
	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	// NOTE(review): unlike runFileRetrievalTest, this context carries no
	// timeout, so the retry loop below can only end in success or an error —
	// verify this is intentional.
	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//this is the node selected for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		// NOTE: this assigns to the outer `err` declared above, not a new
		// closure-local variable.
		conf.hashes, err = uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				//check on the node's FileStore (netstore)
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("No filestore")
				}
				fileStore := item.(*storage.FileStore)
				//check all chunks
				for _, hash := range conf.hashes {
					// The discarded second return value is presumably the
					// `isEncrypted` flag, not an error — TODO confirm against
					// storage.FileStore.Retrieve.
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that we can read the chunk size and that it corresponds to the generated chunk size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
						// Not everything is synced yet: back off briefly and
						// restart the whole check over all nodes.
						log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id, "size", s)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
				}
			}
			// all nodes and files found, exit loop and return without error
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}