@@ -104,8 +104,47 @@ func TestRetrieval(t *testing.T) {
 	}
 }
 
-/*
+var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
+	"streamer": retrievalStreamerFunc,
+}
+
+func retrievalStreamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+	n := ctx.Config.Node()
+	addr := network.NewAddr(n)
+	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
+	if err != nil {
+		return nil, nil, err
+	}
+	bucket.Store(bucketKeyStore, store)
+
+	localStore := store.(*storage.LocalStore)
+	netStore, err := storage.NewNetStore(localStore, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+	delivery := NewDelivery(kad, netStore)
+	netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
+
+	r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
+		DoSync:          true,
+		SyncUpdateDelay: 3 * time.Second,
+		DoRetrieve:      true,
+	})
+
+	fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
+	bucket.Store(bucketKeyFileStore, fileStore)
+
+	cleanup = func() {
+		os.RemoveAll(datadir)
+		netStore.Close()
+		r.Close()
+	}
+
+	return r, cleanup, nil
+}
 
+/*
 The test loads a snapshot file to construct the swarm network,
 assuming that the snapshot file identifies a healthy
 kademlia network. Nevertheless a health check runs in the
@@ -114,43 +153,7 @@ simulation's `action` function.
 The snapshot should have 'streamer' in its service list.
 */
 func runFileRetrievalTest(nodeCount int) error {
-	sim := simulation.New(map[string]simulation.ServiceFunc{
-		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
-			node := ctx.Config.Node()
-			addr := network.NewAddr(node)
-			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
-			if err != nil {
-				return nil, nil, err
-			}
-			bucket.Store(bucketKeyStore, store)
-
-			localStore := store.(*storage.LocalStore)
-			netStore, err := storage.NewNetStore(localStore, nil)
-			if err != nil {
-				return nil, nil, err
-			}
-			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
-			delivery := NewDelivery(kad, netStore)
-			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
-
-			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
-				DoSync:          true,
-				SyncUpdateDelay: 3 * time.Second,
-			})
-
-			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
-			bucket.Store(bucketKeyFileStore, fileStore)
-
-			cleanup = func() {
-				os.RemoveAll(datadir)
-				netStore.Close()
-				r.Close()
-			}
-
-			return r, cleanup, nil
-
-		},
-	})
+	sim := simulation.New(retrievalSimServiceMap)
 	defer sim.Close()
 
 	log.Info("Initializing test config")
@@ -200,49 +203,29 @@ func runFileRetrievalTest(nodeCount int) error {
 
 		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
 		// or until the timeout is reached.
-		allSuccess := false
-		for !allSuccess {
+	REPEAT:
+		for {
 			for _, id := range nodeIDs {
-				//for each expected chunk, check if it is in the local store
-				localChunks := conf.idToChunksMap[id]
-				localSuccess := true
-				for _, ch := range localChunks {
-					//get the real chunk by the index in the index array
-					chunk := conf.hashes[ch]
-					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
-					//check if the expected chunk is indeed in the localstore
-					var err error
-					//check on the node's FileStore (netstore)
-					item, ok := sim.NodeItem(id, bucketKeyFileStore)
-					if !ok {
-						return fmt.Errorf("No registry")
-					}
-					fileStore := item.(*storage.FileStore)
-					//check all chunks
-					for i, hash := range conf.hashes {
-						reader, _ := fileStore.Retrieve(context.TODO(), hash)
-						//check that we can read the file size and that it corresponds to the generated file size
-						if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
-							allSuccess = false
-							log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
-						} else {
-							log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
-						}
-					}
-					if err != nil {
-						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
-						localSuccess = false
-					} else {
-						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
+				//for each expected file, check if it is in the local store
+				item, ok := sim.NodeItem(id, bucketKeyFileStore)
+				if !ok {
+					return fmt.Errorf("No filestore")
+				}
+				fileStore := item.(*storage.FileStore)
+				//check all files
+				for i, hash := range conf.hashes {
+					reader, _ := fileStore.Retrieve(context.TODO(), hash)
+					//check that we can read the file size and that it corresponds to the generated file size
+					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
+						log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
+						time.Sleep(500 * time.Millisecond)
+						continue REPEAT
 					}
+					log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
 				}
-				allSuccess = localSuccess
 			}
+			return nil
 		}
-		if !allSuccess {
-			return fmt.Errorf("Not all chunks succeeded!")
-		}
-		return nil
 	})
 
 	if result.Error != nil {
@@ -263,44 +246,7 @@ simulation's `action` function.
 The snapshot should have 'streamer' in its service list.
 */
 func runRetrievalTest(chunkCount int, nodeCount int) error {
-	sim := simulation.New(map[string]simulation.ServiceFunc{
-		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
-			node := ctx.Config.Node()
-			addr := network.NewAddr(node)
-			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
-			if err != nil {
-				return nil, nil, err
-			}
-			bucket.Store(bucketKeyStore, store)
-
-			localStore := store.(*storage.LocalStore)
-			netStore, err := storage.NewNetStore(localStore, nil)
-			if err != nil {
-				return nil, nil, err
-			}
-			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
-			delivery := NewDelivery(kad, netStore)
-			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
-
-			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
-				DoSync:          true,
-				SyncUpdateDelay: 0,
-			})
-
-			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
-			bucketKeyFileStore = simulation.BucketKey("filestore")
-			bucket.Store(bucketKeyFileStore, fileStore)
-
-			cleanup = func() {
-				os.RemoveAll(datadir)
-				netStore.Close()
-				r.Close()
-			}
-
-			return r, cleanup, nil
-
-		},
-	})
+	sim := simulation.New(retrievalSimServiceMap)
 	defer sim.Close()
 
 	conf := &synctestConfig{}
@@ -330,8 +276,6 @@ func runRetrievalTest(chunkCount int, nodeCount int) error {
 		conf.addrToIDMap[string(a)] = n
 	}
 
-	//an array for the random files
-	var randomFiles []string
 	//this is the node selected for upload
 	node := sim.RandomUpNode()
 	item, ok := sim.NodeItem(node.ID, bucketKeyStore)
@@ -349,49 +293,31 @@ func runRetrievalTest(chunkCount int, nodeCount int) error {
 
 		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
 		// or until the timeout is reached.
-		allSuccess := false
-		for !allSuccess {
+	REPEAT:
+		for {
 			for _, id := range nodeIDs {
 				//for each expected chunk, check if it is in the local store
-				localChunks := conf.idToChunksMap[id]
-				localSuccess := true
-				for _, ch := range localChunks {
-					//get the real chunk by the index in the index array
-					chunk := conf.hashes[ch]
-					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
-					//check if the expected chunk is indeed in the localstore
-					var err error
-					//check on the node's FileStore (netstore)
-					item, ok := sim.NodeItem(id, bucketKeyFileStore)
-					if !ok {
-						return fmt.Errorf("No registry")
-					}
-					fileStore := item.(*storage.FileStore)
-					//check all chunks
-					for i, hash := range conf.hashes {
-						reader, _ := fileStore.Retrieve(context.TODO(), hash)
-						//check that we can read the file size and that it corresponds to the generated file size
-						if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
-							allSuccess = false
-							log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
-						} else {
-							log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
-						}
-					}
-					if err != nil {
-						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
-						localSuccess = false
-					} else {
-						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
+				//check on the node's FileStore (netstore)
+				item, ok := sim.NodeItem(id, bucketKeyFileStore)
+				if !ok {
+					return fmt.Errorf("No filestore")
+				}
+				fileStore := item.(*storage.FileStore)
+				//check all chunks
+				for _, hash := range conf.hashes {
+					reader, _ := fileStore.Retrieve(context.TODO(), hash)
+					//check that we can read the chunk size and that it corresponds to the generated chunk size
+					if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
+						log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id, "size", s)
+						time.Sleep(500 * time.Millisecond)
+						continue REPEAT
 					}
+					log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
 				}
-				allSuccess = localSuccess
 			}
+			// all nodes and chunks found, exit loop and return without error
+			return nil
 		}
-		if !allSuccess {
-			return fmt.Errorf("Not all chunks succeeded!")
-		}
-		return nil
 	})
 
 	if result.Error != nil {