- // Copyright 2018 The go-ethereum Authors
- // This file is part of the go-ethereum library.
- //
- // The go-ethereum library is free software: you can redistribute it and/or modify
- // it under the terms of the GNU Lesser General Public License as published by
- // the Free Software Foundation, either version 3 of the License, or
- // (at your option) any later version.
- //
- // The go-ethereum library is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU Lesser General Public License for more details.
- //
- // You should have received a copy of the GNU Lesser General Public License
- // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- package stream
- import (
- "bytes"
- "context"
- "fmt"
- "os"
- "sync"
- "testing"
- "time"
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/p2p/protocols"
- "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
- p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
- "github.com/ethereum/go-ethereum/swarm/log"
- "github.com/ethereum/go-ethereum/swarm/network"
- pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
- "github.com/ethereum/go-ethereum/swarm/network/simulation"
- "github.com/ethereum/go-ethereum/swarm/state"
- "github.com/ethereum/go-ethereum/swarm/storage"
- "github.com/ethereum/go-ethereum/swarm/testutil"
- )
- //Tests initializing a retrieve request
- func TestStreamerRetrieveRequest(t *testing.T) {
- regOpts := &RegistryOptions{
- Retrieval: RetrievalClientOnly,
- Syncing: SyncingDisabled,
- }
- tester, streamer, _, teardown, err := newStreamerTester(t, regOpts)
- defer teardown()
- if err != nil {
- t.Fatal(err)
- }
- node := tester.Nodes[0]
- ctx := context.Background()
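- //build a retrieval request for hash0: the boolean turns on SkipCheck and the
- //sync.Map holds the set of peers to skip for this request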
- req := network.NewRequest(
- storage.Address(hash0[:]),
- true,
- &sync.Map{},
- )
- streamer.delivery.RequestFromPeers(ctx, req)
- stream := NewStream(swarmChunkServerStreamName, "", true)
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "RetrieveRequestMsg",
- Expects: []p2ptest.Expect{
- { //start expecting a subscription for RETRIEVE_REQUEST due to `RetrievalClientOnly`
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: nil,
- Priority: Top,
- },
- Peer: node.ID(),
- },
- { //expect a retrieve request message for the given hash
- Code: 5,
- Msg: &RetrieveRequestMsg{
- Addr: hash0[:],
- SkipCheck: true,
- },
- Peer: node.ID(),
- },
- },
- })
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- }
- //Test requesting a chunk from a peer, then issuing an "empty" OfferedHashesMsg (no hashes available yet)
- //Should time out as the peer does not have the chunk (no syncing happened previously)
- func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(t, &RegistryOptions{
- Retrieval: RetrievalEnabled,
- Syncing: SyncingDisabled, //do no syncing
- })
- defer teardown()
- if err != nil {
- t.Fatal(err)
- }
- node := tester.Nodes[0]
- chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
- peer := streamer.getPeer(node.ID())
- stream := NewStream(swarmChunkServerStreamName, "", true)
- //simulate pre-subscription to RETRIEVE_REQUEST stream on peer
- peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
- Stream: stream,
- History: nil,
- Priority: Top,
- })
- //test the exchange
- err = tester.TestExchanges(p2ptest.Exchange{
- Expects: []p2ptest.Expect{
- { //first expect a subscription to the RETRIEVE_REQUEST stream
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: nil,
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- }, p2ptest.Exchange{
- Label: "RetrieveRequestMsg",
- Triggers: []p2ptest.Trigger{
- { //then the actual RETRIEVE_REQUEST....
- Code: 5,
- Msg: &RetrieveRequestMsg{
- Addr: chunk.Address()[:],
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- { //to which the peer responds with offered hashes
- Code: 1,
- Msg: &OfferedHashesMsg{
- HandoverProof: nil,
- Hashes: nil,
- From: 0,
- To: 0,
- },
- Peer: node.ID(),
- },
- },
- })
- //should fail with a timeout as the peer we are requesting
- //the chunk from does not have the chunk
- expectedError := `exchange #1 "RetrieveRequestMsg": timed out`
- if err == nil || err.Error() != expectedError {
- t.Fatalf("Expected error %v, got %v", expectedError, err)
- }
- }
- // The upstream request server receives a retrieve request and responds with
- // offered hashes, or with direct delivery if SkipCheck is set to true
- func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
- tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
- Retrieval: RetrievalEnabled,
- Syncing: SyncingDisabled,
- })
- defer teardown()
- if err != nil {
- t.Fatal(err)
- }
- node := tester.Nodes[0]
- peer := streamer.getPeer(node.ID())
- stream := NewStream(swarmChunkServerStreamName, "", true)
- peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
- Stream: stream,
- History: nil,
- Priority: Top,
- })
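- //store a chunk whose data equals its own address, so the expected OfferedHashesMsg
- //and ChunkDeliveryMsg payloads below are deterministic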
- hash := storage.Address(hash0[:])
- chunk := storage.NewChunk(hash, hash)
- err = localStore.Put(context.TODO(), chunk)
- if err != nil {
- t.Fatalf("Expected no err got %v", err)
- }
- err = tester.TestExchanges(p2ptest.Exchange{
- Expects: []p2ptest.Expect{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: nil,
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- }, p2ptest.Exchange{
- Label: "RetrieveRequestMsg",
- Triggers: []p2ptest.Trigger{
- {
- Code: 5,
- Msg: &RetrieveRequestMsg{
- Addr: hash,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- HandoverProof: &HandoverProof{
- Handover: &Handover{},
- },
- Hashes: hash,
- From: 0,
- // TODO: why is this 32???
- To: 32,
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- hash = storage.Address(hash1[:])
- chunk = storage.NewChunk(hash, hash1[:])
- err = localStore.Put(context.TODO(), chunk)
- if err != nil {
- t.Fatalf("Expected no err got %v", err)
- }
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "RetrieveRequestMsg",
- Triggers: []p2ptest.Trigger{
- {
- Code: 5,
- Msg: &RetrieveRequestMsg{
- Addr: hash,
- SkipCheck: true,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 6,
- Msg: &ChunkDeliveryMsg{
- Addr: hash,
- SData: hash,
- },
- Peer: node.ID(),
- },
- },
- })
- if err != nil {
- t.Fatal(err)
- }
- }
- // if there is one peer in the Kademlia, RequestFromPeers should return it
- func TestRequestFromPeers(t *testing.T) {
- dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
- addr := network.RandomAddr()
- to := network.NewKademlia(addr.OAddr, network.NewKadParams())
- delivery := NewDelivery(to, nil)
- protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
- peer := network.NewPeer(&network.BzzPeer{
- BzzAddr: network.RandomAddr(),
- LightNode: false,
- Peer: protocolsPeer,
- }, to)
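- //register the peer with the kademlia so RequestFromPeers can find it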
- to.On(peer)
- r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
- // an empty priority queue has to be created to prevent a goroutine from being called after the test has finished
- sp := &Peer{
- Peer: protocolsPeer,
- pq: pq.New(int(PriorityQueue), PriorityQueueCap),
- streamer: r,
- }
- r.setPeer(sp)
- req := network.NewRequest(
- storage.Address(hash0[:]),
- true,
- &sync.Map{},
- )
- ctx := context.Background()
- id, _, err := delivery.RequestFromPeers(ctx, req)
- if err != nil {
- t.Fatal(err)
- }
- if *id != dummyPeerID {
- t.Fatalf("Expected an id, got %v", id)
- }
- }
- // RequestFromPeers should not return light nodes
- func TestRequestFromPeersWithLightNode(t *testing.T) {
- dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
- addr := network.RandomAddr()
- to := network.NewKademlia(addr.OAddr, network.NewKadParams())
- delivery := NewDelivery(to, nil)
- protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
- // setting up a lightnode
- peer := network.NewPeer(&network.BzzPeer{
- BzzAddr: network.RandomAddr(),
- LightNode: true,
- Peer: protocolsPeer,
- }, to)
- to.On(peer)
- r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
- // an empty priority queue has to be created to prevent a goroutine from being called after the test has finished
- sp := &Peer{
- Peer: protocolsPeer,
- pq: pq.New(int(PriorityQueue), PriorityQueueCap),
- streamer: r,
- }
- r.setPeer(sp)
- req := network.NewRequest(
- storage.Address(hash0[:]),
- true,
- &sync.Map{},
- )
- ctx := context.Background()
- // making a request which should return with "no peer found"
- _, _, err := delivery.RequestFromPeers(ctx, req)
- expectedError := "no peer found"
- if err == nil || err.Error() != expectedError {
- t.Fatalf("expected '%v', got %v", expectedError, err)
- }
- }
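- // TestStreamerDownstreamChunkDeliveryMsgExchange registers a client for a custom "foo" stream,
- // subscribes to it, triggers a ChunkDeliveryMsg from the peer, and verifies that the delivered
- // chunk ends up in the local store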
- func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
- tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
- Retrieval: RetrievalDisabled,
- Syncing: SyncingDisabled,
- })
- defer teardown()
- if err != nil {
- t.Fatal(err)
- }
- streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
- return &testClient{
- t: t,
- }, nil
- })
- node := tester.Nodes[0]
- //subscribe to custom stream
- stream := NewStream("foo", "", true)
- err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- chunkKey := hash0[:]
- chunkData := hash1[:]
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Expects: []p2ptest.Expect{
- { //first expect subscription to the custom stream...
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: NewRange(5, 8),
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- },
- p2ptest.Exchange{
- Label: "ChunkDelivery message",
- Triggers: []p2ptest.Trigger{
- { //...then trigger a chunk delivery for the given chunk from the peer so that
- //the local node gets the chunk delivered
- Code: 6,
- Msg: &ChunkDeliveryMsg{
- Addr: chunkKey,
- SData: chunkData,
- },
- Peer: node.ID(),
- },
- },
- })
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
- defer cancel()
- // wait for the chunk to get stored
- storedChunk, err := localStore.Get(ctx, chunkKey)
- for err != nil {
- select {
- case <-ctx.Done():
- t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
- default:
- }
- storedChunk, err = localStore.Get(ctx, chunkKey)
- time.Sleep(50 * time.Millisecond)
- }
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- if !bytes.Equal(storedChunk.Data(), chunkData) {
- t.Fatal("Retrieved chunk has different data than original")
- }
- }
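- // TestDeliveryFromNodes runs the delivery test for 2, 4, 8 and 16 nodes, both with and
- // without SkipCheck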
- func TestDeliveryFromNodes(t *testing.T) {
- testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
- testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
- testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
- testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
- testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
- }
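- // testDeliveryFromNodes sets up a chain of simulated nodes, uploads a random file whose chunks
- // are distributed round-robin over all nodes except the pivot, and then checks that the pivot
- // node can retrieve the complete file via its root hash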
- func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
- t.Skip("temporarily disabled as simulations.WaitTillHealthy cannot be trusted")
- sim := simulation.New(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- node := ctx.Config.Node()
- addr := network.NewAddr(node)
- store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
- if err != nil {
- return nil, nil, err
- }
- bucket.Store(bucketKeyStore, store)
- cleanup = func() {
- os.RemoveAll(datadir)
- store.Close()
- }
- localStore := store.(*storage.LocalStore)
- netStore, err := storage.NewNetStore(localStore, nil)
- if err != nil {
- return nil, nil, err
- }
- kad := network.NewKademlia(addr.Over(), network.NewKadParams())
- delivery := NewDelivery(kad, netStore)
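- //wire the netstore so that chunks missing locally are requested from peers via delivery.RequestFromPeers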
- netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- SkipCheck: skipCheck,
- Syncing: SyncingDisabled,
- Retrieval: RetrievalEnabled,
- }, nil)
- bucket.Store(bucketKeyRegistry, r)
- fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
- bucket.Store(bucketKeyFileStore, fileStore)
- return r, cleanup, nil
- },
- })
- defer sim.Close()
- log.Info("Adding nodes to simulation")
- _, err := sim.AddNodesAndConnectChain(nodes)
- if err != nil {
- t.Fatal(err)
- }
- log.Info("Starting simulation")
- ctx := context.Background()
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
- nodeIDs := sim.UpNodeIDs()
- //determine the pivot node to be the first node of the simulation
- pivot := nodeIDs[0]
- //distribute chunks of a random file into the stores of all nodes except the pivot
- //we will do this by creating a file store with an underlying round-robin store:
- //the file store will create a hash for the uploaded file, but every chunk will be
- //distributed to different nodes via round-robin scheduling
- log.Debug("Writing file to round-robin file store")
- //to do this, we create an array of chunk stores (one for every node except the pivot node)
- stores := make([]storage.ChunkStore, len(nodeIDs)-1)
- //we then need to get all stores from the sim....
- lStores := sim.NodesItems(bucketKeyStore)
- i := 0
- //...iterate the buckets...
- for id, bucketVal := range lStores {
- //...and remove the one which is the pivot node
- if id == pivot {
- continue
- }
- //the other ones are added to the array...
- stores[i] = bucketVal.(storage.ChunkStore)
- i++
- }
- //...which then gets passed to the round-robin file store
- roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
- //now we can actually upload a (random) file to the round-robin store
- size := chunkCount * chunkSize
- log.Debug("Storing data to file store")
- fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
- // wait until all chunks stored
- if err != nil {
- return err
- }
- err = wait(ctx)
- if err != nil {
- return err
- }
- log.Debug("Waiting for kademlia")
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
- return err
- }
- //get the pivot node's filestore
- item, ok := sim.NodeItem(pivot, bucketKeyFileStore)
- if !ok {
- return fmt.Errorf("No filestore")
- }
- pivotFileStore := item.(*storage.FileStore)
- log.Debug("Starting retrieval routine")
- go func() {
- // start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
- // we must wait for the peer connections to have started before requesting
- n, err := readAll(pivotFileStore, fileHash)
- log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
- if err != nil {
- t.Fatalf("requesting chunks action error: %v", err)
- }
- }()
- log.Debug("Watching for disconnections")
- disconnections := sim.PeerEvents(
- context.Background(),
- sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Drop(),
- )
- go func() {
- for d := range disconnections {
- if d.Error != nil {
- log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- t.Error(d.Error)
- }
- }
- }()
- //finally check that the pivot node gets all chunks via the root hash
- log.Debug("Check retrieval")
- total, err := readAll(pivotFileStore, fileHash)
- if err != nil {
- return err
- }
- log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v", fileHash, total, size))
- if total != int64(size) {
- return fmt.Errorf("Test failed, chunks not available on all nodes")
- }
- log.Debug("Test terminated successfully")
- return nil
- })
- if result.Error != nil {
- t.Fatal(result.Error)
- }
- }
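- // BenchmarkDeliveryFromNodesWithoutCheck benchmarks retrieval with SkipCheck enabled
- // for 2 to 16 nodes and 32 to 128 chunks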
- func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
- for chunks := 32; chunks <= 128; chunks *= 2 {
- for i := 2; i < 32; i *= 2 {
- b.Run(
- fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
- func(b *testing.B) {
- benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
- },
- )
- }
- }
- }
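- // BenchmarkDeliveryFromNodesWithCheck runs the same benchmark with SkipCheck disabled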
- func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
- for chunks := 32; chunks <= 128; chunks *= 2 {
- for i := 2; i < 32; i *= 2 {
- b.Run(
- fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
- func(b *testing.B) {
- benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
- },
- )
- }
- }
- }
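- // benchmarkDeliveryFromNodes sets up a chain of simulated nodes, uploads chunkCount random chunks
- // to the last node and measures how long the first (pivot) node takes to fetch all of them
- // through its netstore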
- func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
- sim := simulation.New(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- node := ctx.Config.Node()
- addr := network.NewAddr(node)
- store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
- if err != nil {
- return nil, nil, err
- }
- bucket.Store(bucketKeyStore, store)
- cleanup = func() {
- os.RemoveAll(datadir)
- store.Close()
- }
- localStore := store.(*storage.LocalStore)
- netStore, err := storage.NewNetStore(localStore, nil)
- if err != nil {
- return nil, nil, err
- }
- kad := network.NewKademlia(addr.Over(), network.NewKadParams())
- delivery := NewDelivery(kad, netStore)
- netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- SkipCheck: skipCheck,
- Syncing: SyncingDisabled,
- Retrieval: RetrievalDisabled,
- SyncUpdateDelay: 0,
- }, nil)
- fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
- bucket.Store(bucketKeyFileStore, fileStore)
- return r, cleanup, nil
- },
- })
- defer sim.Close()
- log.Info("Initializing test config")
- _, err := sim.AddNodesAndConnectChain(nodes)
- if err != nil {
- b.Fatal(err)
- }
- ctx := context.Background()
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
- nodeIDs := sim.UpNodeIDs()
- node := nodeIDs[len(nodeIDs)-1]
- item, ok := sim.NodeItem(node, bucketKeyFileStore)
- if !ok {
- b.Fatal("No filestore")
- }
- remoteFileStore := item.(*storage.FileStore)
- pivotNode := nodeIDs[0]
- item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
- if !ok {
- b.Fatal("No filestore")
- }
- netStore := item.(*storage.NetStore)
- if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
- return err
- }
- disconnections := sim.PeerEvents(
- context.Background(),
- sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Drop(),
- )
- go func() {
- for d := range disconnections {
- if d.Error != nil {
- log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- b.Error(d.Error)
- }
- }
- }()
- // benchmark loop
- b.ResetTimer()
- b.StopTimer()
- Loop:
- for i := 0; i < b.N; i++ {
- // uploading chunkCount random chunks to the last node
- hashes := make([]storage.Address, chunkCount)
- for i := 0; i < chunkCount; i++ {
- // create actual size real chunks
- ctx := context.TODO()
- hash, wait, err := remoteFileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
- if err != nil {
- b.Fatalf("expected no error. got %v", err)
- }
- // wait until all chunks stored
- err = wait(ctx)
- if err != nil {
- b.Fatalf("expected no error. got %v", err)
- }
- // collect the hashes
- hashes[i] = hash
- }
- // now benchmark the actual retrieval
- // netstore.Get is called for each hash in a go routine and errors are collected
- b.StartTimer()
- errs := make(chan error)
- for _, hash := range hashes {
- go func(h storage.Address) {
- _, err := netStore.Get(ctx, h)
- log.Warn("test check netstore get", "hash", h, "err", err)
- errs <- err
- }(hash)
- }
- // count and report retrieval errors
- // if there are misses then chunk timeout is too low for the distance and volume (?)
- var total, misses int
- for err := range errs {
- if err != nil {
- log.Warn(err.Error())
- misses++
- }
- total++
- if total == chunkCount {
- break
- }
- }
- b.StopTimer()
- if misses > 0 {
- err = fmt.Errorf("%v chunk not found out of %v", misses, total)
- break Loop
- }
- }
- if err != nil {
- b.Fatal(err)
- }
- return nil
- })
- if result.Error != nil {
- b.Fatal(result.Error)
- }
- }